• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright (c) 2013 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Test harness for chromium clang tools."""
7
8import argparse
9import difflib
10import glob
11import json
12import os
13import os.path
14import shutil
15import subprocess
16import sys
17
18
def _RunGit(args):
  """Invokes git with the given argument list, raising on non-zero exit.

  On Windows the batch wrapper (git.bat) must be used so that the call
  resolves through the depot_tools shim.
  """
  git = 'git.bat' if sys.platform == 'win32' else 'git'
  subprocess.check_call([git] + args)
25
26
27def _GenerateCompileCommands(files, include_paths):
28  """Returns a JSON string containing a compilation database for the input."""
29  # Note: in theory, backslashes in the compile DB should work but the tools
30  # that write compile DBs and the tools that read them don't agree on the
31  # escaping convention: https://llvm.org/bugs/show_bug.cgi?id=19687
32  files = [f.replace('\\', '/') for f in files]
33  include_path_flags = ' '.join('-I %s' % include_path.replace('\\', '/')
34                                for include_path in include_paths)
35  return json.dumps([{'directory': os.path.dirname(f),
36                      'command': 'clang++ -std=c++14 -fsyntax-only %s -c %s' % (
37                          include_path_flags, os.path.basename(f)),
38                      'file': os.path.basename(f)} for f in files], indent=2)
39
40
41def _NumberOfTestsToString(tests):
42  """Returns an English describing the number of tests."""
43  return '%d test%s' % (tests, 's' if tests != 1 else '')
44
45
def _ApplyTool(tools_clang_scripts_directory,
               tool_to_test,
               tool_path,
               tool_args,
               test_directory_for_tool,
               actual_files,
               apply_edits):
  """Runs the clang tool under test over the staged test files.

  Args:
    tools_clang_scripts_directory: Directory containing run_tool.py,
        extract_edits.py and apply_edits.py.
    tool_to_test: Name of the clang tool to run.
    tool_path: Optional path to the directory containing the tool binary.
    tool_args: Optional list of extra arguments forwarded to the tool.
    test_directory_for_tool: Directory holding the tool's test inputs and
        the generated compilation database.
    actual_files: Paths of the copied "-actual" test files to process.
    apply_edits: If True, pipes run_tool.py's output through extract_edits.py
        and apply_edits.py, then reformats the edited files; otherwise the raw
        tool output is written to actual_files[0].

  Returns:
    0 on success, or the exit code of the first failing subprocess.
  """
  try:
    # Stage the test files in the git index. If they aren't staged, then
    # run_tool.py will skip them when applying replacements.
    args = ['add']
    args.extend(actual_files)
    _RunGit(args)

    # Launch the following pipeline if |apply_edits| is True:
    #     run_tool.py ... | extract_edits.py | apply_edits.py ...
    # Otherwise just the first step is done and the result is written to
    #   actual_files[0].
    processes = []
    args = ['python',
            os.path.join(tools_clang_scripts_directory, 'run_tool.py')]
    # Tests may supply extra run_tool.py flags via a run_tool.args file,
    # one argument per line.
    extra_run_tool_args_path = os.path.join(test_directory_for_tool,
                                            'run_tool.args')
    if os.path.exists(extra_run_tool_args_path):
      with open(extra_run_tool_args_path, 'r') as extra_run_tool_args_file:
        extra_run_tool_args = extra_run_tool_args_file.readlines()
        args.extend([arg.strip() for arg in extra_run_tool_args])
    args.extend(['--tool', tool_to_test, '-p', test_directory_for_tool])

    if tool_path:
      args.extend(['--tool-path', tool_path])
    if tool_args:
      for arg in tool_args:
        args.append('--tool-arg=%s' % arg)

    args.extend(actual_files)
    processes.append(subprocess.Popen(args, stdout=subprocess.PIPE))

    if apply_edits:
      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'extract_edits.py')
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))

      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'apply_edits.py'), '-p',
          test_directory_for_tool
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))

    # Wait for the pipeline to finish running + check exit codes.
    stdout, _ = processes[-1].communicate()
    for process in processes:
      process.wait()
      if process.returncode != 0:
        # Single-argument print() behaves identically on Python 2 and keeps
        # this file parseable under Python 3 (the bare print statement is a
        # syntax error there).
        print('Failure while running the tool.')
        return process.returncode

    if apply_edits:
      # Reformat the resulting edits via: git cl format.
      args = ['cl', 'format']
      args.extend(actual_files)
      _RunGit(args)
    else:
      # NOTE(review): under Python 3, |stdout| would be bytes while the file
      # is opened in text mode; this write relies on running under Python 2.
      with open(actual_files[0], 'w') as output_file:
        output_file.write(stdout)

    return 0

  finally:
    # No matter what, unstage the git changes we made earlier to avoid
    # polluting the index.
    args = ['reset', '--quiet', 'HEAD']
    args.extend(actual_files)
    _RunGit(args)
125
126
def main(argv):
  """Runs the named clang tool over its test inputs and diffs the results.

  Args:
    argv: Command-line arguments (excluding the program name).

  Returns:
    0/None if every test passes, non-zero otherwise.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--apply-edits',
      action='store_true',
      help='Applies the edits to the original test files and compares the '
           'reformatted new files with the expected files.')
  parser.add_argument(
      '--tool-arg', nargs='?', action='append',
      help='optional arguments passed to the tool')
  parser.add_argument(
      '--tool-path', nargs='?',
      help='optional path to the tool directory')
  parser.add_argument('tool_name',
                      nargs=1,
                      help='Clang tool to be tested.')
  args = parser.parse_args(argv)
  tool_to_test = args.tool_name[0]
  # Single-argument print() output is identical on Python 2 and keeps this
  # file parseable under Python 3 (the bare print statement is a syntax
  # error there).
  print('\nTesting %s\n' % tool_to_test)
  tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
  tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
  test_directory_for_tool = os.path.join(
      tools_clang_directory, tool_to_test, 'tests')
  compile_database = os.path.join(test_directory_for_tool,
                                  'compile_commands.json')
  source_files = glob.glob(os.path.join(test_directory_for_tool,
                                        '*-original.cc'))
  # Without --apply-edits the raw tool output is compared against a .txt
  # golden file; with --apply-edits the rewritten source is compared against
  # a .cc golden file.
  ext = 'cc' if args.apply_edits else 'txt'
  actual_files = ['-'.join([source_file.rsplit('-', 1)[0], 'actual.cc'])
                  for source_file in source_files]
  expected_files = ['-'.join([source_file.rsplit('-', 1)[0], 'expected.' + ext])
                    for source_file in source_files]
  if not args.apply_edits and len(actual_files) != 1:
    print('Only one test file is expected for testing without apply-edits.')
    return 1

  include_paths = []
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory, '../..')))
  # Many gtest and gmock headers expect to have testing/gtest/include and/or
  # testing/gmock/include in the include search path.
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gtest/include')))
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gmock/include')))

  if len(actual_files) == 0:
    print('Tool "%s" does not have compatible test files.' % tool_to_test)
    return 1

  # Set up the test environment.
  for source, actual in zip(source_files, actual_files):
    shutil.copyfile(source, actual)
  # Generate a temporary compilation database to run the tool over.
  with open(compile_database, 'w') as f:
    f.write(_GenerateCompileCommands(actual_files, include_paths))

  # Run the tool.
  os.chdir(test_directory_for_tool)
  exitcode = _ApplyTool(tools_clang_scripts_directory, tool_to_test,
                        args.tool_path, args.tool_arg,
                        test_directory_for_tool, actual_files,
                        args.apply_edits)
  if exitcode != 0:
    return exitcode

  # Compare actual-vs-expected results.
  passed = 0
  failed = 0
  for expected, actual in zip(expected_files, actual_files):
    print('[ RUN      ] %s' % os.path.relpath(actual))
    expected_output = actual_output = None
    with open(expected, 'r') as f:
      expected_output = f.read().splitlines()
    with open(actual, 'r') as f:
      actual_output = f.read().splitlines()
    if actual_output != expected_output:
      failed += 1
      for line in difflib.unified_diff(expected_output, actual_output,
                                       fromfile=os.path.relpath(expected),
                                       tofile=os.path.relpath(actual)):
        sys.stdout.write(line)
      print('[  FAILED  ] %s' % os.path.relpath(actual))
      # Don't clean up the file on failure, so the results can be referenced
      # more easily.
      continue
    print('[       OK ] %s' % os.path.relpath(actual))
    passed += 1
    os.remove(actual)

  # Keep the compilation database around on failure for easier debugging.
  if failed == 0:
    os.remove(compile_database)

  print('[==========] %s ran.' % _NumberOfTestsToString(len(source_files)))
  if passed > 0:
    print('[  PASSED  ] %s.' % _NumberOfTestsToString(passed))
  if failed > 0:
    print('[  FAILED  ] %s.' % _NumberOfTestsToString(failed))
    return 1
230
231
# Entry point: forward the command-line arguments (minus the program name)
# and propagate main()'s return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
234