# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import codecs
import contextlib
import json
import logging
import os
import platform
import subprocess
import sys
import tempfile
import time
import traceback

logging.basicConfig(level=logging.INFO)

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# //testing imports.
import test_env
if sys.platform.startswith('linux'):
  import xvfb

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))

# Use result_sink.py in //build/util/lib/results/ for uploading the
# results of non-isolated script tests.
BUILD_UTIL_DIR = os.path.join(SRC_DIR, 'build', 'util')
sys.path.insert(0, BUILD_UTIL_DIR)
try:
  from lib.results import result_sink
  from lib.results import result_types
except ImportError:
  # Some build-time scripts import this file and run into issues with
  # result_sink's dependency on requests, since we can't depend on vpython
  # at build time. So silently swallow the error in that case.
  result_sink = None

# run_web_tests.py returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflow or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101

# Exit code to indicate an infrastructure issue.
INFRA_FAILURE_EXIT_CODE = 87

# ACL might be explicitly set or inherited.
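# The SID S-1-15-2-2 used below is the Windows well-known SID for
# 'ALL RESTRICTED APPLICATION PACKAGES', which LPAC processes run under.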
CORRECT_ACL_VARIANTS = [
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(OI)(CI)(RX)',
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(I)(OI)(CI)(RX)',
]

# pylint: disable=useless-object-inheritance


def set_lpac_acls(acl_dir, is_test_script=False):
  """Sets LPAC ACLs on a directory. Windows 10 only."""
  if platform.release() != '10':
    return
  try:
    existing_acls = subprocess.check_output(['icacls', acl_dir],
                                            stderr=subprocess.STDOUT,
                                            universal_newlines=True)
  except subprocess.CalledProcessError as e:
    logging.error('Failed to retrieve existing ACLs for directory %s', acl_dir)
    logging.error('Command output: %s', e.output)
    sys.exit(e.returncode)
  acls_correct = False
  for acl in CORRECT_ACL_VARIANTS:
    if acl in existing_acls:
      acls_correct = True
  if not acls_correct:
    try:
      subprocess.check_output(
          ['icacls', acl_dir, '/grant', '*S-1-15-2-2:(OI)(CI)(RX)'],
          stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      logging.error('Failed to grant ACLs for directory %s', acl_dir)
      logging.error('Command output: %s', e.output)
      sys.exit(e.returncode)
  if not is_test_script:
    return
  # Bots running on luci use hardlinks that do not have correct ACLs, so these
  # must be manually overridden here.
  with temporary_file() as tempfile_path:
    subprocess.check_output(
        ['icacls', acl_dir, '/save', tempfile_path, '/t', '/q', '/c'],
        stderr=subprocess.STDOUT)
    # ACL files look like this, e.g. for c:\a\b\c\d\Release_x64
    #
    # Release_x64
    # D:AI(A;OICI;0x1200a9;;;S-1-15-2-2)(A;OICIID;FA;;;BA)
    # Release_x64\icudtl_extra.dat
    # D:AI(A;ID;0x1200a9;;;S-1-15-2-2)(A;ID;FA;;;BA)(A;ID;0x1301bf;;;BU)
    with codecs.open(tempfile_path, encoding='utf_16_le') as aclfile:
      # The saved ACL file alternates between a filename line and the ACL
      # line for that file.
      for filename in aclfile:
        acl = next(aclfile).strip()
        full_filename = os.path.abspath(
            os.path.join(acl_dir, os.pardir, filename.strip()))
        if 'S-1-15-2-2' in acl:
          continue
        if os.path.isdir(full_filename):
          continue
        subprocess.check_output(
            ['icacls', full_filename, '/grant', '*S-1-15-2-2:(RX)'],
            stderr=subprocess.STDOUT)


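# Illustrative caller of run_script() (assumed, not part of this file):
#   funcs = {'run': main_run, 'compile_targets': main_compile_targets}
#   sys.exit(run_script(sys.argv[1:], funcs))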
def run_script(argv, funcs):

  def parse_json(path):
    with open(path) as f:
      return json.load(f)

  parser = argparse.ArgumentParser()
  parser.add_argument('--build-dir',
                      help='Absolute path to build-dir.',
                      required=True)
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build and are the same for
  # every script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument('--output',
                          type=argparse.FileType('w'),
                          required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  compile_targets_parser = subparsers.add_parser('compile_targets')
  compile_targets_parser.add_argument('--output',
                                      type=argparse.FileType('w'),
                                      required=True)
  compile_targets_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)


def run_command(argv, env=None, cwd=None):
  print('Running %r in %r (env: %r)' % (argv, cwd, env), file=sys.stderr)
  rc = test_env.run_command(argv, env=env, cwd=cwd)
  print('Command %r returned exit code %d' % (argv, rc), file=sys.stderr)
  return rc


@contextlib.contextmanager
def temporary_file():
  """Yields the path to a closed named temporary file; deletes it on exit."""
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)


def record_local_script_results(name, output_fd, failures, valid):
  """Records the results of a script test to a local JSON file and to RDB.

  For legacy reasons, local script tests (i.e. script tests that run
  locally and don't conform to the isolated-test API) are expected to
  record their results using a specific format. This method encapsulates
  that format and also uploads those results to ResultDB.

  Args:
    name: Name of the script test.
    output_fd: A .write()-supporting file descriptor to write results to.
    failures: List of strings representing test failures.
    valid: Whether the results are valid.
  """
  local_script_results = {'valid': valid, 'failures': failures}
  with open(output_fd.name, 'w') as fd:
    json.dump(local_script_results, fd)

  if not result_sink:
    return
  result_sink_client = result_sink.TryInitClient()
  if not result_sink_client:
    return
  status = result_types.PASS
  if not valid:
    status = result_types.UNKNOWN
  elif failures:
    status = result_types.FAIL
  test_log = '\n'.join(failures)
  result_sink_client.Post(name, status, None, test_log, None)


def parse_common_test_results(json_results, test_separator='/'):

  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see blinkpy.web_tests.layout_package.json_results_generator.
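    # e.g. (illustrative): {'a': {'b': {'actual': 'PASS'}}} flattens to
    # {'a/b': {'actual': 'PASS'}} with the default '/' separator.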
    result = {}
    for name, data in trie.items():
      if prefix:
        name = prefix + test_separator + name
      if len(data) and 'actual' not in data and 'expected' not in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
      'passes': {},
      'unexpected_passes': {},
      'failures': {},
      'unexpected_failures': {},
      'flakes': {},
      'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and parsing the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).items():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ... Why are we assigning result
      # instead of actual_result here? Do we even need these things to be
      # dicts, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results


def write_interrupted_test_results_to(filepath, test_start_time):
  """Writes a test results JSON file* to filepath.

  This JSON file is formatted to explain that something went wrong.

  *src/docs/testing/json_test_results_format.md

  Args:
    filepath: A path to a file to write the output to.
    test_start_time: The start time of the test run expressed as a
      floating-point offset in seconds from the UNIX epoch.
  """
  with open(filepath, 'w') as fh:
    output = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    json.dump(output, fh)


def get_gtest_summary_passes(output):
  """Returns a mapping of test to a boolean indicating if the test passed.

  Only partially parses the format. This code is based on code in tools/build,
  specifically
  https://chromium.googlesource.com/chromium/tools/build/+/17fef98756c5f250b20bf716829a0004857235ff/scripts/slave/recipe_modules/test_utils/util.py#189
  """
  if not output:
    return {}

  mapping = {}

  for cur_iteration_data in output.get('per_iteration_data', []):
    for test_fullname, results in cur_iteration_data.items():
      # `results` is a list with one entry per test try. The last entry is
      # the final result.
      last_result = results[-1]

      if last_result['status'] == 'SUCCESS':
        mapping[test_fullname] = True
      elif last_result['status'] != 'SKIPPED':
        mapping[test_fullname] = False

  return mapping


def extract_filter_list(filter_list):
  """Helper for isolated script test wrappers. Parses the
  --isolated-script-test-filter command line argument. Currently, a double
  colon ('::') is used as the separator between test names, because a single
  colon may be used in the names of perf benchmarks, which contain URLs.
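
  Example (illustrative):
    extract_filter_list('MySuite.TestA::MySuite.TestB')
    returns ['MySuite.TestA', 'MySuite.TestB'].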
308  """
309  return filter_list.split('::')
310
311
def add_emulator_args(parser):
  parser.add_argument(
      '--avd-config',
      type=os.path.realpath,
      help=('Path to the avd config. Required for Android products. '
            '(See //tools/android/avd/proto for message definition '
            'and existing *.textpb files.)'))
  parser.add_argument('--emulator-window',
                      action='store_true',
                      default=False,
                      help='Enable graphical window display on the emulator.')


class BaseIsolatedScriptArgsAdapter:
  """The base class for all script adapters that need to translate flags
  set by the isolated script test contract into the specific test script's
  flags.
  """

  def __init__(self):
    self._parser = argparse.ArgumentParser()
    self._options = None
    self._rest_args = None
    self._script_writes_output_json = None
    self._parser.add_argument(
        '--isolated-outdir',
        type=str,
        required=False,
        help='value of $ISOLATED_OUTDIR from swarming task')
    self._parser.add_argument('--isolated-script-test-output',
                              type=os.path.abspath,
                              required=False,
                              help='path to write test results JSON object to')
    self._parser.add_argument('--isolated-script-test-filter',
                              type=str,
                              required=False)
    self._parser.add_argument('--isolated-script-test-repeat',
                              type=int,
                              required=False)
    self._parser.add_argument('--isolated-script-test-launcher-retry-limit',
                              type=int,
                              required=False)
    self._parser.add_argument('--isolated-script-test-also-run-disabled-tests',
                              default=False,
                              action='store_true',
                              required=False)

    self._parser.add_argument(
        '--xvfb',
        help='Start Xvfb. Ignored on unsupported platforms.',
        action='store_true')
    # Used to create the correct subclass.
    self._parser.add_argument('--script-type',
                              choices=['isolated', 'typ', 'bare'],
                              help='Which script adapter to use')

    # Arguments that are ignored, but added here because it's easier to
    # ignore them than to update bot configs to not pass them.
    self._parser.add_argument('--isolated-script-test-chartjson-output')
    self._parser.add_argument('--isolated-script-test-perf-output')

  def parse_args(self, args=None):
    self._options, self._rest_args = self._parser.parse_known_args(args)

  @property
  def parser(self):
    return self._parser

  @property
  def options(self):
    return self._options

  @property
  def rest_args(self):
    return self._rest_args

  def generate_test_output_args(self, output):
    del output  # unused
    return []

  def generate_test_filter_args(self, test_filter_str):
    del test_filter_str  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_repeat_args(self, repeat_count):
    del repeat_count  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_launcher_retry_limit_args(self, retry_limit):
    del retry_limit  # unused
    raise RuntimeError('Flag not supported.')

  def generate_sharding_args(self, total_shards, shard_index):
    del total_shards, shard_index  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_also_run_disabled_tests_args(self):
    raise RuntimeError('Flag not supported.')

  def select_python_executable(self):
    return sys.executable

  def generate_isolated_script_cmd(self):
    isolated_script_cmd = [self.select_python_executable()] + self.rest_args

    if self.options.isolated_script_test_output:
      output_args = self.generate_test_output_args(
          self.options.isolated_script_test_output)
      self._script_writes_output_json = bool(output_args)
      isolated_script_cmd += output_args

    # Augment test filter args if needed.
    if self.options.isolated_script_test_filter:
      isolated_script_cmd += self.generate_test_filter_args(
          self.options.isolated_script_test_filter)

    # Augment test repeat args if needed.
    if self.options.isolated_script_test_repeat is not None:
      isolated_script_cmd += self.generate_test_repeat_args(
          self.options.isolated_script_test_repeat)

    # Augment test launcher retry limit args if needed.
    if self.options.isolated_script_test_launcher_retry_limit is not None:
      isolated_script_cmd += self.generate_test_launcher_retry_limit_args(
          self.options.isolated_script_test_launcher_retry_limit)

    # Augment also-run-disabled-tests args if needed.
    if self.options.isolated_script_test_also_run_disabled_tests:
      isolated_script_cmd += self.generate_test_also_run_disabled_tests_args()

    # Augment shard args if needed.
    env = os.environ.copy()

    total_shards = None
    shard_index = None

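    # GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX are typically set in the task's
    # environment by the swarming infrastructure when the suite is sharded.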
    if 'GTEST_TOTAL_SHARDS' in env:
      total_shards = int(env['GTEST_TOTAL_SHARDS'])
    if 'GTEST_SHARD_INDEX' in env:
      shard_index = int(env['GTEST_SHARD_INDEX'])
    if total_shards is not None and shard_index is not None:
      isolated_script_cmd += self.generate_sharding_args(
          total_shards, shard_index)

    return isolated_script_cmd

  def clean_up_after_test_run(self):
    pass

  def do_pre_test_run_tasks(self):
    pass

  def do_post_test_run_tasks(self):
    pass

  def _write_simple_test_results(self, start_time, exit_code):
    if exit_code is None:
      failure_type = 'CRASH'
    elif exit_code == 0:
      failure_type = 'PASS'
    else:
      failure_type = 'FAIL'

    test_name = os.path.basename(self._rest_args[0])
    # See //docs/testing/json_test_results_format.md.
    results_json = {
        'version': 3,
        'interrupted': False,
        'num_failures_by_type': {
            failure_type: 1
        },
        'path_delimiter': '/',
        'seconds_since_epoch': start_time,
        'tests': {
            test_name: {
                'expected': 'PASS',
                'actual': failure_type,
                'time': time.time() - start_time,
            },
        },
    }
    with open(self.options.isolated_script_test_output, 'w') as fp:
      json.dump(results_json, fp)

  def run_test(self, cwd=None):
    self.parse_args()
    cmd = self.generate_isolated_script_cmd()

    self.do_pre_test_run_tasks()

    env = os.environ.copy()

    env['CHROME_HEADLESS'] = '1'
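    # CHROME_HEADLESS is a Chromium-wide convention indicating an automated,
    # bot-style run (assumed; harnesses use it to suppress interactive
    # behavior).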
    print('Running command: %s\nwith env: %r' % (' '.join(cmd), env))
    sys.stdout.flush()
    start_time = time.time()
    try:
      if self.options.xvfb and sys.platform.startswith('linux'):
        exit_code = xvfb.run_executable(cmd, env, cwd=cwd)
      else:
        exit_code = test_env.run_command(cmd, env=env, cwd=cwd, log=False)
      print('Command returned exit code %d' % exit_code)
      sys.stdout.flush()
      self.do_post_test_run_tasks()
    except Exception:  # pylint: disable=broad-except
      traceback.print_exc()
      exit_code = None
    finally:
      self.clean_up_after_test_run()

    if (self.options.isolated_script_test_output
        and not self._script_writes_output_json):
      self._write_simple_test_results(start_time, exit_code)

    # Fall back to a generic failure code if the command never produced one.
    return exit_code if exit_code is not None else 2