# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function
import argparse
import codecs
import contextlib
import json
import logging
import os
import platform
import subprocess
import sys
import tempfile
import time
import traceback

logging.basicConfig(level=logging.INFO)

# Add src/testing/ into sys.path for importing xvfb and test_env.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import test_env
if sys.platform.startswith('linux'):
  import xvfb


SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))

# Use result_sink.py in //build/util/lib/results/ for uploading the
# results of non-isolated script tests.
BUILD_UTIL_DIR = os.path.join(SRC_DIR, 'build', 'util')
sys.path.insert(0, BUILD_UTIL_DIR)
try:
  from lib.results import result_sink
  from lib.results import result_types
except ImportError:
  # Some build-time scripts import this file and run into issues with
  # result_sink's dependency on requests, since we can't depend on vpython
  # at build time. So silently swallow the error in that case.
  result_sink = None

# run_web_tests.py returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflowing or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101


# Exit code used to indicate an infrastructure issue.
INFRA_FAILURE_EXIT_CODE = 87


# The ACL might be explicitly set or inherited.
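# (S-1-15-2-2 below is the well-known SID for 'ALL RESTRICTED APPLICATION
# PACKAGES', i.e. the LPAC identity these entries and the icacls grants
# below refer to.)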
CORRECT_ACL_VARIANTS = [
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(OI)(CI)(RX)',
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(I)(OI)(CI)(RX)'
]

# pylint: disable=useless-object-inheritance


def set_lpac_acls(acl_dir, is_test_script=False):
  """Sets LPAC (Less Privileged AppContainer) ACLs on a directory.

  Windows 10 only.
  """
  if platform.release() != '10':
    return
  try:
    existing_acls = subprocess.check_output(['icacls', acl_dir],
                                            stderr=subprocess.STDOUT,
                                            universal_newlines=True)
  except subprocess.CalledProcessError as e:
    logging.error('Failed to retrieve existing ACLs for directory %s', acl_dir)
    logging.error('Command output: %s', e.output)
    sys.exit(e.returncode)
  acls_correct = False
  for acl in CORRECT_ACL_VARIANTS:
    if acl in existing_acls:
      acls_correct = True
  if not acls_correct:
    try:
      subprocess.check_output(
          ['icacls', acl_dir, '/grant', '*S-1-15-2-2:(OI)(CI)(RX)'],
          stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      logging.error('Failed to set ACLs for directory %s', acl_dir)
      logging.error('Command output: %s', e.output)
      sys.exit(e.returncode)
  if not is_test_script:
    return
  # Bots running on LUCI use hardlinks that do not have correct ACLs, so these
  # must be manually overridden here.
  with temporary_file() as tempfile_path:
    subprocess.check_output(
        ['icacls', acl_dir, '/save', tempfile_path, '/t', '/q', '/c'],
        stderr=subprocess.STDOUT)
    # ACL files look like this, e.g. for c:\a\b\c\d\Release_x64
    #
    # Release_x64
    # D:AI(A;OICI;0x1200a9;;;S-1-15-2-2)(A;OICIID;FA;;;BA)
    # Release_x64\icudtl_extra.dat
    # D:AI(A;ID;0x1200a9;;;S-1-15-2-2)(A;ID;FA;;;BA)(A;ID;0x1301bf;;;BU)
    with codecs.open(tempfile_path, encoding='utf_16_le') as aclfile:
      # The file alternates lines: a path (relative to acl_dir's parent)
      # followed by that path's ACL string.
      for filename in aclfile:
        acl = next(aclfile).strip()
        full_filename = os.path.abspath(
            os.path.join(acl_dir, os.pardir, filename.strip()))
        if 'S-1-15-2-2' in acl:
          continue
        if os.path.isdir(full_filename):
          continue
        subprocess.check_output(
            ['icacls', full_filename, '/grant', '*S-1-15-2-2:(RX)'],
            stderr=subprocess.STDOUT)


def run_script(argv, funcs):
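  """Runs a legacy (non-isolated) script test's command-line interface.

  A minimal caller sketch, with hypothetical main_run/main_compile_targets
  handlers supplied by the individual script test:

    def main_run(args):
      ...  # run the test, record results via args.output
      return 0

    def main_compile_targets(args):
      json.dump(['my_target'], args.output)

    if __name__ == '__main__':
      sys.exit(run_script(
          sys.argv[1:],
          {'run': main_run, 'compile_targets': main_compile_targets}))
  """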
  def parse_json(path):
    with open(path) as f:
      return json.load(f)

  parser = argparse.ArgumentParser()
  # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
  parser.add_argument('--build-config-fs')
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build and are the same for
  # every script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  compile_targets_parser = subparsers.add_parser('compile_targets')
  compile_targets_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  compile_targets_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)


def run_command(argv, env=None, cwd=None):
  print('Running %r in %r (env: %r)' % (argv, cwd, env), file=sys.stderr)
  rc = test_env.run_command(argv, env=env, cwd=cwd)
  print('Command %r returned exit code %d' % (argv, rc), file=sys.stderr)
  return rc


@contextlib.contextmanager
def temporary_file():
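  """Yields the path to an empty, closed temporary file.

  The file is deleted when the context exits, e.g.:

    with temporary_file() as path:
      ...  # read from / write to 'path'
  """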
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)


def record_local_script_results(name, output_fd, failures, valid):
  """Records the results of a script test to a local JSON file and ResultDB.

  For legacy reasons, local script tests (i.e. script tests that run
  locally and that don't conform to the isolated-test API) are expected to
  record their results using a specific format. This method encapsulates
  that format and also uploads the results to ResultDB.

  Args:
    name: Name of the script test.
    output_fd: A .write()-supporting file descriptor to write results to.
    failures: List of strings representing test failures.
    valid: Whether the results are valid.
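
  The local JSON format is a small dict, e.g. for one failing test:
    {"valid": true, "failures": ["TestFoo.Bar"]}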
183  """
  local_script_results = {
      'valid': valid,
      'failures': failures
  }
  json.dump(local_script_results, output_fd)

  if not result_sink:
    return
  result_sink_client = result_sink.TryInitClient()
  if not result_sink_client:
    return
  status = result_types.PASS
  if not valid:
    status = result_types.UNKNOWN
  elif failures:
    status = result_types.FAIL
  test_log = '\n'.join(failures)
  result_sink_client.Post(name, status, None, test_log, None)


def parse_common_test_results(json_results, test_separator='/'):
  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see blinkpy.web_tests.layout_package.json_results_generator
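    # For example, with the default separator, the trie
    #   {'foo': {'bar': {'actual': 'PASS', 'expected': 'PASS'}}}
    # flattens to
    #   {'foo/bar': {'actual': 'PASS', 'expected': 'PASS'}}.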
    result = {}
    for name, data in trie.items():
      if prefix:
        name = prefix + test_separator + name
      if data and 'actual' not in data and 'expected' not in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
    'passes': {},
    'unexpected_passes': {},
    'failures': {},
    'unexpected_failures': {},
    'flakes': {},
    'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and parsing the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).items():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

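    # e.g. an actual of 'FAIL PASS' with an expected of 'PASS' counts as a
    # flake: the last attempt matched an expected or passing status after
    # at least one earlier failure.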
    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ... Why are we assigning result
      # instead of actual_result here? Do we even need these things to be
      # dicts, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results


def write_interrupted_test_results_to(filepath, test_start_time):
  """Writes a test results JSON file* to filepath.

  This JSON file is formatted to explain that something went wrong.

  *src/docs/testing/json_test_results_format.md

  Args:
    filepath: A path to a file to write the output to.
    test_start_time: The start time of the test run expressed as a
      floating-point offset in seconds from the UNIX epoch.
  """
  with open(filepath, 'w') as fh:
    output = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    json.dump(output, fh)


def get_gtest_summary_passes(output):
  """Returns a mapping of test to boolean indicating if the test passed.

  Only partially parses the format. This code is based on code in tools/build,
  specifically
  https://chromium.googlesource.com/chromium/tools/build/+/17fef98756c5f250b20bf716829a0004857235ff/scripts/slave/recipe_modules/test_utils/util.py#189
  """
  if not output:
    return {}

  mapping = {}

  for cur_iteration_data in output.get('per_iteration_data', []):
    for test_fullname, results in cur_iteration_data.items():
      # 'results' has one entry per try of the test; the last entry is the
      # final result.
      last_result = results[-1]

      if last_result['status'] == 'SUCCESS':
        mapping[test_fullname] = True
      elif last_result['status'] != 'SKIPPED':
        mapping[test_fullname] = False

  return mapping


def extract_filter_list(filter_list):
  """Helper for isolated script test wrappers. Parses the
  --isolated-script-test-filter command line argument. Currently, a double
  colon ('::') is used as the separator between test names, because a single
  colon may be used in the names of perf benchmarks, which contain URLs.
  """
  return filter_list.split('::')


def add_emulator_args(parser):
  parser.add_argument(
      '--avd-config',
      type=os.path.realpath,
      help=('Path to the avd config. Required for Android products. '
            '(See //tools/android/avd/proto for message definition '
            'and existing *.textpb files.)'))
  parser.add_argument(
      '--emulator-window',
      action='store_true',
      default=False,
      help='Enable graphical window display on the emulator.')


class BaseIsolatedScriptArgsAdapter:
  """The base class for all script adapters that need to translate flags
  set by the isolated script test contract into the specific test script's
  flags.
  """

  def __init__(self):
    self._parser = argparse.ArgumentParser()
    self._options = None
    self._rest_args = None
    self._script_writes_output_json = None
    self._parser.add_argument(
        '--isolated-outdir', type=str,
        required=False,
        help='value of $ISOLATED_OUTDIR from swarming task')
    self._parser.add_argument(
        '--isolated-script-test-output', type=os.path.abspath,
        required=False,
        help='path to write test results JSON object to')
    self._parser.add_argument(
        '--isolated-script-test-filter', type=str,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-repeat', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-launcher-retry-limit', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-also-run-disabled-tests',
        default=False, action='store_true', required=False)

    self._parser.add_argument(
        '--xvfb',
        help='Start Xvfb. Ignored on unsupported platforms.',
        action='store_true')
    # Used to create the correct subclass.
    self._parser.add_argument(
        '--script-type', choices=['isolated', 'typ', 'bare'],
        help='Which script adapter to use')

    # Arguments that are ignored, but accepted here because it's easier to
    # ignore them than to update bot configs to stop passing them.
    self._parser.add_argument('--isolated-script-test-chartjson-output')
    self._parser.add_argument('--isolated-script-test-perf-output')

  def parse_args(self, args=None):
    self._options, self._rest_args = self._parser.parse_known_args(args)

  @property
  def parser(self):
    return self._parser

  @property
  def options(self):
    return self._options

  @property
  def rest_args(self):
    return self._rest_args

  def generate_test_output_args(self, output):
    del output  # unused
    return []

  def generate_test_filter_args(self, test_filter_str):
    del test_filter_str  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_repeat_args(self, repeat_count):
    del repeat_count  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_launcher_retry_limit_args(self, retry_limit):
    del retry_limit  # unused
    raise RuntimeError('Flag not supported.')

  def generate_sharding_args(self, total_shards, shard_index):
    del total_shards, shard_index  # unused
    raise RuntimeError('Flag not supported.')

  def generate_test_also_run_disabled_tests_args(self):
    raise RuntimeError('Flag not supported.')

  def select_python_executable(self):
    return sys.executable

  def generate_isolated_script_cmd(self):
    isolated_script_cmd = [self.select_python_executable()] + self.rest_args

    if self.options.isolated_script_test_output:
      output_args = self.generate_test_output_args(
          self.options.isolated_script_test_output)
      self._script_writes_output_json = bool(output_args)
      isolated_script_cmd += output_args

    # Augment test filter args if needed
    if self.options.isolated_script_test_filter:
      isolated_script_cmd += self.generate_test_filter_args(
          self.options.isolated_script_test_filter)

    # Augment test repeat args if needed
    if self.options.isolated_script_test_repeat is not None:
      isolated_script_cmd += self.generate_test_repeat_args(
          self.options.isolated_script_test_repeat)

    # Augment test launcher retry limit args if needed
    if self.options.isolated_script_test_launcher_retry_limit is not None:
      isolated_script_cmd += self.generate_test_launcher_retry_limit_args(
          self.options.isolated_script_test_launcher_retry_limit)

    # Augment 'also run disabled tests' args if needed
    if self.options.isolated_script_test_also_run_disabled_tests:
      isolated_script_cmd += self.generate_test_also_run_disabled_tests_args()

    # Augment shard args if needed
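    # (GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX are typically injected into
    # the environment by the swarming task when the suite is sharded.)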
    env = os.environ.copy()

    total_shards = None
    shard_index = None

    if 'GTEST_TOTAL_SHARDS' in env:
      total_shards = int(env['GTEST_TOTAL_SHARDS'])
    if 'GTEST_SHARD_INDEX' in env:
      shard_index = int(env['GTEST_SHARD_INDEX'])
    if total_shards is not None and shard_index is not None:
      isolated_script_cmd += self.generate_sharding_args(
          total_shards, shard_index)

    return isolated_script_cmd

  def clean_up_after_test_run(self):
    pass

  def do_pre_test_run_tasks(self):
    pass

  def do_post_test_run_tasks(self):
    pass

  def _write_simple_test_results(self, start_time, exit_code):
    if exit_code is None:
      failure_type = 'CRASH'
    elif exit_code == 0:
      failure_type = 'PASS'
    else:
      failure_type = 'FAIL'

    test_name = os.path.basename(self._rest_args[0])
    # See //docs/testing/json_test_results_format.md
    results_json = {
        'version': 3,
        'interrupted': False,
        'num_failures_by_type': {failure_type: 1},
        'path_delimiter': '/',
        'seconds_since_epoch': start_time,
        'tests': {
            test_name: {
                'expected': 'PASS',
                'actual': failure_type,
                'time': time.time() - start_time,
            },
        },
    }
    with open(self.options.isolated_script_test_output, 'w') as fp:
      json.dump(results_json, fp)

  def run_test(self, cwd=None):
    self.parse_args()
    cmd = self.generate_isolated_script_cmd()

    self.do_pre_test_run_tasks()

    env = os.environ.copy()

    env['CHROME_HEADLESS'] = '1'
    print('Running command: %s\nwith env: %r' % (' '.join(cmd), env))
    sys.stdout.flush()
    start_time = time.time()
    try:
      if self.options.xvfb and sys.platform.startswith('linux'):
        exit_code = xvfb.run_executable(cmd, env, cwd=cwd)
      else:
        exit_code = test_env.run_command(cmd, env=env, cwd=cwd, log=False)
      print('Command returned exit code %d' % exit_code)
      sys.stdout.flush()
      self.do_post_test_run_tasks()
    except Exception:
      traceback.print_exc()
      exit_code = None
    finally:
      self.clean_up_after_test_run()

    # If the wrapped script does not write the output JSON itself, synthesize
    # a minimal results file so callers always get one.
    if (self.options.isolated_script_test_output
        and not self._script_writes_output_json):
      self._write_simple_test_results(start_time, exit_code)

    return exit_code if exit_code is not None else 2