#! /usr/bin/env vpython
#
# [VPYTHON:BEGIN]
# wheel: <
#  name: "infra/python/wheels/psutil/${vpython_platform}"
#  version: "version:5.2.2"
# >
# wheel: <
#  name: "infra/python/wheels/six-py2_py3"
#  version: "version:1.10.0"
# >
# [VPYTHON:END]
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# restricted_trace_gold_tests.py:
#   Uses Skia Gold (https://skia.org/dev/testing/skiagold) to run pixel tests with ANGLE traces.
#
#   Requires vpython to run standalone. Run with --help for usage instructions.
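#
#   Example invocation (flags illustrative; run from the build output directory
#   so the test binaries can be found):
#     vpython restricted_trace_gold_tests.py --local-pixel-tests --batch-size 5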

import argparse
import contextlib
import fnmatch
import json
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import traceback

from skia_gold import angle_skia_gold_properties
from skia_gold import angle_skia_gold_session_manager

# Add //src/testing into sys.path for importing xvfb and test_env, and
# //src/testing/scripts for importing common.
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(d(d(THIS_DIR)))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'scripts'))
# Handle the Chromium-relative directory as well. As long as one directory
# is valid, Python is happy.
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'scripts'))

import common
import test_env
import xvfb


def IsWindows():
    return sys.platform == 'cygwin' or sys.platform.startswith('win')


DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_PREFIX = 'TracePerfTest.Run/vulkan_'
DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_'
DEFAULT_BATCH_SIZE = 5
DEFAULT_LOG = 'info'

# Filters out stuff like: " I   72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+\.\d+s \w+\(\w+\)  '
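# Used by the Filter in get_skia_gold_keys: on Android, lines between the test
# runner banner and this marker are dropped before parsing the system info.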
ANDROID_BEGIN_SYSTEM_INFO = '>>ScopedMainEntryLogger'

# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'


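# Usage: `with temporary_dir('prefix_') as path:` yields a fresh directory and
# removes it (and its contents) when the block exits.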
@contextlib.contextmanager
def temporary_dir(prefix=''):
    path = tempfile.mkdtemp(prefix=prefix)
    try:
        yield path
    finally:
        logging.info('Removing temporary directory: %s', path)
        shutil.rmtree(path)


def add_skia_gold_args(parser):
    group = parser.add_argument_group('Skia Gold Arguments')
    group.add_argument('--git-revision', help='Revision being tested.', default=None)
    group.add_argument(
        '--gerrit-issue', help='For Skia Gold integration. Gerrit issue ID.', default='')
    group.add_argument(
        '--gerrit-patchset',
        help='For Skia Gold integration. Gerrit patch set number.',
        default='')
    group.add_argument(
        '--buildbucket-id', help='For Skia Gold integration. Buildbucket build ID.', default='')
    group.add_argument(
        '--bypass-skia-gold-functionality',
        action='store_true',
        default=False,
        help='Bypass all interaction with Skia Gold, effectively disabling the '
        'image comparison portion of any tests that use Gold. Only meant to '
        'be used in case a Gold outage occurs and cannot be fixed quickly.')
    local_group = group.add_mutually_exclusive_group()
    local_group.add_argument(
        '--local-pixel-tests',
        action='store_true',
        default=None,
        help='Specifies to run the test harness in local run mode or not. When '
        'run in local mode, uploading to Gold is disabled and links to '
        'help with local debugging are output. Running in local mode also '
        'implies --no-luci-auth. If both this and --no-local-pixel-tests are '
        'left unset, the test harness will attempt to detect whether it is '
        'running on a workstation or not and set this option accordingly.')
    local_group.add_argument(
        '--no-local-pixel-tests',
        action='store_false',
        dest='local_pixel_tests',
        help='Specifies to run the test harness in non-local (bot) mode. When '
        'run in this mode, data is actually uploaded to Gold and triage links '
        'are generated. If both this and --local-pixel-tests are left unset, '
        'the test harness will attempt to detect whether it is running on a '
        'workstation or not and set this option accordingly.')
    group.add_argument(
        '--no-luci-auth',
        action='store_true',
        default=False,
        help='Don\'t use the service account provided by LUCI for '
        'authentication for Skia Gold, instead relying on gsutil to be '
        'pre-authenticated. Meant for testing locally instead of on the bots.')


def run_wrapper(args, cmd, env, stdoutfile=None):
    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    else:
        return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)


def to_hex(num):
    return hex(int(num))


def to_hex_or_none(num):
    return 'None' if num is None else to_hex(num)


def to_non_empty_string_or_none(val):
    return 'None' if val == '' else str(val)


def to_non_empty_string_or_none_dict(d, key):
    return 'None' if key not in d else to_non_empty_string_or_none(d[key])


def get_binary_name(binary):
    if IsWindows():
        return '.\\%s.exe' % binary
    else:
        return './%s' % binary


def get_skia_gold_keys(args, env):
    """Get all the JSON metadata that will be passed to goldctl."""
    # All values need to be strings, otherwise goldctl fails.

    # Only call this method one time.
    if hasattr(get_skia_gold_keys, 'called') and get_skia_gold_keys.called:
        raise Exception('get_skia_gold_keys may only be called once')
    get_skia_gold_keys.called = True

    class Filter:

        def __init__(self):
            self.accepting_lines = True
            self.done_accepting_lines = False
            self.android_prefix = re.compile(ANDROID_LOGGING_PREFIX)
            self.lines = []
            self.is_android = False

        def append(self, line):
            if self.done_accepting_lines:
                return
            if 'Additional test environment' in line or 'android/test_runner.py' in line:
                self.accepting_lines = False
                self.is_android = True
            if ANDROID_BEGIN_SYSTEM_INFO in line:
                self.accepting_lines = True
                return
            if not self.accepting_lines:
                return

            if self.is_android:
                line = self.android_prefix.sub('', line)

            if line.startswith('}'):
                self.done_accepting_lines = True

            self.lines.append(line)

        def get(self):
            return self.lines

    with common.temporary_file() as tempfile_path:
        binary = get_binary_name('angle_system_info_test')
        if run_wrapper(args, [binary, '--vulkan', '-v'], env, tempfile_path):
            raise Exception('Error getting system info.')

        output_filter = Filter()

        with open(tempfile_path) as f:
            for line in f:
                output_filter.append(line)

        json_str = ''.join(output_filter.get())
        logging.info(json_str)
        json_data = json.loads(json_str)

    if not json_data.get('gpus') or 'activeGPUIndex' not in json_data:
        raise Exception('Error parsing system info: no active GPU found.')

    active_gpu = json_data['gpus'][json_data['activeGPUIndex']]

    angle_keys = {
        'vendor_id': to_hex_or_none(active_gpu['vendorId']),
        'device_id': to_hex_or_none(active_gpu['deviceId']),
        'model_name': to_non_empty_string_or_none_dict(active_gpu, 'machineModelVersion'),
        'manufacturer_name': to_non_empty_string_or_none_dict(active_gpu, 'machineManufacturer'),
        'os': to_non_empty_string_or_none(platform.system()),
        'os_version': to_non_empty_string_or_none(platform.version()),
        'driver_version': to_non_empty_string_or_none_dict(active_gpu, 'driverVersion'),
        'driver_vendor': to_non_empty_string_or_none_dict(active_gpu, 'driverVendor'),
    }

    return angle_keys


def output_diff_local_files(gold_session, image_name):
    """Logs the local diff image files from the given SkiaGoldSession.

    Args:
      gold_session: A skia_gold_session.SkiaGoldSession instance to pull files
          from.
      image_name: A string containing the name of the image/test that was
          compared.
    """
    given_file = gold_session.GetGivenImageLink(image_name)
    closest_file = gold_session.GetClosestImageLink(image_name)
    diff_file = gold_session.GetDiffImageLink(image_name)
    failure_message = 'Unable to retrieve link'
    logging.error('Generated image: %s', given_file or failure_message)
    logging.error('Closest image: %s', closest_file or failure_message)
    logging.error('Diff image: %s', diff_file or failure_message)


def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, gold_properties,
                                    screenshot_dir, image_name, artifacts):
    """Compares the given image using Skia Gold and uploads the result.

    No uploading is done if the test is being run in local run mode. Compares
    the given screenshot to baselines provided by Gold, raising an Exception if
    a match is not found.

    Args:
      args: Command line options.
      gold_session_manager: Skia Gold session manager.
      gold_session: Skia Gold session.
      gold_properties: Skia Gold properties.
      screenshot_dir: directory where the test stores screenshots.
      image_name: the name of the image being checked.
      artifacts: dictionary of JSON artifacts to pass to the result merger.
    """

    use_luci = not (gold_properties.local_pixel_tests or gold_properties.no_luci_auth)

    # Note: this would be better done by iterating the screenshot directory.
    png_file_name = os.path.join(screenshot_dir, DEFAULT_SCREENSHOT_PREFIX + image_name + '.png')

    if not os.path.isfile(png_file_name):
        logging.info('Screenshot not found, test skipped.')
        return SKIP

    status, error = gold_session.RunComparison(
        name=image_name, png_file=png_file_name, use_luci=use_luci)

    artifact_name = os.path.basename(png_file_name)
    artifacts[artifact_name] = [artifact_name]

    if not status:
        return PASS

    status_codes = gold_session_manager.GetSessionClass().StatusCodes
    if status == status_codes.AUTH_FAILURE:
        logging.error('Gold authentication failed with output %s', error)
    elif status == status_codes.INIT_FAILURE:
        logging.error('Gold initialization failed with output %s', error)
    elif status == status_codes.COMPARISON_FAILURE_REMOTE:
        _, triage_link = gold_session.GetTriageLinks(image_name)
        if not triage_link:
            logging.error('Failed to get triage link for %s, raw output: %s', image_name, error)
            logging.error('Reason for no triage link: %s',
                          gold_session.GetTriageLinkOmissionReason(image_name))
        elif gold_properties.IsTryjobRun():
            artifacts['triage_link_for_entire_cl'] = [triage_link]
        else:
            artifacts['gold_triage_link'] = [triage_link]
    elif status == status_codes.COMPARISON_FAILURE_LOCAL:
        logging.error('Local comparison failed. Local diff files:')
        output_diff_local_files(gold_session, image_name)
    elif status == status_codes.LOCAL_DIFF_FAILURE:
        logging.error(
            'Local comparison failed and an error occurred during diff '
            'generation: %s', error)
        # There might be some files, so try outputting them.
        logging.error('Local diff files:')
        output_diff_local_files(gold_session, image_name)
    else:
        logging.error('Given unhandled SkiaGoldSession StatusCode %s with error %s', status, error)

    return FAIL


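# For example (illustrative): _get_batches(['t1', 't2', 't3'], 2) yields
# ['t1', 't2'] and then ['t3'].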
def _get_batches(traces, batch_size):
    for i in range(0, len(traces), batch_size):
        yield traces[i:i + batch_size]


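# For example (illustrative): a batch of ['trace1', 'trace2'] expands to
# '--gtest_filter=TracePerfTest.Run/vulkan_trace1:TracePerfTest.Run/vulkan_trace2'.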
def _get_gtest_filter_for_batch(batch):
    expanded = ['%s%s' % (DEFAULT_TEST_PREFIX, trace) for trace in batch]
    return '--gtest_filter=%s' % ':'.join(expanded)


def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
    keys = get_skia_gold_keys(args, env)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]

        if args.isolated_script_test_filter:
            filtered = []
            for trace in traces:
                # Apply test filter if present.
                full_name = 'angle_restricted_trace_gold_tests.%s' % trace
                if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter):
                    logging.info('Skipping test %s because it does not match filter %s',
                                 full_name, args.isolated_script_test_filter)
                else:
                    filtered += [trace]
            traces = filtered

        batches = _get_batches(traces, args.batch_size)

        for batch in batches:
            for iteration in range(args.flaky_retries + 1):
                # An empty batch signals that all of its tests have passed.
                if not batch:
                    logging.debug('All tests in batch completed.')
                    break
                if iteration > 0:
                    logging.info('Test run failed, running retry #%d...', iteration)

                with common.temporary_file() as tempfile_path:
                    gtest_filter = _get_gtest_filter_for_batch(batch)
                    cmd = [
                        args.test_suite,
                        gtest_filter,
                        '--render-test-output-dir=%s' % screenshot_dir,
                        '--one-frame-only',
                        '--verbose-logging',
                    ] + extra_flags
                    batch_result = PASS if run_wrapper(args, cmd, env,
                                                       tempfile_path) == 0 else FAIL

                    next_batch = []
                    for trace in batch:
                        artifacts = {}

                        if batch_result == PASS:
                            logging.debug('upload test result: %s', trace)
                            result = upload_test_result_to_skia_gold(args, gold_session_manager,
                                                                     gold_session, gold_properties,
                                                                     screenshot_dir, trace,
                                                                     artifacts)
                        else:
                            result = batch_result

                        expected_result = SKIP if result == SKIP else PASS
                        test_results[trace] = {'expected': expected_result, 'actual': result}
                        if artifacts:
                            test_results[trace]['artifacts'] = artifacts
                        if result == FAIL:
                            next_batch.append(trace)
                    batch = next_batch

        # Totals are tallied after all batches have run so that each test is
        # counted exactly once, using its final (post-retry) result.
        for _, trace_results in test_results.items():
            result = trace_results['actual']
            results['num_failures_by_type'][result] += 1
            if result == FAIL:
                trace_results['is_unexpected'] = True

        return results['num_failures_by_type'][FAIL] == 0


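# For example (illustrative): _shard_tests(['a', 'b', 'c', 'd'], 2, 0) returns
# ['a', 'c'], and shard index 1 gets ['b', 'd'].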
def _shard_tests(tests, shard_count, shard_index):
    return [tests[index] for index in range(shard_index, len(tests), shard_count)]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-perf-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
    parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument(
        '--flaky-retries', help='Number of times to retry failed tests.', type=int, default=0)
    parser.add_argument(
        '--shard-count',
        help='Number of shards for test splitting. Default is 1.',
        type=int,
        default=1)
    parser.add_argument(
        '--shard-index',
        help='Index of the current shard for test splitting. Default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '--batch-size',
        help='Number of tests to run in a group. Default: %d' % DEFAULT_BATCH_SIZE,
        type=int,
        default=DEFAULT_BATCH_SIZE)
    parser.add_argument(
        '-l', '--log', help='Log output level. Default is %s.' % DEFAULT_LOG, default=DEFAULT_LOG)

    add_skia_gold_args(parser)

    args, extra_flags = parser.parse_known_args()
    logging.basicConfig(level=args.log.upper())

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
        if 'GTEST_SHARD_INDEX' not in env:
            logging.error('Sharding params must be specified together.')
            sys.exit(1)
        args.shard_count = int(env.pop('GTEST_TOTAL_SHARDS'))
        args.shard_index = int(env.pop('GTEST_SHARD_INDEX'))

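    # Results follow the Chromium JSON Test Results Format (version 3); see
    # json_test_results_format.md in the Chromium docs.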
    results = {
        'tests': {},
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }

    test_results = {}

    rc = 0

    try:
        args.test_suite = get_binary_name(args.test_suite)

        # Read the restricted trace test set.
        json_name = os.path.join(ANGLE_SRC_DIR, 'src', 'tests', 'restricted_traces',
                                 'restricted_traces.json')
        with open(json_name) as fp:
            tests = json.load(fp)

        # Split tests according to sharding
        sharded_tests = _shard_tests(tests['traces'], args.shard_count, args.shard_index)

        if args.render_test_output_dir:
            if not _run_tests(args, sharded_tests, extra_flags, env, args.render_test_output_dir,
                              results, test_results):
                rc = 1
        elif 'ISOLATED_OUTDIR' in env:
            if not _run_tests(args, sharded_tests, extra_flags, env, env['ISOLATED_OUTDIR'],
                              results, test_results):
                rc = 1
        else:
            with temporary_dir('angle_trace_') as temp_dir:
                if not _run_tests(args, sharded_tests, extra_flags, env, temp_dir, results,
                                  test_results):
                    rc = 1

    except Exception:
        traceback.print_exc()
        results['interrupted'] = True
        rc = 1

    if test_results:
        results['tests']['angle_restricted_trace_gold_tests'] = test_results

    if args.isolated_script_test_output:
        with open(args.isolated_script_test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    if args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as out_file:
            out_file.write(json.dumps({}))

    return rc


# This is not really a "script test", so it does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
    json.dump([], args.output)


if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        funcs = {
            'run': None,
            'compile_targets': main_compile_targets,
        }
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())
538