#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# restricted_trace_gold_tests.py:
#   Uses Skia Gold (https://skia.org/dev/testing/skiagold) to run pixel tests with ANGLE traces.
#
#   Requires vpython to run standalone. Run with --help for usage instructions.

import argparse
import contextlib
import fnmatch
import json
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import traceback

# Add //src/testing into sys.path for importing xvfb and test_env, and
# //src/testing/scripts for importing common.
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
sys.path.insert(0, d(THIS_DIR))

from skia_gold import angle_skia_gold_properties
from skia_gold import angle_skia_gold_session_manager

ANGLE_SRC_DIR = d(d(d(THIS_DIR)))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'scripts'))
# Handle the Chromium-relative directory as well. As long as one directory
# is valid, Python is happy.
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing'))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'scripts'))

import common
import test_env
import xvfb


def IsWindows():
    return sys.platform == 'cygwin' or sys.platform.startswith('win')


DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_PREFIX = 'TracePerfTest.Run/vulkan_'
SWIFTSHADER_TEST_PREFIX = 'TracePerfTest.Run/vulkan_swiftshader_'
DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_'
SWIFTSHADER_SCREENSHOT_PREFIX = 'angle_vulkan_swiftshader_'
DEFAULT_BATCH_SIZE = 5
DEFAULT_LOG = 'info'
# Filters out prefixes like: " I   72.572s run_tests_on_device(96071FFAZ00096)  "
ANDROID_LOGGING_PREFIX = r'I +\d+\.\d+s \w+\(\w+\)  '
ANDROID_BEGIN_SYSTEM_INFO = '>>ScopedMainEntryLogger'

# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'


@contextlib.contextmanager
def temporary_dir(prefix=''):
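    """Creates a temporary directory, yields it, and removes it on exit."""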
    path = tempfile.mkdtemp(prefix=prefix)
    try:
        yield path
    finally:
        logging.info('Removing temporary directory: %s', path)
        shutil.rmtree(path)


def add_skia_gold_args(parser):
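    """Adds the Skia Gold-specific arguments to the given parser."""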
    group = parser.add_argument_group('Skia Gold Arguments')
    group.add_argument('--git-revision', help='Revision being tested.', default=None)
    group.add_argument(
        '--gerrit-issue', help='For Skia Gold integration. Gerrit issue ID.', default='')
    group.add_argument(
        '--gerrit-patchset',
        help='For Skia Gold integration. Gerrit patch set number.',
        default='')
    group.add_argument(
        '--buildbucket-id', help='For Skia Gold integration. Buildbucket build ID.', default='')
    group.add_argument(
        '--bypass-skia-gold-functionality',
        action='store_true',
        default=False,
        help='Bypass all interaction with Skia Gold, effectively disabling the '
        'image comparison portion of any tests that use Gold. Only meant to '
        'be used in case a Gold outage occurs and cannot be fixed quickly.')
    local_group = group.add_mutually_exclusive_group()
    local_group.add_argument(
        '--local-pixel-tests',
        action='store_true',
        default=None,
        help='Specifies to run the test harness in local run mode or not. When '
        'run in local mode, uploading to Gold is disabled and links to '
        'help with local debugging are output. Running in local mode also '
        'implies --no-luci-auth. If both this and --no-local-pixel-tests are '
        'left unset, the test harness will attempt to detect whether it is '
        'running on a workstation or not and set this option accordingly.')
    local_group.add_argument(
        '--no-local-pixel-tests',
        action='store_false',
        dest='local_pixel_tests',
        help='Specifies to run the test harness in non-local (bot) mode. When '
        'run in this mode, data is actually uploaded to Gold and triage links '
        'are generated. If both this and --local-pixel-tests are left unset, '
        'the test harness will attempt to detect whether it is running on a '
        'workstation or not and set this option accordingly.')
    group.add_argument(
        '--no-luci-auth',
        action='store_true',
        default=False,
        help='Don\'t use the service account provided by LUCI for '
        'authentication for Skia Gold, instead relying on gsutil to be '
        'pre-authenticated. Meant for testing locally instead of on the bots.')


def run_wrapper(args, cmd, env, stdoutfile=None):
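    """Runs the given command, under xvfb if --xvfb was passed."""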
    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    else:
        return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)

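# goldctl requires all key values to be strings. These helpers normalize system
# info fields (ints, empty strings, or missing dict keys) to strings, using the
# literal 'None' for absent values.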
def to_hex(num):
    return hex(int(num))


def to_hex_or_none(num):
    return 'None' if num is None else to_hex(num)


def to_non_empty_string_or_none(val):
    return 'None' if val == '' else str(val)


def to_non_empty_string_or_none_dict(d, key):
    return 'None' if key not in d else to_non_empty_string_or_none(d[key])


def get_binary_name(binary):
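    """Returns the relative path to a test binary for the current platform."""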
    if IsWindows():
        return '.\\%s.exe' % binary
    else:
        return './%s' % binary


def get_skia_gold_keys(args, env):
    """Gets all the JSON metadata that will be passed to goldctl."""
    # All values need to be strings, otherwise goldctl fails.

    # Only call this method one time.
    if hasattr(get_skia_gold_keys, 'called') and get_skia_gold_keys.called:
        logging.exception('get_skia_gold_keys may only be called once')
    get_skia_gold_keys.called = True

    class Filter:
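        """Extracts the JSON block that angle_system_info_test prints.

        Skips test-runner noise before the output, strips Android logcat
        prefixes, and stops collecting once the closing brace of the JSON
        object is seen.
        """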

        def __init__(self):
            self.accepting_lines = True
            self.done_accepting_lines = False
            self.android_prefix = re.compile(ANDROID_LOGGING_PREFIX)
            self.lines = []
            self.is_android = False

        def append(self, line):
            if self.done_accepting_lines:
                return
            if 'Additional test environment' in line or 'android/test_runner.py' in line:
                self.accepting_lines = False
                self.is_android = True
            if ANDROID_BEGIN_SYSTEM_INFO in line:
                self.accepting_lines = True
                return
            if not self.accepting_lines:
                return

            if self.is_android:
                line = self.android_prefix.sub('', line)

            if line.startswith('}'):
                self.done_accepting_lines = True

            self.lines.append(line)

        def get(self):
            return self.lines

    with common.temporary_file() as tempfile_path:
        binary = get_binary_name('angle_system_info_test')
        sysinfo_args = [binary, '--vulkan', '-v']
        if args.swiftshader:
            sysinfo_args.append('--swiftshader')
        if run_wrapper(args, sysinfo_args, env, tempfile_path):
            raise Exception('Error getting system info.')

        output_filter = Filter()

        with open(tempfile_path) as f:
            for line in f:
                output_filter.append(line)

        json_str = ''.join(output_filter.get())
        logging.info(json_str)
        json_data = json.loads(json_str)

    if len(json_data.get('gpus', [])) == 0 or 'activeGPUIndex' not in json_data:
        raise Exception('Error getting system info.')

    active_gpu = json_data['gpus'][json_data['activeGPUIndex']]

    angle_keys = {
        'vendor_id': to_hex_or_none(active_gpu['vendorId']),
        'device_id': to_hex_or_none(active_gpu['deviceId']),
        'model_name': to_non_empty_string_or_none_dict(active_gpu, 'machineModelVersion'),
        'manufacturer_name': to_non_empty_string_or_none_dict(active_gpu, 'machineManufacturer'),
        'os': to_non_empty_string_or_none(platform.system()),
        'os_version': to_non_empty_string_or_none(platform.version()),
        'driver_version': to_non_empty_string_or_none_dict(active_gpu, 'driverVersion'),
        'driver_vendor': to_non_empty_string_or_none_dict(active_gpu, 'driverVendor'),
    }

    return angle_keys


def output_diff_local_files(gold_session, image_name):
    """Logs the local diff image files from the given SkiaGoldSession.

    Args:
      gold_session: A skia_gold_session.SkiaGoldSession instance to pull files
          from.
      image_name: A string containing the name of the image/test that was
          compared.
    """
    given_file = gold_session.GetGivenImageLink(image_name)
    closest_file = gold_session.GetClosestImageLink(image_name)
    diff_file = gold_session.GetDiffImageLink(image_name)
    failure_message = 'Unable to retrieve link'
    logging.error('Generated image: %s', given_file or failure_message)
    logging.error('Closest image: %s', closest_file or failure_message)
    logging.error('Diff image: %s', diff_file or failure_message)


def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, gold_properties,
                                    screenshot_dir, image_name, artifacts):
    """Compares the given image using Skia Gold and uploads the result.

    No uploading is done if the test is being run in local run mode. Compares
    the given screenshot to baselines provided by Gold, raising an Exception if
    a match is not found.

    Args:
      args: Command line options.
      gold_session_manager: Skia Gold session manager.
      gold_session: Skia Gold session.
      gold_properties: Skia Gold properties.
      screenshot_dir: directory where the test stores screenshots.
      image_name: the name of the image being checked.
      artifacts: dictionary of JSON artifacts to pass to the result merger.
    """

    use_luci = not (gold_properties.local_pixel_tests or gold_properties.no_luci_auth)

    # Note: this would be better done by iterating the screenshot directory.
    prefix = SWIFTSHADER_SCREENSHOT_PREFIX if args.swiftshader else DEFAULT_SCREENSHOT_PREFIX
    png_file_name = os.path.join(screenshot_dir, prefix + image_name + '.png')

    if not os.path.isfile(png_file_name):
        logging.info('Screenshot not found, test skipped.')
        return SKIP

    status, error = gold_session.RunComparison(
        name=image_name, png_file=png_file_name, use_luci=use_luci)

    artifact_name = os.path.basename(png_file_name)
    artifacts[artifact_name] = [artifact_name]

    if not status:
        return PASS

    status_codes = gold_session_manager.GetSessionClass().StatusCodes
    if status == status_codes.AUTH_FAILURE:
        logging.error('Gold authentication failed with output %s', error)
    elif status == status_codes.INIT_FAILURE:
        logging.error('Gold initialization failed with output %s', error)
    elif status == status_codes.COMPARISON_FAILURE_REMOTE:
        _, triage_link = gold_session.GetTriageLinks(image_name)
        if not triage_link:
            logging.error('Failed to get triage link for %s, raw output: %s', image_name, error)
            logging.error('Reason for no triage link: %s',
                          gold_session.GetTriageLinkOmissionReason(image_name))
        elif gold_properties.IsTryjobRun():
            # Pick "show all results" so we can see the tryjob images by default.
            triage_link += '&master=true'
            artifacts['triage_link_for_entire_cl'] = [triage_link]
        else:
            artifacts['gold_triage_link'] = [triage_link]
    elif status == status_codes.COMPARISON_FAILURE_LOCAL:
        logging.error('Local comparison failed. Local diff files:')
        output_diff_local_files(gold_session, image_name)
    elif status == status_codes.LOCAL_DIFF_FAILURE:
        logging.error(
            'Local comparison failed and an error occurred during diff '
            'generation: %s', error)
        # There might be some files, so try outputting them.
        logging.error('Local diff files:')
        output_diff_local_files(gold_session, image_name)
    else:
        logging.error('Given unhandled SkiaGoldSession StatusCode %s with error %s', status, error)

    return FAIL


def _get_batches(traces, batch_size):
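    """Yields successive batch_size slices of the trace list.

    For example, a batch_size of 2 turns ['a', 'b', 'c'] into
    ['a', 'b'] followed by ['c'].
    """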
    for i in range(0, len(traces), batch_size):
        yield traces[i:i + batch_size]


def _get_gtest_filter_for_batch(args, batch):
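    """Builds a --gtest_filter argument matching every trace in the batch."""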
    prefix = SWIFTSHADER_TEST_PREFIX if args.swiftshader else DEFAULT_TEST_PREFIX
    expanded = ['%s%s' % (prefix, trace) for trace in batch]
    return '--gtest_filter=%s' % ':'.join(expanded)


def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
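    """Runs the trace tests in batches, comparing each screenshot with Gold.

    Failed traces are retried up to --flaky-retries times. Returns True if no
    test ended in FAIL.
    """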
    keys = get_skia_gold_keys(args, env)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]

        if args.isolated_script_test_filter:
            filtered = []
            for trace in traces:
                # Apply the test filter if present.
                full_name = 'angle_restricted_trace_gold_tests.%s' % trace
                if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter):
                    logging.info('Skipping test %s because it does not match filter %s',
                                 full_name, args.isolated_script_test_filter)
                else:
                    filtered.append(trace)
            traces = filtered

        batches = _get_batches(traces, args.batch_size)

        for batch in batches:
            for iteration in range(args.flaky_retries + 1):
                with common.temporary_file() as tempfile_path:
                    # An empty batch signals that every test already passed.
                    if not batch:
                        logging.debug('All tests in batch completed.')
                        break
                    if iteration > 0:
                        logging.info('Test run failed, running retry #%d...', iteration)

                    gtest_filter = _get_gtest_filter_for_batch(args, batch)
                    cmd = [
                        args.test_suite,
                        gtest_filter,
                        '--render-test-output-dir=%s' % screenshot_dir,
                        '--one-frame-only',
                        '--verbose-logging',
                        '--enable-all-trace-tests',
                    ] + extra_flags
                    batch_result = PASS if run_wrapper(args, cmd, env,
                                                       tempfile_path) == 0 else FAIL

                    next_batch = []
                    for trace in batch:
                        artifacts = {}

                        if batch_result == PASS:
                            logging.debug('Uploading test result: %s', trace)
                            result = upload_test_result_to_skia_gold(args, gold_session_manager,
                                                                     gold_session, gold_properties,
                                                                     screenshot_dir, trace,
                                                                     artifacts)
                        else:
                            result = batch_result

                        expected_result = SKIP if result == SKIP else PASS
                        test_results[trace] = {'expected': expected_result, 'actual': result}
                        if artifacts:
                            test_results[trace]['artifacts'] = artifacts
                        if result == FAIL:
                            next_batch.append(trace)
                    batch = next_batch

        # These properties are recorded after iteration to ensure they only happen once.
        for _, trace_results in test_results.items():
            result = trace_results['actual']
            results['num_failures_by_type'][result] += 1
            if result == FAIL:
                trace_results['is_unexpected'] = True

        return results['num_failures_by_type'][FAIL] == 0


def _shard_tests(tests, shard_count, shard_index):
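    """Splits tests round-robin: this shard takes every shard_count-th test
    starting at shard_index (e.g. shard 1 of 2 takes indices 1, 3, 5, ...).
    """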
    return [tests[index] for index in range(shard_index, len(tests), shard_count)]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-perf-output', type=str)
    parser.add_argument('-f', '--isolated-script-test-filter', '--filter', type=str)
    parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
    parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument(
        '--flaky-retries', help='Number of times to retry failed tests.', type=int, default=0)
    parser.add_argument(
        '--shard-count',
        help='Number of shards for test splitting. Default is 1.',
        type=int,
        default=1)
    parser.add_argument(
        '--shard-index',
        help='Index of the current shard for test splitting. Default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '--batch-size',
        help='Number of tests to run in a group. Default: %d' % DEFAULT_BATCH_SIZE,
        type=int,
        default=DEFAULT_BATCH_SIZE)
    parser.add_argument(
        '-l', '--log', help='Log output level. Default is %s.' % DEFAULT_LOG, default=DEFAULT_LOG)
    parser.add_argument('--swiftshader', help='Test with SwiftShader.', action='store_true')

    add_skia_gold_args(parser)

    args, extra_flags = parser.parse_known_args()
    logging.basicConfig(level=args.log.upper())

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
        if 'GTEST_SHARD_INDEX' not in env:
            logging.error('Sharding params must be specified together.')
            sys.exit(1)
        args.shard_count = int(env.pop('GTEST_TOTAL_SHARDS'))
        args.shard_index = int(env.pop('GTEST_SHARD_INDEX'))

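    # Results follow the Chromium JSON Test Results Format (the 'version': 3
    # schema consumed by the result merger).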
    results = {
        'tests': {},
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }

    test_results = {}

    rc = 0

    try:
        args.test_suite = get_binary_name(args.test_suite)

        # Read the test set.
        json_name = os.path.join(ANGLE_SRC_DIR, 'src', 'tests', 'restricted_traces',
                                 'restricted_traces.json')
        with open(json_name) as fp:
            tests = json.load(fp)

        # Split tests according to sharding.
        sharded_tests = _shard_tests(tests['traces'], args.shard_count, args.shard_index)

        if args.render_test_output_dir:
            if not _run_tests(args, sharded_tests, extra_flags, env, args.render_test_output_dir,
                              results, test_results):
                rc = 1
        elif 'ISOLATED_OUTDIR' in env:
            if not _run_tests(args, sharded_tests, extra_flags, env, env['ISOLATED_OUTDIR'],
                              results, test_results):
                rc = 1
        else:
            with temporary_dir('angle_trace_') as temp_dir:
                if not _run_tests(args, sharded_tests, extra_flags, env, temp_dir, results,
                                  test_results):
                    rc = 1

    except Exception:
        traceback.print_exc()
        results['interrupted'] = True
        rc = 1

    if test_results:
        results['tests']['angle_restricted_trace_gold_tests'] = test_results

    if args.isolated_script_test_output:
        with open(args.isolated_script_test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    if args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as out_file:
            out_file.write(json.dumps({}))

    return rc


# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
    json.dump([], args.output)


if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        funcs = {
            'run': None,
            'compile_targets': main_compile_targets,
        }
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())