• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#! /usr/bin/env vpython3
2#
3# Copyright 2020 The ANGLE Project Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6#
7# restricted_trace_gold_tests.py:
8#   Uses Skia Gold (https://skia.org/dev/testing/skiagold) to run pixel tests with ANGLE traces.
9#
10#   Requires vpython to run standalone. Run with --help for usage instructions.
11
12import argparse
13import contextlib
14import fnmatch
15import json
16import logging
17import os
18import platform
19import re
20import shutil
21import sys
22import tempfile
23import time
24import traceback
25
26
def _AddToPathIfNeeded(path):
    """Prepend *path* to sys.path unless it is already importable from there."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
30
31
# Make the sibling py_utils directory importable before the imports below.
_AddToPathIfNeeded(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'py_utils')))
import android_helper
import angle_path_util
from skia_gold import angle_skia_gold_properties
from skia_gold import angle_skia_gold_session_manager

# testing/scripts comes from the deps checkout; make it importable too.
angle_path_util.AddDepsDirToPath('testing/scripts')
import common
import test_env
import xvfb
42
43
def IsWindows():
    """Return True when running on Windows (including Cygwin)."""
    plat = sys.platform
    return plat.startswith('win') or plat == 'cygwin'
46
47
# Defaults for the test suite and the gtest filter prefixes used to select
# individual trace tests (regular Vulkan vs. SwiftShader variants).
DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_PREFIX = 'TracePerfTest.Run/vulkan_'
SWIFTSHADER_TEST_PREFIX = 'TracePerfTest.Run/vulkan_swiftshader_'
# File-name prefixes of the screenshots the test suite writes.
DEFAULT_SCREENSHOT_PREFIX = 'angle_vulkan_'
SWIFTSHADER_SCREENSHOT_PREFIX = 'angle_vulkan_swiftshader_'
# Number of traces run per gtest invocation by default.
DEFAULT_BATCH_SIZE = 5
DEFAULT_LOG = 'info'
DEFAULT_GOLD_INSTANCE = 'angle'

# Filters out stuff like: " I   72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\)  '
ANDROID_BEGIN_SYSTEM_INFO = '>>ScopedMainEntryLogger'

# Test expectations (Chromium JSON test-result vocabulary).
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'
65
66
@contextlib.contextmanager
def temporary_dir(prefix=''):
    """Context manager yielding a fresh temp directory, deleted on exit."""
    temp_path = tempfile.mkdtemp(prefix=prefix)
    try:
        yield temp_path
    finally:
        # Always clean up, even if the body raised.
        logging.info("Removing temporary directory: %s" % temp_path)
        shutil.rmtree(temp_path)
75
76
def add_skia_gold_args(parser):
    """Add the Skia Gold integration flags to *parser*.

    Registers the revision/tryjob identifiers goldctl needs, the bypass
    switch, and the mutually exclusive local/non-local pixel-test modes.

    Args:
      parser: an argparse.ArgumentParser to extend in place.
    """
    group = parser.add_argument_group('Skia Gold Arguments')
    group.add_argument('--git-revision', help='Revision being tested.', default=None)
    group.add_argument(
        '--gerrit-issue', help='For Skia Gold integration. Gerrit issue ID.', default='')
    group.add_argument(
        '--gerrit-patchset',
        help='For Skia Gold integration. Gerrit patch set number.',
        default='')
    group.add_argument(
        '--buildbucket-id', help='For Skia Gold integration. Buildbucket build ID.', default='')
    group.add_argument(
        '--bypass-skia-gold-functionality',
        action='store_true',
        default=False,
        help='Bypass all interaction with Skia Gold, effectively disabling the '
        'image comparison portion of any tests that use Gold. Only meant to '
        'be used in case a Gold outage occurs and cannot be fixed quickly.')
    # --local-pixel-tests / --no-local-pixel-tests share one dest; default None
    # means "auto-detect whether this is a workstation run".
    local_group = group.add_mutually_exclusive_group()
    local_group.add_argument(
        '--local-pixel-tests',
        action='store_true',
        default=None,
        help='Specifies to run the test harness in local run mode or not. When '
        'run in local mode, uploading to Gold is disabled and links to '
        'help with local debugging are output. Running in local mode also '
        'implies --no-luci-auth. If both this and --no-local-pixel-tests are '
        'left unset, the test harness will attempt to detect whether it is '
        'running on a workstation or not and set this option accordingly.')
    local_group.add_argument(
        '--no-local-pixel-tests',
        action='store_false',
        dest='local_pixel_tests',
        help='Specifies to run the test harness in non-local (bot) mode. When '
        'run in this mode, data is actually uploaded to Gold and triage links '
        # Typo fix: was "arge generated".
        'are generated. If both this and --local-pixel-tests are left unset, '
        'the test harness will attempt to detect whether it is running on a '
        'workstation or not and set this option accordingly.')
    group.add_argument(
        '--no-luci-auth',
        action='store_true',
        default=False,
        help='Don\'t use the service account provided by LUCI for '
        'authentication for Skia Gold, instead relying on gsutil to be '
        'pre-authenticated. Meant for testing locally instead of on the bots.')
122
123
def _adb_if_android(args):
    """Return an Adb wrapper when the test suite is an Android APK, else None."""
    if not android_helper.ApkFileExists(args.test_suite):
        return None
    return android_helper.Adb()
129
130
def run_wrapper(test_suite, cmd_args, args, env, stdoutfile, output_dir=None):
    """Run *test_suite* with *cmd_args*, capturing stdout to *stdoutfile*.

    Dispatches to xvfb, adb (Android), or a plain subprocess depending on the
    options and environment. Returns a process-style exit code (0 = success).
    """
    cmd = [get_binary_name(test_suite)] + cmd_args
    if output_dir:
        cmd.append('--render-test-output-dir=%s' % output_dir)

    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)

    adb = _adb_if_android(args)
    if adb is None:
        return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)

    try:
        android_helper.RunTests(adb, test_suite, cmd_args, stdoutfile, output_dir)
    except Exception as e:
        logging.exception(e)
        return 1
    return 0
149
150
def run_angle_system_info_test(sysinfo_args, args, env):
    """Run angle_system_info_test and return its JSON output as a dict.

    Raises:
      Exception: when the system info binary exits non-zero.
    """
    with temporary_dir() as temp_dir:
        stdout_path = os.path.join(temp_dir, 'stdout')
        sysinfo_args.append('--render-test-output-dir=' + temp_dir)

        rc = run_wrapper('angle_system_info_test', sysinfo_args, args, env, stdout_path)
        if rc:
            raise Exception('Error getting system info.')

        json_path = os.path.join(temp_dir, 'angle_system_info.json')
        with open(json_path) as f:
            return json.load(f)
161
162
def to_hex(num):
    """Return *num* (int or int-convertible) formatted as a hex string."""
    value = int(num)
    return hex(value)
165
166
def to_hex_or_none(num):
    """Return the hex string for *num*, or the literal string 'None'."""
    # PEP 8: identity comparison for None (was 'num == None').
    return 'None' if num is None else to_hex(num)
169
170
def to_non_empty_string_or_none(val):
    """Stringify *val*, mapping the empty string to the literal 'None'."""
    if val == '':
        return 'None'
    return str(val)
173
174
def to_non_empty_string_or_none_dict(d, key):
    """Return 'None' when *key* is absent from *d*, else its stringified value."""
    # Idiomatic membership test (was 'not key in d').
    return 'None' if key not in d else to_non_empty_string_or_none(d[key])
177
178
def get_binary_name(binary):
    """Return the platform-appropriate relative path to the test executable."""
    if IsWindows():
        return '.\\%s.exe' % binary
    return './%s' % binary
184
185
def get_skia_gold_keys(args, env):
    """Get all the JSON metadata that will be passed to goldctl.

    Gathers ANGLE system info (via adb on Android, otherwise by running the
    angle_system_info_test binary) and builds the key/value pairs Skia Gold
    uses to group results. All values need to be strings, otherwise goldctl
    fails.

    Args:
      args: parsed command-line options.
      env: environment dict used when running the system info binary.

    Returns:
      Dict of string keys describing the active GPU, driver and OS.

    Raises:
      Exception: if system info could not be retrieved.
    """
    # Only call this method one time; it runs the (slow) system info query.
    if getattr(get_skia_gold_keys, 'called', False):
        logging.exception('get_skia_gold_keys may only be called once')
    get_skia_gold_keys.called = True

    sysinfo_args = ['--vulkan', '-v']
    if args.swiftshader:
        sysinfo_args.append('--swiftshader')

    adb = _adb_if_android(args)
    if adb:
        json_data = android_helper.AngleSystemInfo(adb, sysinfo_args)
        logging.info(json_data)
    else:
        json_data = run_angle_system_info_test(sysinfo_args, args, env)

    # Idiomatic emptiness/membership tests (was len(...) == 0 / not ... in).
    if not json_data.get('gpus') or 'activeGPUIndex' not in json_data:
        raise Exception('Error getting system info.')

    active_gpu = json_data['gpus'][json_data['activeGPUIndex']]

    angle_keys = {
        'vendor_id': to_hex_or_none(active_gpu['vendorId']),
        'device_id': to_hex_or_none(active_gpu['deviceId']),
        'model_name': to_non_empty_string_or_none_dict(active_gpu, 'machineModelVersion'),
        'manufacturer_name': to_non_empty_string_or_none_dict(active_gpu, 'machineManufacturer'),
        'os': to_non_empty_string_or_none(platform.system()),
        'os_version': to_non_empty_string_or_none(platform.version()),
        'driver_version': to_non_empty_string_or_none_dict(active_gpu, 'driverVersion'),
        'driver_vendor': to_non_empty_string_or_none_dict(active_gpu, 'driverVendor'),
    }

    return angle_keys
223
224
def output_diff_local_files(gold_session, image_name):
    """Logs the local diff image files from the given SkiaGoldSession.

    Args:
      gold_session: A skia_gold_session.SkiaGoldSession instance to pull files
          from.
      image_name: A string containing the name of the image/test that was
          compared.
    """
    missing = 'Unable to retrieve link'
    logging.error('Generated image: %s',
                  gold_session.GetGivenImageLink(image_name) or missing)
    logging.error('Closest image: %s',
                  gold_session.GetClosestImageLink(image_name) or missing)
    logging.error('Diff image: %s',
                  gold_session.GetDiffImageLink(image_name) or missing)
241
242
def upload_test_result_to_skia_gold(args, gold_session_manager, gold_session, gold_properties,
                                    screenshot_dir, image_name, artifacts):
    """Compares the given image using Skia Gold and uploads the result.

    No uploading is done if the test is being run in local run mode. Compares
    the given screenshot to baselines provided by Gold.

    Args:
      args: Command line options.
      gold_session_manager: Skia Gold session manager.
      gold_session: Skia Gold session.
      gold_properties: Skia Gold properties.
      screenshot_dir: directory where the test stores screenshots.
      image_name: the name of the image being checked.
      artifacts: dictionary of JSON artifacts to pass to the result merger.

    Returns:
      PASS when the comparison succeeded, FAIL otherwise.

    Raises:
      Exception: if the expected screenshot file does not exist.
    """

    use_luci = not (gold_properties.local_pixel_tests or gold_properties.no_luci_auth)

    # Note: this would be better done by iterating the screenshot directory.
    prefix = SWIFTSHADER_SCREENSHOT_PREFIX if args.swiftshader else DEFAULT_SCREENSHOT_PREFIX
    png_file_name = os.path.join(screenshot_dir, prefix + image_name + '.png')

    if not os.path.isfile(png_file_name):
        raise Exception('Screenshot not found: ' + png_file_name)

    status, error = gold_session.RunComparison(
        name=image_name, png_file=png_file_name, use_luci=use_luci)

    artifact_name = os.path.basename(png_file_name)
    artifacts[artifact_name] = [artifact_name]

    # A falsy status means the comparison succeeded.
    if not status:
        return PASS

    status_codes = gold_session_manager.GetSessionClass().StatusCodes
    if status == status_codes.AUTH_FAILURE:
        logging.error('Gold authentication failed with output %s', error)
    elif status == status_codes.INIT_FAILURE:
        logging.error('Gold initialization failed with output %s', error)
    elif status == status_codes.COMPARISON_FAILURE_REMOTE:
        _, triage_link = gold_session.GetTriageLinks(image_name)
        if not triage_link:
            # Bug fix: previously a missing link fell through to the append
            # below, raising TypeError (None + str) on tryjob runs and storing
            # [None] otherwise. Now we only record an artifact when a link
            # actually exists.
            logging.error('Failed to get triage link for %s, raw output: %s', image_name, error)
            logging.error('Reason for no triage link: %s',
                          gold_session.GetTriageLinkOmissionReason(image_name))
        elif gold_properties.IsTryjobRun():
            # Pick "show all results" so we can see the tryjob images by default.
            artifacts['triage_link_for_entire_cl'] = [triage_link + '&master=true']
        else:
            artifacts['gold_triage_link'] = [triage_link]
    elif status == status_codes.COMPARISON_FAILURE_LOCAL:
        logging.error('Local comparison failed. Local diff files:')
        output_diff_local_files(gold_session, image_name)
    elif status == status_codes.LOCAL_DIFF_FAILURE:
        logging.error(
            'Local comparison failed and an error occurred during diff '
            'generation: %s', error)
        # There might be some files, so try outputting them.
        logging.error('Local diff files:')
        output_diff_local_files(gold_session, image_name)
    else:
        logging.error('Given unhandled SkiaGoldSession StatusCode %s with error %s', status, error)

    return FAIL
310
311
def _get_batches(traces, batch_size):
    """Yield successive slices of *traces*, each at most *batch_size* long."""
    start = 0
    while start < len(traces):
        yield traces[start:start + batch_size]
        start += batch_size
315
316
def _get_gtest_filter_for_batch(args, batch):
    """Build a --gtest_filter argument selecting every trace in *batch*."""
    prefix = SWIFTSHADER_TEST_PREFIX if args.swiftshader else DEFAULT_TEST_PREFIX
    full_names = [prefix + trace for trace in batch]
    return '--gtest_filter=%s' % ':'.join(full_names)
321
322
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
    """Runs the trace tests in batches and Gold-compares each screenshot.

    Args:
      args: parsed command-line options.
      tests: list of trace description strings; only the first
          space-separated token of each entry is used as the trace name.
      extra_flags: extra command-line flags forwarded to the test suite.
      env: environment dict for child processes.
      screenshot_dir: directory where the test suite writes screenshots.
      results: overall results dict; 'num_failures_by_type' is updated here.
      test_results: per-trace result dict, filled in by this function.

    Returns:
      True when no trace ended up as FAIL, False otherwise.
    """
    keys = get_skia_gold_keys(args, env)

    adb = _adb_if_android(args)
    if adb:
        android_helper.PrepareTestSuite(adb, args.test_suite)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys, instance=args.instance)

        traces = [trace.split(' ')[0] for trace in tests]

        if args.isolated_script_test_filter:
            filtered = []
            for trace in traces:
                # Apply test filter if present.
                full_name = 'angle_restricted_trace_gold_tests.%s' % trace
                if not fnmatch.fnmatch(full_name, args.isolated_script_test_filter):
                    logging.info('Skipping test %s because it does not match filter %s' %
                                 (full_name, args.isolated_script_test_filter))
                else:
                    filtered += [trace]
            traces = filtered

        batches = _get_batches(traces, args.batch_size)

        for batch in batches:
            if adb:
                android_helper.PrepareRestrictedTraces(adb, batch)

            # Each retry iteration re-runs only the traces that failed in the
            # previous iteration (batch is replaced by next_batch below).
            for iteration in range(0, args.flaky_retries + 1):
                with common.temporary_file() as tempfile_path:
                    # This is how we signal early exit
                    if not batch:
                        logging.debug('All tests in batch completed.')
                        break
                    if iteration > 0:
                        logging.info('Test run failed, running retry #%d...' % iteration)

                    gtest_filter = _get_gtest_filter_for_batch(args, batch)
                    cmd_args = [
                        gtest_filter,
                        '--one-frame-only',
                        '--verbose-logging',
                        '--enable-all-trace-tests',
                    ] + extra_flags
                    batch_result = PASS if run_wrapper(
                        args.test_suite,
                        cmd_args,
                        args,
                        env,
                        tempfile_path,
                        output_dir=screenshot_dir) == 0 else FAIL

                    # Captured stdout is scanned below for gtest SKIPPED notices.
                    with open(tempfile_path) as f:
                        test_output = f.read() + '\n'

                    next_batch = []
                    for trace in batch:
                        artifacts = {}

                        if batch_result == PASS:
                            test_prefix = SWIFTSHADER_TEST_PREFIX if args.swiftshader else DEFAULT_TEST_PREFIX
                            trace_skipped_notice = '[  SKIPPED ] ' + test_prefix + trace + '\n'
                            if trace_skipped_notice in test_output:
                                result = SKIP
                            else:
                                logging.debug('upload test result: %s' % trace)
                                result = upload_test_result_to_skia_gold(
                                    args, gold_session_manager, gold_session, gold_properties,
                                    screenshot_dir, trace, artifacts)
                        else:
                            # The whole batch run failed; mark every trace FAIL
                            # so each one is retried.
                            result = batch_result

                        expected_result = SKIP if result == SKIP else PASS
                        test_results[trace] = {'expected': expected_result, 'actual': result}
                        if len(artifacts) > 0:
                            test_results[trace]['artifacts'] = artifacts
                        if result == FAIL:
                            next_batch.append(trace)
                    batch = next_batch

        # These properties are recorded after iteration to ensure they only happen once.
        for _, trace_results in test_results.items():
            result = trace_results['actual']
            results['num_failures_by_type'][result] += 1
            if result == FAIL:
                trace_results['is_unexpected'] = True

        return results['num_failures_by_type'][FAIL] == 0
416
417
def _shard_tests(tests, shard_count, shard_index):
    """Return the round-robin subset of *tests* owned by shard *shard_index*."""
    return tests[shard_index::shard_count]
420
421
def main():
    """Entry point: run restricted-trace Gold tests and emit JSON results.

    Returns:
      0 on success, 1 when any test failed or the run was interrupted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-perf-output', type=str)
    parser.add_argument('-f', '--isolated-script-test-filter', '--filter', type=str)
    parser.add_argument('--test-suite', help='Test suite to run.', default=DEFAULT_TEST_SUITE)
    parser.add_argument('--render-test-output-dir', help='Directory to store screenshots')
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument(
        '--flaky-retries', help='Number of times to retry failed tests.', type=int, default=0)
    parser.add_argument(
        '--shard-count',
        help='Number of shards for test splitting. Default is 1.',
        type=int,
        default=1)
    parser.add_argument(
        '--shard-index',
        help='Index of the current shard for test splitting. Default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '--batch-size',
        help='Number of tests to run in a group. Default: %d' % DEFAULT_BATCH_SIZE,
        type=int,
        default=DEFAULT_BATCH_SIZE)
    parser.add_argument(
        '-l', '--log', help='Log output level. Default is %s.' % DEFAULT_LOG, default=DEFAULT_LOG)
    parser.add_argument('--swiftshader', help='Test with SwiftShader.', action='store_true')
    parser.add_argument(
        '-i',
        '--instance',
        '--gold-instance',
        '--skia-gold-instance',
        help='Skia Gold instance. Default is "%s".' % DEFAULT_GOLD_INSTANCE,
        default=DEFAULT_GOLD_INSTANCE)

    add_skia_gold_args(parser)

    args, extra_flags = parser.parse_known_args()
    logging.basicConfig(level=args.log.upper())

    env = os.environ.copy()

    # gtest-style sharding env vars (when set) override the flags; both must
    # be present together.
    if 'GTEST_TOTAL_SHARDS' in env and int(env['GTEST_TOTAL_SHARDS']) != 1:
        if 'GTEST_SHARD_INDEX' not in env:
            logging.error('Sharding params must be specified together.')
            sys.exit(1)
        args.shard_count = int(env.pop('GTEST_TOTAL_SHARDS'))
        args.shard_index = int(env.pop('GTEST_SHARD_INDEX'))

    # The harness currently uploads all traces in a batch, which is very slow.
    # TODO: Reduce lag from trace uploads and remove this. http://anglebug.com/6854
    env['DEVICE_TIMEOUT_MULTIPLIER'] = '20'

    # Skeleton of the Chromium JSON test-results format (version 3).
    results = {
        'tests': {},
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }

    test_results = {}

    rc = 0

    try:
        # read test set
        json_name = os.path.join(angle_path_util.ANGLE_ROOT_DIR, 'src', 'tests',
                                 'restricted_traces', 'restricted_traces.json')
        with open(json_name) as fp:
            tests = json.load(fp)

        # Split tests according to sharding
        sharded_tests = _shard_tests(tests['traces'], args.shard_count, args.shard_index)

        # Screenshot directory priority: explicit flag, then the swarming
        # ISOLATED_OUTDIR, finally a throwaway temporary directory.
        if args.render_test_output_dir:
            if not _run_tests(args, sharded_tests, extra_flags, env, args.render_test_output_dir,
                              results, test_results):
                rc = 1
        elif 'ISOLATED_OUTDIR' in env:
            if not _run_tests(args, sharded_tests, extra_flags, env, env['ISOLATED_OUTDIR'],
                              results, test_results):
                rc = 1
        else:
            with temporary_dir('angle_trace_') as temp_dir:
                if not _run_tests(args, sharded_tests, extra_flags, env, temp_dir, results,
                                  test_results):
                    rc = 1

    except Exception:
        # Record the interruption in the results JSON rather than crashing.
        traceback.print_exc()
        results['interrupted'] = True
        rc = 1

    if test_results:
        results['tests']['angle_restricted_trace_gold_tests'] = test_results

    if args.isolated_script_test_output:
        with open(args.isolated_script_test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    if args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as out_file:
            out_file.write(json.dumps({}))

    return rc
534
535
536# This is not really a "script test" so does not need to manually add
537# any additional compile targets.
def main_compile_targets(args):
    """Write an empty compile-target list to args.output (JSON)."""
    json.dump(list(), args.output)
540
541
if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        # Dispatch table consumed by testing/scripts common.run_script.
        funcs = {
            'run': None,
            'compile_targets': main_compile_targets,
        }
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())
551