#! /usr/bin/env python3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
'''
Script that re-captures the traces in the restricted trace folder. We can
use this to update traces without needing to re-run the app on a device.
'''

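# Example invocations, derived from the subcommands that main() defines below.
# The script name and the GN build path shown here are placeholders; substitute
# the actual file name and output directory when running it.
#
#   python3 retrace_restricted_traces.py backup "*"
#   python3 retrace_restricted_traces.py upgrade out/Debug retrace-wip -f "trace_name*"
#   python3 retrace_restricted_traces.py validate out/Debug retrace-wip "trace_name*"
#   python3 retrace_restricted_traces.py restore "*"
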
import argparse
import fnmatch
import json
import logging
import os
import pathlib
import shutil
import stat
import subprocess
import sys
import tempfile
import time

from gen_restricted_traces import read_json as read_json, write_json as write_json
from pathlib import Path

SCRIPT_DIR = str(pathlib.Path(__file__).resolve().parent)
PY_UTILS = str(pathlib.Path(SCRIPT_DIR) / '..' / 'py_utils')
if PY_UTILS not in sys.path:
    os.stat(PY_UTILS) and sys.path.insert(0, PY_UTILS)
import android_helper
import angle_test_util

DEFAULT_TEST_SUITE = angle_test_util.ANGLE_TRACE_TEST_SUITE
DEFAULT_TEST_JSON = 'restricted_traces.json'
DEFAULT_LOG_LEVEL = 'info'
DEFAULT_BACKUP_FOLDER = 'retrace-backups'

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'


def get_trace_json_path(trace):
    return os.path.join(get_script_dir(), trace, f'{trace}.json')


def load_trace_json(trace):
    json_file_name = get_trace_json_path(trace)
    return read_json(json_file_name)


def get_context(trace):
    """Returns the trace context number."""
    json_data = load_trace_json(trace)
    return str(json_data['WindowSurfaceContextID'])


def get_script_dir():
    return os.path.dirname(sys.argv[0])


def context_header(trace, trace_path):
    context_id = get_context(trace)
    header = '%s_context%s.h' % (trace, context_id)
    return os.path.join(trace_path, header)


def src_trace_path(trace):
    return os.path.join(get_script_dir(), trace)


def get_num_frames(json_data):
    metadata = json_data['TraceMetadata']
    return metadata['FrameEnd'] - metadata['FrameStart'] + 1


def get_gles_version(json_data):
    metadata = json_data['TraceMetadata']
    return (metadata['ContextClientMajorVersion'], metadata['ContextClientMinorVersion'])

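# For example, a trace whose TraceMetadata contains (illustrative values only)
#     "FrameStart": 11, "FrameEnd": 120,
#     "ContextClientMajorVersion": 3, "ContextClientMinorVersion": 1
# yields get_num_frames() == 110 (120 - 11 + 1) and get_gles_version() == (3, 1).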

def set_gles_version(json_data, version):
    metadata = json_data['TraceMetadata']
    metadata['ContextClientMajorVersion'] = version[0]
    metadata['ContextClientMinorVersion'] = version[1]


def save_trace_json(trace, data):
    json_file_name = get_trace_json_path(trace)
    return write_json(json_file_name, data)


def path_contains_header(path):
    if not os.path.isdir(path):
        return False
    for file in os.listdir(path):
        if fnmatch.fnmatch(file, '*.h'):
            return True
    return False


def chmod_directory(directory, perm):
    assert os.path.isdir(directory)
    for file in os.listdir(directory):
        fn = os.path.join(directory, file)
        os.chmod(fn, perm)


def ensure_rmdir(directory):
    if os.path.isdir(directory):
        chmod_directory(directory, stat.S_IWRITE)
        shutil.rmtree(directory)


def copy_trace_folder(old_path, new_path):
    logging.info('%s -> %s' % (old_path, new_path))
    ensure_rmdir(new_path)
    shutil.copytree(old_path, new_path)


def touch_trace_folder(trace_path):
    for file in os.listdir(trace_path):
        (Path(trace_path) / file).touch()


def backup_single_trace(trace, backup_path):
    trace_path = src_trace_path(trace)
    trace_backup_path = os.path.join(backup_path, trace)
    copy_trace_folder(trace_path, trace_backup_path)


def backup_traces(args, traces):
    for trace in angle_test_util.FilterTests(traces, args.traces):
        backup_single_trace(trace, args.out_path)


def restore_single_trace(trace, backup_path):
    trace_path = src_trace_path(trace)
    trace_backup_path = os.path.join(backup_path, trace)
    if not os.path.isdir(trace_backup_path):
        logging.error('Trace folder not found at %s' % trace_backup_path)
        return False
    else:
        copy_trace_folder(trace_backup_path, trace_path)
        touch_trace_folder(trace_path)
        return True


def restore_traces(args, traces):
    for trace in angle_test_util.FilterTests(traces, args.traces):
        restore_single_trace(trace, args.out_path)


def run_autoninja(args):
    autoninja_binary = 'autoninja'
    if os.name == 'nt':
        autoninja_binary += '.bat'

    autoninja_args = [autoninja_binary, '-C', args.gn_path, args.test_suite]
    logging.debug('Calling %s' % ' '.join(autoninja_args))
    if args.show_test_stdout:
        subprocess.run(autoninja_args, check=True)
    else:
        subprocess.check_output(autoninja_args)


def run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env):
    run_args = [
        angle_test_util.ExecutablePathInCurrentDir(trace_binary),
        '--gtest_filter=TraceTest.%s' % trace,
        '--max-steps-performed',
        str(max_steps),
    ] + additional_args
    if not args.no_swiftshader:
        run_args += ['--use-angle=swiftshader']

    env = {**os.environ.copy(), **additional_env}
    env_string = ' '.join(['%s=%s' % item for item in additional_env.items()])
    if env_string:
        env_string += ' '

    logging.info('%s%s' % (env_string, ' '.join(run_args)))
    p = subprocess.run(run_args, env=env, capture_output=True, check=True)
    if args.show_test_stdout:
        logging.info('Test stdout:\n%s' % p.stdout.decode())

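# As a rough sketch, for a trace named "trace_name" with max_steps of 100, no extra
# arguments, and SwiftShader enabled, run_test_suite() executes something like:
#   <trace_binary> --gtest_filter=TraceTest.trace_name --max-steps-performed 100 --use-angle=swiftshader
# where <trace_binary> and "trace_name" are placeholders resolved at runtime from
# args.test_suite and the trace list.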

def upgrade_single_trace(args, trace_binary, trace, out_path, no_overwrite, c_sources):
    logging.debug('Tracing %s' % trace)

    trace_path = os.path.abspath(os.path.join(out_path, trace))
    if no_overwrite and path_contains_header(trace_path):
        logging.info('Skipping "%s" because the out folder already exists' % trace)
        return

    json_data = load_trace_json(trace)
    num_frames = get_num_frames(json_data)

    metadata = json_data['TraceMetadata']
    logging.debug('Read metadata: %s' % str(metadata))

    max_steps = min(args.limit, num_frames) if args.limit else num_frames

    # We start tracing from frame 2. --retrace-mode issues a Swap() after Setup() so we can
    # accurately re-trace the MEC.
    additional_env = {
        'ANGLE_CAPTURE_LABEL': trace,
        'ANGLE_CAPTURE_OUT_DIR': trace_path,
        'ANGLE_CAPTURE_FRAME_START': '2',
        'ANGLE_CAPTURE_FRAME_END': str(max_steps + 1),
    }
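    # For example (hypothetical numbers), a trace retraced with max_steps of 100 is
    # captured over frames 2 through 101 under the overrides above.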
    if args.validation:
        additional_env['ANGLE_CAPTURE_VALIDATION'] = '1'
        # Also turn on shader output init to ensure we have no undefined values.
        # This feature is also enabled in replay when using --validation.
        additional_env[
            'ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'allocateNonZeroMemory:forceInitShaderVariables'
    if args.validation_expr:
        additional_env['ANGLE_CAPTURE_VALIDATION_EXPR'] = args.validation_expr
    # TODO: Remove when default. http://anglebug.com/7753
    if c_sources:
        additional_env['ANGLE_CAPTURE_SOURCE_EXT'] = 'c'

    additional_args = ['--retrace-mode']

    try:
        if not os.path.isdir(trace_path):
            os.makedirs(trace_path)

        run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env)

        json_file = "{}/{}.json".format(trace_path, trace)
        if not os.path.exists(json_file):
            logging.error(
                f'There was a problem tracing "{trace}", could not find json file: {json_file}')
            return False

        # Copy over the list obtained by get_min_reqs if present
        if 'RequiredExtensions' in json_data:
            new_data = read_json(json_file)
            new_data['RequiredExtensions'] = json_data['RequiredExtensions']
            write_json(json_file, new_data)

    except subprocess.CalledProcessError as e:
        logging.exception('There was an exception running "%s":\n%s' % (trace, e.output.decode()))
        return False

    return True


def upgrade_traces(args, traces):
    run_autoninja(args)
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    failures = []

    for trace in angle_test_util.FilterTests(traces, args.traces):
        if not upgrade_single_trace(args, trace_binary, trace, args.out_path, args.no_overwrite,
                                    args.c_sources):
            failures += [trace]

    if failures:
        print('The following traces failed to upgrade:\n')
        print('\n'.join(['  ' + trace for trace in failures]))
        return EXIT_FAILURE

    return EXIT_SUCCESS


def validate_single_trace(args, trace_binary, trace, additional_args, additional_env):
    json_data = load_trace_json(trace)
    num_frames = get_num_frames(json_data)
    max_steps = min(args.limit, num_frames) if args.limit else num_frames
    try:
        run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env)
    except subprocess.CalledProcessError as e:
        logging.error('There was a failure running "%s":\n%s' % (trace, e.output.decode()))
        return False
    return True


def validate_traces(args, traces):
    restore_traces(args, traces)
    run_autoninja(args)

    additional_args = ['--validation']
    additional_env = {
        'ANGLE_FEATURE_OVERRIDES_ENABLED': 'allocateNonZeroMemory:forceInitShaderVariables'
    }

    failures = []
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    for trace in angle_test_util.FilterTests(traces, args.traces):
        if not validate_single_trace(args, trace_binary, trace, additional_args, additional_env):
            failures += [trace]

    if failures:
        print('The following traces failed to validate:\n')
        print('\n'.join(['  ' + trace for trace in failures]))
        return EXIT_FAILURE

    return EXIT_SUCCESS


def interpret_traces(args, traces):
    test_name = 'angle_trace_interpreter_tests'
    results = {
        'tests': {
            test_name: {}
        },
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }
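    # When --test-output is given, the JSON written at the end of this function looks
    # roughly like this for a single passing trace (names illustrative):
    #   {"tests": {"angle_trace_interpreter_tests": {"trace_name": {"expected": "PASS", "actual": "PASS"}}},
    #    "num_failures_by_type": {"FAIL": 0, "PASS": 1, "SKIP": 0}, ...}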

    if args.path:
        trace_binary = os.path.join(args.path, args.test_suite)
    else:
        trace_binary = args.test_suite

    for trace in angle_test_util.FilterTests(traces, args.traces):
        with tempfile.TemporaryDirectory() as backup_path:
            backup_single_trace(trace, backup_path)
            result = FAIL
            try:
                with tempfile.TemporaryDirectory() as out_path:
                    logging.debug('Using temporary path %s.' % out_path)
                    if upgrade_single_trace(args, trace_binary, trace, out_path, False, True):
                        if restore_single_trace(trace, out_path):
                            validate_args = ['--trace-interpreter=c']
                            if args.verbose:
                                validate_args += ['--verbose-logging']
                            if validate_single_trace(args, trace_binary, trace, validate_args, {}):
                                logging.info('%s passed!' % trace)
                                result = PASS
            finally:
                restore_single_trace(trace, backup_path)
            results['num_failures_by_type'][result] += 1
            results['tests'][test_name][trace] = {'expected': PASS, 'actual': result}

    if results['num_failures_by_type'][FAIL]:
        logging.error('Some tests failed.')
        return EXIT_FAILURE

    if results['num_failures_by_type'][PASS] == 0:
        logging.error('No tests ran. Please check your command line arguments.')
        return EXIT_FAILURE

    if args.test_output:
        with open(args.test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    return EXIT_SUCCESS


def add_upgrade_args(parser):
    parser.add_argument(
        '--validation', help='Enable state serialization validation calls.', action='store_true')
    parser.add_argument(
        '--validation-expr',
        help='Validation expression, used to add more validation checkpoints.')
    parser.add_argument(
        '-L',
        '--limit',
        '--frame-limit',
        type=int,
        help='Limits the number of captured frames to produce a shorter trace than the original.')


def get_min_reqs(args, traces):
    run_autoninja(args)

    env = {}
    default_args = ["--no-warmup"]

    skipped_traces = []
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    for trace in angle_test_util.FilterTests(traces, args.traces):
        print(f"Finding requirements for {trace}")
        extensions = []
        json_data = load_trace_json(trace)
        max_steps = get_num_frames(json_data)

        # exts: a list of extensions to use with run_test_suite. If empty,
        #       then run_test_suite runs with all extensions enabled by default.
        def run_test_suite_with_exts(exts):
            additional_args = default_args.copy()
            additional_args += ['--request-extensions', ' '.join(exts)]

            try:
                run_test_suite(args, trace_binary, trace, max_steps, additional_args, env)
            except subprocess.CalledProcessError as error:
                return False
            return True

        original_gles_version = get_gles_version(json_data)
        original_extensions = None if 'RequiredExtensions' not in json_data else json_data[
            'RequiredExtensions']

        def restore_trace():
            if original_extensions is not None:
                json_data['RequiredExtensions'] = original_extensions
            set_gles_version(json_data, original_gles_version)
            save_trace_json(trace, json_data)

        try:
            # Use the highest GLES version we have and empty the required
            # extensions so that previous data doesn't affect the current
            # run.
            json_data['RequiredExtensions'] = []
            save_trace_json(trace, json_data)
            if not run_test_suite_with_exts([]):
                skipped_traces.append(
                    (trace, "Fails to run in default configuration on this machine"))
                restore_trace()
                continue

            # Find minimum GLES version.
            gles_versions = [(1, 0), (1, 1), (2, 0), (3, 0), (3, 1), (3, 2)]
            min_version = None
            for idx in range(len(gles_versions)):
                min_version = gles_versions[idx]
                set_gles_version(json_data, min_version)
                save_trace_json(trace, json_data)
                try:
                    run_test_suite(args, trace_binary, trace, max_steps, default_args, env)
                except subprocess.CalledProcessError as error:
                    continue
                break

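            # For instance, a trace that only replays correctly under GLES 2.0 and
            # newer fails the (1, 0) and (1, 1) attempts above and leaves
            # min_version == (2, 0). (Illustrative; the result depends on the trace.)
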
            # Get the list of requestable extensions for the GLES version.
            try:
                # Get the list of requestable extensions
                with tempfile.NamedTemporaryFile() as tmp:
                    # Some operating systems will not allow a file to be open for writing
                    # by multiple processes. So close the temp file we just made before
                    # running the test suite.
                    tmp.close()
                    additional_args = ["--print-extensions-to-file", tmp.name]
                    run_test_suite(args, trace_binary, trace, max_steps, additional_args, env)
                    with open(tmp.name) as f:
                        for line in f:
                            extensions.append(line.strip())
            except Exception:
                skipped_traces.append(
                    (trace, "Failed to read extension list, likely that test is skipped"))
                restore_trace()
                continue

            if len(extensions) > 0 and not run_test_suite_with_exts(extensions):
                skipped_traces.append((trace, "Requesting all extensions results in test failure"))
                restore_trace()
                continue

            # Reset RequiredExtensions so it doesn't interfere with our search
            json_data['RequiredExtensions'] = []
            save_trace_json(trace, json_data)

            # Use a divide and conquer strategy to find the required extensions.
            # Max depth is log(N) where N is the number of extensions. Expected
            # runtime is p*log(N), where p is the number of required extensions.
            # others: A list that contains one or more required extensions,
            #         but is not actively being searched
            # exts: The list of extensions actively being searched
            def recurse_run(others, exts, depth=0):
                if len(exts) <= 1:
                    return exts
                middle = int(len(exts) / 2)
                left_partition = exts[:middle]
                right_partition = exts[middle:]
                left_passed = run_test_suite_with_exts(others + left_partition)

                if depth > 0 and left_passed:
                    # We know right_passed must be False because one stack up
                    # run_test_suite(exts) returned False.
                    return recurse_run(others, left_partition)

                right_passed = run_test_suite_with_exts(others + right_partition)
                if left_passed and right_passed:
                    # Neither left nor right contain necessary extensions
                    return []
                elif left_passed:
                    # Only left contains necessary extensions
                    return recurse_run(others, left_partition, depth + 1)
                elif right_passed:
                    # Only right contains necessary extensions
                    return recurse_run(others, right_partition, depth + 1)
                else:
                    # Both left and right contain necessary extensions
                    left_reqs = recurse_run(others + right_partition, left_partition, depth + 1)
                    right_reqs = recurse_run(others + left_reqs, right_partition, depth + 1)
                    return left_reqs + right_reqs

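            # Worked example with hypothetical extensions [A, B, C, D] where only C is
            # required: run_test_suite_with_exts([A, B]) fails and [C, D] passes, so the
            # search descends into [C, D]; there [C] alone passes at depth > 0 and the
            # recursion returns [C] without ever testing [D] by itself.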
            recurse_reqs = recurse_run([], extensions, 0)

            json_data['RequiredExtensions'] = recurse_reqs
            save_trace_json(trace, json_data)
        except BaseException as e:
            restore_trace()
            raise e

    if skipped_traces:
        print("Finished get_min_reqs, skipped traces:")
        for trace, reason in skipped_traces:
            print(f"\t{trace}: {reason}")
    else:
        print("Finished get_min_reqs for all traces specified")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--log', help='Logging level.', default=DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '--test-suite',
        help='Test Suite. Default is %s' % DEFAULT_TEST_SUITE,
        default=DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--no-swiftshader',
        help='Trace against native Vulkan.',
        action='store_true',
        default=False)
    parser.add_argument(
        '--test-output', '--isolated-script-test-output', help='Where to write results JSON.')

    subparsers = parser.add_subparsers(dest='command', required=True, help='Command to run.')

    backup_parser = subparsers.add_parser(
        'backup', help='Copies trace contents into a saved folder.')
    backup_parser.add_argument(
        'traces', help='Traces to back up. Supports fnmatch expressions.', default='*')
    backup_parser.add_argument(
        '-o',
        '--out-path',
        '--backup-path',
        help='Destination folder. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
        default=DEFAULT_BACKUP_FOLDER)

    restore_parser = subparsers.add_parser(
        'restore', help='Copies traces from a saved folder to the trace folder.')
    restore_parser.add_argument(
        '-o',
        '--out-path',
        '--backup-path',
        help='Path where the traces were saved. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
        default=DEFAULT_BACKUP_FOLDER)
    restore_parser.add_argument(
        'traces', help='Traces to restore. Supports fnmatch expressions.', default='*')

    upgrade_parser = subparsers.add_parser(
        'upgrade', help='Re-trace existing traces, upgrading the format.')
    upgrade_parser.add_argument('gn_path', help='GN build path')
    upgrade_parser.add_argument('out_path', help='Output directory')
    upgrade_parser.add_argument(
        '-f', '--traces', '--filter', help='Trace filter. Defaults to all.', default='*')
    upgrade_parser.add_argument(
        '-n',
        '--no-overwrite',
        help='Skip traces which already exist in the out directory.',
        action='store_true')
    upgrade_parser.add_argument(
        '-c', '--c-sources', help='Output to c sources instead of cpp.', action='store_true')
    add_upgrade_args(upgrade_parser)
    upgrade_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    validate_parser = subparsers.add_parser(
        'validate', help='Runs the updated test suite with validation enabled.')
    validate_parser.add_argument('gn_path', help='GN build path')
    validate_parser.add_argument('out_path', help='Path to the upgraded trace folder.')
    validate_parser.add_argument(
        'traces', help='Traces to validate. Supports fnmatch expressions.', default='*')
    validate_parser.add_argument(
        '-L', '--limit', '--frame-limit', type=int, help='Limits the number of tested frames.')
    validate_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    interpret_parser = subparsers.add_parser(
        'interpret', help='Complete trace interpreter self-test.')
    interpret_parser.add_argument(
        '-p', '--path', help='Path to trace executable. Default: look in CWD.')
    interpret_parser.add_argument(
        'traces', help='Traces to test. Supports fnmatch expressions.', default='*')
    add_upgrade_args(interpret_parser)
    interpret_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)
    interpret_parser.add_argument(
        '-v',
        '--verbose',
        help='Verbose logging in the trace tests.',
        action='store_true',
        default=False)

    get_min_reqs_parser = subparsers.add_parser(
        'get_min_reqs',
        help='Finds the minimum required extensions for a trace to successfully run.')
    get_min_reqs_parser.add_argument('gn_path', help='GN build path')
    get_min_reqs_parser.add_argument(
        '--traces',
        help='Traces to get minimum requirements for. Supports fnmatch expressions.',
        default='*')
    get_min_reqs_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    args, extra_flags = parser.parse_known_args()

    logging.basicConfig(level=args.log.upper())

    # Load trace names
    with open(os.path.join(get_script_dir(), DEFAULT_TEST_JSON)) as f:
        traces = json.loads(f.read())

    traces = [trace.split(' ')[0] for trace in traces['traces']]

    try:
        if args.command == 'backup':
            return backup_traces(args, traces)
        elif args.command == 'restore':
            return restore_traces(args, traces)
        elif args.command == 'upgrade':
            return upgrade_traces(args, traces)
        elif args.command == 'validate':
            return validate_traces(args, traces)
        elif args.command == 'interpret':
            return interpret_traces(args, traces)
        elif args.command == 'get_min_reqs':
            return get_min_reqs(args, traces)
        else:
            logging.fatal('Unknown command: %s' % args.command)
            return EXIT_FAILURE
    except subprocess.CalledProcessError as e:
        if args.show_test_stdout:
            logging.exception('There was an exception running "%s"' % traces)
        else:
            logging.exception('There was an exception running "%s": %s' %
                              (traces, e.output.decode()))

        return EXIT_FAILURE


if __name__ == '__main__':
    sys.exit(main())