#! /usr/bin/env vpython3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Script testing capture_replay with angle_end2end_tests
"""

# Automation script will:
# 1. Build all tests in angle_end2end with frame capture enabled
# 2. Run each test with frame capture
# 3. Build CaptureReplayTest with cpp trace files
# 4. Run CaptureReplayTest
# 5. Output the number of test successes and failures. A test succeeds if no error occurs during
# its capture and replay, and the GL states at the end of the two runs match. Any unexpected
# failure will return a non-zero exit code.

# Run this script with Python to test capture replay on angle_end2end tests
# python path/to/capture_replay_tests.py
# Command line arguments: run with --help for a full list.

import argparse
import difflib
import distutils.util
import fnmatch
import json
import logging
import math
import multiprocessing
import os
import psutil
import queue
import re
import shutil
import subprocess
import sys
import time
import traceback

PIPE_STDOUT = True
DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
DEFAULT_TEST_SUITE = "angle_end2end_tests"
REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
DEFAULT_BATCH_COUNT = 8  # number of tests batched together
TRACE_FILE_SUFFIX = "_context"  # because we only deal with 1 context right now
RESULT_TAG = "*RESULT"
STATUS_MESSAGE_PERIOD = 20  # in seconds
SUBPROCESS_TIMEOUT = 600  # in seconds
DEFAULT_RESULT_FILE = "results.txt"
DEFAULT_LOG_LEVEL = "info"
DEFAULT_MAX_JOBS = 8
DEFAULT_MAX_NINJA_JOBS = 3
REPLAY_BINARY = "capture_replay_tests"
if sys.platform == "win32":
    REPLAY_BINARY += ".exe"
TRACE_FOLDER = "traces"

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

switch_case_without_return_template = """\
        case {case}:
            {namespace}::{call}({params});
            break;
"""

switch_case_with_return_template = """\
        case {case}:
            return {namespace}::{call}({params});
"""

default_case_without_return_template = """\
        default:
            break;"""
default_case_with_return_template = """\
        default:
            return {default_val};"""


def winext(name, ext):
    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name


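# Goma is treated as available when its compiler proxy process is running on this machine.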
def AutodetectGoma():
    return winext('compiler_proxy', 'exe') in (p.name() for p in psutil.process_iter())


class SubProcess():

    def __init__(self, command, logger, env=os.environ, pipe_stdout=PIPE_STDOUT):
        # shell=False so that only 1 subprocess is spawned.
        # if shell=True, a shell process is spawned, which in turn spawns the process running
        # the command. Since we do not have a handle to the 2nd process, we cannot terminate it.
        if pipe_stdout:
            self.proc_handle = subprocess.Popen(
                command, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        else:
            self.proc_handle = subprocess.Popen(command, env=env, shell=False)
        self._logger = logger

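    # Wait for the subprocess to exit and return (returncode, decoded stdout). Raises
    # subprocess.TimeoutExpired if the timeout elapses first.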
    def Join(self, timeout):
        self._logger.debug('Joining with subprocess %d, timeout %s' % (self.Pid(), str(timeout)))
        output = self.proc_handle.communicate(timeout=timeout)[0]
        if output:
            output = output.decode('utf-8')
        else:
            output = ''
        return self.proc_handle.returncode, output

    def Pid(self):
        return self.proc_handle.pid

    def Kill(self):
        self.proc_handle.terminate()
        self.proc_handle.wait()


# Class that manages all child processes of a process. Any process that spawns subprocesses
# should have one. This object is created inside the main process and inside each worker process.
class ChildProcessesManager():

    @classmethod
    def _GetGnAndNinjaAbsolutePaths(cls):
        path = os.path.join('third_party', 'depot_tools')
        return os.path.join(path, winext('gn', 'bat')), os.path.join(path, winext('ninja', 'exe'))

    def __init__(self, args, logger, ninja_lock):
        # a dictionary of SubProcess objects, keyed by pid
        self.subprocesses = {}
        # list of Python multiprocessing.Process handles
        self.workers = []

        self._gn_path, self._ninja_path = self._GetGnAndNinjaAbsolutePaths()
        self._use_goma = AutodetectGoma()
        self._logger = logger
        self._ninja_lock = ninja_lock
        self.runtimes = {}
        self._args = args

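    # Runs a command to completion and returns (returncode, output). The return code is
    # normalized: -1 for a non-zero exit status or unexpected error, -2 for a timeout.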
    def RunSubprocess(self, command, env=None, pipe_stdout=True, timeout=None):
        proc = SubProcess(command, self._logger, env, pipe_stdout)
        self._logger.debug('Created subprocess: %s with pid %d' % (' '.join(command), proc.Pid()))
        self.subprocesses[proc.Pid()] = proc
        start_time = time.time()
        try:
            returncode, output = self.subprocesses[proc.Pid()].Join(timeout)
            elapsed_time = time.time() - start_time
            cmd_name = os.path.basename(command[0])
            self.runtimes.setdefault(cmd_name, 0.0)
            self.runtimes[cmd_name] += elapsed_time
            self.RemoveSubprocess(proc.Pid())
            if returncode != 0:
                return -1, output
            return returncode, output
        except KeyboardInterrupt:
            raise
        except subprocess.TimeoutExpired as e:
            self.RemoveSubprocess(proc.Pid())
            return -2, str(e)
        except Exception as e:
            self.RemoveSubprocess(proc.Pid())
            return -1, str(e)

    def RemoveSubprocess(self, subprocess_id):
        assert subprocess_id in self.subprocesses
        self.subprocesses[subprocess_id].Kill()
        del self.subprocesses[subprocess_id]

    def AddWorker(self, worker):
        self.workers.append(worker)

    def KillAll(self):
        for subprocess_id in self.subprocesses:
            self.subprocesses[subprocess_id].Kill()
        for worker in self.workers:
            worker.terminate()
            worker.join()
            worker.close()  # to release file descriptors immediately
        self.subprocesses = {}
        self.workers = []

    def JoinWorkers(self):
        for worker in self.workers:
            worker.join()
            worker.close()
        self.workers = []

    def IsAnyWorkerAlive(self):
        return any([worker.is_alive() for worker in self.workers])

    def GetRemainingWorkers(self):
        count = 0
        for worker in self.workers:
            if worker.is_alive():
                count += 1
        return count

    def RunGNGen(self, build_dir, pipe_stdout, extra_gn_args=[]):
        gn_args = [('angle_with_capture_by_default', 'true')] + extra_gn_args
        if self._use_goma:
            gn_args.append(('use_goma', 'true'))
            if self._args.goma_dir:
                gn_args.append(('goma_dir', '"%s"' % self._args.goma_dir))
        if not self._args.debug:
            gn_args.append(('is_debug', 'false'))
            gn_args.append(('symbol_level', '1'))
            gn_args.append(('angle_assert_always_on', 'true'))
        if self._args.asan:
            gn_args.append(('is_asan', 'true'))
        args_str = ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])
        cmd = [self._gn_path, 'gen', '--args=%s' % args_str, build_dir]
        self._logger.info(' '.join(cmd))
        return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)

    def RunNinja(self, build_dir, target, pipe_stdout):
        cmd = [self._ninja_path]

        # This code is taken from depot_tools/autoninja.py
        if self._use_goma:
            num_cores = multiprocessing.cpu_count()
            cmd.append('-j')
            core_multiplier = 40
            j_value = num_cores * core_multiplier

            if sys.platform.startswith('win'):
                # On Windows, a j value higher than 1000 does not improve build performance.
                j_value = min(j_value, 1000)
            elif sys.platform == 'darwin':
                # On Mac, a j value higher than 500 causes 'Too many open files' errors
                # (crbug.com/936864).
                j_value = min(j_value, 500)

            cmd.append('%d' % j_value)
        else:
            cmd.append('-l')
            cmd.append('%d' % os.cpu_count())

        cmd += ['-C', build_dir, target]
        with self._ninja_lock:
            self._logger.info(' '.join(cmd))
            return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)


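# Asks the test binary for the list of test names matching the given gtest filter.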
def GetTestsListForFilter(args, test_path, filter, logger):
    cmd = GetRunCommand(args, test_path) + ["--list-tests", "--gtest_filter=%s" % filter]
    logger.info('Getting test list from "%s"' % " ".join(cmd))
    return subprocess.check_output(cmd, text=True)


def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests,
                               logger):
    output_lines = output.splitlines()
    tests = []
    seen_start_of_tests = False
    disabled = 0
    for line in output_lines:
        l = line.strip()
        if l == 'Tests list:':
            seen_start_of_tests = True
        elif l == 'End tests list.':
            break
        elif not seen_start_of_tests:
            pass
        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
            tests.append(l)
        else:
            disabled += 1

    logger.info('Found %s tests and %d disabled tests.' % (len(tests), disabled))
    return tests


def GetRunCommand(args, command):
    if args.xvfb:
        return ['vpython', 'testing/xvfb.py', command]
    else:
        return [command]


class GroupedResult():
    Passed = "Pass"
    Failed = "Fail"
    TimedOut = "Timeout"
    Crashed = "Crashed"
    CompileFailed = "CompileFailed"
    Skipped = "Skipped"

    ResultTypes = [Passed, Failed, TimedOut, Crashed, CompileFailed, Skipped]

    def __init__(self, resultcode, message, output, tests):
        self.resultcode = resultcode
        self.message = message
        self.output = output
        self.tests = []
        for test in tests:
            self.tests.append(test)


class TestBatchResult():

    display_output_lines = 20

    def __init__(self, grouped_results, verbose):
        self.results = {}
        for result_type in GroupedResult.ResultTypes:
            self.results[result_type] = []

        for grouped_result in grouped_results:
            for test in grouped_result.tests:
                self.results[grouped_result.resultcode].append(test.full_test_name)

        self.repr_str = ""
        self.GenerateRepresentationString(grouped_results, verbose)

    def __str__(self):
        return self.repr_str

    def GenerateRepresentationString(self, grouped_results, verbose):
        for grouped_result in grouped_results:
            self.repr_str += grouped_result.resultcode + ": " + grouped_result.message + "\n"
            for test in grouped_result.tests:
                self.repr_str += "\t" + test.full_test_name + "\n"
            if verbose:
                self.repr_str += grouped_result.output
            else:
                if grouped_result.resultcode == GroupedResult.CompileFailed:
                    self.repr_str += TestBatchResult.ExtractErrors(grouped_result.output)
                elif grouped_result.resultcode != GroupedResult.Passed:
                    self.repr_str += TestBatchResult.GetAbbreviatedOutput(grouped_result.output)

    @staticmethod
    def ExtractErrors(output):
        lines = output.splitlines()
        error_lines = []
        for i in range(len(lines)):
            if ": error:" in lines[i]:
                error_lines.append(lines[i] + "\n")
                if i + 1 < len(lines):
                    error_lines.append(lines[i + 1] + "\n")
        return "".join(error_lines)

    @staticmethod
    def GetAbbreviatedOutput(output):
        # Get all lines after and including the last occurrence of "Run".
        lines = output.splitlines()
        line_count = 0
        for line_index in reversed(range(len(lines))):
            line_count += 1
            if "[ RUN      ]" in lines[line_index]:
                break

        return '\n' + '\n'.join(lines[-line_count:]) + '\n'


class Test():

    def __init__(self, test_name):
        self.full_test_name = test_name
        self.params = test_name.split('/')[1]
        self.context_id = 0
        self.test_index = -1  # index of test within a test batch
        self._label = self.full_test_name.replace(".", "_").replace("/", "_")

    def __str__(self):
        return self.full_test_name + " Params: " + self.params

    def GetLabel(self):
        return self._label

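    # A test can be replayed only if capture produced at least one frame file, a context
    # header, a context source file, and exactly one JSON file. Also records the context id
    # parsed from the trace header file name.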
    def CanRunReplay(self, trace_folder_path):
        test_files = []
        label = self.GetLabel()
        assert (self.context_id == 0)
        for f in os.listdir(trace_folder_path):
            if os.path.isfile(os.path.join(trace_folder_path, f)) and f.startswith(label):
                test_files.append(f)
        frame_files_count = 0
        context_header_count = 0
        context_source_count = 0
        source_json_count = 0
        context_id = 0
        for f in test_files:
            if "_001.cpp" in f:
                frame_files_count += 1
            elif f.endswith(".json"):
                source_json_count += 1
            elif f.endswith(".h"):
                context_header_count += 1
                if TRACE_FILE_SUFFIX in f:
                    context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
                    context_id = int(context)
            elif f.endswith(".cpp"):
                context_source_count += 1
        can_run_replay = frame_files_count >= 1 and context_header_count >= 1 \
            and context_source_count >= 1 and source_json_count == 1
        if not can_run_replay:
            return False
        self.context_id = context_id
        return True


def _FormatEnv(env):
    return ' '.join(['%s=%s' % (k, v) for (k, v) in env.items()])


class TestBatch():

    CAPTURE_FRAME_END = 100

    def __init__(self, args, logger):
        self.args = args
        self.tests = []
        self.results = []
        self.logger = logger

    def SetWorkerId(self, worker_id):
        self.trace_dir = "%s%d" % (TRACE_FOLDER, worker_id)
        self.trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, self.trace_dir)

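    # Runs the whole batch under the capture build with ANGLE_CAPTURE_* environment variables
    # set so that each test writes its trace files into this worker's trace folder.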
    def RunWithCapture(self, args, child_processes_manager):
        test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)

        extra_env = {
            'ANGLE_CAPTURE_FRAME_END': '{}'.format(self.CAPTURE_FRAME_END),
            'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
            'ANGLE_CAPTURE_ENABLED': '1',
            'ANGLE_CAPTURE_OUT_DIR': self.trace_folder_path,
        }

        if args.expose_nonconformant_features:
            extra_env[
                'ANGLE_FEATURE_OVERRIDES_ENABLED'] += ':exposeNonConformantExtensionsAndVersions'

        env = {**os.environ.copy(), **extra_env}

        if not self.args.keep_temp_files:
            ClearFolderContent(self.trace_folder_path)
        filt = ':'.join([test.full_test_name for test in self.tests])

        cmd = GetRunCommand(args, test_exe_path)
        cmd += ['--gtest_filter=%s' % filt, '--angle-per-test-capture-label']
        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(cmd)))

        returncode, output = child_processes_manager.RunSubprocess(
            cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if args.show_capture_stdout:
            self.logger.info("Capture stdout: %s" % output)
        if returncode == -1:
            self.results.append(GroupedResult(GroupedResult.Crashed, "", output, self.tests))
            return False
        elif returncode == -2:
            self.results.append(GroupedResult(GroupedResult.TimedOut, "", "", self.tests))
            return False
        return True

    def RemoveTestsThatDoNotProduceAppropriateTraceFiles(self):
        continued_tests = []
        skipped_tests = []
        for test in self.tests:
            if not test.CanRunReplay(self.trace_folder_path):
                skipped_tests.append(test)
            else:
                continued_tests.append(test)
        if len(skipped_tests) > 0:
            self.results.append(
                GroupedResult(
                    GroupedResult.Skipped,
                    "Skipping replay since capture didn't produce necessary trace files", "",
                    skipped_tests))
        return continued_tests

    def BuildReplay(self, replay_build_dir, composite_file_id, tests, child_processes_manager):
        # write a json file that lists the trace labels for this batch
        self.CreateTestNamesFile(composite_file_id, tests)

        gn_args = [('angle_build_capture_replay_tests', 'true'),
                   ('angle_capture_replay_test_trace_dir', '"%s"' % self.trace_dir),
                   ('angle_capture_replay_composite_file_id', str(composite_file_id))]
        returncode, output = child_processes_manager.RunGNGen(replay_build_dir, True, gn_args)
        if returncode != 0:
            self.logger.warning('GN failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at gn generation",
                              output, tests))
            return False
        returncode, output = child_processes_manager.RunNinja(replay_build_dir, REPLAY_BINARY,
                                                              True)
        if returncode != 0:
            self.logger.warning('Ninja failure output: %s' % output)
            self.results.append(
                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at ninja", output,
                              tests))
            return False
        return True

    def RunReplay(self, replay_build_dir, replay_exe_path, child_processes_manager, tests,
                  expose_nonconformant_features):
        extra_env = {
            'ANGLE_CAPTURE_ENABLED': '0',
            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'enable_capture_limits',
        }

        if expose_nonconformant_features:
            extra_env[
                'ANGLE_FEATURE_OVERRIDES_ENABLED'] += ':exposeNonConformantExtensionsAndVersions'

        env = {**os.environ.copy(), **extra_env}

        run_cmd = GetRunCommand(self.args, replay_exe_path)
        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(run_cmd)))

        returncode, output = child_processes_manager.RunSubprocess(
            run_cmd, env, timeout=SUBPROCESS_TIMEOUT)
        if returncode == -1:
            cmd = replay_exe_path
            self.results.append(
                GroupedResult(GroupedResult.Crashed, "Replay run crashed (%s)" % cmd, output,
                              tests))
            return
        elif returncode == -2:
            self.results.append(
                GroupedResult(GroupedResult.TimedOut, "Replay run timed out", output, tests))
            return

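        # The replay binary prints one line per test of the form "*RESULT <label> <code>",
        # where a code of 0 means the captured and replayed context states matched.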
        output_lines = output.splitlines()
        passes = []
        fails = []
        count = 0
        for output_line in output_lines:
            words = output_line.split(" ")
            if len(words) == 3 and words[0] == RESULT_TAG:
                if int(words[2]) == 0:
                    passes.append(self.FindTestByLabel(words[1]))
                else:
                    fails.append(self.FindTestByLabel(words[1]))
                    self.logger.info("Context comparison failed: {}".format(
                        self.FindTestByLabel(words[1])))
                    self.PrintContextDiff(replay_build_dir, words[1])

                count += 1
        if len(passes) > 0:
            self.results.append(GroupedResult(GroupedResult.Passed, "", output, passes))
        if len(fails) > 0:
            self.results.append(GroupedResult(GroupedResult.Failed, "", output, fails))

    def PrintContextDiff(self, replay_build_dir, test_name):
        frame = 1
        while True:
            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
                                                                 frame)
            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
            if os.path.exists(capture_file) and os.path.exists(replay_file):
                with open(capture_file, "r") as f:
                    captured_context = f.readlines()
                with open(replay_file, "r") as f:
                    replayed_context = f.readlines()
                for line in difflib.unified_diff(
                        captured_context, replayed_context, fromfile=capture_file,
                        tofile=replay_file):
                    print(line, end="")
            else:
                if frame > self.CAPTURE_FRAME_END:
                    break
            frame = frame + 1

    def FindTestByLabel(self, label):
        for test in self.tests:
            if test.GetLabel() == label:
                return test
        return None

    def AddTest(self, test):
        assert len(self.tests) <= self.args.batch_count
        test.test_index = len(self.tests)
        self.tests.append(test)

    def CreateTestNamesFile(self, composite_file_id, tests):
        data = {'traces': [test.GetLabel() for test in tests]}
        names_path = os.path.join(self.trace_folder_path, 'test_names_%d.json' % composite_file_id)
        with open(names_path, 'w') as f:
            f.write(json.dumps(data))

    def __str__(self):
        repr_str = "TestBatch:\n"
        for test in self.tests:
            repr_str += ("\t" + str(test) + "\n")
        return repr_str

    def __getitem__(self, index):
        assert index < len(self.tests)
        return self.tests[index]

    def __iter__(self):
        return iter(self.tests)

    def GetResults(self):
        return TestBatchResult(self.results, self.args.verbose)


class TestExpectation():
    # tests that must not be run during capture
    skipped_for_capture_tests = {}
    skipped_for_capture_tests_re = {}

    # test expectations for tests that do not pass
    non_pass_results = {}

    # tests that must run in a one-test batch
    run_single = {}
    run_single_re = {}

    flaky_tests = []

    non_pass_re = {}

    # yapf: disable
    # we want each pair on one line
    result_map = { "FAIL" : GroupedResult.Failed,
                   "TIMEOUT" : GroupedResult.TimedOut,
                   "CRASH" : GroupedResult.Crashed,
                   "COMPILE_FAIL" : GroupedResult.CompileFailed,
                   "NOT_RUN" : GroupedResult.Skipped,
                   "SKIP_FOR_CAPTURE" : GroupedResult.Skipped,
                   "PASS" : GroupedResult.Passed}
    # yapf: enable

    def __init__(self, args):
        expected_results_filename = "capture_replay_expectations.txt"
        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
        self._asan = args.asan
        with open(expected_results_path, "rt") as f:
            for line in f:
                l = line.strip()
                if l != "" and not l.startswith("#"):
                    self.ReadOneExpectation(l, args.debug)

    def _CheckTagsWithConfig(self, tags, config_tags):
        for tag in tags:
            if tag not in config_tags:
                return False
        return True

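    # Parses one expectation line. As consumed below, a line looks roughly like
    # "<id> [TAG ...] : <test name pattern> = <RESULT>", for example (illustrative):
    #     1234 WIN DEBUG : FooTest.Bar/ES2_Vulkan_SwiftShader = SKIP_FOR_CAPTURE
    # The tags are matched against the current platform/ASAN/DEBUG configuration.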
    def ReadOneExpectation(self, line, is_debug):
        (testpattern, result) = line.split('=')
        (test_info_string, test_name_string) = testpattern.split(':')
        test_name = test_name_string.strip()
        test_info = test_info_string.strip().split()
        result_stripped = result.strip()

        tags = []
        if len(test_info) > 1:
            tags = test_info[1:]

        config_tags = [GetPlatformForSkip()]
        if self._asan:
            config_tags += ['ASAN']
        if is_debug:
            config_tags += ['DEBUG']

        if self._CheckTagsWithConfig(tags, config_tags):
            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
            if result_stripped == 'CRASH' or result_stripped == 'COMPILE_FAIL':
                self.run_single[test_name] = self.result_map[result_stripped]
                self.run_single_re[test_name] = test_name_regex
            if result_stripped == 'SKIP_FOR_CAPTURE' or result_stripped == 'TIMEOUT':
                self.skipped_for_capture_tests[test_name] = self.result_map[result_stripped]
                self.skipped_for_capture_tests_re[test_name] = test_name_regex
            elif result_stripped == 'FLAKY':
                self.flaky_tests.append(test_name_regex)
            else:
                self.non_pass_results[test_name] = self.result_map[result_stripped]
                self.non_pass_re[test_name] = test_name_regex

    def TestIsSkippedForCapture(self, test_name):
        for p in self.skipped_for_capture_tests_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        return False

    def TestNeedsToRunSingle(self, test_name):
        for p in self.run_single_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        for p in self.skipped_for_capture_tests_re.values():
            m = p.match(test_name)
            if m is not None:
                return True
        return False

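    # Returns a dict mapping tests in test_list to their expected result, for tests that match
    # a non-pass expectation (plus skipped-for-capture tests when run_all_tests is set). Tests
    # without a matching expectation are expected to pass and are not included.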
    def Filter(self, test_list, run_all_tests):
        result = {}
        for t in test_list:
            for key in self.non_pass_results.keys():
                if self.non_pass_re[key].match(t) is not None:
                    result[t] = self.non_pass_results[key]
            for key in self.run_single.keys():
                if self.run_single_re[key].match(t) is not None:
                    result[t] = self.run_single[key]
            if run_all_tests:
                for [key, r] in self.skipped_for_capture_tests.items():
                    if self.skipped_for_capture_tests_re[key].match(t) is not None:
                        result[t] = r
        return result

    def IsFlaky(self, test_name):
        for flaky in self.flaky_tests:
            if flaky.match(test_name) is not None:
                return True
        return False


def ClearFolderContent(path):
    for f in os.listdir(path):
        if os.path.isfile(os.path.join(path, f)):
            os.remove(os.path.join(path, f))


def SetCWDToAngleFolder():
    cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
    os.chdir(cwd)
    return cwd


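# Worker process entry point: repeatedly pulls a TestBatch off the shared job queue, runs it
# through capture, replay build, and replay, and appends the batch results to result_list.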
def RunTests(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock):
    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % worker_id)
    replay_exec_path = os.path.join(replay_build_dir, REPLAY_BINARY)

    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
    # used to differentiate between multiple composite files when there are multiple test
    # batches running on the same worker and --keep-temp-files is set
    composite_file_id = 1
    while not job_queue.empty():
        try:
            test_batch = job_queue.get()
            logger.info('Starting {} tests on worker {}. Unstarted jobs: {}'.format(
                len(test_batch.tests), worker_id, job_queue.qsize()))

            test_batch.SetWorkerId(worker_id)

            success = test_batch.RunWithCapture(args, child_processes_manager)
            if not success:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            continued_tests = test_batch.RemoveTestsThatDoNotProduceAppropriateTraceFiles()
            if len(continued_tests) == 0:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            success = test_batch.BuildReplay(replay_build_dir, composite_file_id, continued_tests,
                                             child_processes_manager)
            if args.keep_temp_files:
                composite_file_id += 1
            if not success:
                result_list.append(test_batch.GetResults())
                logger.info(str(test_batch.GetResults()))
                continue
            test_batch.RunReplay(replay_build_dir, replay_exec_path, child_processes_manager,
                                 continued_tests, args.expose_nonconformant_features)
            result_list.append(test_batch.GetResults())
            logger.info(str(test_batch.GetResults()))
        except KeyboardInterrupt:
            child_processes_manager.KillAll()
            raise
        except queue.Empty:
            child_processes_manager.KillAll()
            break
        except Exception as e:
            logger.error('RunTestsException: %s\n%s' % (repr(e), traceback.format_exc()))
            child_processes_manager.KillAll()
    message_queue.put(child_processes_manager.runtimes)
    child_processes_manager.KillAll()


def SafeDeleteFolder(folder_name):
    while os.path.isdir(folder_name):
        try:
            shutil.rmtree(folder_name)
        except KeyboardInterrupt:
            raise
        except PermissionError:
            pass


def DeleteReplayBuildFolders(folder_num, replay_build_dir, trace_folder):
    for i in range(folder_num):
        folder_name = replay_build_dir + str(i)
        if os.path.isdir(folder_name):
            SafeDeleteFolder(folder_name)


def CreateTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        os.makedirs(folder_path)


def DeleteTraceFolders(folder_num):
    for i in range(folder_num):
        folder_name = TRACE_FOLDER + str(i)
        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
        if os.path.isdir(folder_path):
            SafeDeleteFolder(folder_path)


def GetPlatformForSkip():
    # yapf: disable
    # we want each pair on one line
    platform_map = { 'win32' : 'WIN',
                     'linux' : 'LINUX' }
    # yapf: enable
    return platform_map.get(sys.platform, 'UNKNOWN')


def main(args):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level=args.log.upper())

    ninja_lock = multiprocessing.Semaphore(args.max_ninja_jobs)
    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
    try:
        start_time = time.time()
        # Set the number of workers to cpu_count - 1 (since the main process already takes up a
        # CPU core). Whenever a worker is available, it grabs the next job from the job queue
        # and runs it. A worker shuts down when there are no more jobs.
        worker_count = min(multiprocessing.cpu_count() - 1, args.max_jobs)
        cwd = SetCWDToAngleFolder()

        CreateTraceFolders(worker_count)
        capture_build_dir = os.path.normpath(r'%s/Capture' % args.out_dir)
        returncode, output = child_processes_manager.RunGNGen(capture_build_dir, False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # run ninja to build all tests
        returncode, output = child_processes_manager.RunNinja(capture_build_dir, args.test_suite,
                                                              False)
        if returncode != 0:
            logger.error(output)
            child_processes_manager.KillAll()
            return EXIT_FAILURE
        # get a list of tests
        test_path = os.path.join(capture_build_dir, args.test_suite)
        test_list = GetTestsListForFilter(args, test_path, args.filter, logger)
        test_expectation = TestExpectation(args)
        test_names = ParseTestNamesFromTestList(test_list, test_expectation,
                                                args.also_run_skipped_for_capture_tests, logger)
        test_expectation_for_list = test_expectation.Filter(
            test_names, args.also_run_skipped_for_capture_tests)
        # Objects created by the manager can be shared by multiple processes. We use it to
        # create collections shared across processes, such as the job queue and the result list.
        manager = multiprocessing.Manager()
        job_queue = manager.Queue()
        test_batch_num = 0

        num_tests = len(test_names)
        test_index = 0

        # Put the tests into batches and add them to the job queue. Tests that are expected to
        # crash, time out, or fail to compile are run in batches of size one, because a crash or
        # a compile failure brings down the whole batch and would produce false negatives for
        # other tests that would otherwise pass or fail differently.
        while test_index < num_tests:
            batch = TestBatch(args, logger)

            while test_index < num_tests and len(batch.tests) < args.batch_count:
                test_name = test_names[test_index]
                test_obj = Test(test_name)

                if test_expectation.TestNeedsToRunSingle(test_name):
                    single_batch = TestBatch(args, logger)
                    single_batch.AddTest(test_obj)
                    job_queue.put(single_batch)
                    test_batch_num += 1
                else:
                    batch.AddTest(test_obj)

                test_index += 1

            if len(batch.tests) > 0:
                job_queue.put(batch)
                test_batch_num += 1

        passed_count = 0
        failed_count = 0
        timedout_count = 0
        crashed_count = 0
        compile_failed_count = 0
        skipped_count = 0

        unexpected_count = {}
        unexpected_test_results = {}

        for result_type in GroupedResult.ResultTypes:
            unexpected_count[result_type] = 0
            unexpected_test_results[result_type] = []

        # The result list is created by the manager so it can be shared across processes. Each
        # worker appends one TestBatchResult per batch it runs; after all workers finish, the
        # main process tallies the per-test results against the expectations. Each
        # TestBatchResult maps a result type (Passed, Failed, Skipped, ...) to the names of the
        # tests that got that result.
        result_list = manager.list()
        message_queue = manager.Queue()
        # so that we do not spawn more processes than we actually need
        worker_count = min(worker_count, test_batch_num)
        # spawn and start up the workers
        for worker_id in range(worker_count):
            proc = multiprocessing.Process(
                target=RunTests,
                args=(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock))
            child_processes_manager.AddWorker(proc)
            proc.start()

        # print out periodic status messages
        while child_processes_manager.IsAnyWorkerAlive():
            logger.info('%d workers running, %d jobs left.' %
                        (child_processes_manager.GetRemainingWorkers(), (job_queue.qsize())))
            # If only a few tests are run, the workers will likely finish before the
            # STATUS_MESSAGE_PERIOD has passed, and the script would sit idle for the
            # remainder of the wait time. Therefore, limit the wait by the number of
            # unfinished jobs.
            unfinished_jobs = job_queue.qsize() + child_processes_manager.GetRemainingWorkers()
            time.sleep(min(STATUS_MESSAGE_PERIOD, unfinished_jobs))

        child_processes_manager.JoinWorkers()
        end_time = time.time()

        summed_runtimes = child_processes_manager.runtimes
        while not message_queue.empty():
            runtimes = message_queue.get()
            for k, v in runtimes.items():
                summed_runtimes.setdefault(k, 0.0)
                summed_runtimes[k] += v

        # print out results
        logger.info('')
        logger.info('Results:')

        flaky_results = []

        regression_error_log = []

        for test_batch in result_list:
            test_batch_result = test_batch.results
            logger.debug(str(test_batch_result))

            batch_has_regression = False

            passed_count += len(test_batch_result[GroupedResult.Passed])
            failed_count += len(test_batch_result[GroupedResult.Failed])
            timedout_count += len(test_batch_result[GroupedResult.TimedOut])
            crashed_count += len(test_batch_result[GroupedResult.Crashed])
            compile_failed_count += len(test_batch_result[GroupedResult.CompileFailed])
            skipped_count += len(test_batch_result[GroupedResult.Skipped])

            for real_result, test_list in test_batch_result.items():
                for test in test_list:
                    if test_expectation.IsFlaky(test):
                        flaky_results.append('{} ({})'.format(test, real_result))
                        continue

                    # Passing tests are not in the list
                    if test not in test_expectation_for_list.keys():
                        if real_result != GroupedResult.Passed:
                            batch_has_regression = True
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                '{} {} (expected Pass or is new test)'.format(test, real_result))
                    else:
                        expected_result = test_expectation_for_list[test]
                        if real_result != expected_result:
                            if real_result != GroupedResult.Passed:
                                batch_has_regression = True
                            unexpected_count[real_result] += 1
                            unexpected_test_results[real_result].append(
                                '{} {} (expected {})'.format(test, real_result, expected_result))
            if batch_has_regression:
                regression_error_log.append(str(test_batch))

        if len(regression_error_log) > 0:
            logger.info('Logging output of test batches with regressions')
            logger.info(
                '==================================================================================================='
            )
            for log in regression_error_log:
                logger.info(log)
                logger.info(
                    '---------------------------------------------------------------------------------------------------'
                )
                logger.info('')

        logger.info('')
        logger.info('Elapsed time: %.2lf seconds' % (end_time - start_time))
        logger.info('')
        logger.info('Runtimes by process:\n%s' %
                    '\n'.join('%s: %.2lf seconds' % (k, v) for (k, v) in summed_runtimes.items()))

        if len(flaky_results):
            logger.info("Flaky test(s):")
            for line in flaky_results:
                logger.info("    {}".format(line))
            logger.info("")

        logger.info(
            'Summary: Passed: %d, Comparison Failed: %d, Crashed: %d, CompileFailed %d, Skipped: %d, Timeout: %d'
            % (passed_count, failed_count, crashed_count, compile_failed_count, skipped_count,
               timedout_count))

        retval = EXIT_SUCCESS

        unexpected_test_results_count = 0
        for count in unexpected_count.values():
            unexpected_test_results_count += count

        if unexpected_test_results_count > 0:
            retval = EXIT_FAILURE
            logger.info('')
            logger.info('Failure: Obtained {} results that differ from expectation:'.format(
                unexpected_test_results_count))
            logger.info('')
            for result, count in unexpected_count.items():
                if count > 0:
                    logger.info("Unexpected '{}' ({}):".format(result, count))
                    for test_result in unexpected_test_results[result]:
                        logger.info('     {}'.format(test_result))
                    logger.info('')

        logger.info('')

        # delete the generated trace folders and build directories if --purge is set
        if args.purge:
            DeleteTraceFolders(worker_count)
            if os.path.isdir(args.out_dir):
                SafeDeleteFolder(args.out_dir)

        # Try hard to ensure output is finished before ending the test.
        logging.shutdown()
        sys.stdout.flush()
        time.sleep(2.0)
        return retval

    except KeyboardInterrupt:
        child_processes_manager.KillAll()
        return EXIT_FAILURE


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out-dir',
        default=DEFAULT_OUT_DIR,
        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
        % DEFAULT_OUT_DIR)
    parser.add_argument(
        '-f',
        '--filter',
        '--gtest_filter',
        default=DEFAULT_FILTER,
        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
    parser.add_argument(
        '--test-suite',
        default=DEFAULT_TEST_SUITE,
        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--batch-count',
        default=DEFAULT_BATCH_COUNT,
        type=int,
        help='Number of tests in a batch. Default is %d.' % DEFAULT_BATCH_COUNT)
    parser.add_argument(
        '--keep-temp-files',
        action='store_true',
        help='Whether to keep the temp files and folders. Off by default')
    parser.add_argument(
        '--purge', action='store_true', help='Purge all build directories on exit.')
    parser.add_argument(
        '--goma-dir',
        default='',
        help='Set custom goma directory. Uses the goma in path by default.')
    parser.add_argument(
        '--output-to-file',
        action='store_true',
        help='Whether to write output to a result file. Off by default')
    parser.add_argument(
        '--result-file',
        default=DEFAULT_RESULT_FILE,
        help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %
        DEFAULT_RESULT_FILE)
    parser.add_argument('-v', '--verbose', action='store_true', help='Shows full test output.')
    parser.add_argument(
        '-l',
        '--log',
        default=DEFAULT_LOG_LEVEL,
        help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '-j',
        '--max-jobs',
        default=DEFAULT_MAX_JOBS,
        type=int,
        help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)
    parser.add_argument(
        '-a',
        '--also-run-skipped-for-capture-tests',
        action='store_true',
        help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE')
    parser.add_argument(
        '--max-ninja-jobs',
        type=int,
        default=DEFAULT_MAX_NINJA_JOBS,
        help='Maximum number of concurrent ninja jobs to run at once.')
    parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')
    parser.add_argument('--asan', action='store_true', help='Build with ASAN.')
    parser.add_argument(
        '-E',
        '--expose-nonconformant-features',
        action='store_true',
        help='Expose non-conformant features to advertise GLES 3.2')
    parser.add_argument(
        '--show-capture-stdout', action='store_true', help='Print test stdout during capture.')
    parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')
    args = parser.parse_args()
    if args.debug and (args.out_dir == DEFAULT_OUT_DIR):
        args.out_dir = args.out_dir + "Debug"

    if sys.platform == "win32":
        args.test_suite += ".exe"
    if args.output_to_file:
        logging.basicConfig(level=args.log.upper(), filename=args.result_file)
    else:
        logging.basicConfig(level=args.log.upper())

    sys.exit(main(args))