1#! /usr/bin/env vpython3
2#
3# Copyright 2020 The ANGLE Project Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6#
7"""
8Script for testing ANGLE capture/replay with angle_end2end_tests.
9"""
10
11# The automation script will:
12# 1. Build all tests in angle_end2end_tests with frame capture enabled
13# 2. Run each test with frame capture
14# 3. Build CaptureReplayTest with the generated cpp trace files
15# 4. Run CaptureReplayTest
16# 5. Output the number of test successes and failures. A test succeeds if no error occurs during
17# its capture and replay, and the GL states at the end of the two runs match. Any unexpected
18# failure results in a non-zero exit code.
19
20# Run this script with Python to test capture replay on angle_end2end tests
21# python path/to/capture_replay_tests.py
22# Command line arguments: run with --help for a full list.
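#
# Example invocation (the filter and batch count shown here are just illustrative):
#   python src/tests/capture_replay_tests.py --gtest_filter '*/ES2_Vulkan_SwiftShader' --batch-count 8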
23
24import argparse
25import difflib
26import distutils.util
27import fnmatch
28import json
29import logging
30import math
31import multiprocessing
32import os
33import psutil
34import queue
35import re
36import shutil
37import subprocess
38import sys
39import tempfile
40import time
41import traceback
42
43PIPE_STDOUT = True
44DEFAULT_OUT_DIR = "out/CaptureReplayTest"  # relative to angle folder
45DEFAULT_FILTER = "*/ES2_Vulkan_SwiftShader"
46DEFAULT_TEST_SUITE = "angle_end2end_tests"
47REPLAY_SAMPLE_FOLDER = "src/tests/capture_replay_tests"  # relative to angle folder
48DEFAULT_BATCH_COUNT = 8  # number of tests batched together
49TRACE_FILE_SUFFIX = "_context"  # because we only deal with 1 context right now
50RESULT_TAG = "*RESULT"
51STATUS_MESSAGE_PERIOD = 20  # in seconds
52SUBPROCESS_TIMEOUT = 600  # in seconds
53DEFAULT_RESULT_FILE = "results.txt"
54DEFAULT_LOG_LEVEL = "info"
55DEFAULT_MAX_JOBS = 8
56DEFAULT_MAX_NINJA_JOBS = 3
57REPLAY_BINARY = "capture_replay_tests"
58if sys.platform == "win32":
59    REPLAY_BINARY += ".exe"
60TRACE_FOLDER = "traces"
61
62EXIT_SUCCESS = 0
63EXIT_FAILURE = 1
64REPLAY_INITIALIZATION_FAILURE = -1
65REPLAY_SERIALIZATION_FAILURE = -2
66
67switch_case_without_return_template = """\
68        case {case}:
69            {namespace}::{call}({params});
70            break;
71"""
72
73switch_case_with_return_template = """\
74        case {case}:
75            return {namespace}::{call}({params});
76"""
77
78default_case_without_return_template = """\
79        default:
80            break;"""
81default_case_with_return_template = """\
82        default:
83            return {default_val};"""
84
85
86def winext(name, ext):
87    return ("%s.%s" % (name, ext)) if sys.platform == "win32" else name
88
89
90def AutodetectGoma():
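    # Goma is assumed to be in use if a compiler_proxy process is running on this machine.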
91    for p in psutil.process_iter():
92        try:
93            if winext('compiler_proxy', 'exe') == p.name():
94                return True
95        except Exception:
96            pass
97    return False
98
99
100class SubProcess():
101
102    def __init__(self, command, logger, env=os.environ, pipe_stdout=PIPE_STDOUT):
103        # shell=False so that only one subprocess is spawned.
104        # If shell=True, a shell process is spawned, which in turn spawns the process running
105        # the command. Since we do not have a handle to the second process, we cannot terminate it.
106        if pipe_stdout:
107            self.proc_handle = subprocess.Popen(
108                command, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
109        else:
110            self.proc_handle = subprocess.Popen(command, env=env, shell=False)
111        self._logger = logger
112
113    def Join(self, timeout):
114        self._logger.debug('Joining with subprocess %d, timeout %s' % (self.Pid(), str(timeout)))
115        output = self.proc_handle.communicate(timeout=timeout)[0]
116        if output:
117            output = output.decode('utf-8')
118        else:
119            output = ''
120        return self.proc_handle.returncode, output
121
122    def Pid(self):
123        return self.proc_handle.pid
124
125    def Kill(self):
126        self.proc_handle.terminate()
127        self.proc_handle.wait()
128
129
130# Class that manages all child processes of a process. Any process that spawns subprocesses
131# should have one. This object is created inside the main process and inside each worker process.
132class ChildProcessesManager():
133
134    @classmethod
135    def _GetGnAbsolutePaths(cls):
136        return os.path.join('third_party', 'depot_tools', winext('gn', 'bat'))
137
138    @classmethod
139    def _GetNinjaAbsolutePaths(cls):
140        return os.path.join('third_party', 'ninja', 'ninja')
141
142    def __init__(self, args, logger, ninja_lock):
143        # a dictionary of SubProcess objects, keyed by pid
144        self.subprocesses = {}
145        # a list of Python multiprocessing.Process handles
146        self.workers = []
147
148        self._gn_path = self._GetGnAbsolutePaths()
149        self._ninja_path = self._GetNinjaAbsolutePaths()
150        self._use_goma = AutodetectGoma()
151        self._logger = logger
152        self._ninja_lock = ninja_lock
153        self.runtimes = {}
154        self._args = args
155
156    def RunSubprocess(self, command, env=None, pipe_stdout=True, timeout=None):
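        # Returns (returncode, output). A nonzero exit or an unexpected exception is reported as
        # -1, a timeout as -2.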
157        proc = SubProcess(command, self._logger, env, pipe_stdout)
158        self._logger.debug('Created subprocess: %s with pid %d' % (' '.join(command), proc.Pid()))
159        self.subprocesses[proc.Pid()] = proc
160        start_time = time.time()
161        try:
162            returncode, output = self.subprocesses[proc.Pid()].Join(timeout)
163            elapsed_time = time.time() - start_time
164            cmd_name = os.path.basename(command[0])
165            self.runtimes.setdefault(cmd_name, 0.0)
166            self.runtimes[cmd_name] += elapsed_time
167            self.RemoveSubprocess(proc.Pid())
168            if returncode != 0:
169                return -1, output
170            return returncode, output
171        except KeyboardInterrupt:
172            raise
173        except subprocess.TimeoutExpired as e:
174            self.RemoveSubprocess(proc.Pid())
175            return -2, str(e)
176        except Exception as e:
177            self.RemoveSubprocess(proc.Pid())
178            return -1, str(e)
179
180    def RemoveSubprocess(self, subprocess_id):
181        assert subprocess_id in self.subprocesses
182        self.subprocesses[subprocess_id].Kill()
183        del self.subprocesses[subprocess_id]
184
185    def AddWorker(self, worker):
186        self.workers.append(worker)
187
188    def KillAll(self):
189        for subprocess_id in self.subprocesses:
190            self.subprocesses[subprocess_id].Kill()
191        for worker in self.workers:
192            worker.terminate()
193            worker.join()
194            worker.close()  # to release file descriptors immediately
195        self.subprocesses = {}
196        self.workers = []
197
198    def JoinWorkers(self):
199        for worker in self.workers:
200            worker.join()
201            worker.close()
202        self.workers = []
203
204    def IsAnyWorkerAlive(self):
205        return any([worker.is_alive() for worker in self.workers])
206
207    def GetRemainingWorkers(self):
208        count = 0
209        for worker in self.workers:
210            if worker.is_alive():
211                count += 1
212        return count
213
214    def RunGNGen(self, build_dir, pipe_stdout, extra_gn_args=None):
215        gn_args = [('angle_with_capture_by_default', 'true')] + (extra_gn_args or [])
216        if self._use_goma:
217            gn_args.append(('use_goma', 'true'))
218            if self._args.goma_dir:
219                gn_args.append(('goma_dir', '"%s"' % self._args.goma_dir))
220        if not self._args.debug:
221            gn_args.append(('is_debug', 'false'))
222            gn_args.append(('symbol_level', '1'))
223            gn_args.append(('angle_assert_always_on', 'true'))
224        if self._args.asan:
225            gn_args.append(('is_asan', 'true'))
226        args_str = ' '.join(['%s=%s' % (k, v) for (k, v) in gn_args])
227        cmd = [self._gn_path, 'gen', '--args=%s' % args_str, build_dir]
228        self._logger.info(' '.join(cmd))
229        return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)
230
231    def RunNinja(self, build_dir, target, pipe_stdout):
232        cmd = [self._ninja_path]
233
234        # This code is taken from depot_tools/autoninja.py
235        if self._use_goma:
236            num_cores = multiprocessing.cpu_count()
237            cmd.append('-j')
238            core_multiplier = 40
239            j_value = num_cores * core_multiplier
240
241            if sys.platform.startswith('win'):
242                # On Windows, a -j value higher than 1000 does not improve build performance.
243                j_value = min(j_value, 1000)
244            elif sys.platform == 'darwin':
245                # On Mac, j value higher than 500 causes 'Too many open files' error
246                # (crbug.com/936864).
247                j_value = min(j_value, 500)
248
249            cmd.append('%d' % j_value)
250        else:
251            cmd.append('-l')
252            cmd.append('%d' % os.cpu_count())
253
254        cmd += ['-C', build_dir, target]
255        with self._ninja_lock:
256            self._logger.info(' '.join(cmd))
257            return self.RunSubprocess(cmd, pipe_stdout=pipe_stdout)
258
259
260def GetTestsListForFilter(args, test_path, filter, logger):
261    cmd = GetRunCommand(args, test_path) + ["--list-tests", "--gtest_filter=%s" % filter]
262    logger.info('Getting test list from "%s"' % " ".join(cmd))
263    return subprocess.check_output(cmd, text=True)
264
265
266def ParseTestNamesFromTestList(output, test_expectation, also_run_skipped_for_capture_tests,
267                               logger):
268    output_lines = output.splitlines()
269    tests = []
270    seen_start_of_tests = False
271    disabled = 0
272    for line in output_lines:
273        l = line.strip()
274        if l == 'Tests list:':
275            seen_start_of_tests = True
276        elif l == 'End tests list.':
277            break
278        elif not seen_start_of_tests:
279            pass
280        elif not test_expectation.TestIsSkippedForCapture(l) or also_run_skipped_for_capture_tests:
281            tests.append(l)
282        else:
283            disabled += 1
284
285    logger.info('Found %s tests and %d disabled tests.' % (len(tests), disabled))
286    return tests
287
288
289def GetRunCommand(args, command):
290    if args.xvfb:
291        return ['vpython', 'testing/xvfb.py', command]
292    else:
293        return [command]
294
295
296class GroupedResult():
297    Passed = "Pass"
298    Failed = "Fail"
299    TimedOut = "Timeout"
300    Crashed = "Crashed"
301    CompileFailed = "CompileFailed"
302    Skipped = "Skipped"
303    FailedToTrace = "FailedToTrace"
304
305    ResultTypes = [Passed, Failed, TimedOut, Crashed, CompileFailed, Skipped, FailedToTrace]
306
307    def __init__(self, resultcode, message, output, tests):
308        self.resultcode = resultcode
309        self.message = message
310        self.output = output
311        self.tests = []
312        for test in tests:
313            self.tests.append(test)
314
315
316class TestBatchResult():
317
318    display_output_lines = 20
319
320    def __init__(self, grouped_results, verbose):
321        self.results = {}
322        for result_type in GroupedResult.ResultTypes:
323            self.results[result_type] = []
324
325        for grouped_result in grouped_results:
326            for test in grouped_result.tests:
327                self.results[grouped_result.resultcode].append(test.full_test_name)
328
329        self.repr_str = ""
330        self.GenerateRepresentationString(grouped_results, verbose)
331
332    def __str__(self):
333        return self.repr_str
334
335    def GenerateRepresentationString(self, grouped_results, verbose):
336        for grouped_result in grouped_results:
337            self.repr_str += grouped_result.resultcode + ": " + grouped_result.message + "\n"
338            for test in grouped_result.tests:
339                self.repr_str += "\t" + test.full_test_name + "\n"
340            if verbose:
341                self.repr_str += grouped_result.output
342            else:
343                if grouped_result.resultcode == GroupedResult.CompileFailed:
344                    self.repr_str += TestBatchResult.ExtractErrors(grouped_result.output)
345                elif grouped_result.resultcode != GroupedResult.Passed:
346                    self.repr_str += TestBatchResult.GetAbbreviatedOutput(grouped_result.output)
347
348    def ExtractErrors(output):
349        lines = output.splitlines()
350        error_lines = []
351        for i in range(len(lines)):
352            if ": error:" in lines[i]:
353                error_lines.append(lines[i] + "\n")
354                if i + 1 < len(lines):
355                    error_lines.append(lines[i + 1] + "\n")
356        return "".join(error_lines)
357
358    def GetAbbreviatedOutput(output):
359        # Get all lines after and including the last occurrence of "[ RUN      ]".
360        lines = output.splitlines()
361        line_count = 0
362        for line_index in reversed(range(len(lines))):
363            line_count += 1
364            if "[ RUN      ]" in lines[line_index]:
365                break
366
367        return '\n' + '\n'.join(lines[-line_count:]) + '\n'
368
369
370class Test():
371
372    def __init__(self, test_name):
373        self.full_test_name = test_name
374        self.params = test_name.split('/')[1]
375        self.context_id = 0
376        self.test_index = -1  # index of test within a test batch
377        self._label = self.full_test_name.replace(".", "_").replace("/", "_")
378        self.skipped_by_suite = False
379
380    def __str__(self):
381        return self.full_test_name + " Params: " + self.params
382
383    def GetLabel(self):
384        return self._label
385
386    def CanRunReplay(self, trace_folder_path):
387        test_files = []
388        label = self.GetLabel()
389        assert (self.context_id == 0)
390        for f in os.listdir(trace_folder_path):
391            if os.path.isfile(os.path.join(trace_folder_path, f)) and f.startswith(label):
392                test_files.append(f)
393        frame_files_count = 0
394        context_header_count = 0
395        context_source_count = 0
396        source_json_count = 0
397        context_id = 0
398        for f in test_files:
399            # TODO: Consolidate. http://anglebug.com/7753
400            if "_001.cpp" in f or "_001.c" in f:
401                frame_files_count += 1
402            elif f.endswith(".json"):
403                source_json_count += 1
404            elif f.endswith(".h"):
405                context_header_count += 1
406                if TRACE_FILE_SUFFIX in f:
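                    # Recover the context id from a header named "<label>_context<id>.h".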
407                    context = f.split(TRACE_FILE_SUFFIX)[1][:-2]
408                    context_id = int(context)
409            # TODO: Consolidate. http://anglebug.com/7753
410            elif f.endswith(".cpp") or f.endswith(".c"):
411                context_source_count += 1
412        can_run_replay = frame_files_count >= 1 and context_header_count >= 1 \
413            and context_source_count >= 1 and source_json_count == 1
414        if not can_run_replay:
415            return False
416        self.context_id = context_id
417        return True
418
419
420def _FormatEnv(env):
421    return ' '.join(['%s=%s' % (k, v) for (k, v) in env.items()])
422
423
424class TestBatch():
425
426    CAPTURE_FRAME_END = 100
427
428    def __init__(self, args, logger):
429        self.args = args
430        self.tests = []
431        self.results = []
432        self.logger = logger
433
434    def SetWorkerId(self, worker_id):
435        self.trace_dir = "%s%d" % (TRACE_FOLDER, worker_id)
436        self.trace_folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, self.trace_dir)
437
438    def RunWithCapture(self, args, child_processes_manager):
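        # Run this batch of tests in the capture build with ANGLE frame capture enabled, writing
        # trace files into this worker's trace folder and reading per-test results from a
        # temporary results file.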
439        test_exe_path = os.path.join(args.out_dir, 'Capture', args.test_suite)
440
441        extra_env = {
442            'ANGLE_CAPTURE_SERIALIZE_STATE': '1',
443            'ANGLE_FEATURE_OVERRIDES_ENABLED': 'forceRobustResourceInit:forceInitShaderVariables',
444            'ANGLE_CAPTURE_ENABLED': '1',
445            'ANGLE_CAPTURE_OUT_DIR': self.trace_folder_path,
446        }
447
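        # Mid-execution capture: start capturing at frame args.mec and capture a single frame.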
448        if args.mec > 0:
449            extra_env['ANGLE_CAPTURE_FRAME_START'] = '{}'.format(args.mec)
450            extra_env['ANGLE_CAPTURE_FRAME_END'] = '{}'.format(args.mec + 1)
451        else:
452            extra_env['ANGLE_CAPTURE_FRAME_END'] = '{}'.format(self.CAPTURE_FRAME_END)
453
454        if args.expose_nonconformant_features:
455            extra_env[
456                'ANGLE_FEATURE_OVERRIDES_ENABLED'] += ':exposeNonConformantExtensionsAndVersions'
457
458        env = {**os.environ.copy(), **extra_env}
459
460        if not self.args.keep_temp_files:
461            ClearFolderContent(self.trace_folder_path)
462        filt = ':'.join([test.full_test_name for test in self.tests])
463
464        cmd = GetRunCommand(args, test_exe_path)
465        results_file = tempfile.mktemp()
466        cmd += [
467            '--gtest_filter=%s' % filt,
468            '--angle-per-test-capture-label',
469            '--results-file=' + results_file,
470        ]
471        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(cmd)))
472
473        returncode, output = child_processes_manager.RunSubprocess(
474            cmd, env, timeout=SUBPROCESS_TIMEOUT)
475
476        if args.show_capture_stdout:
477            self.logger.info("Capture stdout: %s" % output)
478
479        if returncode == -1:
480            self.results.append(GroupedResult(GroupedResult.Crashed, "", output, self.tests))
481            return False
482        elif returncode == -2:
483            self.results.append(GroupedResult(GroupedResult.TimedOut, "", "", self.tests))
484            return False
485
486        with open(results_file) as f:
487            test_results = json.load(f)
488        os.unlink(results_file)
489        for test in self.tests:
490            test_result = test_results['tests'][test.full_test_name]
491            if test_result['actual'] == 'SKIP':
492                test.skipped_by_suite = True
493
494        return True
495
496    def RemoveTestsThatDoNotProduceAppropriateTraceFiles(self):
497        continued_tests = []
498        skipped_tests = []
499        failed_to_trace_tests = []
500        for test in self.tests:
501            if not test.CanRunReplay(self.trace_folder_path):
502                if test.skipped_by_suite:
503                    skipped_tests.append(test)
504                else:
505                    failed_to_trace_tests.append(test)
506            else:
507                continued_tests.append(test)
508        if len(skipped_tests) > 0:
509            self.results.append(
510                GroupedResult(GroupedResult.Skipped, "Skipping replay since test skipped by suite",
511                              "", skipped_tests))
512        if len(failed_to_trace_tests) > 0:
513            self.results.append(
514                GroupedResult(GroupedResult.FailedToTrace,
515                              "Test not skipped but failed to produce trace files", "",
516                              failed_to_trace_tests))
517
518        return continued_tests
519
520    def BuildReplay(self, replay_build_dir, composite_file_id, tests, child_processes_manager):
521        # Write a json file listing the traces included in this replay build.
522        self.CreateTestNamesFile(composite_file_id, tests)
523
524        gn_args = [('angle_build_capture_replay_tests', 'true'),
525                   ('angle_capture_replay_test_trace_dir', '"%s"' % self.trace_dir),
526                   ('angle_capture_replay_composite_file_id', str(composite_file_id))]
527        returncode, output = child_processes_manager.RunGNGen(replay_build_dir, True, gn_args)
528        if returncode != 0:
529            self.logger.warning('GN failure output: %s' % output)
530            self.results.append(
531                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at gn generation",
532                              output, tests))
533            return False
534        returncode, output = child_processes_manager.RunNinja(replay_build_dir, REPLAY_BINARY,
535                                                              True)
536        if returncode != 0:
537            self.logger.warning('Ninja failure output: %s' % output)
538            self.results.append(
539                GroupedResult(GroupedResult.CompileFailed, "Build replay failed at ninja", output,
540                              tests))
541            return False
542        return True
543
544    def RunReplay(self, args, replay_build_dir, replay_exe_path, child_processes_manager, tests):
545        extra_env = {}
546        if args.expose_nonconformant_features:
547            extra_env[
548                'ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'exposeNonConformantExtensionsAndVersions'
549
550        env = {**os.environ.copy(), **extra_env}
551
552        run_cmd = GetRunCommand(self.args, replay_exe_path)
553        self.logger.info('%s %s' % (_FormatEnv(extra_env), ' '.join(run_cmd)))
554
555        for test in tests:
556            self.UnlinkContextStateJsonFilesIfPresent(replay_build_dir, test.GetLabel())
557
558        returncode, output = child_processes_manager.RunSubprocess(
559            run_cmd, env, timeout=SUBPROCESS_TIMEOUT)
560        if returncode == -1:
561            cmd = replay_exe_path
562            self.results.append(
563                GroupedResult(GroupedResult.Crashed, "Replay run crashed (%s)" % cmd, output,
564                              tests))
565            return
566        elif returncode == -2:
567            self.results.append(
568                GroupedResult(GroupedResult.TimedOut, "Replay run timed out", output, tests))
569            return
570
571        if args.show_replay_stdout:
572            self.logger.info("Replay stdout: %s" % output)
573
574        output_lines = output.splitlines()
575        passes = []
576        fails = []
577        count = 0
578        for output_line in output_lines:
579            words = output_line.split(" ")
580            if len(words) == 3 and words[0] == RESULT_TAG:
581                test_name = self.FindTestByLabel(words[1])
582                result = int(words[2])
583                if result == 0:
584                    passes.append(test_name)
585                elif result == REPLAY_INITIALIZATION_FAILURE:
586                    fails.append(test_name)
587                    self.logger.info("Initialization failure: %s" % test_name)
588                elif result == REPLAY_SERIALIZATION_FAILURE:
589                    fails.append(test_name)
590                    self.logger.info("Context comparison failed: %s" % test_name)
591                    self.PrintContextDiff(replay_build_dir, words[1])
592                else:
593                    fails.append(test_name)
594                    self.logger.error("Unknown test result code: %s -> %d" % (test_name, result))
595                count += 1
596
597        if len(passes) > 0:
598            self.results.append(GroupedResult(GroupedResult.Passed, "", output, passes))
599        if len(fails) > 0:
600            self.results.append(GroupedResult(GroupedResult.Failed, "", output, fails))
601
602    def UnlinkContextStateJsonFilesIfPresent(self, replay_build_dir, test_name):
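        # Delete any leftover serialized context json files from a previous replay of this test.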
603        frame = 1
604        while True:
605            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
606                                                                 frame)
607            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
608            if os.path.exists(capture_file):
609                os.unlink(capture_file)
610            if os.path.exists(replay_file):
611                os.unlink(replay_file)
612
613            if frame > self.CAPTURE_FRAME_END:
614                break
615            frame = frame + 1
616
617    def PrintContextDiff(self, replay_build_dir, test_name):
618        frame = 1
619        found = False
620        while True:
621            capture_file = "{}/{}_ContextCaptured{}.json".format(replay_build_dir, test_name,
622                                                                 frame)
623            replay_file = "{}/{}_ContextReplayed{}.json".format(replay_build_dir, test_name, frame)
624            if os.path.exists(capture_file) and os.path.exists(replay_file):
625                found = True
626                captured_context = open(capture_file, "r").readlines()
627                replayed_context = open(replay_file, "r").readlines()
628                for line in difflib.unified_diff(
629                        captured_context, replayed_context, fromfile=capture_file,
630                        tofile=replay_file):
631                    print(line, end="")
632            else:
633                if frame > self.CAPTURE_FRAME_END:
634                    break
635            frame = frame + 1
636        if not found:
637            self.logger.error("Could not find serialization diff files for %s" % test_name)
638
639    def FindTestByLabel(self, label):
640        for test in self.tests:
641            if test.GetLabel() == label:
642                return test
643        return None
644
645    def AddTest(self, test):
646        assert len(self.tests) <= self.args.batch_count
647        test.test_index = len(self.tests)
648        self.tests.append(test)
649
650    def CreateTestNamesFile(self, composite_file_id, tests):
651        data = {'traces': [test.GetLabel() for test in tests]}
652        names_path = os.path.join(self.trace_folder_path, 'test_names_%d.json' % composite_file_id)
653        with open(names_path, 'w') as f:
654            f.write(json.dumps(data))
655
656    def __str__(self):
657        repr_str = "TestBatch:\n"
658        for test in self.tests:
659            repr_str += ("\t" + str(test) + "\n")
660        return repr_str
661
662    def __getitem__(self, index):
663        assert index < len(self.tests)
664        return self.tests[index]
665
666    def __iter__(self):
667        return iter(self.tests)
668
669    def GetResults(self):
670        return TestBatchResult(self.results, self.args.verbose)
671
672
673class TestExpectation():
674    # tests that must be skipped for capture
675    skipped_for_capture_tests = {}
676    skipped_for_capture_tests_re = {}
677
678    # test expectations for tests that do not pass
679    non_pass_results = {}
680
681    # tests that must run in a one-test batch
682    run_single = {}
683    run_single_re = {}
684
685    flaky_tests = []
686
687    non_pass_re = {}
688
689    # yapf: disable
690    # we want each pair on one line
691    result_map = { "FAIL" : GroupedResult.Failed,
692                   "TIMEOUT" : GroupedResult.TimedOut,
693                   "CRASH" : GroupedResult.Crashed,
694                   "COMPILE_FAIL" : GroupedResult.CompileFailed,
695                   "NOT_RUN" : GroupedResult.Skipped,
696                   "SKIP_FOR_CAPTURE" : GroupedResult.Skipped,
697                   "PASS" : GroupedResult.Passed}
698    # yapf: enable
699
700    def __init__(self, args):
701        expected_results_filename = "capture_replay_expectations.txt"
702        expected_results_path = os.path.join(REPLAY_SAMPLE_FOLDER, expected_results_filename)
703        self._asan = args.asan
704        with open(expected_results_path, "rt") as f:
705            for line in f:
706                l = line.strip()
707                if l != "" and not l.startswith("#"):
708                    self.ReadOneExpectation(l, args.debug)
709
710    def _CheckTagsWithConfig(self, tags, config_tags):
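        # An expectation applies only if every tag on its line is present in the current config.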
711        for tag in tags:
712            if tag not in config_tags:
713                return False
714        return True
715
716    def ReadOneExpectation(self, line, is_debug):
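        # An expectation line has the form "<info> : <test name pattern> = <RESULT>"; any tokens
        # after the first item of <info> (presumably a bug reference) are treated as config tags.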
717        (testpattern, result) = line.split('=')
718        (test_info_string, test_name_string) = testpattern.split(':')
719        test_name = test_name_string.strip()
720        test_info = test_info_string.strip().split()
721        result_stripped = result.strip()
722
723        tags = []
724        if len(test_info) > 1:
725            tags = test_info[1:]
726
727        config_tags = [GetPlatformForSkip()]
728        if self._asan:
729            config_tags += ['ASAN']
730        if is_debug:
731            config_tags += ['DEBUG']
732
733        if self._CheckTagsWithConfig(tags, config_tags):
734            test_name_regex = re.compile('^' + test_name.replace('*', '.*') + '$')
735            if result_stripped == 'CRASH' or result_stripped == 'COMPILE_FAIL':
736                self.run_single[test_name] = self.result_map[result_stripped]
737                self.run_single_re[test_name] = test_name_regex
738            if result_stripped == 'SKIP_FOR_CAPTURE' or result_stripped == 'TIMEOUT':
739                self.skipped_for_capture_tests[test_name] = self.result_map[result_stripped]
740                self.skipped_for_capture_tests_re[test_name] = test_name_regex
741            elif result_stripped == 'FLAKY':
742                self.flaky_tests.append(test_name_regex)
743            else:
744                self.non_pass_results[test_name] = self.result_map[result_stripped]
745                self.non_pass_re[test_name] = test_name_regex
746
747    def TestIsSkippedForCapture(self, test_name):
748        for p in self.skipped_for_capture_tests_re.values():
749            m = p.match(test_name)
750            if m is not None:
751                return True
752        return False
753
754    def TestNeedsToRunSingle(self, test_name):
755        for p in self.run_single_re.values():
756            m = p.match(test_name)
757            if m is not None:
758                return True
759        for p in self.skipped_for_capture_tests_re.values():
760            m = p.match(test_name)
761            if m is not None:
762                return True
763        return False
764
765    def Filter(self, test_list, run_all_tests):
766        result = {}
767        for t in test_list:
768            for key in self.non_pass_results.keys():
769                if self.non_pass_re[key].match(t) is not None:
770                    result[t] = self.non_pass_results[key]
771            for key in self.run_single.keys():
772                if self.run_single_re[key].match(t) is not None:
773                    result[t] = self.run_single[key]
774            if run_all_tests:
775                for [key, r] in self.skipped_for_capture_tests.items():
776                    if self.skipped_for_capture_tests_re[key].match(t) is not None:
777                        result[t] = r
778        return result
779
780    def IsFlaky(self, test_name):
781        for flaky in self.flaky_tests:
782            if flaky.match(test_name) is not None:
783                return True
784        return False
785
786
787def ClearFolderContent(path):
788    for f in os.listdir(path):
789        if os.path.isfile(os.path.join(path, f)):
790            os.remove(os.path.join(path, f))
791
792
793def SetCWDToAngleFolder():
794    cwd = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
795    os.chdir(cwd)
796    return cwd
797
798
799def RunTests(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock):
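    # Worker entry point: repeatedly pull a TestBatch off the job queue, run it with capture,
    # build and run the replay, and append the batch results to the shared result list.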
800    replay_build_dir = os.path.join(args.out_dir, 'Replay%d' % worker_id)
801    replay_exec_path = os.path.join(replay_build_dir, REPLAY_BINARY)
802
803    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
804    # used to differentiate between multiple composite files when there are multiple test batches
805    # running on the same worker and --keep-temp-files is set
806    composite_file_id = 1
807    while not job_queue.empty():
808        try:
809            test_batch = job_queue.get()
810            logger.info('Starting {} tests on worker {}. Unstarted jobs: {}'.format(
811                len(test_batch.tests), worker_id, job_queue.qsize()))
812
813            test_batch.SetWorkerId(worker_id)
814
815            success = test_batch.RunWithCapture(args, child_processes_manager)
816            if not success:
817                result_list.append(test_batch.GetResults())
818                logger.info(str(test_batch.GetResults()))
819                continue
820            continued_tests = test_batch.RemoveTestsThatDoNotProduceAppropriateTraceFiles()
821            if len(continued_tests) == 0:
822                result_list.append(test_batch.GetResults())
823                logger.info(str(test_batch.GetResults()))
824                continue
825            success = test_batch.BuildReplay(replay_build_dir, composite_file_id, continued_tests,
826                                             child_processes_manager)
827            if args.keep_temp_files:
828                composite_file_id += 1
829            if not success:
830                result_list.append(test_batch.GetResults())
831                logger.info(str(test_batch.GetResults()))
832                continue
833            test_batch.RunReplay(args, replay_build_dir, replay_exec_path, child_processes_manager,
834                                 continued_tests)
835            result_list.append(test_batch.GetResults())
836            logger.info(str(test_batch.GetResults()))
837        except KeyboardInterrupt:
838            child_processes_manager.KillAll()
839            raise
840        except queue.Empty:
841            child_processes_manager.KillAll()
842            break
843        except Exception as e:
844            logger.error('RunTestsException: %s\n%s' % (repr(e), traceback.format_exc()))
845            child_processes_manager.KillAll()
846            pass
847    message_queue.put(child_processes_manager.runtimes)
848    child_processes_manager.KillAll()
849
850
851def SafeDeleteFolder(folder_name):
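    # Retry until the folder is gone; rmtree can raise PermissionError while files inside are
    # still held open (common on Windows).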
852    while os.path.isdir(folder_name):
853        try:
854            shutil.rmtree(folder_name)
855        except KeyboardInterrupt:
856            raise
857        except PermissionError:
858            pass
859
860
861def DeleteReplayBuildFolders(folder_num, replay_build_dir, trace_folder):
862    for i in range(folder_num):
863        folder_name = replay_build_dir + str(i)
864        if os.path.isdir(folder_name):
865            SafeDeleteFolder(folder_name)
866
867
868def CreateTraceFolders(folder_num):
869    for i in range(folder_num):
870        folder_name = TRACE_FOLDER + str(i)
871        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
872        if os.path.isdir(folder_path):
873            shutil.rmtree(folder_path)
874        os.makedirs(folder_path)
875
876
877def DeleteTraceFolders(folder_num):
878    for i in range(folder_num):
879        folder_name = TRACE_FOLDER + str(i)
880        folder_path = os.path.join(REPLAY_SAMPLE_FOLDER, folder_name)
881        if os.path.isdir(folder_path):
882            SafeDeleteFolder(folder_path)
883
884
885def GetPlatformForSkip():
886    # yapf: disable
887    # we want each pair on one line
888    platform_map = { 'win32' : 'WIN',
889                     'linux' : 'LINUX' }
890    # yapf: enable
891    return platform_map.get(sys.platform, 'UNKNOWN')
892
893
894def main(args):
895    logger = multiprocessing.log_to_stderr()
896    logger.setLevel(level=args.log.upper())
897
898    is_bot = bool(args.goma_dir)  # flag set in recipes/recipe_modules/angle/api.py
899    if sys.platform == 'linux' and is_bot:
900        logger.warning('Test is currently a no-op https://anglebug.com/6085')
901        return EXIT_SUCCESS
902
903    ninja_lock = multiprocessing.Semaphore(args.max_ninja_jobs)
904    child_processes_manager = ChildProcessesManager(args, logger, ninja_lock)
905    try:
906        start_time = time.time()
907        # Set the number of workers to cpu_count - 1 (since the main process already takes up a
908        # CPU core). Whenever a worker is available, it grabs the next job from the job queue and
909        # runs it. The worker shuts down when there are no more jobs.
910        worker_count = min(multiprocessing.cpu_count() - 1, args.max_jobs)
911        cwd = SetCWDToAngleFolder()
912
913        CreateTraceFolders(worker_count)
914        capture_build_dir = os.path.normpath(r'%s/Capture' % args.out_dir)
915        returncode, output = child_processes_manager.RunGNGen(capture_build_dir, False)
916        if returncode != 0:
917            logger.error(output)
918            child_processes_manager.KillAll()
919            return EXIT_FAILURE
920        # run ninja to build all tests
921        returncode, output = child_processes_manager.RunNinja(capture_build_dir, args.test_suite,
922                                                              False)
923        if returncode != 0:
924            logger.error(output)
925            child_processes_manager.KillAll()
926            return EXIT_FAILURE
927        # get a list of tests
928        test_path = os.path.join(capture_build_dir, args.test_suite)
929        test_list = GetTestsListForFilter(args, test_path, args.filter, logger)
930        test_expectation = TestExpectation(args)
931        test_names = ParseTestNamesFromTestList(test_list, test_expectation,
932                                                args.also_run_skipped_for_capture_tests, logger)
933        test_expectation_for_list = test_expectation.Filter(
934            test_names, args.also_run_skipped_for_capture_tests)
935        # Objects created by the manager can be shared by multiple processes. We use the manager
936        # to create collections shared across processes, such as the job queue and the result list.
937        manager = multiprocessing.Manager()
938        job_queue = manager.Queue()
939        test_batch_num = 0
940
941        num_tests = len(test_names)
942        test_index = 0
943
944        # Put the tests into batches and the batches into the job queue. Jobs that are expected
945        # to crash, time out, or fail to compile are run in batches of size one, because a crash
946        # or a compile failure brings down the whole batch; otherwise such a batch would yield
947        # false negatives for jobs that would pass or fail differently on their own.
948        while test_index < num_tests:
949            batch = TestBatch(args, logger)
950
951            while test_index < num_tests and len(batch.tests) < args.batch_count:
952                test_name = test_names[test_index]
953                test_obj = Test(test_name)
954
955                if test_expectation.TestNeedsToRunSingle(test_name):
956                    single_batch = TestBatch(args, logger)
957                    single_batch.AddTest(test_obj)
958                    job_queue.put(single_batch)
959                    test_batch_num += 1
960                else:
961                    batch.AddTest(test_obj)
962
963                test_index += 1
964
965            if len(batch.tests) > 0:
966                job_queue.put(batch)
967                test_batch_num += 1
968
969        passed_count = 0
970        failed_count = 0
971        timedout_count = 0
972        crashed_count = 0
973        compile_failed_count = 0
974        skipped_count = 0
975
976        unexpected_count = {}
977        unexpected_test_results = {}
978
979        for result_type in GroupedResult.ResultTypes:
980            unexpected_count[result_type] = 0
981            unexpected_test_results[result_type] = []
982
983        # The result list is created by the manager and is shared by multiple processes. Each
984        # worker appends the results of its test batches to the list; after all workers finish,
985        # the main process tallies the results. Each item in the list is a TestBatchResult whose
986        # results dict maps a result type ("Pass", "Fail", "Skipped", etc.) to the names of the
987        # tests in the batch that ended with that result, and whose string form contains the
988        # batch's captured output.
989        result_list = manager.list()
990        message_queue = manager.Queue()
991        # so that we do not spawn more processes than we actually need
992        worker_count = min(worker_count, test_batch_num)
993        # spawning and starting up workers
994        for worker_id in range(worker_count):
995            proc = multiprocessing.Process(
996                target=RunTests,
997                args=(args, worker_id, job_queue, result_list, message_queue, logger, ninja_lock))
998            child_processes_manager.AddWorker(proc)
999            proc.start()
1000
1001        # print out periodic status messages
1002        while child_processes_manager.IsAnyWorkerAlive():
1003            logger.info('%d workers running, %d jobs left.' %
1004                        (child_processes_manager.GetRemainingWorkers(), (job_queue.qsize())))
1005            # If only a few tests are run, it is likely that the workers finish before
1006            # STATUS_MESSAGE_PERIOD has passed, and the test script would sit idle for the
1007            # remainder of the wait time. Therefore, limit the wait by the number of
1008            # unfinished jobs.
1009            unfinished_jobs = job_queue.qsize() + child_processes_manager.GetRemainingWorkers()
1010            time.sleep(min(STATUS_MESSAGE_PERIOD, unfinished_jobs))
1011
1012        child_processes_manager.JoinWorkers()
1013        end_time = time.time()
1014
1015        summed_runtimes = child_processes_manager.runtimes
1016        while not message_queue.empty():
1017            runtimes = message_queue.get()
1018            for k, v in runtimes.items():
1019                summed_runtimes.setdefault(k, 0.0)
1020                summed_runtimes[k] += v
1021
1022        # print out results
1023        logger.info('')
1024        logger.info('Results:')
1025
1026        flaky_results = []
1027
1028        regression_error_log = []
1029
1030        for test_batch in result_list:
1031            test_batch_result = test_batch.results
1032            logger.debug(str(test_batch_result))
1033
1034            batch_has_regression = False
1035
1036            passed_count += len(test_batch_result[GroupedResult.Passed])
1037            failed_count += len(test_batch_result[GroupedResult.Failed])
1038            timedout_count += len(test_batch_result[GroupedResult.TimedOut])
1039            crashed_count += len(test_batch_result[GroupedResult.Crashed])
1040            compile_failed_count += len(test_batch_result[GroupedResult.CompileFailed])
1041            skipped_count += len(test_batch_result[GroupedResult.Skipped])
1042
1043            for real_result, test_list in test_batch_result.items():
1044                for test in test_list:
1045                    if test_expectation.IsFlaky(test):
1046                        flaky_results.append('{} ({})'.format(test, real_result))
1047                        continue
1048
1049                    # Tests expected to pass are not listed in the expectations
1050                    if test not in test_expectation_for_list.keys():
1051                        if real_result != GroupedResult.Passed:
1052                            batch_has_regression = True
1053                            unexpected_count[real_result] += 1
1054                            unexpected_test_results[real_result].append(
1055                                '{} {} (expected Pass or is new test)'.format(test, real_result))
1056                    else:
1057                        expected_result = test_expectation_for_list[test]
1058                        if real_result != expected_result:
1059                            if real_result != GroupedResult.Passed:
1060                                batch_has_regression = True
1061                            unexpected_count[real_result] += 1
1062                            unexpected_test_results[real_result].append(
1063                                '{} {} (expected {})'.format(test, real_result, expected_result))
1064            if batch_has_regression:
1065                regression_error_log.append(str(test_batch))
1066
1067        if len(regression_error_log) > 0:
1068            logger.info('Logging output of test batches with regressions')
1069            logger.info(
1070                '==================================================================================================='
1071            )
1072            for log in regression_error_log:
1073                logger.info(log)
1074                logger.info(
1075                    '---------------------------------------------------------------------------------------------------'
1076                )
1077                logger.info('')
1078
1079        logger.info('')
1080        logger.info('Elapsed time: %.2lf seconds' % (end_time - start_time))
1081        logger.info('')
1082        logger.info('Runtimes by process:\n%s' %
1083                    '\n'.join('%s: %.2lf seconds' % (k, v) for (k, v) in summed_runtimes.items()))
1084
1085        if len(flaky_results):
1086            logger.info("Flaky test(s):")
1087            for line in flaky_results:
1088                logger.info("    {}".format(line))
1089            logger.info("")
1090
1091        logger.info(
1092            'Summary: Passed: %d, Comparison Failed: %d, Crashed: %d, CompileFailed %d, Skipped: %d, Timeout: %d'
1093            % (passed_count, failed_count, crashed_count, compile_failed_count, skipped_count,
1094               timedout_count))
1095
1096        retval = EXIT_SUCCESS
1097
1098        unexpected_test_results_count = 0
1099        for result, count in unexpected_count.items():
1100            if result != GroupedResult.Skipped:  # Suite skipping tests is ok
1101                unexpected_test_results_count += count
1102
1103        if unexpected_test_results_count > 0:
1104            retval = EXIT_FAILURE
1105            logger.info('')
1106            logger.info('Failure: Obtained {} results that differ from expectation:'.format(
1107                unexpected_test_results_count))
1108            logger.info('')
1109            for result, count in unexpected_count.items():
1110                if count > 0 and result != GroupedResult.Skipped:
1111                    logger.info("Unexpected '{}' ({}):".format(result, count))
1112                    for test_result in unexpected_test_results[result]:
1113                        logger.info('     {}'.format(test_result))
1114                    logger.info('')
1115
1116        logger.info('')
1117
1118        # delete generated folders if the --purge flag is set
1119        if args.purge:
1120            DeleteTraceFolders(worker_count)
1121            if os.path.isdir(args.out_dir):
1122                SafeDeleteFolder(args.out_dir)
1123
1124        # Try hard to ensure output is finished before ending the test.
1125        logging.shutdown()
1126        sys.stdout.flush()
1127        time.sleep(2.0)
1128        return retval
1129
1130    except KeyboardInterrupt:
1131        child_processes_manager.KillAll()
1132        return EXIT_FAILURE
1133
1134
1135if __name__ == '__main__':
1136    parser = argparse.ArgumentParser()
1137    parser.add_argument(
1138        '--out-dir',
1139        default=DEFAULT_OUT_DIR,
1140        help='Where to build ANGLE for capture and replay. Relative to the ANGLE folder. Default is "%s".'
1141        % DEFAULT_OUT_DIR)
1142    parser.add_argument(
1143        '-f',
1144        '--filter',
1145        '--gtest_filter',
1146        default=DEFAULT_FILTER,
1147        help='Same as GoogleTest\'s filter argument. Default is "%s".' % DEFAULT_FILTER)
1148    parser.add_argument(
1149        '--test-suite',
1150        default=DEFAULT_TEST_SUITE,
1151        help='Test suite binary to execute. Default is "%s".' % DEFAULT_TEST_SUITE)
1152    parser.add_argument(
1153        '--batch-count',
1154        default=DEFAULT_BATCH_COUNT,
1155        type=int,
1156        help='Number of tests in a batch. Default is %d.' % DEFAULT_BATCH_COUNT)
1157    parser.add_argument(
1158        '--keep-temp-files',
1159        action='store_true',
1160        help='Whether to keep the temp files and folders. Off by default')
1161    parser.add_argument('--purge', action='store_true', help='Purge all build directories on exit.')
1162    parser.add_argument(
1163        '--goma-dir',
1164        default='',
1165        help='Set custom goma directory. Uses the goma in path by default.')
1166    parser.add_argument(
1167        '--output-to-file',
1168        action='store_true',
1169        help='Whether to write output to a result file. Off by default')
1170    parser.add_argument(
1171        '--result-file',
1172        default=DEFAULT_RESULT_FILE,
1173        help='Name of the result file in the capture_replay_tests folder. Default is "%s".' %
1174        DEFAULT_RESULT_FILE)
1175    parser.add_argument('-v', '--verbose', action='store_true', help='Shows full test output.')
1176    parser.add_argument(
1177        '-l',
1178        '--log',
1179        default=DEFAULT_LOG_LEVEL,
1180        help='Controls the logging level. Default is "%s".' % DEFAULT_LOG_LEVEL)
1181    parser.add_argument(
1182        '-j',
1183        '--max-jobs',
1184        default=DEFAULT_MAX_JOBS,
1185        type=int,
1186        help='Maximum number of test processes. Default is %d.' % DEFAULT_MAX_JOBS)
1187    parser.add_argument(
1188        '-M',
1189        '--mec',
1190        default=0,
1191        type=int,
1192        help='Enable mid-execution capture starting at the specified frame (default: 0 = normal capture)'
1193    )
1194    parser.add_argument(
1195        '-a',
1196        '--also-run-skipped-for-capture-tests',
1197        action='store_true',
1198        help='Also run tests that are disabled in the expectations by SKIP_FOR_CAPTURE')
1199    parser.add_argument(
1200        '--max-ninja-jobs',
1201        type=int,
1202        default=DEFAULT_MAX_NINJA_JOBS,
1203        help='Maximum number of concurrent ninja jobs to run at once.')
1204    parser.add_argument('--xvfb', action='store_true', help='Run with xvfb.')
1205    parser.add_argument('--asan', action='store_true', help='Build with ASAN.')
1206    parser.add_argument(
1207        '-E',
1208        '--expose-nonconformant-features',
1209        action='store_true',
1210        help='Expose non-conformant features to advertise GLES 3.2')
1211    parser.add_argument(
1212        '--show-capture-stdout', action='store_true', help='Print test stdout during capture.')
1213    parser.add_argument(
1214        '--show-replay-stdout', action='store_true', help='Print test stdout during replay.')
1215    parser.add_argument('--debug', action='store_true', help='Debug builds (default is Release).')
1216    args = parser.parse_args()
1217    if args.debug and (args.out_dir == DEFAULT_OUT_DIR):
1218        args.out_dir = args.out_dir + "Debug"
1219
1220    if sys.platform == "win32":
1221        args.test_suite += ".exe"
1222    if args.output_to_file:
1223        logging.basicConfig(level=args.log.upper(), filename=args.result_file)
1224    else:
1225        logging.basicConfig(level=args.log.upper())
1226
1227    sys.exit(main(args))
1228