• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env vpython3
2# Copyright 2022 The Chromium Authors
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5"""Implements commands for standalone CFv2 test executables."""
6
7import argparse
8import logging
9import os
10import shutil
11import subprocess
12import sys
13
14from typing import List, Optional
15
16from common import get_component_uri, get_host_arch, \
17                   register_common_args, register_device_args, \
18                   register_log_args
19from compatible_utils import map_filter_file_to_package_file
20from ffx_integration import FfxTestRunner, run_symbolizer
21from test_runner import TestRunner
22from test_server import setup_test_server
23
24DEFAULT_TEST_SERVER_CONCURRENCY = 4
25
26
27def _copy_custom_output_file(test_runner: FfxTestRunner, file: str,
28                             dest: str) -> None:
29    """Copy custom test output file from the device to the host."""
30
31    artifact_dir = test_runner.get_custom_artifact_directory()
32    if not artifact_dir:
33        logging.error(
34            'Failed to parse custom artifact directory from test summary '
35            'output files. Not copying %s from the device', file)
36        return
37    shutil.copy(os.path.join(artifact_dir, file), dest)
38
39
40def _copy_coverage_files(test_runner: FfxTestRunner, dest: str) -> None:
41    """Copy debug data file from the device to the host if it exists."""
42
43    coverage_dir = test_runner.get_debug_data_directory()
44    if not coverage_dir:
45        logging.info(
46            'Failed to parse coverage data directory from test summary '
47            'output files. Not copying coverage files from the device.')
48        return
49    shutil.copytree(coverage_dir, dest, dirs_exist_ok=True)
50
51
52def _get_vulkan_args(use_vulkan: Optional[str]) -> List[str]:
53    """Helper function to set vulkan related flag."""
54
55    vulkan_args = []
56    if not use_vulkan:
57        if get_host_arch() == 'x64':
58            # TODO(crbug.com/1261646) Remove once Vulkan is enabled by
59            # default.
60            use_vulkan = 'native'
61        else:
62            # Use swiftshader on arm64 by default because most arm64 bots
63            # currently don't support Vulkan emulation.
64            use_vulkan = 'swiftshader'
65            vulkan_args.append('--ozone-platform=headless')
66    vulkan_args.append(f'--use-vulkan={use_vulkan}')
67    return vulkan_args
68
69
class ExecutableTestRunner(TestRunner):
    """Test runner for running standalone test executables."""

    def __init__(  # pylint: disable=too-many-arguments
            self,
            out_dir: str,
            test_args: List[str],
            test_name: str,
            target_id: Optional[str],
            code_coverage_dir: str,
            logs_dir: Optional[str] = None) -> None:
        """Initializes the runner.

        Args:
            out_dir: Build output directory.
            test_args: Arguments to forward to the test process.
            test_name: Name of the test package (e.g. unit_tests).
            target_id: Device to run the test on; None selects the default.
            code_coverage_dir: Host directory that receives coverage files.
            logs_dir: Optional directory for ffx log output.
        """
        super().__init__(out_dir, test_args, [test_name], target_id)
        if not self._test_args:
            self._test_args = []
        self._test_name = test_name
        # Keep the caller-supplied path intact. The previous
        # os.path.basename() call stripped the directory component, which
        # silently redirected coverage output to a path relative to the
        # current working directory instead of the requested directory
        # (see --code-coverage-dir, which defaults to os.getcwd()).
        self._code_coverage_dir = code_coverage_dir
        self._custom_artifact_directory = None
        self._isolated_script_test_output = None
        self._isolated_script_test_perf_output = None
        self._logs_dir = logs_dir
        self._test_launcher_summary_output = None
        self._test_server = None

    def _get_args(self) -> List[str]:
        """Parses runner-level flags out of the raw test arguments.

        Flags that name host output files are recorded on self and rewritten
        to their on-device /custom_artifacts equivalents; everything else is
        passed through unchanged.

        Returns:
            The argument list to forward to the on-device test process.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--isolated-script-test-output',
            help='If present, store test results on this path.')
        parser.add_argument('--isolated-script-test-perf-output',
                            help='If present, store chartjson results on this '
                            'path.')
        parser.add_argument(
            '--test-launcher-shard-index',
            type=int,
            default=os.environ.get('GTEST_SHARD_INDEX'),
            help='Index of this instance amongst swarming shards.')
        parser.add_argument(
            '--test-launcher-summary-output',
            help='Where the test launcher will output its json.')
        parser.add_argument(
            '--test-launcher-total-shards',
            type=int,
            default=os.environ.get('GTEST_TOTAL_SHARDS'),
            help='Total number of swarming shards of this suite.')
        parser.add_argument(
            '--test-launcher-filter-file',
            help='Filter file(s) passed to target test process. Use ";" to '
            'separate multiple filter files.')
        parser.add_argument('--test-launcher-jobs',
                            type=int,
                            help='Sets the number of parallel test jobs.')
        parser.add_argument('--enable-test-server',
                            action='store_true',
                            default=False,
                            help='Enable Chrome test server spawner.')
        parser.add_argument('--test-arg',
                            dest='test_args',
                            action='append',
                            help='Legacy flag to pass in arguments for '
                            'the test process. These arguments can now be '
                            'passed in without a preceding "--" flag.')
        parser.add_argument('--use-vulkan',
                            help='\'native\', \'swiftshader\' or \'none\'.')
        args, child_args = parser.parse_known_args(self._test_args)
        if args.isolated_script_test_output:
            self._isolated_script_test_output = args.isolated_script_test_output
            # The test writes into the device-side custom_artifacts
            # directory; _postprocess() copies the file back to the host.
            child_args.append(
                '--isolated-script-test-output=/custom_artifacts/%s' %
                os.path.basename(self._isolated_script_test_output))
        if args.isolated_script_test_perf_output:
            self._isolated_script_test_perf_output = \
                args.isolated_script_test_perf_output
            child_args.append(
                '--isolated-script-test-perf-output=/custom_artifacts/%s' %
                os.path.basename(self._isolated_script_test_perf_output))
        if args.test_launcher_shard_index is not None:
            child_args.append('--test-launcher-shard-index=%d' %
                              args.test_launcher_shard_index)
        if args.test_launcher_total_shards is not None:
            child_args.append('--test-launcher-total-shards=%d' %
                              args.test_launcher_total_shards)
        if args.test_launcher_summary_output:
            self._test_launcher_summary_output = \
                args.test_launcher_summary_output
            child_args.append(
                '--test-launcher-summary-output=/custom_artifacts/%s' %
                os.path.basename(self._test_launcher_summary_output))
        if args.test_launcher_filter_file:
            # Each host filter file is mapped to its in-package path.
            test_launcher_filter_files = map(
                map_filter_file_to_package_file,
                args.test_launcher_filter_file.split(';'))
            child_args.append('--test-launcher-filter-file=' +
                              ';'.join(test_launcher_filter_files))
        if args.test_launcher_jobs is not None:
            test_concurrency = args.test_launcher_jobs
        else:
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        if args.enable_test_server:
            self._test_server, spawner_url_base = setup_test_server(
                self._target_id, test_concurrency)
            child_args.append('--remote-test-server-spawner-url-base=%s' %
                              spawner_url_base)
        child_args.extend(_get_vulkan_args(args.use_vulkan))
        if args.test_args:
            child_args.extend(args.test_args)
        return child_args

    def _postprocess(self, test_runner: FfxTestRunner) -> None:
        """Stops the test server and copies test outputs back to the host."""
        if self._test_server:
            self._test_server.Stop()
        if self._test_launcher_summary_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._test_launcher_summary_output),
                self._test_launcher_summary_output)
        if self._isolated_script_test_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._isolated_script_test_output),
                self._isolated_script_test_output)
        if self._isolated_script_test_perf_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._isolated_script_test_perf_output),
                self._isolated_script_test_perf_output)
        _copy_coverage_files(test_runner, self._code_coverage_dir)

    def run_test(self) -> subprocess.Popen:
        """Runs the test component, symbolizing its output to the terminal.

        Returns:
            The finished test process.
        """
        test_args = self._get_args()
        with FfxTestRunner(self._logs_dir) as test_runner:
            test_proc = test_runner.run_test(
                get_component_uri(self._test_name), test_args, self._target_id)

            # self._package_deps comes from the TestRunner base class;
            # each package is expected to have an ids.txt alongside it.
            symbol_paths = []
            for pkg_path in self._package_deps.values():
                symbol_paths.append(
                    os.path.join(os.path.dirname(pkg_path), 'ids.txt'))
            # Symbolize output from test process and print to terminal.
            symbolizer_proc = run_symbolizer(symbol_paths, test_proc.stdout,
                                             sys.stdout)
            symbolizer_proc.communicate()

            if test_proc.wait() == 0:
                logging.info('Process exited normally with status code 0.')
            else:
                # The test runner returns an error status code if *any*
                # tests fail, so we should proceed anyway.
                logging.warning('Process exited with status code %d.',
                                test_proc.returncode)
            self._postprocess(test_runner)
        return test_proc
221
222
def create_executable_test_runner(runner_args: argparse.Namespace,
                                  test_args: List[str]):
    """Builds an ExecutableTestRunner from parsed command-line arguments."""

    return ExecutableTestRunner(out_dir=runner_args.out_dir,
                                test_args=test_args,
                                test_name=runner_args.test_type,
                                target_id=runner_args.target_id,
                                code_coverage_dir=runner_args.code_coverage_dir,
                                logs_dir=runner_args.logs_dir)
231
232
def register_executable_test_args(parser: argparse.ArgumentParser) -> None:
    """Registers the command-line arguments used by ExecutableTestRunner."""

    group = parser.add_argument_group('test', 'arguments for test running')
    group.add_argument('--code-coverage-dir',
                       default=os.getcwd(),
                       help='Directory to place code coverage information. '
                       'Only relevant when the target was built with '
                       '|fuchsia_code_coverage| set to true. Defaults to '
                       'current directory.')
    group.add_argument('--test-name',
                       dest='test_type',
                       help='Name of the test package (e.g. unit_tests).')
247
248
def main():
    """Stand-alone function for running executable tests."""

    parser = argparse.ArgumentParser()
    # Registration order determines --help grouping; keep it stable.
    for register in (register_common_args, register_device_args,
                     register_log_args, register_executable_test_args):
        register(parser)
    runner_args, test_args = parser.parse_known_args()
    test_runner = create_executable_test_runner(runner_args, test_args)
    return test_runner.run_test().returncode
260
261
262if __name__ == '__main__':
263    sys.exit(main())
264