# (removed: HTML code-viewer navigation chrome — "Home / Line# / Scopes /
#  Navigate / Raw / Download" — not part of the original source file)
1#!/usr/bin/env python3
2# Copyright 2015 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#     http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15"""Run tests in parallel."""
16
17from __future__ import print_function
18
19import argparse
20import ast
21import collections
22import glob
23import itertools
24import json
25import logging
26import multiprocessing
27import os
28import os.path
29import pipes
30import platform
31import random
32import re
33import socket
34import subprocess
35import sys
36import tempfile
37import time
38import traceback
39import uuid
40
41import six
42from six.moves import urllib
43
44import python_utils.jobset as jobset
45import python_utils.report_utils as report_utils
46import python_utils.start_port_server as start_port_server
47import python_utils.watch_dirs as watch_dirs
48
try:
    from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
    pass  # It's ok to not import because this is only necessary to upload results to BQ.

# Make the shared GCP helper modules under ../gcp/utils importable.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# All relative paths used by this script are resolved against the repo root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Environment variables forced onto language-wrapper test runs.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Polling engines exercised per platform; platforms not listed here fall
# back to the single 'all' strategy (see the .get(..., ['all']) lookup in
# CLanguage.test_specs).
_POLLING_STRATEGIES = {
    'linux': ['epoll1', 'poll'],
    'mac': ['poll'],
}
69
70
def platform_string():
    """Return the canonical platform name as reported by jobset."""
    current_platform = jobset.platform_string()
    return current_platform
73
74
# Default per-test timeout, before any Config.timeout_multiplier scaling.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# Timeout budget for pre-build steps (name-based; its use is outside this chunk).
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
77
78
def run_shell_command(cmd, env=None, cwd=None):
    """Execute *cmd* through the shell, raising CalledProcessError on failure.

    A failure is logged (command, exit status, captured output) before the
    exception is re-raised to the caller.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as error:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            error.cmd, error.returncode, error.output)
        raise
87
88
def max_parallel_tests_for_current_platform():
    """Return the cap on concurrent test jobs for this platform.

    Too much test parallelization has only been seen to be a problem so far
    on windows, hence the much lower cap there.
    """
    return 64 if jobset.platform_string() == 'windows' else 1024
95
96
97def _print_debug_info_epilogue(dockerfile_dir=None):
98    """Use to print useful info for debug/repro just before exiting."""
99    print('')
100    print('=== run_tests.py DEBUG INFO ===')
101    print('command: \"%s\"' % ' '.join(sys.argv))
102    if dockerfile_dir:
103        print('dockerfile: %s' % dockerfile_dir)
104    kokoro_job_name = os.getenv('KOKORO_JOB_NAME')
105    if kokoro_job_name:
106        print('kokoro job name: %s' % kokoro_job_name)
107    print('===============================')
108
109
110# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (CONFIG=...) plus how to run tests under it."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Args:
          config: build config name; also exported as CONFIG in the environ.
          environ: optional dict of extra environment variables for test runs.
          timeout_multiplier: factor applied to every job's timeout_seconds.
          tool_prefix: optional argv prefix prepended to every test command
            line (defaults to no prefix).
          iomgr_platform: IO manager platform identifier.
        """
        if environ is None:
            environ = {}
        # tool_prefix previously used a mutable default argument ([]), which
        # would be shared across all Config instances; use a None sentinel.
        if tool_prefix is None:
            tool_prefix = []
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline:         a list of strings specifying the command line the
                           test would like to run
          timeout_seconds: base timeout before timeout_multiplier is applied;
                           None disables the timeout entirely.
          shortname:       short test identifier; also the key used to look up
                           per-test flakiness and cpu-cost overrides.
          environ:         optional per-job environment overrides (previously
                           a mutable default argument {}).
          cpu_cost:        relative cpu reservation for the job.
          flaky:           whether the test is known flaky (enables retries).
        """
        actual_environ = self.environ.copy()
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        # flaky_tests and shortname_to_cpu are module-level tables populated
        # elsewhere in this file.
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier *
                             timeout_seconds if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
157
158
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets for the current platform from tests.json.

    Args:
      travis: when truthy, select targets by their 'ci_platforms' field and
        drop targets marked flaky.
      test_lang: language string to filter targets by (e.g. 'c', 'c++').

    Returns:
      A list of target dicts read from tools/run_tests/generated/tests.json.
    """
    # (removed an unused local `out = []` that was never referenced)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
169
170
171def _check_compiler(compiler, supported_compilers):
172    if compiler not in supported_compilers:
173        raise Exception('Compiler %s not supported (on this platform).' %
174                        compiler)
175
176
177def _check_arch(arch, supported_archs):
178    if arch not in supported_archs:
179        raise Exception('Architecture %s not supported.' % arch)
180
181
182def _is_use_docker_child():
183    """Returns True if running running as a --use_docker child."""
184    return True if os.getenv('DOCKER_RUN_SCRIPT_COMMAND') else False
185
186
# Per-platform knobs used to assemble Python build/run command lines.
# NOTE(review): the namedtuple typename ('_ConfigVars') does not match the
# variable name; repr() shows '_ConfigVars' — confirm nothing depends on the
# repr before renaming.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
])
195
196
def _python_config_generator(name, major, minor, bits, config_vars):
    """Create a PythonConfig for one CPython interpreter/venv combination."""
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    venv_python = os.path.join(name, config_vars.venv_relative_python[0])
    run_cmd = config_vars.shell + config_vars.runner + [venv_python]
    return PythonConfig(name, build_cmd, run_cmd)
206
207
def _pypy_config_generator(name, major, config_vars):
    """Create a PythonConfig for one PyPy interpreter/venv combination."""
    interpreter = _pypy_pattern_function(major=major)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    venv_python = os.path.join(name, config_vars.venv_relative_python[0])
    run_cmd = config_vars.shell + config_vars.runner + [venv_python]
    return PythonConfig(name, build_cmd, run_cmd)
216
217
218def _python_pattern_function(major, minor, bits):
219    # Bit-ness is handled by the test machine's environment
220    if os.name == "nt":
221        if bits == "64":
222            return '/c/Python{major}{minor}/python.exe'.format(major=major,
223                                                               minor=minor,
224                                                               bits=bits)
225        else:
226            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
227                major=major, minor=minor, bits=bits)
228    else:
229        return 'python{major}.{minor}'.format(major=major, minor=minor)
230
231
232def _pypy_pattern_function(major):
233    if major == '2':
234        return 'pypy'
235    elif major == '3':
236        return 'pypy3'
237    else:
238        raise ValueError("Unknown PyPy major version")
239
240
class CLanguage(object):
    """C/C++ test language: configures a cmake build and emits one JobSpec
    per test (splitting gtest/benchmark binaries into per-test jobs)."""

    def __init__(self, lang_suffix, test_lang):
        # lang_suffix: e.g. distinguishes build flavors (exported to build
        # scripts); test_lang: value matched against tests.json 'language'.
        self.lang_suffix = lang_suffix
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch for this platform and derive cmake settings."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, [
                'default',
                'cmake',
                'cmake_ninja_vs2019',
                'cmake_vs2019',
            ])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])

            activate_vs_tools = ''
            if self.args.compiler == 'cmake_ninja_vs2019' or self.args.compiler == 'cmake' or self.args.compiler == 'default':
                # cmake + ninja build is the default because it is faster and supports boringssl assembly optimizations
                # the compiler used is exactly the same as for cmake_vs2017
                cmake_generator = 'Ninja'
                activate_vs_tools = '2019'
            elif self.args.compiler == 'cmake_vs2019':
                cmake_generator = 'Visual Studio 16 2019'
            else:
                print('should never reach here.')
                sys.exit(1)

            self._cmake_configure_extra_args = []
            self._cmake_generator_windows = cmake_generator
            # required to pass as cmake "-A" configuration for VS builds (but not for Ninja)
            self._cmake_architecture_windows = 'x64' if self.args.arch == 'x64' else 'Win32'
            # when building with Ninja, the VS common tools need to be activated first
            self._activate_vs_tools_windows = activate_vs_tools
            # "x64_x86" means create 32bit binaries, but use 64bit toolkit to secure more memory for the build
            self._vs_tools_architecture_windows = 'x64' if self.args.arch == 'x64' else 'x64_x86'

        else:
            if self.platform == 'linux':
                # Allow all the known architectures. _check_arch_option has already checked that we're not doing
                # something illegal when not running under docker.
                _check_arch(self.args.arch, ['default', 'x64', 'x86', 'arm64'])
            else:
                _check_arch(self.args.arch, ['default'])

            self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
                self.args.use_docker, self.args.compiler)

            if self.args.arch == 'x86':
                # disable boringssl asm optimizations when on x86
                # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
                self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')

    def test_specs(self):
        """Build the list of JobSpecs for every runnable C/C++ test target.

        Targets come from tests.json; each is expanded across the platform's
        polling strategies, filtered by config/iomgr exclusions, and — for
        gtest/benchmark binaries — split into one job per individual test.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            # Targets that don't poll run once with strategy 'none'; others
            # run once per strategy for this platform (default: ['all']).
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                        polling_strategy,
                    'GRPC_VERBOSITY':
                        'DEBUG'
                }
                # Propagate an externally-selected DNS resolver, if any.
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue

                # Binary location depends on the generator: Ninja puts all
                # binaries in cmake/build/, VS nests them per build config.
                if self.platform == 'windows':
                    if self._cmake_generator_windows == 'Ninja':
                        binary = 'cmake/build/%s.exe' % target['name']
                    else:
                        binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
                            self.config.build_config], target['name'])
                else:
                    binary = 'cmake/build/%s' % target['name']

                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    # NOTE(review): both locals below are unused — candidates
                    # for removal.
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.decode().split('\n'):
                            test = line.strip()
                            if not test:
                                continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' %
                                    (' '.join(cmdline), shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.decode().split('\n'):
                            # Strip trailing '#'-comments that gtest appends
                            # (e.g. for parameterized tests).
                            i = line.find('#')
                            if i >= 0:
                                line = line[:i]
                            if not line:
                                continue
                            # Unindented lines are suite names; indented ones
                            # are test names within the current suite.
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' %
                                        (' '.join(cmdline), shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: one job for the whole target.
                        cmdline = [binary] + target['args']
                        shortname = target.get(
                            'shortname',
                            ' '.join(pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds',
                                    _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def pre_build_steps(self):
        """No pre-build steps for C/C++."""
        return []

    def build_steps(self):
        """Return the cmake build command(s) for this platform."""
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\build_cxx.bat',
                '-DgRPC_BUILD_MSVC_MP_COUNT=%d' % self.args.jobs
            ] + self._cmake_configure_extra_args]
        else:
            return [['tools/run_tests/helper_scripts/build_cxx.sh'] +
                    self._cmake_configure_extra_args]

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        environ = {'GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX': self.lang_suffix}
        if self.platform == 'windows':
            environ['GRPC_CMAKE_GENERATOR'] = self._cmake_generator_windows
            environ[
                'GRPC_CMAKE_ARCHITECTURE'] = self._cmake_architecture_windows
            environ[
                'GRPC_BUILD_ACTIVATE_VS_TOOLS'] = self._activate_vs_tools_windows
            environ[
                'GRPC_BUILD_VS_TOOLS_ARCHITECTURE'] = self._vs_tools_architecture_windows
        return environ

    def post_tests_steps(self):
        """Return the post-tests helper script (non-Windows only)."""
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def _clang_cmake_configure_extra_args(self, version_suffix=''):
        """cmake args selecting clang/clang++ (optionally version-suffixed)."""
        return [
            '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
            '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and cmake configure args to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # if not running under docker, we cannot ensure the right compiler version will be used,
            # so we only allow the non-specific choices.
            _check_compiler(compiler, ['default', 'cmake'])

        if compiler == 'default' or compiler == 'cmake':
            return ('debian11', [])
        elif compiler == 'gcc7':
            return ('gcc_7', [])
        elif compiler == 'gcc10.2':
            return ('debian11', [])
        elif compiler == 'gcc10.2_openssl102':
            return ('debian11_openssl102', [
                "-DgRPC_SSL_PROVIDER=package",
            ])
        elif compiler == 'gcc12':
            return ('gcc_12', ["-DCMAKE_CXX_STANDARD=20"])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang6':
            return ('clang_6', self._clang_cmake_configure_extra_args())
        elif compiler == 'clang15':
            return ('clang_15', self._clang_cmake_configure_extra_args())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        """Docker image directory used when running under --use_docker."""
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.lang_suffix
507
508
509# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Delegates Node testing to the helper scripts from grpc/grpc-node."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Validate the compiler choice and derive runtime/node_version."""
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[len('electron'):]
        else:
            self.runtime = 'node'
            # Take off the word "node"
            self.node_version = compiler[len('node'):]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """Return the single job that runs the grpc-node test script."""
        if self.platform == 'windows':
            spec = self.config.job_spec(
                ['tools\\run_tests\\helper_scripts\\run_node.bat'])
        else:
            spec = self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                None,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        """No pre-build steps; the remote repo handles its own build."""
        return []

    def build_steps(self):
        """No build steps; the remote repo handles its own build."""
        return []

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """No post-test steps."""
        return []

    def dockerfile_dir(self):
        """Docker image directory used when running under --use_docker."""
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
570
571
class Php7Language(object):
    """PHP 7 test-suite runner driven by helper shell scripts."""

    def configure(self, config, args):
        """Store run configuration; only the default compiler is allowed."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Return the single job that drives the PHP test script."""
        job = self.config.job_spec(['src/php/bin/run_tests.sh'],
                                   environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [job]

    def pre_build_steps(self):
        """No pre-build steps for PHP."""
        return []

    def build_steps(self):
        """Build via the PHP helper script."""
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """Run the PHP post-tests helper script."""
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def dockerfile_dir(self):
        """Docker image directory used when running under --use_docker."""
        return 'tools/dockerfile/test/php7_debian11_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
604
605
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)

    Fields:
      name: label for the interpreter/venv (e.g. 'py39', 'pypy3').
      build: argv that builds/prepares the venv for this interpreter.
      run: argv prefix used to execute tests inside that venv.
    """
609
610
class PythonLanguage(object):
    """Python test language: builds per-interpreter venvs and emits one
    JobSpec per test case per interpreter per IO-manager flavor."""

    # JSON files listing test cases, keyed by IO-manager flavor.
    _TEST_SPECS_FILE = {
        'native': ['src/python/grpcio_tests/tests/tests.json'],
        'gevent': [
            'src/python/grpcio_tests/tests/tests.json',
            'src/python/grpcio_tests/tests_gevent/tests.json',
        ],
        'asyncio': ['src/python/grpcio_tests/tests_aio/tests.json'],
    }

    # Command appended to each interpreter's run argv, per IO flavor.
    _TEST_COMMAND = {
        'native': 'test_lite',
        'gevent': 'test_gevent',
        'asyncio': 'test_aio',
    }

    def configure(self, config, args):
        """Store run configuration and resolve the interpreters to test."""
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        """Return one JobSpec per (interpreter, IO flavor, test case)."""
        # load list of known test suites
        jobs = []
        for io_platform in self._TEST_SPECS_FILE:
            test_cases = []
            for tests_json_file_name in self._TEST_SPECS_FILE[io_platform]:
                with open(tests_json_file_name) as tests_json_file:
                    test_cases.extend(json.load(tests_json_file))

            environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
            # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
            # designed for non-native IO manager. It has a side-effect that
            # overrides threading settings in C-Core.
            if io_platform != 'native':
                environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
            for python_config in self.pythons:
                jobs.extend([
                    self.config.job_spec(
                        python_config.run + [self._TEST_COMMAND[io_platform]],
                        timeout_seconds=8 * 60,
                        environ=dict(
                            GRPC_PYTHON_TESTRUNNER_FILTER=str(test_case),
                            **environment),
                        shortname='%s.%s.%s' %
                        (python_config.name, io_platform, test_case),
                    ) for test_case in test_cases
                ])
        return jobs

    def pre_build_steps(self):
        """No pre-build steps for Python."""
        return []

    def build_steps(self):
        """One build command per configured interpreter (venv setup)."""
        return [config.build for config in self.pythons]

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """Run the coverage post-tests script only for the gcov config."""
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def dockerfile_dir(self):
        """Docker image directory used when running under --use_docker."""
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_docker_distro_name(),
            _docker_arch_suffix(self.args.arch))

    def _python_docker_distro_name(self):
        """Choose the docker image to use based on python version."""
        if self.args.compiler == 'python_alpine':
            return 'alpine'
        else:
            return 'debian11_default'

    def _get_pythons(self, args):
        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
        if args.iomgr_platform != 'native':
            raise ValueError(
                'Python builds no longer differentiate IO Manager platforms, please use "native"'
            )

        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'

        if os.name == 'nt':
            # Windows builds go through an msys2 bash helper script.
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']

        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]

        config_vars = _PythonConfigVars(shell, builder,
                                        builder_prefix_arguments,
                                        venv_relative_python, toolchain, runner)
        python37_config = _python_config_generator(name='py37',
                                                   major='3',
                                                   minor='7',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python38_config = _python_config_generator(name='py38',
                                                   major='3',
                                                   minor='8',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python39_config = _python_config_generator(name='py39',
                                                   major='3',
                                                   minor='9',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python310_config = _python_config_generator(name='py310',
                                                    major='3',
                                                    minor='10',
                                                    bits=bits,
                                                    config_vars=config_vars)
        pypy27_config = _pypy_config_generator(name='pypy',
                                               major='2',
                                               config_vars=config_vars)
        pypy32_config = _pypy_config_generator(name='pypy3',
                                               major='3',
                                               config_vars=config_vars)

        if args.compiler == 'default':
            if os.name == 'nt':
                return (python38_config,)
            elif os.uname()[0] == 'Darwin':
                # NOTE(rbellevi): Testing takes significantly longer on
                # MacOS, so we restrict the number of interpreter versions
                # tested.
                return (python38_config,)
            elif platform.machine() == 'aarch64':
                # Currently the python_debian11_default_arm64 docker image
                # only has python3.9 installed (and that seems sufficient
                # for arm64 testing)
                return (python39_config,)
            else:
                return (
                    python37_config,
                    python38_config,
                )
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'python3.8':
            return (python38_config,)
        elif args.compiler == 'python3.9':
            return (python39_config,)
        elif args.compiler == 'python3.10':
            return (python310_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python39_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python37_config,
                python38_config,
                python39_config,
                python310_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'
799
800
class RubyLanguage(object):
    """Language wrapper that builds and runs the Ruby test suite."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Job specs: the main Ruby suite plus each end2end script."""
        # TODO(apolcyn): re-enable the following tests after
        # https://bugs.ruby-lang.org/issues/15499 is fixed:
        # They previously worked on ruby 2.5 but needed to be disabled
        # after dropping support for ruby 2.5:
        #   - src/ruby/end2end/channel_state_test.rb
        #   - src/ruby/end2end/sig_int_during_channel_watch_test.rb
        # TODO(apolcyn): the following test is skipped because it sometimes
        # hits "Bus Error" crashes while requiring the grpc/ruby C-extension.
        # These crashes have been unreproducible outside of CI. Also see
        # b/266212253.
        #   - src/ruby/end2end/grpc_class_init_test.rb
        end2end_tests = (
            'src/ruby/end2end/sig_handling_test.rb',
            'src/ruby/end2end/channel_closing_test.rb',
            'src/ruby/end2end/killed_client_thread_test.rb',
            'src/ruby/end2end/forking_client_test.rb',
            'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
            'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
            'src/ruby/end2end/client_memory_usage_test.rb',
            'src/ruby/end2end/package_with_underscore_test.rb',
            'src/ruby/end2end/graceful_sig_handling_test.rb',
            'src/ruby/end2end/graceful_sig_stop_test.rb',
            'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/call_credentials_timeout_test.rb',
            'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
        )
        # The main suite runs first, followed by one job per end2end script.
        specs = [
            self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
                                 timeout_seconds=10 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        specs.extend(
            self.config.job_spec(['ruby', test],
                                 shortname=test,
                                 timeout_seconds=20 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            for test in end2end_tests)
        return specs

    def pre_build_steps(self):
        """Script run once before the build starts."""
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def build_steps(self):
        """Script that builds the Ruby artifacts."""
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """Cleanup script run after all tests finish."""
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_debian11_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
868
869
class CSharpLanguage(object):
    """C# tests: runs test assemblies under the coreclr and/or mono runtimes."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default', 'coreclr', 'mono'])
        if self.args.compiler == 'default':
            # test both runtimes by default
            self.test_runtimes = ['coreclr', 'mono']
        else:
            # only test the specified runtime
            self.test_runtimes = [self.args.compiler]

        if self.platform == 'windows':
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            self._docker_distro = 'debian11'

    def test_specs(self):
        """Returns one job spec per (runtime, test) pair listed in tests.json."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']

        specs = []
        for test_runtime in self.test_runtimes:
            if test_runtime == 'coreclr':
                assembly_extension = '.dll'
                assembly_subdir = 'bin/%s/netcoreapp3.1' % msbuild_config
                runtime_cmd = ['dotnet', 'exec']
            elif test_runtime == 'mono':
                assembly_extension = '.exe'
                assembly_subdir = 'bin/%s/net45' % msbuild_config
                if self.platform == 'windows':
                    runtime_cmd = []
                elif self.platform == 'mac':
                    # mono before version 5.2 on MacOS defaults to 32bit runtime
                    runtime_cmd = ['mono', '--arch=64']
                else:
                    runtime_cmd = ['mono']
            else:
                # Fixed: the "%s" placeholder was previously never filled in,
                # so the message literally printed '"%s"'.
                raise Exception('Illegal runtime "%s" was specified.' %
                                test_runtime)

            # Iterate the dict directly instead of six.iterkeys (py3-safe).
            for assembly in tests_by_assembly:
                assembly_file = 'src/csharp/%s/%s/%s%s' % (
                    assembly, assembly_subdir, assembly, assembly_extension)

                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s.%s' % (test_runtime, test),
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        if self.platform == 'windows':
            return {'ARCHITECTURE': self._cmake_arch_option}
        else:
            return {}

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
964
965
class ObjCLanguage(object):
    """Objective-C tests: example build checks plus the CFStream test suite."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        specs = []
        # Build the plain Objective-C sample app.
        specs.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                }))
        # TODO(jtattermusch): Create bazel target for the sample and remove the test task from here.
        # Build the Swift sample app.
        specs.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }))
        # Disabled due to #20258
        # TODO (mxyan): Reenable this test when #20258 is resolved.
        # specs.append(
        #     self.config.job_spec(
        #         ['src/objective-c/tests/build_one_example_bazel.sh'],
        #         timeout_seconds=20 * 60,
        #         shortname='ios-buildtest-example-watchOS-sample',
        #         cpu_cost=1e6,
        #         environ={
        #             'SCHEME': 'watchOS-sample-WatchKit-App',
        #             'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
        #             'FRAMEWORKS': 'NO'
        #         }))

        # TODO(jtattermusch): move the test out of the test/core/iomgr/CFStreamTests directory?
        # How does one add the cfstream dependency in bazel?
        specs.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='ios-test-cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return sorted(specs)

    def pre_build_steps(self):
        """No extra steps needed before the build."""
        return []

    def build_steps(self):
        """Nothing is built ahead of time; the scripts build on demand."""
        return []

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """No cleanup needed after the tests."""
        return []

    def dockerfile_dir(self):
        # ObjC tests never run under docker.
        return None

    def __str__(self):
        return 'objc'
1039
1040
class Sanity(object):
    """Runs the sanity/lint scripts listed in a YAML configuration file."""

    def __init__(self, config_file):
        # Name of the YAML file (under tools/run_tests/sanity/) to load.
        self.config_file = config_file

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        import yaml
        with open('tools/run_tests/sanity/%s' % self.config_file, 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
                environ['IWYU_SKIP_DOCKER'] = 'true'
                # sanity tests run tools/bazel wrapper concurrently
                # and that can result in a download/run race in the wrapper.
                # under docker we already have the right version of bazel
                # so we can just disable the wrapper.
                environ['DISABLE_BAZEL_WRAPPER'] = 'true'
            specs = []
            for cmd in yaml.safe_load(f):
                specs.append(
                    self.config.job_spec(cmd['script'].split(),
                                         timeout_seconds=45 * 60,
                                         environ=environ,
                                         cpu_cost=cmd.get('cpu_cost', 1)))
            return specs

    def pre_build_steps(self):
        """No extra steps needed before the build."""
        return []

    def build_steps(self):
        """Nothing to build for sanity checks."""
        return []

    def build_steps_environ(self):
        """Extra environment variables set for pre_build_steps and build_steps jobs."""
        return {}

    def post_tests_steps(self):
        """No cleanup needed after the tests."""
        return []

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
1090
1091
# different configurations we can run under
# NOTE(review): the generated file is parsed with ast.literal_eval rather than
# json.loads -- presumably because it may contain Python literal syntax that
# json cannot parse; confirm before switching parsers.
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Maps each value accepted by the -l/--language flag to its handler object.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity('sanity_tests.yaml'),
    'clang-tidy': Sanity('clang_tidy_tests.yaml'),
    'iwyu': Sanity('iwyu_tests.yaml'),
}

# Maps build config name to the MSBuild configuration used for C# builds.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
1116
1117
def _build_step_environ(cfg, extra_env=None):
    """Environment variables set for each build step.

    Args:
      cfg: Name of the build config (also a key into _MSBUILD_CONFIG for C#).
      extra_env: Optional dict of additional variables to merge in.

    Returns:
      Dict of environment variables for the build-step job.
    """
    # Fixed: avoid a mutable default argument (extra_env={}).
    environ = {'CONFIG': cfg, 'GRPC_RUN_TESTS_JOBS': str(args.jobs)}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    if msbuild_cfg:
        environ['MSBUILD_CONFIG'] = msbuild_cfg
    if extra_env:
        environ.update(extra_env)
    return environ
1126
1127
1128def _windows_arch_option(arch):
1129    """Returns msbuild cmdline option for selected architecture."""
1130    if arch == 'default' or arch == 'x86':
1131        return '/p:Platform=Win32'
1132    elif arch == 'x64':
1133        return '/p:Platform=x64'
1134    else:
1135        print('Architecture %s not supported.' % arch)
1136        sys.exit(1)
1137
1138
def _check_arch_option(arch):
    """Checks that architecture option is valid.

    Exits the process with an error message when the requested architecture
    cannot be used on the current platform/runtime.
    """
    if platform_string() == 'windows':
        # msbuild option lookup already exits on unsupported values.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_machine = platform.machine()
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_machine == 'x86_64' and runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_machine == 'x86_64' and runtime_arch == '32bit' and arch == 'x86':
            return
        elif runtime_machine == 'aarch64' and runtime_arch == '64bit' and arch == 'arm64':
            return
        else:
            print(
                'Architecture %s does not match current runtime architecture.' %
                arch)
            sys.exit(1)
    else:
        # Fixed: previously this branch read the global args.arch instead of
        # the arch parameter; use the parameter consistently.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
1165
1166
1167def _docker_arch_suffix(arch):
1168    """Returns suffix to dockerfile dir to use."""
1169    if arch == 'default' or arch == 'x64':
1170        return 'x64'
1171    elif arch == 'x86':
1172        return 'x86'
1173    elif arch == 'arm64':
1174        return 'arm64'
1175    else:
1176        print('Architecture %s not supported with current settings.' % arch)
1177        sys.exit(1)
1178
1179
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

       Returns:
           A positive integer or 0, the latter indicating an infinite number of
           runs.

       Raises:
           argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    # Fixed: catch only ValueError instead of a bare except, which also
    # swallowed SystemExit/KeyboardInterrupt. int() on a str can only raise
    # ValueError, and the n <= 0 branch raises ValueError deliberately.
    except ValueError:
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
1200
1201
def percent_type(arg_str):
    """argparse type callback: parse a float and require it in [0, 100]."""
    pct = float(arg_str)
    # Guard clause first; note the two-sided comparison deliberately lets
    # NaN through, matching the original behavior.
    if pct < 0 or pct > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
1208
1209
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b are within the relative or absolute tolerance."""
    # Equivalent to: abs(a - b) <= max(rel_tol * max(|a|, |b|), abs_tol).
    diff = abs(a - b)
    return diff <= abs_tol or diff <= rel_tol * max(abs(a), abs(b))
1213
1214
def _shut_down_legacy_server(legacy_server_port):
    """Shut down legacy version of port server.

    Best-effort: probes the port server's /version_number endpoint and, if
    anything answers, asks it to quit via /quitquitquit.
    """
    try:
        # Any response at all means a legacy server is listening; the parsed
        # version value itself is not used.
        version = int(
            urllib.request.urlopen('http://localhost:%d/version_number' %
                                   legacy_server_port,
                                   timeout=10).read())
    # Fixed: was a bare except, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort behavior.
    except Exception:
        pass
    else:
        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
                               legacy_server_port).read()
1227
1228
1229def _calculate_num_runs_failures(list_of_results):
1230    """Calculate number of runs and failures for a particular test.
1231
1232  Args:
1233    list_of_results: (List) of JobResult object.
1234  Returns:
1235    A tuple of total number of runs and failures.
1236  """
1237    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1238    num_failures = 0
1239    for jobresult in list_of_results:
1240        if jobresult.retries > 0:
1241            num_runs += jobresult.retries
1242        if jobresult.num_failures > 0:
1243            num_failures += jobresult.num_failures
1244    return num_runs, num_failures
1245
1246
class BuildAndRunError(object):
    """Represents error type in _build_and_run."""

    # Unique sentinel objects compared by identity; each one marks which
    # phase of _build_and_run failed (build, test run, or post-test steps).
    BUILD = object()
    TEST = object()
    POST_TEST = object()
1253
1254
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: callback passed through to jobset.run to allow
        aborting the test run.
      newline_on_success: forwarded to jobset.run output formatting.
      xml_report: optional path for a JUnit-compatible XML report.
      build_only: when True, stop after the build steps.

    Returns:
      A list of BuildAndRunError sentinels for the phases that failed
      (empty list on success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(build_steps,
                                         maxjobs=1,
                                         stop_on_failure=True,
                                         newline_on_success=newline_on_success,
                                         travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    # Reset so the finally-block reporting only sees test results, not the
    # build-step resultset from above.
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Collect the deduplicated set of specs matching the include regex
        # and not matching the exclude regex.
        one_run = set(spec for language in languages
                      for spec in language.test_specs()
                      if (re.search(args.regex, spec.shortname) and
                          (args.regex_exclude == '' or
                           not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                      ) > 0, 'Must have at least one test for a -n inf run'
        # Repeat the run list forever (for -n inf) or runs_per_test times.
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Summarize per-test outcomes: fully failed vs. flaky (some runs
            # passed, some failed).
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message('FLAKE',
                                       '%s [%d/%d runs flaked]' %
                                       (k, num_failures, num_runs),
                                       do_newline=True)
    finally:
        # Always tear down antagonists and emit reports, even when the run
        # above raised or was cancelled.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0
                ],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            try:
                # upload_results_to_bq is only defined when the optional
                # import at the top of the file succeeded; NameError here
                # means the BQ helper is unavailable.
                upload_results_to_bq(resultset, args.bq_result_table,
                                     upload_extra_fields)
            except NameError as e:
                logging.warning(
                    e)  # It's fine to ignore since this is not critical
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset,
                xml_report,
                suite_name=args.report_suite_name,
                multi_target=args.report_multi_target)

    # Post-test steps run regardless of test outcome; their failures are
    # reported separately from test failures.
    number_failures, _ = jobset.run(post_tests_steps,
                                    maxjobs=1,
                                    stop_on_failure=False,
                                    newline_on_success=newline_on_success,
                                    travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
1382
1383
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c',
                  '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
# Test selection / parallelism options.
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
                  '--sample_percent',
                  default=100.0,
                  type=percent_type,
                  help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-t',
    '--travis',
    default=False,
    action='store_const',
    const=True,
    help='When set, indicates that the script is running on CI (= not locally).'
)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l',
                  '--language',
                  choices=sorted(_LANGUAGES.keys()),
                  nargs='+',
                  required=True)
argp.add_argument('-S',
                  '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# Build environment selection (architecture / compiler / iomgr).
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64', 'arm64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default',
        'gcc7',
        'gcc10.2',
        'gcc10.2_openssl102',
        'gcc12',
        'gcc_musl',
        'clang6',
        'clang15',
        'python2.7',
        'python3.5',
        'python3.7',
        'python3.8',
        'python3.9',
        'pypy',
        'pypy3',
        'python_alpine',
        'all_the_cpythons',
        'electron1.3',
        'electron1.6',
        'coreclr',
        'cmake',
        'cmake_ninja_vs2019',
        'cmake_vs2019',
        'mono',
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument('--iomgr_platform',
                  choices=['native', 'gevent', 'asyncio'],
                  default='native',
                  help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument('-a', '--antagonists', default=0, type=int)
# Reporting options.
argp.add_argument('-x',
                  '--xml_report',
                  default=None,
                  type=str,
                  help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
                  default='tests',
                  type=str,
                  help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--report_multi_target',
    default=False,
    const=True,
    action='store_const',
    help='Generate separate XML report for each test job (Looks better in UIs).'
)
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# Polling-strategy overrides.
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
                  default=-1,
                  type=int,
                  help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
                  default='',
                  type=str,
                  nargs='?',
                  help='Upload test results to a specified BQ table.')
args = argp.parse_args()
1544
# Module-level state populated from the parsed command line.
flaky_tests = set()
shortname_to_cpu = {}

# Apply polling-strategy overrides before any test specs are generated.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

# TODO(jtattermusch): is this setting applied/being used?
if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Resolve language flags to handler objects and configure each one.
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

if len(languages) != 1:
    print('Building multiple languages simultaneously is not supported!')
    sys.exit(1)
1570
# If --use_docker was used, respawn the run_tests.py script under a docker
# container instead of continuing.
if args.use_docker:
    if not args.travis:
        # Warn interactively; only committed changes make it into the image.
        for warning_line in (
                'Seen --use_docker flag, will run tests under docker.',
                '',
                'IMPORTANT: The changes you are testing need to be locally committed',
                'because only the committed changes in the current branch will be',
                'copied to the docker environment.'):
            print(warning_line)
        time.sleep(5)

    # All selected languages must agree on a single docker image.
    dockerfile_dirs = {language.dockerfile_dir() for language in languages}
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
              'images.')
        sys.exit(1)
    dockerfile_dir = next(iter(dockerfile_dirs))

    # Re-run this script inside the container, minus the --use_docker flag.
    child_argv = [arg for arg in sys.argv if arg != '--use_docker']
    run_tests_cmd = 'python3 tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = dict(os.environ)
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run.sh'
    env['DOCKER_RUN_SCRIPT_COMMAND'] = run_tests_cmd

    retcode = subprocess.call(
        'tools/run_tests/dockerize/build_and_run_docker.sh',
        shell=True,
        env=env)
    _print_debug_info_epilogue(dockerfile_dir=dockerfile_dir)
    sys.exit(retcode)
1608
_check_arch_option(args.arch)


def _job_specs_for(step_attr, **jobspec_kwargs):
    """Build a deduplicated set of JobSpecs from each language's given step list.

    step_attr names the per-language method (e.g. 'pre_build_steps') that
    returns the command lines; extra JobSpec keyword args are passed through.
    """
    specs = set()
    for language in languages:
        for cmdline in getattr(language, step_attr)():
            specs.add(
                jobset.JobSpec(cmdline,
                               environ=_build_step_environ(
                                   build_config,
                                   extra_env=language.build_steps_environ()),
                               **jobspec_kwargs))
    return specs


# Pre-build steps get retried if they fail (e.g. to avoid flakes on
# downloading dependencies etc.).
build_steps = list(
    _job_specs_for('pre_build_steps',
                   timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
                   flake_retries=2))

# Build steps run with no timeout.
build_steps.extend(_job_specs_for('build_steps', timeout_seconds=None))

# Steps to run after the tests have finished.
post_tests_steps = list(_job_specs_for('post_tests_steps'))

runs_per_test = args.runs_per_test
1642
errors = _build_and_run(check_cancelled=lambda: False,
                        newline_on_success=args.newline_on_success,
                        xml_report=args.xml_report,
                        build_only=args.build_only)
if errors:
    jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
    jobset.message('SUCCESS', 'All tests passed', do_newline=True)

if not _is_use_docker_child():
    # if --use_docker was used, the outer invocation of run_tests.py will
    # print the debug info instead.
    _print_debug_info_epilogue()

# Encode each error category as its own bit in the process exit code.
exit_code = 0
for error_kind, bit in ((BuildAndRunError.BUILD, 1),
                        (BuildAndRunError.TEST, 2),
                        (BuildAndRunError.POST_TEST, 4)):
    if error_kind in errors:
        exit_code |= bit
sys.exit(exit_code)
1665