• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright 2015 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#     http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15"""Run tests in parallel."""
16
17from __future__ import print_function
18
19import argparse
20import ast
21import collections
22import glob
23import itertools
24import json
25import logging
26import multiprocessing
27import os
28import os.path
29import pipes
30import platform
31import random
32import re
33import socket
34import subprocess
35import sys
36import tempfile
37import traceback
38import time
39from six.moves import urllib
40import uuid
41import six
42
43import python_utils.jobset as jobset
44import python_utils.report_utils as report_utils
45import python_utils.watch_dirs as watch_dirs
46import python_utils.start_port_server as start_port_server
47try:
48    from python_utils.upload_test_results import upload_results_to_bq
49except (ImportError):
50    pass  # It's ok to not import because this is only necessary to upload results to BQ.
51
# Make ../gcp/utils importable — presumably for BQ upload helpers used by
# upload_results_to_bq above; TODO confirm against that module's imports.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# All relative paths in this script are resolved against the repository root.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Environment forced onto language test wrappers (see job_spec call sites).
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Polling engines exercised per platform; platforms missing from this dict
# fall back to ['all'] where the dict is consulted (CLanguage.test_specs).
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll'],
    'mac': ['poll'],
}
67
68
def platform_string():
    """Return jobset's canonical platform name (e.g. 'linux', 'mac', 'windows')."""
    return jobset.platform_string()
72
# Default per-test timeout; individual targets may override it via their
# 'timeout_seconds' field (see the test_specs methods below).
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
75
76
def run_shell_command(cmd, env=None, cwd=None):
    """Run `cmd` through the shell, discarding its output.

    On a non-zero exit status, logs the command, status and captured output,
    then re-raises subprocess.CalledProcessError to the caller.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as error:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            error.cmd, error.returncode, error.output)
        raise
85
86
def max_parallel_tests_for_current_platform():
    """Upper bound on concurrently running test jobs for this platform."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
93
94
95# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (e.g. 'opt', 'dbg') plus per-run environment."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Initialize the config.

        Args:
          config: build config name; also exported as CONFIG in the environment.
          environ: optional dict of environment variables for every job.
          timeout_multiplier: factor applied to each job's timeout.
          tool_prefix: optional command-line prefix prepended to every job
            (e.g. a wrapper tool). Defaults to no prefix.
          iomgr_platform: IO manager flavor the tests run under.
        """
        if environ is None:
            environ = {}
        # None-sentinel instead of a mutable default argument.
        if tool_prefix is None:
            tool_prefix = []
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config.

        Args:
          cmdline: a list of strings specifying the command line the test
            would like to run.
          timeout_seconds: base timeout, scaled by self.timeout_multiplier;
            pass a falsy value for no timeout.
          shortname: human-readable job name; also the key looked up in the
            module-level flaky_tests / shortname_to_cpu tables.
          environ: extra environment variables overlaid on self.environ.
          cpu_cost: relative CPU reservation for the job scheduler.
          flaky: mark the test as flaky, enabling retries.
        """
        actual_environ = self.environ.copy()
        # None-sentinel instead of the `environ={}` mutable default argument.
        if environ:
            actual_environ.update(environ)
        # NOTE: flaky_tests, shortname_to_cpu and args are module-level values
        # populated elsewhere in this file (command-line parsing).
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier *
                             timeout_seconds if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
142
143
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets from the generated manifest.

    Args:
      travis: if True, filter by 'ci_platforms' and drop flaky targets.
      test_lang: target language to keep ('c' or 'c++').

    Returns:
      A list of target dicts runnable on the current platform.
    """
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
    # Removed a dead `out = []` local; the file is closed before filtering.
    return [
        tgt for tgt in js
        if tgt['language'] == test_lang and platform_string() in
        tgt[platforms_str] and not (travis and tgt['flaky'])
    ]
154
155
156def _check_compiler(compiler, supported_compilers):
157    if compiler not in supported_compilers:
158        raise Exception('Compiler %s not supported (on this platform).' %
159                        compiler)
160
161
162def _check_arch(arch, supported_archs):
163    if arch not in supported_archs:
164        raise Exception('Architecture %s not supported.' % arch)
165
166
167def _is_use_docker_child():
168    """Returns True if running running as a --use_docker child."""
169    return True if os.getenv('RUN_TESTS_COMMAND') else False
170
171
# Inputs shared by all python interpreter configs (see the generators below).
# NOTE(review): the namedtuple typename '_ConfigVars' differs from the
# variable name; harmless, but it is what appears in reprs.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
182
183
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for a CPython interpreter of the given version."""
    # The venv directory is suffixed with the IO manager flavor so different
    # flavors don't share an environment.
    venv_name = name + '_' + config_vars.iomgr_platform
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [venv_name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(venv_name, config_vars.venv_relative_python[0]),
        config_vars.test_name,
    ]
    return PythonConfig(venv_name, build_cmd, run_cmd)
195
196
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for a PyPy interpreter of the given major version."""
    interpreter = _pypy_pattern_function(major=major)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = (config_vars.shell + config_vars.runner +
               [os.path.join(name, config_vars.venv_relative_python[0])])
    return PythonConfig(name, build_cmd, run_cmd)
205
206
207def _python_pattern_function(major, minor, bits):
208    # Bit-ness is handled by the test machine's environment
209    if os.name == "nt":
210        if bits == "64":
211            return '/c/Python{major}{minor}/python.exe'.format(major=major,
212                                                               minor=minor,
213                                                               bits=bits)
214        else:
215            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
216                major=major, minor=minor, bits=bits)
217    else:
218        return 'python{major}.{minor}'.format(major=major, minor=minor)
219
220
221def _pypy_pattern_function(major):
222    if major == '2':
223        return 'pypy'
224    elif major == '3':
225        return 'pypy3'
226    else:
227        raise ValueError("Unknown PyPy major version")
228
229
class CLanguage(object):
    """Language handler for the C/C++ core test targets.

    Knows how to configure the build (cmake generator, docker distro,
    sanitizer-aware timeouts), enumerate test binaries from the generated
    manifest, and expand gtest/benchmark binaries into one job per test.
    """

    def __init__(self, make_target, test_lang):
        # make_target: e.g. 'c' or 'cxx' — used to compose build target names.
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch choices and derive cmake/make options."""
        self.config = config
        self.args = args
        self._make_options = []
        self._use_cmake = True
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, [
                'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
                'cmake_vs2019'
            ])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            # Map the compiler choice to a Visual Studio cmake generator;
            # 'default'/'cmake'/'cmake_vs2015' all fall through to VS 2015.
            if self.args.compiler == 'cmake_vs2019':
                cmake_generator_option = 'Visual Studio 16 2019'
            elif self.args.compiler == 'cmake_vs2017':
                cmake_generator_option = 'Visual Studio 15 2017'
            else:
                cmake_generator_option = 'Visual Studio 14 2015'
            cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._cmake_configure_extra_args = [
                '-G', cmake_generator_option, '-A', cmake_arch_option
            ]
        else:
            if self.platform == 'linux':
                # Allow all the known architectures. _check_arch_option has already checked that we're not doing
                # something illegal when not running under docker.
                _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            else:
                _check_arch(self.args.arch, ['default'])

            self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
                self.args.use_docker, self.args.compiler)

            if self.args.arch == 'x86':
                # disable boringssl asm optimizations when on x86
                # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
                self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')

        if args.iomgr_platform == "uv":
            # Pull libuv build flags from pkg-config, falling back to -luv.
            # NOTE(review): subprocess.check_output returns bytes on Python 3,
            # so `.strip() + ' '` would raise TypeError when pkg-config
            # succeeds — a py2 holdover; a .decode() appears to be missing.
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand the generated test manifest into a sorted list of JobSpecs.

        Each target is crossed with the platform's polling strategies; gtest
        and benchmark binaries are further split into one job per test case.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                        polling_strategy,
                    'GRPC_VERBOSITY':
                        'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                # Locate the built binary; layout differs per platform and
                # build system (_MSBUILD_CONFIG is defined elsewhere in file).
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
                        self.config.build_config], target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    # NOTE(review): list_test_command / filter_test_command
                    # are never used below — candidates for removal.
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.decode().split('\n'):
                            test = line.strip()
                            if not test:
                                continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' %
                                    (' '.join(cmdline), shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.decode().split('\n'):
                            # Strip trailing '#'-comments emitted by gtest's
                            # listing (e.g. parameterized test annotations).
                            i = line.find('#')
                            if i >= 0:
                                line = line[:i]
                            if not line:
                                continue
                            # Unindented lines are suite names; indented lines
                            # are test names within the current suite.
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' %
                                        (' '.join(cmdline), shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: one job for the whole executable.
                        cmdline = [binary] + target['args']
                        shortname = target.get(
                            'shortname',
                            ' '.join(pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds',
                                    _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        """Build targets to pass to make for this language."""
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        """Extra make options computed in configure() (libuv flags etc.)."""
        return self._make_options

    def pre_build_steps(self):
        """Commands to run before the build (cmake configure step)."""
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
                    self._cmake_configure_extra_args]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
                    self._cmake_configure_extra_args]
        else:
            return []

    def build_steps(self):
        """No extra build steps beyond make targets for C/C++."""
        return []

    def post_tests_steps(self):
        """Commands to run after the tests complete."""
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        """Path of the Makefile the build should use."""
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_cmake_configure_extra_args(self, version_suffix=''):
        """cmake args selecting a (possibly versioned) clang toolchain."""
        return [
            '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
            '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and cmake configure args to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # if not running under docker, we cannot ensure the right compiler version will be used,
            # so we only allow the non-specific choices.
            _check_compiler(compiler, ['default', 'cmake'])

        if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
            return ('jessie', [])
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.4':
            return ('ubuntu1804', [])
        elif compiler == 'gcc8.3':
            return ('buster', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang4.0':
            return ('ubuntu1604',
                    self._clang_cmake_configure_extra_args(
                        version_suffix='-4.0'))
        elif compiler == 'clang5.0':
            return ('ubuntu1604',
                    self._clang_cmake_configure_extra_args(
                        version_suffix='-5.0'))
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        """Directory holding the dockerfile for this distro/arch combination."""
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
507
508
509# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Drives the Node tests that live in the separate grpc/grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Validate the compiler choice and derive runtime + node version."""
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[len('electron'):]
        else:
            # Take off the word "node"
            self.runtime = 'node'
            self.node_version = compiler[len('node'):]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """Single job invoking the platform-appropriate runner script."""
        if self.platform == 'windows':
            spec = self.config.job_spec(
                ['tools\\run_tests\\helper_scripts\\run_node.bat'])
        else:
            spec = self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                None,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
575
576
class Php7Language(object):
    """Runs the PHP7 test suite with an embedded OpenSSL/zlib C-core build."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """A single job running the PHP test script."""
        spec = self.config.job_spec(['src/php/bin/run_tests.sh'],
                                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
615
616
class PythonConfig(
        collections.namedtuple('PythonConfig', 'name build run')):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
620
621
622class PythonLanguage(object):
623
    # JSON manifest of test suite names, keyed by IO manager flavor
    # (consumed by test_specs below).
    _TEST_SPECS_FILE = {
        'native': 'src/python/grpcio_tests/tests/tests.json',
        'gevent': 'src/python/grpcio_tests/tests/tests.json',
        'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
    }
    # Folder component used when composing job shortnames in test_specs.
    _TEST_FOLDER = {
        'native': 'test',
        'gevent': 'test',
        'asyncio': 'test_aio',
    }
634
    def configure(self, config, args):
        """Record run configuration and resolve the python runtimes to test."""
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)
639
640    def test_specs(self):
641        # load list of known test suites
642        with open(self._TEST_SPECS_FILE[
643                self.args.iomgr_platform]) as tests_json_file:
644            tests_json = json.load(tests_json_file)
645        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
646        # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
647        # designed for non-native IO manager. It has a side-effect that
648        # overrides threading settings in C-Core.
649        if args.iomgr_platform != 'native':
650            environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
651        return [
652            self.config.job_spec(
653                config.run,
654                timeout_seconds=8 * 60,
655                environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
656                             **environment),
657                shortname='%s.%s.%s' %
658                (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
659                 suite_name),
660            ) for suite_name in tests_json for config in self.pythons
661        ]
662
    def pre_build_steps(self):
        """No pre-build steps for python."""
        return []
665
    def make_targets(self):
        """No make targets; everything is built by build_steps."""
        return []
668
    def make_options(self):
        """No extra make options for python."""
        return []
671
    def build_steps(self):
        """One build command per python runtime under test."""
        return [config.build for config in self.pythons]
674
675    def post_tests_steps(self):
676        if self.config.build_config != 'gcov':
677            return []
678        else:
679            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
680
    def makefile_name(self):
        """Path of the Makefile the build system should use."""
        return 'Makefile'
683
    def dockerfile_dir(self):
        """Directory holding the dockerfile for the chosen python image/arch."""
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
687
688    def _python_manager_name(self):
689        """Choose the docker image to use based on python version."""
690        if self.args.compiler in [
691                'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
692        ]:
693            return 'stretch_' + self.args.compiler[len('python'):]
694        elif self.args.compiler == 'python_alpine':
695            return 'alpine'
696        else:
697            return 'stretch_default'
698
699    def _get_pythons(self, args):
700        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
701        if args.arch == 'x86':
702            bits = '32'
703        else:
704            bits = '64'
705
706        if os.name == 'nt':
707            shell = ['bash']
708            builder = [
709                os.path.abspath(
710                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
711            ]
712            builder_prefix_arguments = ['MINGW{}'.format(bits)]
713            venv_relative_python = ['Scripts/python.exe']
714            toolchain = ['mingw32']
715        else:
716            shell = []
717            builder = [
718                os.path.abspath(
719                    'tools/run_tests/helper_scripts/build_python.sh')
720            ]
721            builder_prefix_arguments = []
722            venv_relative_python = ['bin/python']
723            toolchain = ['unix']
724
725        # Selects the corresponding testing mode.
726        # See src/python/grpcio_tests/commands.py for implementation details.
727        if args.iomgr_platform == 'native':
728            test_command = 'test_lite'
729        elif args.iomgr_platform == 'gevent':
730            test_command = 'test_gevent'
731        elif args.iomgr_platform == 'asyncio':
732            test_command = 'test_aio'
733        else:
734            raise ValueError('Unsupported IO Manager platform: %s' %
735                             args.iomgr_platform)
736        runner = [
737            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
738        ]
739
740        config_vars = _PythonConfigVars(shell, builder,
741                                        builder_prefix_arguments,
742                                        venv_relative_python, toolchain, runner,
743                                        test_command, args.iomgr_platform)
744        python27_config = _python_config_generator(name='py27',
745                                                   major='2',
746                                                   minor='7',
747                                                   bits=bits,
748                                                   config_vars=config_vars)
749        python35_config = _python_config_generator(name='py35',
750                                                   major='3',
751                                                   minor='5',
752                                                   bits=bits,
753                                                   config_vars=config_vars)
754        python36_config = _python_config_generator(name='py36',
755                                                   major='3',
756                                                   minor='6',
757                                                   bits=bits,
758                                                   config_vars=config_vars)
759        python37_config = _python_config_generator(name='py37',
760                                                   major='3',
761                                                   minor='7',
762                                                   bits=bits,
763                                                   config_vars=config_vars)
764        python38_config = _python_config_generator(name='py38',
765                                                   major='3',
766                                                   minor='8',
767                                                   bits=bits,
768                                                   config_vars=config_vars)
769        pypy27_config = _pypy_config_generator(name='pypy',
770                                               major='2',
771                                               config_vars=config_vars)
772        pypy32_config = _pypy_config_generator(name='pypy3',
773                                               major='3',
774                                               config_vars=config_vars)
775
776        if args.iomgr_platform == 'asyncio':
777            if args.compiler not in ('default', 'python3.6', 'python3.7',
778                                     'python3.8'):
779                raise Exception(
780                    'Compiler %s not supported with IO Manager platform: %s' %
781                    (args.compiler, args.iomgr_platform))
782
783        if args.compiler == 'default':
784            if os.name == 'nt':
785                if args.iomgr_platform == 'gevent':
786                    # TODO(https://github.com/grpc/grpc/issues/23784) allow
787                    # gevent to run on later version once issue solved.
788                    return (python36_config,)
789                else:
790                    return (python38_config,)
791            else:
792                if args.iomgr_platform == 'asyncio':
793                    return (python36_config, python38_config)
794                elif os.uname()[0] == 'Darwin':
795                    # NOTE(rbellevi): Testing takes significantly longer on
796                    # MacOS, so we restrict the number of interpreter versions
797                    # tested.
798                    return (
799                        python27_config,
800                        python38_config,
801                    )
802                else:
803                    return (
804                        python27_config,
805                        python35_config,
806                        python37_config,
807                        python38_config,
808                    )
809        elif args.compiler == 'python2.7':
810            return (python27_config,)
811        elif args.compiler == 'python3.5':
812            return (python35_config,)
813        elif args.compiler == 'python3.6':
814            return (python36_config,)
815        elif args.compiler == 'python3.7':
816            return (python37_config,)
817        elif args.compiler == 'python3.8':
818            return (python38_config,)
819        elif args.compiler == 'pypy':
820            return (pypy27_config,)
821        elif args.compiler == 'pypy3':
822            return (pypy32_config,)
823        elif args.compiler == 'python_alpine':
824            return (python27_config,)
825        elif args.compiler == 'all_the_cpythons':
826            return (
827                python27_config,
828                python35_config,
829                python36_config,
830                python37_config,
831                python38_config,
832            )
833        else:
834            raise Exception('Compiler %s not supported.' % args.compiler)
835
836    def __str__(self):
837        return 'python'
838
839
class RubyLanguage(object):
    """Ruby test support: builds gRPC Ruby and schedules its unit and end2end suites."""

    def configure(self, config, args):
        """Store run configuration; Ruby only supports the default compiler."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Return one job spec for the main Ruby suite plus one per end2end script."""
        # Each end2end scenario is a standalone script because it exercises
        # process-level behavior (signal handling, forking, GC stress, ...).
        end2end_scripts = (
            'src/ruby/end2end/sig_handling_test.rb',
            'src/ruby/end2end/channel_state_test.rb',
            'src/ruby/end2end/channel_closing_test.rb',
            'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
            'src/ruby/end2end/killed_client_thread_test.rb',
            'src/ruby/end2end/forking_client_test.rb',
            'src/ruby/end2end/grpc_class_init_test.rb',
            'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
            'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
            'src/ruby/end2end/client_memory_usage_test.rb',
            'src/ruby/end2end/package_with_underscore_test.rb',
            'src/ruby/end2end/graceful_sig_handling_test.rb',
            'src/ruby/end2end/graceful_sig_stop_test.rb',
            'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/call_credentials_timeout_test.rb',
            'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
        )
        specs = [
            self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
                                 timeout_seconds=10 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        specs.extend(
            self.config.job_spec(['ruby', script],
                                 shortname=script,
                                 timeout_seconds=20 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            for script in end2end_scripts)
        return specs

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        # Nothing built through make; the build script handles everything.
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_buster_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
904
905
class CSharpLanguage(object):
    """C# test support covering the legacy .NET 4.5/mono and CoreCLR runtimes."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Validate compiler/arch choices and record per-platform build settings."""
        self.config = config
        self.args = args
        # The accepted compiler set is identical on every platform; only the
        # extra settings below differ.
        _check_compiler(self.args.compiler, ['default', 'coreclr'])
        if self.platform == 'windows':
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            self._docker_distro = 'stretch'

    def test_specs(self):
        """Build one job spec per C# test (or one per assembly for gcov on Windows)."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % _MSBUILD_CONFIG[self.config.build_config]
        assembly_extension = '.exe'

        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp2.1'
            assembly_extension = '.dll'
            runtime_cmd = ['dotnet', 'exec']
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly, test_names in tests_by_assembly.items():
            assembly_file = 'src/csharp/%s/%s/%s%s' % (
                assembly, assembly_subdir, assembly, assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                specs.extend(
                    self.config.job_spec(runtime_cmd +
                                         [assembly_file, '--test=%s' % name] +
                                         nunit_args,
                                         shortname='csharp.%s' % name,
                                         environ=_FORCE_ENVIRON_FOR_WRAPPERS)
                    for name in test_names)
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                specs.append(
                    self.config.job_spec(cmdline,
                                         shortname='csharp.coverage.%s' %
                                         assembly,
                                         cpu_cost=1000000,
                                         environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        """Platform-specific scripts to run before building."""
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        # The native extension is the only make-built artifact C# needs.
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        """Platform-specific build scripts."""
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        # no need to set x86 specific flags as run_tests.py
        # currently forbids x86 C# builds on both Linux and MacOS.
        return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
1022
1023
class ObjCLanguage(object):
    """Objective-C test support.

    Builds the example apps and runs the iOS/macOS/tvOS test suites via
    shell scripts under src/objective-c/tests. There are no separate
    build/make steps: everything happens inside the per-test scripts.
    """

    def configure(self, config, args):
        # Only the default compiler is supported for Objective-C.
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Return the full list of ObjC job specs (build tests + test-suite runs).

        cpu_cost=1e6 on every spec forces each job to run exclusively
        (no other jobs scheduled concurrently).
        """
        out = []
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Currently not supporting compiling as frameworks in Bazel
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }))
        # NOTE(review): 'switftsample' looks like a typo for 'swiftsample';
        # left unchanged because report tooling may key on this shortname.
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-tvOS-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'tvOS-sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Disabled due to #20258
        # TODO (mxyan): Reenable this test when #20258 is resolved.
        # out.append(
        #     self.config.job_spec(
        #         ['src/objective-c/tests/build_one_example_bazel.sh'],
        #         timeout_seconds=20 * 60,
        #         shortname='ios-buildtest-example-watchOS-sample',
        #         cpu_cost=1e6,
        #         environ={
        #             'SCHEME': 'watchOS-sample-WatchKit-App',
        #             'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
        #             'FRAMEWORKS': 'NO'
        #         }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-plugintest',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_option_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='ios-test-plugin-option-test',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='ios-test-cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-unittests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'UnitTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-interoptests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'InteropTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-cronettests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'CronetTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test-posix',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTestsPosix'}))
        out.append(
            self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-cpp-test-cronet',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='mac-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'MacTests',
                                     'PLATFORM': 'macos'
                                 }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='tvos-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'TvTests',
                                     'PLATFORM': 'tvos'
                                 }))

        return sorted(out)

    def pre_build_steps(self):
        # No pre-build steps; the test scripts build what they need.
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests cannot run under docker (they need macOS/Xcode).
        return None

    def __str__(self):
        return 'objc'
1191
1192
class Sanity(object):
    """Pseudo-language that runs repository-wide sanity checks (lint, format, ...)."""

    def configure(self, config, args):
        # Sanity checks only run with the default compiler.
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Load the sanity-check command list from YAML and wrap each in a job spec.

        Returns:
            A list of job specs, one per entry in sanity_tests.yaml.
        """
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
                # sanity tests run tools/bazel wrapper concurrently
                # and that can result in a download/run race in the wrapper.
                # under docker we already have the right version of bazel
                # so we can just disable the wrapper.
                environ['DISABLE_BAZEL_WRAPPER'] = 'true'
            # Use yaml.safe_load: yaml.load without an explicit Loader is
            # deprecated (PyYAML >= 5.1) and can construct arbitrary Python
            # objects. The manifest is plain data, so safe_load suffices.
            return [
                self.config.job_spec(cmd['script'].split(),
                                     timeout_seconds=30 * 60,
                                     environ=environ,
                                     cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
1243
1244
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    # NOTE(review): the file has a .json extension but is parsed with
    # ast.literal_eval rather than json.load — presumably it may contain
    # Python-literal syntax; confirm before switching parsers.
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Maps the -l/--language command-line names to their language objects.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps build_config names to the corresponding MSBuild configuration name.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
1267
1268
1269def _windows_arch_option(arch):
1270    """Returns msbuild cmdline option for selected architecture."""
1271    if arch == 'default' or arch == 'x86':
1272        return '/p:Platform=Win32'
1273    elif arch == 'x64':
1274        return '/p:Platform=x64'
1275    else:
1276        print('Architecture %s not supported.' % arch)
1277        sys.exit(1)
1278
1279
def _check_arch_option(arch):
    """Checks that architecture option is valid.

    Exits the process with an error message when the requested architecture
    cannot be used on the current platform; returns None on success.
    """
    if platform_string() == 'windows':
        # Reuse the msbuild helper purely for validation; it exits on an
        # unsupported architecture.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print(
                'Architecture %s does not match current runtime architecture.' %
                arch)
            sys.exit(1)
    else:
        # BUG FIX: this branch previously read the module-level 'args' object
        # instead of the 'arch' parameter, which raised NameError if the
        # function was called before command-line parsing and ignored the
        # actual argument passed in.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
1303
1304
1305def _docker_arch_suffix(arch):
1306    """Returns suffix to dockerfile dir to use."""
1307    if arch == 'default' or arch == 'x64':
1308        return 'x64'
1309    elif arch == 'x86':
1310        return 'x86'
1311    else:
1312        print('Architecture %s not supported with current settings.' % arch)
1313        sys.exit(1)
1314
1315
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

       Returns:
           A positive integer or 0, the latter indicating an infinite number of
           runs.

       Raises:
           argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # Only catch the parse/validation failure; the previous bare
        # 'except:' also swallowed KeyboardInterrupt/SystemExit.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
1336
1337
def percent_type(arg_str):
    """argparse type: parse a float and require it to lie in [0, 100]."""
    value = float(arg_str)
    if value > 100 or value < 0:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % value)
    return value
1344
1345
1346# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b are within the given relative/absolute tolerance.

    Backport of math.isclose for Python versions below 3.5.
    """
    difference = abs(a - b)
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return difference <= tolerance
1349
1350
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# Core selection: build configuration, repetition and test filtering.
argp.add_argument('-c',
                  '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
                  '--sample_percent',
                  default=100.0,
                  type=percent_type,
                  help='Run a random sample with that percentage of tests')
# Boolean flags below use action='store_const' with const=True
# (equivalent to store_true).
argp.add_argument('-f',
                  '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t',
                  '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l',
                  '--language',
                  choices=sorted(_LANGUAGES.keys()),
                  nargs='+',
                  required=True)
argp.add_argument('-S',
                  '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# Toolchain selection: architecture, compiler/runtime and iomgr variant.
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default',
        'gcc4.9',
        'gcc5.3',
        'gcc7.4',
        'gcc8.3',
        'gcc_musl',
        'clang4.0',
        'clang5.0',
        'python2.7',
        'python3.5',
        'python3.6',
        'python3.7',
        'python3.8',
        'pypy',
        'pypy3',
        'python_alpine',
        'all_the_cpythons',
        'electron1.3',
        'electron1.6',
        'coreclr',
        'cmake',
        'cmake_vs2015',
        'cmake_vs2017',
        'cmake_vs2019',
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument('--iomgr_platform',
                  choices=['native', 'uv', 'gevent', 'asyncio'],
                  default='native',
                  help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# Reporting options.
argp.add_argument('-x',
                  '--xml_report',
                  default=None,
                  type=str,
                  help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
                  default='tests',
                  type=str,
                  help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--report_multi_target',
    default=False,
    const=True,
    action='store_const',
    help='Generate separate XML report for each test job (Looks better in UIs).'
)
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# Polling-engine overrides (see _POLLING_STRATEGIES at the top of the file).
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
                  default=-1,
                  type=int,
                  help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
                  default='',
                  type=str,
                  nargs='?',
                  help='Upload test results to a specified BQ table.')
args = argp.parse_args()
1522
# Flaky-test set and per-test CPU overrides; empty here and consumed by the
# scheduling code — TODO(review): confirm where these are populated.
flaky_tests = set()
shortname_to_cpu = {}

# Honor polling-strategy overrides from the command line.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each spec is SUBMODULE_NAME[:BRANCH]; BRANCH defaults to 'master'.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    # The cwd=cwd default argument deliberately binds this loop iteration's
    # directory at definition time (avoids the late-binding closure pitfall).
    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Instantiate and configure the language objects selected with -l/--language.
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option for lang in languages
                for make_option in lang.make_options()
            ]))
1589
# --use_docker: re-run this same script inside a docker container and exit
# with the container run's status instead of running tests directly.
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        # Give the user a moment to read the warning / hit Ctrl-C.
        time.sleep(5)

    # All selected languages must agree on a single docker image.
    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
              'images.')
        sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # Rebuild our own command line minus --use_docker so the inner invocation
    # runs the tests directly.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    # The dockerize scripts read their configuration from the environment.
    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

# Validate the requested architecture against the current platform/config.
_check_arch_option(args.arch)
1630
1631
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Build the list of JobSpecs that compile `targets` for config `cfg`.

    Returns one JobSpec per target on Windows (cmake/msbuild driven), a
    single make/cmake JobSpec elsewhere, or an empty list when there is
    nothing to build.
    """
    if platform_string() == 'windows':
        # On Windows each target is driven through cmake's msbuild backend.
        return [
            jobset.JobSpec([
                'cmake', '--build', '.', '--target',
                '%s' % tgt, '--config', _MSBUILD_CONFIG[cfg]
            ],
                           cwd=os.path.dirname(makefile),
                           timeout_seconds=None) for tgt in targets
        ]
    if not targets:
        return []
    if makefile.startswith('cmake/build/'):
        # With cmake, all build configuration was passed in the pre-build step
        # already; a plain parallel make in the build dir is enough.
        return [
            jobset.JobSpec(
                [os.getenv('MAKE', 'make'), '-j',
                 '%d' % args.jobs] + targets,
                cwd='cmake/build',
                timeout_seconds=None)
        ]
    # Classic Makefile path: assemble the full command line incrementally.
    cmdline = [
        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
        '%d' % args.jobs,
        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
        'CONFIG=%s' % cfg, 'Q='
    ]
    cmdline += language_make_options
    if args.travis:
        cmdline += ['JENKINS_BUILD=1']
    return [jobset.JobSpec(cmdline + targets, timeout_seconds=None)]
1667
1668
# Accumulate, per makefile, the union of every language's build targets.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = (make_targets.get(makefile, set()) |
                              set(l.make_targets()))
1674
1675
def build_step_environ(cfg):
    """Return the environment variables for a build step under config `cfg`.

    Always sets CONFIG; additionally sets MSBUILD_CONFIG when the config
    has a Windows msbuild counterpart.
    """
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
1682
1683
# Sequentially-run build pipeline: pre-build steps (flake-retried), then the
# make/cmake commands derived from make_targets, then per-language build steps.
build_steps = list(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
                       flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Steps run after the test pass completes (cleanup, coverage collection, ...).
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
# runs_per_test == 0 means "run forever" (see _build_and_run's infinite_runs).
runs_per_test = args.runs_per_test
forever = args.forever
1712
1713
1714def _shut_down_legacy_server(legacy_server_port):
1715    try:
1716        version = int(
1717            urllib.request.urlopen('http://localhost:%d/version_number' %
1718                                   legacy_server_port,
1719                                   timeout=10).read())
1720    except:
1721        pass
1722    else:
1723        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
1724                               legacy_server_port).read()
1725
1726
1727def _calculate_num_runs_failures(list_of_results):
1728    """Calculate number of runs and failures for a particular test.
1729
1730  Args:
1731    list_of_results: (List) of JobResult object.
1732  Returns:
1733    A tuple of total number of runs and failures.
1734  """
1735    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1736    num_failures = 0
1737    for jobresult in list_of_results:
1738        if jobresult.retries > 0:
1739            num_runs += jobresult.retries
1740        if jobresult.num_failures > 0:
1741            num_failures += jobresult.num_failures
1742    return num_runs, num_failures
1743
1744
1745# _build_and_run results
1746class BuildAndRunError(object):
1747
1748    BUILD = object()
1749    TEST = object()
1750    POST_TEST = object()
1751
1752
def _has_epollexclusive():
    """Return True iff the kernel supports EPOLLEXCLUSIVE.

    Runs the check_epollexclusive helper binary built for the current config.
    Returns False when the binary is missing (languages other than C, or
    Windows), fails to run, or exits non-zero.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError:
        # Binary ran and reported "not supported".
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
1765
1766
1767# returns a list of things that failed (or an empty list on success)
1768def _build_and_run(check_cancelled,
1769                   newline_on_success,
1770                   xml_report=None,
1771                   build_only=False):
1772    """Do one pass of building & running tests."""
1773    # build latest sequentially
1774    num_failures, resultset = jobset.run(build_steps,
1775                                         maxjobs=1,
1776                                         stop_on_failure=True,
1777                                         newline_on_success=newline_on_success,
1778                                         travis=args.travis)
1779    if num_failures:
1780        return [BuildAndRunError.BUILD]
1781
1782    if build_only:
1783        if xml_report:
1784            report_utils.render_junit_xml_report(
1785                resultset, xml_report, suite_name=args.report_suite_name)
1786        return []
1787
1788    if not args.travis and not _has_epollexclusive() and platform_string(
1789    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
1790            platform_string()]:
1791        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
1792        _POLLING_STRATEGIES[platform_string()].remove('epollex')
1793
1794    # start antagonists
1795    antagonists = [
1796        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
1797        for _ in range(0, args.antagonists)
1798    ]
1799    start_port_server.start_port_server()
1800    resultset = None
1801    num_test_failures = 0
1802    try:
1803        infinite_runs = runs_per_test == 0
1804        one_run = set(spec for language in languages
1805                      for spec in language.test_specs()
1806                      if (re.search(args.regex, spec.shortname) and
1807                          (args.regex_exclude == '' or
1808                           not re.search(args.regex_exclude, spec.shortname))))
1809        # When running on travis, we want out test runs to be as similar as possible
1810        # for reproducibility purposes.
1811        if args.travis and args.max_time <= 0:
1812            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
1813        else:
1814            # whereas otherwise, we want to shuffle things up to give all tests a
1815            # chance to run.
1816            massaged_one_run = list(
1817                one_run)  # random.sample needs an indexable seq.
1818            num_jobs = len(massaged_one_run)
1819            # for a random sample, get as many as indicated by the 'sample_percent'
1820            # argument. By default this arg is 100, resulting in a shuffle of all
1821            # jobs.
1822            sample_size = int(num_jobs * args.sample_percent / 100.0)
1823            massaged_one_run = random.sample(massaged_one_run, sample_size)
1824            if not isclose(args.sample_percent, 100.0):
1825                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
1826                print("Running %d tests out of %d (~%d%%)" %
1827                      (sample_size, num_jobs, args.sample_percent))
1828        if infinite_runs:
1829            assert len(massaged_one_run
1830                      ) > 0, 'Must have at least one test for a -n inf run'
1831        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
1832                         else itertools.repeat(massaged_one_run, runs_per_test))
1833        all_runs = itertools.chain.from_iterable(runs_sequence)
1834
1835        if args.quiet_success:
1836            jobset.message(
1837                'START',
1838                'Running tests quietly, only failing tests will be reported',
1839                do_newline=True)
1840        num_test_failures, resultset = jobset.run(
1841            all_runs,
1842            check_cancelled,
1843            newline_on_success=newline_on_success,
1844            travis=args.travis,
1845            maxjobs=args.jobs,
1846            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
1847            stop_on_failure=args.stop_on_failure,
1848            quiet_success=args.quiet_success,
1849            max_time=args.max_time)
1850        if resultset:
1851            for k, v in sorted(resultset.items()):
1852                num_runs, num_failures = _calculate_num_runs_failures(v)
1853                if num_failures > 0:
1854                    if num_failures == num_runs:  # what about infinite_runs???
1855                        jobset.message('FAILED', k, do_newline=True)
1856                    else:
1857                        jobset.message('FLAKE',
1858                                       '%s [%d/%d runs flaked]' %
1859                                       (k, num_failures, num_runs),
1860                                       do_newline=True)
1861    finally:
1862        for antagonist in antagonists:
1863            antagonist.kill()
1864        if args.bq_result_table and resultset:
1865            upload_extra_fields = {
1866                'compiler': args.compiler,
1867                'config': args.config,
1868                'iomgr_platform': args.iomgr_platform,
1869                'language': args.language[
1870                    0
1871                ],  # args.language is a list but will always have one element when uploading to BQ is enabled.
1872                'platform': platform_string()
1873            }
1874            try:
1875                upload_results_to_bq(resultset, args.bq_result_table,
1876                                     upload_extra_fields)
1877            except NameError as e:
1878                logging.warning(
1879                    e)  # It's fine to ignore since this is not critical
1880        if xml_report and resultset:
1881            report_utils.render_junit_xml_report(
1882                resultset,
1883                xml_report,
1884                suite_name=args.report_suite_name,
1885                multi_target=args.report_multi_target)
1886
1887    number_failures, _ = jobset.run(post_tests_steps,
1888                                    maxjobs=1,
1889                                    stop_on_failure=False,
1890                                    newline_on_success=newline_on_success,
1891                                    travis=args.travis)
1892
1893    out = []
1894    if number_failures:
1895        out.append(BuildAndRunError.POST_TEST)
1896    if num_test_failures:
1897        out.append(BuildAndRunError.TEST)
1898
1899    return out
1900
1901
# Entry driver: either watch the tree and re-run forever, or do a single pass
# and exit with a bitmask of failure categories.
if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: the old code did `_build_and_run(...) == 0`, comparing the
        # returned *list* of errors to 0 (always False) and never updating
        # `success`, so the recovery message below could never fire. Keep the
        # error list and derive success from its emptiness instead.
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message('SUCCESS',
                           'All tests are now passing properly',
                           do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Poll for filesystem changes before kicking off the next pass.
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code is a bitmask: 1 = build failed, 2 = tests failed,
    # 4 = post-test steps failed.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
1936