#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""

from __future__ import print_function

import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six

import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
    from python_utils.upload_test_results import upload_results_to_bq
except ImportError:
    pass  # It's ok to not import because this is only necessary to upload results to BQ.

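# Make the shared GCP utilities importable; big_query_utils is loaded lazily
# inside get_bqtest_data() below.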
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

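# Extra environment variables forced onto the language-specific test wrapper
# scripts.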
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

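# Polling engines to run each C/C++ test under, per platform; tests that do
# not use polling run once with the strategy set to 'none' instead.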
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll'],
    'mac': ['poll'],
}

BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')


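# Queries the BigQuery test-result archive for each test's observed flakiness
# (any non-PASSED/SKIPPED result) and measured CPU usage over the past week.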
def get_bqtest_data(limit=None):
    import big_query_utils

    bq = big_query_utils.create_big_query()
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data


def platform_string():
    return jobset.platform_string()


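# Default per-test and pre-build-step timeouts, in seconds.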
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60


def run_shell_command(cmd, env=None, cwd=None):
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as e:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            e.cmd, e.returncode, e.output)
        raise


def max_parallel_tests_for_current_platform():
    # So far, excessive test parallelization has only been seen to cause
    # problems on Windows.
    if jobset.platform_string() == 'windows':
        return 64
    return 1024


# Config: compile with CONFIG=config, and run the resulting binary to test.
class Config(object):

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=[],
                 iomgr_platform='native'):
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ={},
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config.

        Args:
          cmdline: a list of strings specifying the command line the test
                   would like to run
        """
        actual_environ = self.environ.copy()
        for k, v in environ.items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier *
                             timeout_seconds if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)


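# Loads the generated tests.json manifest and returns the test targets that
# match the requested language and the current platform (skipping targets
# marked flaky when running on CI).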
def get_c_tests(travis, test_lang):
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]


def _check_compiler(compiler, supported_compilers):
    if compiler not in supported_compilers:
        raise Exception('Compiler %s not supported (on this platform).' %
                        compiler)


def _check_arch(arch, supported_archs):
    if arch not in supported_archs:
        raise Exception('Architecture %s not supported.' % arch)


def _is_use_docker_child():
    """Returns True if running as a --use_docker child."""
    return bool(os.getenv('RUN_TESTS_COMMAND'))


_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])


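# Builds a PythonConfig for one CPython interpreter: the 'build' command
# invokes the builder helper script with the interpreter pattern and the
# config name, and the 'run' command invokes the test runner with the python
# binary from the resulting per-config directory.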
def _python_config_generator(name, major, minor, bits, config_vars):
    name += '_' + config_vars.iomgr_platform
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments +
        [_python_pattern_function(major=major, minor=minor, bits=bits)] +
        [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner + [
            os.path.join(name, config_vars.venv_relative_python[0]),
            config_vars.test_name
        ])


def _pypy_config_generator(name, major, config_vars):
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments +
        [_pypy_pattern_function(major=major)] + [name] +
        config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner +
        [os.path.join(name, config_vars.venv_relative_python[0])])


def _python_pattern_function(major, minor, bits):
    # Bit-ness is handled by the test machine's environment
    if os.name == "nt":
        if bits == "64":
            return '/c/Python{major}{minor}/python.exe'.format(major=major,
                                                               minor=minor)
        else:
            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
                major=major, minor=minor, bits=bits)
    else:
        return 'python{major}.{minor}'.format(major=major, minor=minor)


def _pypy_pattern_function(major):
    if major == '2':
        return 'pypy'
    elif major == '3':
        return 'pypy3'
    else:
        raise ValueError("Unknown PyPy major version")


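# Language handler for the C and C++ core test suites; consumes the targets
# returned by get_c_tests() and expands each binary into one JobSpec per
# polling strategy (and, for gtest/benchmark binaries, per individual test).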
class CLanguage(object):

    def __init__(self, make_target, test_lang):
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                        polling_strategy,
                    'GRPC_VERBOSITY':
                        'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale the overall test timeout when running under a
                        # sanitizer; the scaling value is based on historical
                        # data analysis.
                        timeout_scaling *= 3

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
                        self.config.build_config], target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    list_test_command = None
                    filter_test_command = None

                    # These are the flags defined by the gtest and benchmark
                    # frameworks to list and filter test runs. We use them to
                    # split each individual test into its own JobSpec, and thus
                    # into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' %
                                    (' '.join(cmdline), shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # Here we parse the output of --gtest_list_tests to
                        # build up a complete list of the tests contained in a
                        # binary. For each test, we then add a job to run it,
                        # filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
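                        # --gtest_list_tests prints each suite name at column 0
                        # (ending with '.') followed by indented test names;
                        # text after '#' is a parameter annotation and is
                        # dropped.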
                        for line in tests.split('\n'):
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' %
                                        (' '.join(cmdline), shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        cmdline = [binary] + target['args']
                        shortname = target.get(
                            'shortname',
                            ' '.join(pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds',
                                    _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        if self.args.config == 'ubsan':
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]

        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            _check_compiler(compiler, ['default'])

        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.4':
            return ('ubuntu1804', [])
        elif compiler == 'gcc8.3':
            return ('buster', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            # installed.
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target


# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        if self.args.compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        else:
            if self.args.compiler.startswith('electron'):
                self.runtime = 'electron'
                self.node_version = self.args.compiler[8:]
            else:
                self.runtime = 'node'
                # Take off the word "node"
                self.node_version = self.args.compiler[4:]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        else:
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'


class PhpLanguage(object):

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        return [
            self.config.job_spec(['src/php/bin/run_tests.sh'],
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php'


class Php7Language(object):

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        return [
            self.config.job_spec(['src/php/bin/run_tests.sh'],
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'


class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named such that 'what it says on the tin' applies)."""


class PythonLanguage(object):

    _TEST_SPECS_FILE = {
        'native': 'src/python/grpcio_tests/tests/tests.json',
        'gevent': 'src/python/grpcio_tests/tests/tests.json',
        'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
    }
    _TEST_FOLDER = {
        'native': 'test',
        'gevent': 'test',
        'asyncio': 'test_aio',
    }

    def configure(self, config, args):
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        # load list of known test suites
        with open(self._TEST_SPECS_FILE[
                self.args.iomgr_platform]) as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers are not
        # designed for non-native IO managers. They have a side effect that
        # overrides threading settings in C-Core.
        if args.iomgr_platform != 'native':
            environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
                             **environment),
                shortname='%s.%s.%s' %
                (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
                 suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [config.build for config in self.pythons]

    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))

    def _python_manager_name(self):
        """Choose the docker image to use based on python version."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        else:
            return 'stretch_default'

    def _get_pythons(self, args):
        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'

        if os.name == 'nt':
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']

        # Selects the corresponding testing mode.
        # See src/python/grpcio_tests/commands.py for implementation details.
        if args.iomgr_platform == 'native':
            test_command = 'test_lite'
        elif args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        elif args.iomgr_platform == 'asyncio':
            test_command = 'test_aio'
        else:
            raise ValueError('Unsupported IO Manager platform: %s' %
                             args.iomgr_platform)
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]

        config_vars = _PythonConfigVars(shell, builder,
                                        builder_prefix_arguments,
                                        venv_relative_python, toolchain, runner,
                                        test_command, args.iomgr_platform)
        python27_config = _python_config_generator(name='py27',
                                                   major='2',
                                                   minor='7',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python35_config = _python_config_generator(name='py35',
                                                   major='3',
                                                   minor='5',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python36_config = _python_config_generator(name='py36',
                                                   major='3',
                                                   minor='6',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python37_config = _python_config_generator(name='py37',
                                                   major='3',
                                                   minor='7',
                                                   bits=bits,
                                                   config_vars=config_vars)
        python38_config = _python_config_generator(name='py38',
                                                   major='3',
                                                   minor='8',
                                                   bits=bits,
                                                   config_vars=config_vars)
        pypy27_config = _pypy_config_generator(name='pypy',
                                               major='2',
                                               config_vars=config_vars)
        pypy32_config = _pypy_config_generator(name='pypy3',
                                               major='3',
                                               config_vars=config_vars)

        if args.iomgr_platform == 'asyncio':
            if args.compiler not in ('default', 'python3.6', 'python3.7',
                                     'python3.8'):
                raise Exception(
                    'Compiler %s not supported with IO Manager platform: %s' %
                    (args.compiler, args.iomgr_platform))

        if args.compiler == 'default':
            if os.name == 'nt':
                return (python36_config,)
            else:
                if args.iomgr_platform == 'asyncio':
                    return (python36_config,)
                elif os.uname()[0] == 'Darwin':
                    # NOTE(rbellevi): Testing takes significantly longer on
                    # MacOS, so we restrict the number of interpreter versions
                    # tested.
                    return (
                        python27_config,
                        python36_config,
                        python37_config,
                    )
                else:
                    return (
                        python27_config,
                        python35_config,
                        python36_config,
                        python37_config,
                    )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'python3.7':
            return (python37_config,)
        elif args.compiler == 'python3.8':
            return (python38_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python35_config,
                python36_config,
                python37_config,
                python38_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'


class RubyLanguage(object):

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        tests = [
            self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
                                 timeout_seconds=10 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        tests.append(
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return tests

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'


class CSharpLanguage(object):

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'stretch'

    def test_specs(self):
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'

        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp2.1'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (
                assembly, assembly_subdir, assembly, assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(cmdline,
                                         shortname='csharp.coverage.%s' %
                                         assembly,
                                         cpu_cost=run_exclusive,
                                         environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            # no need to set x86 specific flags as run_tests.py
            # currently forbids x86 C# builds on both Linux and MacOS.
            return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'


class ObjCLanguage(object):

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        out = []
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Currently not supporting compiling as frameworks in Bazel
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-swiftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-tvOS-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'tvOS-sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Disabled due to #20258
        # TODO (mxyan): Reenable this test when #20258 is resolved.
        # out.append(
        #     self.config.job_spec(
        #         ['src/objective-c/tests/build_one_example_bazel.sh'],
        #         timeout_seconds=20 * 60,
        #         shortname='ios-buildtest-example-watchOS-sample',
        #         cpu_cost=1e6,
        #         environ={
        #             'SCHEME': 'watchOS-sample-WatchKit-App',
        #             'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
        #             'FRAMEWORKS': 'NO'
        #         }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-plugintest',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_option_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='ios-test-plugin-option-test',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-test-cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-unittests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'UnitTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-interoptests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'InteropTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-cronettests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'CronetTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test-posix',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTestsPosix'}))
        out.append(
            self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-cpp-test-cronet',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='mac-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'MacTests',
                                     'PLATFORM': 'macos'
                                 }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='tvos-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'TvTests',
                                     'PLATFORM': 'tvos'
                                 }))

        return sorted(out)

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return None

    def __str__(self):
        return 'objc'


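# Runs the repository sanity checks defined in
# tools/run_tests/sanity/sanity_tests.yaml.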
class Sanity(object):

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
                # The sanity tests run the tools/bazel wrapper concurrently,
                # and that can result in a download/run race in the wrapper.
                # Under docker we already have the right version of bazel,
                # so we can just disable the wrapper.
                environ['DISABLE_BAZEL_WRAPPER'] = 'true'
            return [
                self.config.job_spec(cmd['script'].split(),
                                     timeout_seconds=30 * 60,
                                     environ=environ,
                                     cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'


# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

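# Maps each --language flag value to the handler that knows how to build and
# run that language's tests.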
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

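# Maps a build config to the MSBuild configuration name used on Windows.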
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}


def _windows_arch_option(arch):
    """Returns msbuild cmdline option for selected architecture."""
    if arch == 'default' or arch == 'x86':
        return '/p:Platform=Win32'
    elif arch == 'x64':
        return '/p:Platform=x64'
    else:
        print('Architecture %s not supported.' % arch)
        sys.exit(1)


def _check_arch_option(arch):
    """Checks that architecture option is valid."""
    if platform_string() == 'windows':
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print(
                'Architecture %s does not match current runtime architecture.' %
                arch)
            sys.exit(1)
    else:
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)


def _docker_arch_suffix(arch):
    """Returns suffix to dockerfile dir to use."""
    if arch == 'default' or arch == 'x64':
        return 'x64'
    elif arch == 'x86':
        return 'x86'
    else:
        print('Architecture %s not supported with current settings.' % arch)
        sys.exit(1)


def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

    Returns:
        A positive integer or 0, the latter indicating an infinite number of
        runs.

    Raises:
        argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0: raise ValueError
        return n
    except ValueError:
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)

1393
1394def percent_type(arg_str):
1395    pct = float(arg_str)
1396    if pct > 100 or pct < 0:
1397        raise argparse.ArgumentTypeError(
1398            "'%f' is not a valid percentage in the [0, 100] range" % pct)
1399    return pct
1400
1401
1402# This is math.isclose in python >= 3.5
1403def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
1404    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)


# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c',
                  '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
                  '--sample_percent',
                  default=100.0,
                  type=percent_type,
                  help='Run a random sample with that percentage of tests')
argp.add_argument('-f',
                  '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t',
                  '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l',
                  '--language',
                  choices=sorted(_LANGUAGES.keys()),
                  nargs='+',
                  required=True)
argp.add_argument('-S',
                  '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and avoids the need to install ' +
                  'language-specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl',
        'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0', 'python2.7',
        'python3.5', 'python3.6', 'python3.7', 'python3.8', 'pypy', 'pypy3',
        'python_alpine', 'all_the_cpythons', 'electron1.3', 'electron1.6',
        'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument('--iomgr_platform',
                  choices=['native', 'uv', 'gevent', 'asyncio'],
                  default='native',
                  help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x',
                  '--xml_report',
                  default=None,
                  type=str,
                  help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
                  default='tests',
                  type=str,
                  help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--report_multi_target',
    default=False,
    const=True,
    action='store_const',
    help='Generate separate XML report for each test job (Looks better in UIs).'
)
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests will also be omitted from the XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
                  default=-1,
                  type=int,
                  help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
                  default='',
                  type=str,
                  nargs='?',
                  help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()
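
# Illustrative invocation (not in the original source):
#   python tools/run_tests/run_tests.py -l python -c opt -j 4 --regex unit
# builds the Python targets in the 'opt' config and runs the tests whose
# shortnames match 'unit', with up to 4 parallel jobs.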

flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except Exception:
        print("Unexpected error getting flaky tests: %s" %
              traceback.format_exc())

if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
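
# Illustrative effect (not in the original source): on Linux,
#   --force_use_pollers epoll1,poll
# yields _POLLING_STRATEGIES['linux'] == ['epoll1', 'poll'], so only those
# engines are exercised, while --force_default_poller clears the table
# entirely.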

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')
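
# Illustrative spec parsing (not in the original source; the branch name is
# hypothetical):
#   --update_submodules protobuf        -> checkout 'master' in third_party/protobuf
#   --update_submodules protobuf:v3.8.x -> checkout branch 'v3.8.x'
# split(':', 1) always yields one or two parts, so the if/elif above is
# exhaustive.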

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if 'gcov' not in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option for lang in languages
                for make_option in lang.make_options()
            ]))

if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
              'images.')
        sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    child_argv = [arg for arg in sys.argv if arg != '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)


def make_jobspec(cfg, targets, makefile='Makefile'):
    if platform_string() == 'windows':
        return [
            jobset.JobSpec([
                'cmake', '--build', '.', '--target',
                '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
            ],
                           cwd=os.path.dirname(makefile),
                           timeout_seconds=None) for target in targets
        ]
    else:
        if targets and makefile.startswith('cmake/build/'):
            # With cmake, we've passed all the build configuration in the pre-build step already
            return [
                jobset.JobSpec(
                    [os.getenv('MAKE', 'make'), '-j',
                     '%d' % args.jobs] + targets,
                    cwd='cmake/build',
                    timeout_seconds=None)
            ]
        if targets:
            return [
                jobset.JobSpec(
                    [
                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                        '%d' % args.jobs,
                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                        args.slowdown,
                        'CONFIG=%s' % cfg, 'Q='
                    ] + language_make_options +
                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
                    timeout_seconds=None)
            ]
        else:
            return []
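
# Illustrative output (not in the original source; the target name is just an
# example): on Linux with a cmake pre-build,
#   make_jobspec('opt', ['grpc_cpp_plugin'], 'cmake/build/Makefile')
# produces a single JobSpec whose cmdline is roughly
#   ['make', '-j', '<jobs>', 'grpc_cpp_plugin']
# run from cwd='cmake/build' with no timeout (MAKE unset assumed).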


make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))


def build_step_environ(cfg):
    environ = {'CONFIG': cfg}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    if msbuild_cfg:
        environ['MSBUILD_CONFIG'] = msbuild_cfg
    return environ
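
# Illustrative output (not in the original source):
#   build_step_environ('dbg') -> {'CONFIG': 'dbg', 'MSBUILD_CONFIG': 'Debug'}
# A config without an entry in _MSBUILD_CONFIG simply omits the
# 'MSBUILD_CONFIG' key.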


build_steps = list(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
                       flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever


def _shut_down_legacy_server(legacy_server_port):
    try:
        version = int(
            urllib.request.urlopen('http://localhost:%d/version_number' %
                                   legacy_server_port,
                                   timeout=10).read())
    except Exception:
        pass
    else:
        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
                               legacy_server_port).read()

def _calculate_num_runs_failures(list_of_results):
    """Calculate number of runs and failures for a particular test.

    Args:
      list_of_results: (List) of JobResult object.
    Returns:
      A tuple of total number of runs and failures.
    """
    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
    num_failures = 0
    for jobresult in list_of_results:
        if jobresult.retries > 0:
            num_runs += jobresult.retries
        if jobresult.num_failures > 0:
            num_failures += jobresult.num_failures
    return num_runs, num_failures
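
# Illustrative example (not in the original source): for a test executed
# twice, once with 1 retry and 1 failure and once cleanly, the two JobResults
# give num_runs == 2 + 1 == 3 and num_failures == 1, which the reporting loop
# below prints as '[1/3 runs flaked]'.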


# _build_and_run results
class BuildAndRunError(object):

    BUILD = object()
    TEST = object()
    POST_TEST = object()


def _has_epollexclusive():
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
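
# Illustrative behavior (not in the original source): with '-c opt' this
# probes 'bins/opt/check_epollexclusive' and returns True only when the
# binary exists and exits 0, i.e. when the EPOLLEXCLUSIVE probe succeeds.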


# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests."""
    # build latest sequentially
    num_failures, resultset = jobset.run(build_steps,
                                         maxjobs=1,
                                         stop_on_failure=True,
                                         newline_on_success=newline_on_success,
                                         travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    if (not args.travis and not _has_epollexclusive() and
            platform_string() in _POLLING_STRATEGIES and
            'epollex' in _POLLING_STRATEGIES[platform_string()]):
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        one_run = set(spec for language in languages
                      for spec in language.test_specs()
                      if (re.search(args.regex, spec.shortname) and
                          (args.regex_exclude == '' or
                           not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as
        # possible for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            # random.sample needs an indexable seq.
            massaged_one_run = list(one_run)
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run) > 0, \
                'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message('FLAKE',
                                       '%s [%d/%d runs flaked]' %
                                       (k, num_failures, num_runs),
                                       do_newline=True)
    finally:
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                # args.language is a list but will always have one element
                # when uploading to BQ is enabled.
                'language': args.language[0],
                'platform': platform_string()
            }
            try:
                upload_results_to_bq(resultset, args.bq_result_table,
                                     upload_extra_fields)
            except NameError as e:
                # It's fine to ignore since this is not critical.
                logging.warning(e)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset,
                xml_report,
                suite_name=args.report_suite_name,
                multi_target=args.report_multi_target)

    number_failures, _ = jobset.run(post_tests_steps,
                                    maxjobs=1,
                                    stop_on_failure=False,
                                    newline_on_success=newline_on_success,
                                    travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out


if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # _build_and_run returns a list of errors; an empty list means success.
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message('SUCCESS',
                           'All tests are now passing properly',
                           do_newline=True)
        jobset.message('IDLE', 'No change detected')
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
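    # Illustrative note (not in the original source): the error flags compose
    # as a bitmask, e.g. failing tests plus a failing post-test step exit with
    # 2 | 4 == 6; a build failure makes _build_and_run return early, so that
    # case exits with 1 alone.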
    sys.exit(exit_code)
