• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright 2015 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#     http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15"""Run tests in parallel."""
16
17from __future__ import print_function
18
19import argparse
20import ast
21import collections
22import glob
23import itertools
24import json
25import logging
26import multiprocessing
27import os
28import os.path
29import pipes
30import platform
31import random
32import re
33import socket
34import subprocess
35import sys
36import tempfile
37import traceback
38import time
39from six.moves import urllib
40import uuid
41import six
42
43import python_utils.jobset as jobset
44import python_utils.report_utils as report_utils
45import python_utils.watch_dirs as watch_dirs
46import python_utils.start_port_server as start_port_server
47try:
48    from python_utils.upload_test_results import upload_results_to_bq
49except (ImportError):
50    pass  # It's ok to not import because this is only necessary to upload results to BQ.
51
# Make the gcp/utils helpers (e.g. big_query_utils, used lazily by
# get_bqtest_data) importable.
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# Run everything relative to the repository root so the relative paths used
# throughout this script (tools/..., src/...) resolve correctly.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Environment forced onto language-wrapper test jobs (see *Language.test_specs).
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}
62
# Polling engines exercised per platform; platforms absent from this map fall
# back to the single 'all' strategy (see CLanguage.test_specs).
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}

# One row of historical per-test stats fetched from BigQuery:
# test name, whether it has recently flaked, and measured cpu cost.
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
69
70
def get_bqtest_data(limit=None):
    """Fetch per-test flakiness and cpu-cost stats from BigQuery.

    Args:
      limit: optional int; cap the number of returned rows.

    Returns:
      A list of BigQueryTestData tuples, one per distinct test name.
    """
    import big_query_utils

    client = big_query_utils.create_big_query()
    sql = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        sql += " limit {}".format(limit)
    job = big_query_utils.sync_query_job(client, 'grpc-testing', sql)
    page = client.jobs().getQueryResults(
        pageToken=None, **job['jobReference']).execute(num_retries=3)
    rows = []
    for row in page['rows']:
        fields = row['f']
        rows.append(
            BigQueryTestData(fields[0]['v'], fields[1]['v'] == 'true',
                             float(fields[2]['v'])))
    return rows
102
103
def platform_string():
    """Return the current platform name as reported by jobset."""
    return jobset.platform_string()
106
107
# Default per-test timeout (5 minutes); individual jobs may scale this via
# Config.timeout_multiplier and the sanitizer/poll-strategy scaling below.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
109
110
def run_shell_command(cmd, env=None, cwd=None):
    """Run `cmd` through the shell; log details and re-raise on failure.

    Args:
      cmd: shell command string.
      env: optional environment dict for the subprocess.
      cwd: optional working directory for the subprocess.

    Raises:
      subprocess.CalledProcessError: if the command exits non-zero.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as failure:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            failure.cmd, failure.returncode, failure.output)
        raise
119
120
def max_parallel_tests_for_current_platform():
    """Return the platform-specific cap on concurrently running tests."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
127
128
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (e.g. 'opt', 'dbg') plus how to run its tests."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Args:
          config: build config name; also exported as CONFIG in environ.
          environ: extra environment variables for every job of this config.
          timeout_multiplier: scales each job's timeout_seconds.
          tool_prefix: argv prefix (e.g. a wrapper tool) prepended to cmdlines.
          iomgr_platform: iomgr flavor this config targets.
        """
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        # None sentinel instead of a mutable [] default: a shared default list
        # would be aliased across every Config instance.
        self.tool_prefix = [] if tool_prefix is None else tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config.

        Args:
          cmdline: a list of strings specifying the command line the test
                   would like to run.
          timeout_seconds: per-job timeout before multiplier; None disables it.
          shortname: display name; also keys into flaky_tests/shortname_to_cpu.
          environ: extra per-job environment overrides.
          cpu_cost: relative cpu reservation (may be overridden by BQ data).
          flaky: force flaky-retry handling for this job.
        """
        actual_environ = self.environ.copy()
        # None sentinel instead of a mutable {} default (shared across calls).
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
176
177
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets for `test_lang` from the generated manifest.

    Args:
      travis: if truthy, filter by the 'ci_platforms' list and drop targets
              marked flaky; otherwise filter by 'platforms'.
      test_lang: language tag each target's 'language' field must match.

    Returns:
      The list of matching target dicts from tools/run_tests/generated/tests.json.
    """
    # (removed an unused `out = []` local left over from an older version)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
188
189
190def _check_compiler(compiler, supported_compilers):
191    if compiler not in supported_compilers:
192        raise Exception(
193            'Compiler %s not supported (on this platform).' % compiler)
194
195
196def _check_arch(arch, supported_archs):
197    if arch not in supported_archs:
198        raise Exception('Architecture %s not supported.' % arch)
199
200
201def _is_use_docker_child():
202    """Returns True if running running as a --use_docker child."""
203    return True if os.getenv('RUN_TESTS_COMMAND') else False
204
205
206_PythonConfigVars = collections.namedtuple('_ConfigVars', [
207    'shell',
208    'builder',
209    'builder_prefix_arguments',
210    'venv_relative_python',
211    'toolchain',
212    'runner',
213    'test_name',
214    'iomgr_platform',
215])
216
217
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for one CPython version/venv combination."""
    venv = name + '_' + config_vars.iomgr_platform
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [venv] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(venv, config_vars.venv_relative_python[0]),
        config_vars.test_name
    ]
    return PythonConfig(venv, build_cmd, run_cmd)
229
230
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for one PyPy major version."""
    interpreter = _pypy_pattern_function(major=major)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = (config_vars.shell + config_vars.runner +
               [os.path.join(name, config_vars.venv_relative_python[0])])
    return PythonConfig(name, build_cmd, run_cmd)
240
241
242def _python_pattern_function(major, minor, bits):
243    # Bit-ness is handled by the test machine's environment
244    if os.name == "nt":
245        if bits == "64":
246            return '/c/Python{major}{minor}/python.exe'.format(
247                major=major, minor=minor, bits=bits)
248        else:
249            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
250                major=major, minor=minor, bits=bits)
251    else:
252        return 'python{major}.{minor}'.format(major=major, minor=minor)
253
254
255def _pypy_pattern_function(major):
256    if major == '2':
257        return 'pypy'
258    elif major == '3':
259        return 'pypy3'
260    else:
261        raise ValueError("Unknown PyPy major version")
262
263
class CLanguage(object):
    """Language driver for the core C/C++ test targets.

    Builds targets listed in tools/run_tests/generated/tests.json (via
    get_c_tests) and expands each binary into per-test JobSpecs.
    """

    def __init__(self, make_target, test_lang):
        # make_target: suffix for make targets (e.g. 'c' / 'cxx').
        self.make_target = make_target
        self.platform = platform_string()
        # test_lang: value matched against each target's 'language' field.
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch choices and derive build settings.

        Sets cmake vs make mode, docker distro, and extra make options
        (including libuv flags when iomgr_platform == 'uv').
        """
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            # Best effort: ask pkg-config for libuv flags, fall back to -luv.
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand every buildable test binary into a sorted list of JobSpecs.

        Each target is crossed with the platform's polling strategies; gtest
        and benchmark binaries are further split into one job per test case.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                    polling_strategy,
                    'GRPC_VERBOSITY':
                    'DEBUG'
                }
                # Propagate an externally-chosen DNS resolver, if any.
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3
                    elif polling_strategy == 'poll-cv':
                        # scale test timeout if running with poll-cv
                        # sanitizer and poll-cv scaling is not cumulative to ensure
                        # reasonable timeout values.
                        # TODO(jtattermusch): based on historical data and 5min default
                        # test timeout poll-cv scaling is currently not useful.
                        # Leaving here so it can be reintroduced if the default test timeout
                        # is decreased in the future.
                        timeout_scaling *= 1

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                # Locate the built binary; layout depends on build system.
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # Strip trailing '#'-comments emitted by gtest.
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            # Unindented lines are suite names; indented lines
                            # are test names within the current suite.
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' % (' '.join(cmdline),
                                                             shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: run it as a single job.
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        """Make targets to build for this language."""
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        """Commands to run before the build (cmake generation, if used)."""
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        # Under ubsan the linker driver is clang++ rather than clang.
        if self.args.config == 'ubsan':
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]

        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # Running locally: only the host's default toolchain is allowed.
            _check_compiler(compiler, ['default'])

        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            # installed.
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
555
556
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Delegates Node testing to helper scripts in the grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Validate the compiler choice and derive runtime/node_version."""
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[8:]
        else:
            # Take off the word "node"
            self.runtime = 'node'
            self.node_version = compiler[4:]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """One job that runs the platform-appropriate grpc-node script."""
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        return [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                None,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
623
624
class PhpLanguage(object):
    """Driver for the PHP (PHP 5) wrapper tests."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """One job running the PHP test-suite shell script."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # PHP extension links against the embedded C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php'
664
665
class Php7Language(object):
    """Driver for the PHP 7 wrapper tests."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """One job running the shared PHP test-suite shell script."""
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # PHP extension links against the embedded C core libraries.
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
705
706
class PythonConfig(
        collections.namedtuple('PythonConfig', 'name build run')):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
710
711
class PythonLanguage(object):
    """Driver for the Python wrapper tests across multiple interpreters."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        # One PythonConfig per interpreter selected by --compiler.
        self.pythons = self._get_pythons(self.args)

    def test_specs(self):
        """One JobSpec per (interpreter config, test suite) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        # Each interpreter config carries its own venv build command.
        return [config.build for config in self.pythons]

    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self.python_manager_name(), _docker_arch_suffix(self.args.arch))

    def python_manager_name(self):
        """Which docker image family provides the requested interpreter."""
        if self.args.compiler in ['python3.5', 'python3.6']:
            return 'pyenv'
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        else:
            return 'jessie'

    def _get_pythons(self, args):
        """Build the tuple of PythonConfigs selected by args.compiler/arch."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'

        # Windows builds go through an msys2 bash shell; POSIX runs directly.
        if os.name == 'nt':
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']

        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]

        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)

        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python34_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)

    def __str__(self):
        return 'python'
867
868
class RubyLanguage(object):
    """Runs the Ruby test suites via the ruby helper scripts."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        # Unit tests plus the longer-running end2end suite.
        return [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        # Nothing to make: the build happens in build_steps.
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
914
915
class CSharpLanguage(object):
    """Builds and runs the C# tests listed in src/csharp/tests.json."""

    def __init__(self):
        # 'windows', 'mac' or 'linux' -- selects scripts, runtime and make options.
        self.platform = platform_string()

    def configure(self, config, args):
        """Stores the run config and validates compiler/arch for this platform."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['coreclr', 'default'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
            self._make_options = []
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'

            if self.platform == 'mac':
                # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
                self._make_options = ['EMBED_OPENSSL=true']
            else:
                self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """Returns one job per test (or, for gcov on Windows, one OpenCover
        job per assembly)."""
        with open('src/csharp/tests.json') as f:
            # Maps assembly name -> list of test names within that assembly.
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'

        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                # .exe assemblies run natively on Windows.
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        # The native extension the managed code P/Invokes into.
        return ['grpc_csharp_ext']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            # On Windows the native build goes through cmake (see pre_build_steps).
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
1039
1040
class ObjCLanguage(object):
    """Builds/runs the Objective-C tests and builds the example apps."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        specs = [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]
        # (shortname suffix, environment) for each example-app build job.
        example_builds = [
            ('helloworld', {
                'SCHEME': 'HelloWorld',
                'EXAMPLE_PATH': 'examples/objective-c/helloworld'
            }),
            ('routeguide', {
                'SCHEME': 'RouteGuideClient',
                'EXAMPLE_PATH': 'examples/objective-c/route_guide'
            }),
            ('authsample', {
                'SCHEME': 'AuthSample',
                'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
            }),
            ('sample', {
                'SCHEME': 'Sample',
                'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
            }),
            ('sample-frameworks', {
                'SCHEME': 'Sample',
                'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                'FRAMEWORKS': 'YES'
            }),
            ('switftsample', {
                'SCHEME': 'SwiftSample',
                'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
            }),
        ]
        for suffix, build_env in example_builds:
            specs.append(
                self.config.job_spec(
                    ['src/objective-c/tests/build_one_example.sh'],
                    timeout_seconds=10 * 60,
                    shortname='objc-build-example-%s' % suffix,
                    cpu_cost=1e6,
                    environ=build_env))
        specs.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=10 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # Needed by the interop portions of the ObjC test suite.
        return ['interop_server']

    def make_options(self):
        return []

    def build_steps(self):
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests run directly on a mac worker, never under docker.
        return None

    def __str__(self):
        return 'objc'
1151
1152
class Sanity(object):
    """Runs the repository sanity checks defined in sanity_tests.yaml."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Returns one job spec per check listed in sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # Already running inside docker; the checks must not re-enter it.
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            # yaml.safe_load instead of yaml.load: plain scalars/maps are all
            # this file contains, and it avoids the arbitrary-object
            # construction yaml.load allows (deprecated without a Loader).
            return [
                self.config.job_spec(
                    cmd['script'].split(),
                    timeout_seconds=30 * 60,
                    environ=environ,
                    cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
1198
1199
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    # NOTE(review): parsed with ast.literal_eval rather than json.load; this
    # only works while the file contains no JSON-specific literals
    # (true/false/null). Presumably intentional -- confirm before changing.
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Maps each --language flag value to its language handler object.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps our build config names to the MSBuild/cmake configuration to use.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
1223
1224
1225def _windows_arch_option(arch):
1226    """Returns msbuild cmdline option for selected architecture."""
1227    if arch == 'default' or arch == 'x86':
1228        return '/p:Platform=Win32'
1229    elif arch == 'x64':
1230        return '/p:Platform=x64'
1231    else:
1232        print('Architecture %s not supported.' % arch)
1233        sys.exit(1)
1234
1235
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not."""
    if platform_string() == 'windows':
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # Fixed: previously this branch read the global 'args.arch' instead of
        # the 'arch' parameter, so it checked a value the caller never passed.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
1258
1259
1260def _docker_arch_suffix(arch):
1261    """Returns suffix to dockerfile dir to use."""
1262    if arch == 'default' or arch == 'x64':
1263        return 'x64'
1264    elif arch == 'x86':
1265        return 'x86'
1266    else:
1267        print('Architecture %s not supported with current settings.' % arch)
1268        sys.exit(1)
1269
1270
def runs_per_test_type(arg_str):
    """Auxilary function to parse the "runs_per_test" flag.

       Returns:
           A positive integer or 0, the latter indicating an infinite number of
           runs.

       Raises:
           argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0: raise ValueError
        return n
    except (TypeError, ValueError):
        # Narrowed from a bare 'except:' so unrelated errors (e.g.
        # KeyboardInterrupt) are not misreported as bad input.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
1290
1291
def percent_type(arg_str):
    """argparse type callable: a float restricted to the [0, 100] range."""
    pct = float(arg_str)
    out_of_range = pct > 100 or pct < 0
    if out_of_range:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
1298
1299
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Whether a and b are approximately equal (backport of math.isclose)."""
    difference = abs(a - b)
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return difference <= tolerance
1303
1304
# parse command line
# Each flag below configures one aspect of the run; args is consumed by the
# module-level code that follows and by make_jobspec/_build_and_run.
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=['all'] + sorted(_LANGUAGES.keys()),
    nargs='+',
    default=['all'])
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
        'python_alpine', 'all_the_cpythons', 'electron1.3', 'electron1.6',
        'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epollsig,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
# Parsed at import time; everything below this point depends on args.
args = argp.parse_args()
1452
# Tests that historical BigQuery data marks as flaky, plus per-test cpu
# overrides -- both only populated when --auto_set_flakes is given.
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except Exception:
        # Best-effort: BQ may be unavailable; proceed without flake data.
        # (Narrowed from a bare 'except:' so Ctrl-C is not swallowed.)
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs
1470
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each spec is 'SUBMODULE_NAME[:BRANCH]'; BRANCH defaults to 'master'.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        # 'cwd=cwd' default captures the current loop value (avoids the
        # late-binding closure pitfall).
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    # Submodules that ship a gen_build_yaml.py feed the project generator.
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')
1500
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

if 'all' in args.language:
    # Materialize as a list: dict.keys() is a non-mutable view on Python 3,
    # and the gcov filtering below calls lang_list.remove().
    lang_list = list(_LANGUAGES.keys())
else:
    lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
    for bad in ['grpc-node', 'objc', 'sanity']:
        if bad in lang_list:
            lang_list.remove(bad)

languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))
1538
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        # Give the user a moment to read the warning before work starts.
        time.sleep(5)

    # All selected languages must share a single docker image, except under
    # gcov which has a dedicated multi-language image.
    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # Re-invoke this same script inside the container, minus --use_docker.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    # The docker child did all the work; nothing further to do in this process.
    sys.exit(0)

_check_arch_option(args.arch)
1586
1587
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Returns job specs that build 'targets' for build config 'cfg'.

    Args:
      cfg: build config name (also a key into _MSBUILD_CONFIG on Windows).
      targets: list of make/cmake target names; empty list yields no jobs.
      makefile: path to the makefile; a 'cmake/build/' prefix selects the
        cmake build flow on non-Windows platforms.
    """
    if platform_string() == 'windows':
        # On Windows every target builds through 'cmake --build'.
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    else:
        if targets and makefile.startswith('cmake/build/'):
            # With cmake, we've passed all the build configuration in the pre-build step already
            return [
                jobset.JobSpec(
                    [os.getenv('MAKE', 'make'), '-j',
                     '%d' % args.jobs] + targets,
                    cwd='cmake/build',
                    timeout_seconds=None)
            ]
        if targets:
            # Plain make build; honors --slowdown, the shared language make
            # options and the travis JENKINS_BUILD flag.
            return [
                jobset.JobSpec(
                    [
                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                        '%d' % args.jobs,
                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                        args.slowdown,
                        'CONFIG=%s' % cfg, 'Q='
                    ] + language_make_options +
                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
                    timeout_seconds=None)
            ]
        else:
            return []
1624
1625
# Union of make targets per makefile, across all selected languages.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
1631
1632
def build_step_environ(cfg):
    """Returns the environment variables shared by build steps for 'cfg'."""
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
1639
1640
# Build pipeline: pre-build (retried on flakes), make targets, then each
# language's own build scripts. Sets are used to de-duplicate identical steps.
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Cleanup steps run once after all tests finish (e.g. coverage reports).
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
1668
1669
def _shut_down_legacy_server(legacy_server_port):
    """Asks a legacy port server on 'legacy_server_port' to quit, if running.

    Best-effort: if no server responds (or its version response is not an
    integer), nothing is shut down and no error is raised.
    """
    try:
        # int() also validates that we really talked to a port server.
        version = int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any other failure means there is nothing to stop.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
1681
1682
1683def _calculate_num_runs_failures(list_of_results):
1684    """Caculate number of runs and failures for a particular test.
1685
1686  Args:
1687    list_of_results: (List) of JobResult object.
1688  Returns:
1689    A tuple of total number of runs and failures.
1690  """
1691    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1692    num_failures = 0
1693    for jobresult in list_of_results:
1694        if jobresult.retries > 0:
1695            num_runs += jobresult.retries
1696        if jobresult.num_failures > 0:
1697            num_failures += jobresult.num_failures
1698    return num_runs, num_failures
1699
1700
# _build_and_run results
class BuildAndRunError(object):
    """Identity tokens naming the failure categories _build_and_run reports."""

    # Unique sentinel objects; compared with `in`/`is`, never by value.
    BUILD, TEST, POST_TEST = object(), object(), object()
1707
1708
def _has_epollexclusive():
    """Probe whether this machine supports EPOLLEXCLUSIVE.

    Runs the check_epollexclusive helper binary built for the current
    config, if present.

    Returns:
      True iff the helper binary exists and exits successfully.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    # `except X, e:` (Python-2-only syntax) replaced with the form that is
    # valid on both Python 2.6+ and 3; the bound exception was unused anyway.
    except subprocess.CalledProcessError:
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
1721
1722
1723# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: callable returning True when the pass should stop early.
      newline_on_success: (bool) forwarded to jobset.run output formatting.
      xml_report: optional path for a JUnit-style XML report.
      build_only: (bool) when True, stop after the build steps succeed.

    Returns:
      A list of BuildAndRunError values for what failed (empty on success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the 'epollex' polling strategy when the check_epollexclusive probe
    # says this machine doesn't support EPOLLEXCLUSIVE.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        # runs_per_test == 0 means repeat the suite indefinitely.
        infinite_runs = runs_per_test == 0
        # Collect every test spec matching --regex and not matching
        # --regex_exclude, deduped across languages.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                      ) > 0, 'Must have at least one test for a -n inf run'
        # Repeat the (shuffled/sorted) run either forever or runs_per_test
        # times, then flatten into one stream of job specs.
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Per-test summary: failing every run is FAILED, failing only
            # some runs is reported as a FLAKE.
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        # Always tear down antagonist processes, even on failure or cancel.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    # Post-test steps run regardless of test outcome; their failures are
    # reported separately from test failures in the return value.
    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
1852
1853
if forever:
    # Watch-and-rerun mode: rebuild and rerun whenever watched dirs change.
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: _build_and_run returns a list of failure categories;
        # the previous `... == 0` compared that list to an int (always
        # False), and `success` was never updated, so the pass/fail
        # transition below could never fire correctly.
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Poll for filesystem changes before kicking off another pass.
        while not have_files_changed():
            time.sleep(1)
else:
    # One-shot mode: run a single pass and exit with a bitmask of failures.
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code bits: 1 = build failed, 2 = tests failed, 4 = post-test
    # steps failed.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
1891