#!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests using docker images in Google Container Registry per matrix."""

from __future__ import print_function

import argparse
import atexit
import json
import multiprocessing
import os
import re
import subprocess
import sys
import uuid

# Language Runtime Matrix
import client_matrix

python_util_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
sys.path.append(python_util_dir)
import dockerjob
import jobset
import report_utils
import upload_test_results

_TEST_TIMEOUT_SECONDS = 60
_PULL_IMAGE_TIMEOUT_SECONDS = 15 * 60
_MAX_PARALLEL_DOWNLOADS = 6
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(
    list(
        set(release
            for release_dict in client_matrix.LANG_RELEASE_MATRIX.values()
            for release in release_dict.keys())))

argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('--gcr_path',
                  default='gcr.io/grpc-testing',
                  help='Path of docker images in Google Container Registry')
argp.add_argument('--release',
                  default='all',
                  choices=['all'] + _RELEASES,
                  help='Release tags to test. When testing all '
                  'releases defined in client_matrix.py, use "all".')
argp.add_argument('-l',
                  '--language',
                  choices=['all'] + sorted(_LANGUAGES),
                  nargs='+',
                  default=['all'],
                  help='Languages to test')
argp.add_argument(
    '--keep',
    action='store_true',
    help='Keep the created local images after finishing the tests.')
argp.add_argument('--report_file',
                  default='report.xml',
                  help='The result file to create.')
argp.add_argument('--allow_flakes',
                  default=False,
                  action='store_const',
                  const=True,
                  help=('Allow flaky tests to show as passing (re-runs failed '
                        'tests up to five times)'))
argp.add_argument('--bq_result_table',
                  default='',
                  type=str,
                  nargs='?',
                  help='Upload test results to a specified BQ table.')
# Requests will be routed through the specified VIP by default.
# See go/grpc-interop-tests (internal-only) for details.
argp.add_argument('--server_host',
                  default='74.125.206.210',
                  type=str,
                  nargs='?',
                  help='The gateway to backend services.')
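
# For illustration only: a hypothetical invocation of this script. The script
# filename and the 'go'/'v1.0.1' values below are assumptions; the language
# and release must actually be defined in client_matrix.py.
#
#   python run_interop_matrix_tests.py --language go --release v1.0.1 \
#       --gcr_path gcr.io/grpc-testing --report_file report.xml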


def _get_test_images_for_lang(lang, release_arg, image_path_prefix):
    """Find docker images for a language across releases and runtimes.

    Returns a dictionary of lists of (<tag>, <image-full-path>) tuples,
    keyed by runtime.
    """
    if release_arg == 'all':
        # Use all defined releases for the given language.
        releases = client_matrix.get_release_tags(lang)
    else:
        # Look for a particular release.
        if release_arg not in client_matrix.get_release_tags(lang):
            jobset.message('SKIPPED',
                           'release %s for %s is not defined' %
                           (release_arg, lang),
                           do_newline=True)
            return {}
        releases = [release_arg]

    # Image tuples keyed by runtime.
    images = {}
    for tag in releases:
        for runtime in client_matrix.get_runtimes_for_lang_release(lang, tag):
            image_name = '%s/grpc_interop_%s:%s' % (image_path_prefix, runtime,
                                                    tag)
            image_tuple = (tag, image_name)

            if runtime not in images:
                images[runtime] = []
            images[runtime].append(image_tuple)
    return images


def _read_test_cases_file(lang, runtime, release):
    """Read test cases from a bash-like file and return a list of commands."""
    # Check to see if we need to use a particular version of test cases.
    release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release)
    testcases_file = release_info.testcases_file if release_info else None
    if not testcases_file:
        # TODO(jtattermusch): remove the double-underscore, it is pointless
        testcases_file = '%s__master' % lang

    # For csharp, the testcases file used depends on the runtime.
    # TODO(jtattermusch): remove this odd specialcase
    if lang == 'csharp' and runtime == 'csharpcoreclr':
        testcases_file = testcases_file.replace('csharp_', 'csharpcoreclr_')

    testcases_filepath = os.path.join(os.path.dirname(__file__), 'testcases',
                                      testcases_file)
    lines = []
    with open(testcases_filepath) as f:
        for line in f.readlines():
            line = re.sub('\\#.*$', '', line)  # remove hash comments
            line = line.strip()
            if line and not line.startswith('echo'):
                # Each non-empty line is treated as a test case command.
                lines.append(line)
    return lines


def _cleanup_docker_image(image):
    jobset.message('START', 'Cleanup docker image %s' % image, do_newline=True)
    dockerjob.remove_image(image, skip_nonexistent=True)


args = argp.parse_args()


def _generate_test_case_jobspecs(lang, runtime, release, suite_name):
    """Returns the list of test cases from testcase files per lang/release."""
    testcase_lines = _read_test_cases_file(lang, runtime, release)

    job_spec_list = []
    for line in testcase_lines:
        # TODO(jtattermusch): revisit the logic for updating test case commands;
        # what is currently being done seems fragile.

        # Extract the test case name from the command line.
        m = re.search(r'--test_case=(\w+)', line)
        testcase_name = m.group(1) if m else 'unknown_test'

        # Extract the server name from the command line.
        if '--server_host_override=' in line:
            m = re.search(
                r'--server_host_override=((.*).sandbox.googleapis.com)', line)
        else:
            m = re.search(r'--server_host=((.*).sandbox.googleapis.com)', line)
        server = m.group(1) if m else 'unknown_server'
        server_short = m.group(2) if m else 'unknown_server'

        # Replace the original server_host argument.
        assert '--server_host=' in line
        line = re.sub(r'--server_host=[^ ]*',
                      r'--server_host=%s' % args.server_host, line)

        # Some interop tests don't set server_host_override (see #17407),
        # but we need to set it if a different host is specified via
        # command-line args.
        if args.server_host != server and '--server_host_override=' not in line:
            line = re.sub(r'(--server_host=[^ ]*)',
                          r'\1 --server_host_override=%s' % server, line)

        spec = jobset.JobSpec(cmdline=line,
                              shortname='%s:%s:%s:%s' %
                              (suite_name, lang, server_short, testcase_name),
                              timeout_seconds=_TEST_TIMEOUT_SECONDS,
                              shell=True,
                              flake_retries=5 if args.allow_flakes else 0)
        job_spec_list.append(spec)
    return job_spec_list
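
# For illustration only: the values below are hypothetical, but follow the
# shape the regexes above expect. Given args.server_host='10.0.0.1' and a
# testcase command containing
#   --test_case=large_unary --server_host=grpc-test.sandbox.googleapis.com
# the arguments are rewritten to
#   --test_case=large_unary --server_host=10.0.0.1 \
#       --server_host_override=grpc-test.sandbox.googleapis.com
# and the resulting JobSpec shortname is
#   '<suite_name>:<lang>:grpc-test:large_unary'.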


def _pull_image_for_lang(lang, image, release):
    """Pull an image for a given language from the image registry."""
    cmdline = [
        'time gcloud docker -- pull %s && time docker run --rm=true %s /bin/true'
        % (image, image)
    ]
    return jobset.JobSpec(cmdline=cmdline,
                          shortname='pull_image_{}'.format(image),
                          timeout_seconds=_PULL_IMAGE_TIMEOUT_SECONDS,
                          shell=True,
                          flake_retries=2)


def _test_release(lang, runtime, release, image, xml_report_tree, skip_tests):
    total_num_failures = 0
    suite_name = '%s__%s_%s' % (lang, runtime, release)
    job_spec_list = _generate_test_case_jobspecs(lang, runtime, release,
                                                 suite_name)

    if not job_spec_list:
        jobset.message('FAILED', 'No test cases were found.', do_newline=True)
        total_num_failures += 1
    else:
        num_failures, resultset = jobset.run(job_spec_list,
                                             newline_on_success=True,
                                             add_env={'docker_image': image},
                                             maxjobs=args.jobs,
                                             skip_jobs=skip_tests)
        if args.bq_result_table and resultset:
            upload_test_results.upload_interop_results_to_bq(
                resultset, args.bq_result_table)
        if skip_tests:
            jobset.message('FAILED', 'Tests were skipped', do_newline=True)
            total_num_failures += 1
        if num_failures:
            total_num_failures += num_failures

        report_utils.append_junit_xml_results(xml_report_tree, resultset,
                                              'grpc_interop_matrix', suite_name,
                                              str(uuid.uuid4()))
    return total_num_failures
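
# Sketch of the batching done by _run_tests_for_lang below (numbers are
# hypothetical): with 14 images for a runtime, --jobs >= 6 and
# _MAX_PARALLEL_DOWNLOADS = 6, images are pulled and tested in chunks of
# 6, 6 and 2, so at most 6 freshly pulled images sit on local disk before
# being tested and (unless --keep is given) removed.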


def _run_tests_for_lang(lang, runtime, images, xml_report_tree):
    """Find and run all test cases for a language.

    images is a list of (<release-tag>, <image-full-path>) tuples.
    """
    skip_tests = False
    total_num_failures = 0

    max_pull_jobs = min(args.jobs, _MAX_PARALLEL_DOWNLOADS)
    max_chunk_size = max_pull_jobs
    # Ceiling division: the last chunk may hold fewer than max_chunk_size images.
    chunk_count = (len(images) + max_chunk_size - 1) // max_chunk_size

    for chunk_index in range(chunk_count):
        chunk_start = chunk_index * max_chunk_size
        chunk_size = min(max_chunk_size, len(images) - chunk_start)
        chunk_end = chunk_start + chunk_size
        pull_specs = []
        if not skip_tests:
            for release, image in images[chunk_start:chunk_end]:
                pull_specs.append(_pull_image_for_lang(lang, image, release))

        # NOTE(rbellevi): We batch docker pull operations to maximize
        # parallelism, without letting the disk usage grow unbounded.
        pull_failures, _ = jobset.run(pull_specs,
                                      newline_on_success=True,
                                      maxjobs=max_pull_jobs)
        if pull_failures:
            jobset.message(
                'FAILED',
                'Image download failed. Skipping tests for language "%s"' %
                lang,
                do_newline=True)
            skip_tests = True
        for release, image in images[chunk_start:chunk_end]:
            total_num_failures += _test_release(lang, runtime, release, image,
                                                xml_report_tree, skip_tests)
        if not args.keep:
            for _, image in images[chunk_start:chunk_end]:
                _cleanup_docker_image(image)
    if not total_num_failures:
        jobset.message('SUCCESS',
                       'All {} tests passed'.format(lang),
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Some {} tests failed'.format(lang),
                       do_newline=True)

    return total_num_failures


languages = args.language if args.language != ['all'] else _LANGUAGES
total_num_failures = 0
_xml_report_tree = report_utils.new_junit_xml_tree()
for lang in languages:
    docker_images = _get_test_images_for_lang(lang, args.release, args.gcr_path)
    for runtime in sorted(docker_images.keys()):
        total_num_failures += _run_tests_for_lang(lang, runtime,
                                                  docker_images[runtime],
                                                  _xml_report_tree)

report_utils.create_xml_report_file(_xml_report_tree, args.report_file)

if total_num_failures:
    sys.exit(1)
sys.exit(0)
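
# For illustration only (the language, runtime and release names are
# hypothetical examples, not taken from client_matrix.py): for lang='go' with
# runtimes 'go1.8' and 'go1.11', _get_test_images_for_lang returns a mapping
# shaped like
#   {'go1.8': [('v1.0.1', 'gcr.io/grpc-testing/grpc_interop_go1.8:v1.0.1'), ...],
#    'go1.11': [...]}
# and each (runtime, release) pair tested above contributes one JUnit suite,
# named e.g. 'go__go1.8_v1.0.1', to the --report_file output.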