#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import html
import multiprocessing
import os
import subprocess
import sys

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server

sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
                 'microbenchmarks', 'bm_diff'))
import bm_constants

flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')

start_port_server.start_port_server()


def fnize(s):
    out = ''
    for c in s:
        if c in '<>, /':
            if out and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out

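# A worked example of fnize, which collapses the characters '<>, /' in a
# benchmark name into single underscores (the name below is illustrative,
# not taken from the benchmark list):
#   fnize('BM_StreamingPingPong<TCP, NoOpMutator>/0/1')
#     -> 'BM_StreamingPingPong_TCP_NoOpMutator_0_1'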

# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""


def heading(name):
    global index_html
    index_html += "<h1>%s</h1>\n" % name


def link(txt, tgt):
    global index_html
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
        tgt, quote=True), html.escape(txt))


def text(txt):
    global index_html
    index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)


def _bazel_build_benchmark(bm_name, cfg):
    """Build the given benchmark with bazel."""
    subprocess.check_call([
        'tools/bazel', 'build',
        '--config=%s' % cfg,
        '//test/cpp/microbenchmarks:%s' % bm_name
    ])


def collect_latency(bm_name, args):
    """Generate latency profiles."""
    benchmarks = []
    profile_analysis = []
    cleanup = []

    heading('Latency Profiles: %s' % bm_name)
    _bazel_build_benchmark(bm_name, 'basicprof')
    for line in subprocess.check_output([
            'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
            '--benchmark_list_tests'
    ]).decode('UTF-8').splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line,
                    '--benchmark_min_time=0.05'
                ],
                environ={'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)},
                shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    sys.executable,
                    'tools/profiling/latency_profile/profile_analyzer.py',
                    '--source', '%s.trace' % fnize(line),
                    '--fmt', 'simple',
                    '--out', 'reports/%s.txt' % fnize(line)
                ],
                timeout_seconds=20 * 60,
                shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: profile_analysis jobs can
        # each consume upwards of five gigabytes of RAM in some cases, so
        # analysing hundreds of them at once is impractical -- but we want at
        # least some concurrency or the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two
            # cores (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks,
                       maxjobs=max(1,
                                   multiprocessing.cpu_count() // 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if benchmarks:
        jobset.run(benchmarks,
                   maxjobs=max(1,
                               multiprocessing.cpu_count() // 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def collect_perf(bm_name, args):
    """Generate flamegraphs."""
    heading('Flamegraphs: %s' % bm_name)
    _bazel_build_benchmark(bm_name, 'mutrace')
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output([
            'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
            '--benchmark_list_tests'
    ]).decode('UTF-8').splitlines():
        link(line, '%s.svg' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'perf', 'record', '-o', '%s-perf.data' % fnize(line),
                    '-g', '-F', '997',
                    'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line,
                    '--benchmark_min_time=10'
                ],
                shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required
        # for this processing is large
        if len(benchmarks) >= 20:
            # run the perf jobs one at a time: each one writes a large
            # perf.data file, and concurrent profiling would skew the samples
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if benchmarks:
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def run_summary(bm_name, cfg, base_json_name):
    # note: reads the module-level `args` parsed at the bottom of this file
    _bazel_build_benchmark(bm_name, cfg)
    cmd = [
        'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd).decode('UTF-8')

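# run_summary writes '<base_json_name>.<cfg>.json' via --benchmark_out;
# collect_summary below depends on the 'opt' and 'counters' variants of
# these files when building the CSV for the BigQuery upload.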
def collect_summary(bm_name, args):
    heading('Summary: %s [no counters]' % bm_name)
    text(run_summary(bm_name, 'opt', bm_name))
    heading('Summary: %s [with counters]' % bm_name)
    text(run_summary(bm_name, 'counters', bm_name))
    if args.bigquery_upload:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name,
                    '%s.opt.json' % bm_name
                ]).decode('UTF-8'))
        subprocess.check_call([
            'bq', 'load', 'microbenchmarks.microbenchmarks',
            '%s.csv' % bm_name
        ])


collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
                  '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
                  '--benchmarks',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  type=str,
                  help='Which microbenchmarks should be run')
argp.add_argument('--bigquery_upload',
                  default=False,
                  action='store_true',
                  help='Upload results from summary collection to BigQuery')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()

try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)
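# Example invocation (placeholders in angle brackets; valid benchmark names
# come from bm_constants._AVAILABLE_BENCHMARK_TESTS):
#   python3 <this script> --collect summary latency --benchmarks <benchmark>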