#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the entire bm_*.py pipeline, and possibly comments on the PR."""

import argparse
import errno
import multiprocessing
import os
import random
import subprocess
import sys

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
    )
)

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]),
        "..",
        "..",
        "..",
        "run_tests",
        "python_utils",
    )
)

import bm_build
import bm_constants
import bm_diff
import bm_run
import check_on_pr
import jobset


def _args():
    argp = argparse.ArgumentParser(
        description="Perform diff on microbenchmarks"
    )
    argp.add_argument(
        "-t",
        "--track",
        choices=sorted(bm_constants._INTERESTING),
        nargs="+",
        default=sorted(bm_constants._INTERESTING),
        help="Which metrics to track",
    )
    argp.add_argument(
        "-b",
        "--benchmarks",
        nargs="+",
        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        help="Which benchmarks to run",
    )
    argp.add_argument(
        "-d",
        "--diff_base",
        type=str,
        help="Commit or branch to compare the current one to",
    )
    argp.add_argument(
        "-o",
        "--old",
        default="old",
        type=str,
        help='Name of the baseline run to compare to. Usually just called "old"',
    )
    argp.add_argument(
        "-r",
        "--regex",
        type=str,
        default="",
        help="Regex to filter which benchmarks are run",
    )
    argp.add_argument(
        "-l",
        "--loops",
        type=int,
        default=10,
        help=(
            "Number of times to loop the benchmarks. More loops cut down on"
            " noise"
        ),
    )
    argp.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=multiprocessing.cpu_count(),
        help="Number of CPUs to use",
    )
    argp.add_argument(
        "--pr_comment_name",
        type=str,
        default="microbenchmarks",
        help="Name that Jenkins will use to comment on the PR",
    )
    args = argp.parse_args()
    assert args.diff_base or args.old, "One of diff_base or old must be set!"
    if args.loops < 3:
        print("WARNING: This run will likely be noisy. Increase loops.")
Increase loops.") 117 return args 118 119 120def eintr_be_gone(fn): 121 """Run fn until it doesn't stop because of EINTR""" 122 123 def inner(*args): 124 while True: 125 try: 126 return fn(*args) 127 except IOError as e: 128 if e.errno != errno.EINTR: 129 raise 130 131 return inner 132 133 134def main(args): 135 bm_build.build("new", args.benchmarks, args.jobs) 136 137 old = args.old 138 if args.diff_base: 139 old = "old" 140 where_am_i = subprocess.check_output( 141 ["git", "rev-parse", "--abbrev-ref", "HEAD"] 142 ).strip() 143 subprocess.check_call(["git", "checkout", args.diff_base]) 144 try: 145 bm_build.build(old, args.benchmarks, args.jobs) 146 finally: 147 subprocess.check_call(["git", "checkout", where_am_i]) 148 subprocess.check_call(["git", "submodule", "update"]) 149 150 jobs_list = [] 151 jobs_list += bm_run.create_jobs( 152 "new", args.benchmarks, args.loops, args.regex 153 ) 154 jobs_list += bm_run.create_jobs( 155 old, args.benchmarks, args.loops, args.regex 156 ) 157 158 # shuffle all jobs to eliminate noise from GCE CPU drift 159 random.shuffle(jobs_list, random.SystemRandom().random) 160 jobset.run(jobs_list, maxjobs=args.jobs) 161 162 diff, note, significance = bm_diff.diff( 163 args.benchmarks, args.loops, args.regex, args.track, old, "new" 164 ) 165 if diff: 166 text = "[%s] Performance differences noted:\n%s" % ( 167 args.pr_comment_name, 168 diff, 169 ) 170 else: 171 text = ( 172 "[%s] No significant performance differences" % args.pr_comment_name 173 ) 174 if note: 175 text = note + "\n\n" + text 176 print("%s" % text) 177 check_on_pr.check_on_pr("Benchmark", "```\n%s\n```" % text) 178 179 180if __name__ == "__main__": 181 args = _args() 182 main(args) 183