#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to run opt and counters benchmarks and save json output """

import argparse
import itertools
import multiprocessing
import os
import random
import subprocess
import sys

# bm_constants and jobset live under run_tests/python_utils, so the path
# must be extended BEFORE importing them (the original appended the path
# after the imports, which raises ImportError when run standalone).
sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]),
        "..",
        "..",
        "..",
        "run_tests",
        "python_utils",
    )
)

import bm_constants
import jobset


def _args():
    """Parse and validate command-line arguments for the benchmark run.

    Returns:
        argparse.Namespace with benchmarks, jobs, name, regex, loops and
        counters attributes.  Exits via assertion if --name is missing.
    """
    argp = argparse.ArgumentParser(description="Runs microbenchmarks")
    argp.add_argument(
        "-b",
        "--benchmarks",
        nargs="+",
        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        help="Benchmarks to run",
    )
    argp.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=multiprocessing.cpu_count(),
        help="Number of CPUs to use",
    )
    argp.add_argument(
        "-n",
        "--name",
        type=str,
        help=(
            "Unique name of the build to run. Needs to match the handle passed"
            " to bm_build.py"
        ),
    )
    argp.add_argument(
        "-r",
        "--regex",
        type=str,
        default="",
        help="Regex to filter benchmarks run",
    )
    argp.add_argument(
        "-l",
        "--loops",
        type=int,
        default=20,
        help=(
            "Number of times to loops the benchmarks. More loops cuts down on"
            " noise"
        ),
    )
    argp.add_argument("--counters", dest="counters", action="store_true")
    argp.add_argument("--no-counters", dest="counters", action="store_false")
    argp.set_defaults(counters=True)
    args = argp.parse_args()
    assert args.name
    if args.loops < 3:
        print(
            "WARNING: This run will likely be noisy. Increase loops to at "
            "least 3."
        )
    return args


def _collect_bm_data(bm, cfg, name, regex, idx, loops):
    """Build one JobSpec per individual benchmark test in binary `bm`.

    Lists the tests by invoking the benchmark binary with
    --benchmark_list_tests, then creates a job that runs each test in
    isolation, writing its JSON output to a file keyed by benchmark,
    test name, config, build handle, and loop index.

    Args:
        bm: benchmark binary name (e.g. "bm_fullstack_unary_ping_pong").
        cfg: build configuration, "opt" or "counters".
        name: build handle matching the one passed to bm_build.py.
        regex: --benchmark_filter regex restricting which tests are listed.
        idx: zero-based loop iteration, embedded in output filename.
        loops: total loop count (for the progress shortname only).

    Returns:
        list of jobset.JobSpec, one per listed benchmark test.
    """
    jobs_list = []
    for line in subprocess.check_output(
        [
            "bm_diff_%s/%s/%s" % (name, cfg, bm),
            "--benchmark_list_tests",
            "--benchmark_filter=%s" % regex,
        ]
    ).splitlines():
        line = line.decode("UTF-8")
        # Sanitize the test name so it is usable inside a filename.
        stripped_line = (
            line.strip()
            .replace("/", "_")
            .replace("<", "_")
            .replace(">", "_")
            .replace(", ", "_")
        )
        cmd = [
            "bm_diff_%s/%s/%s" % (name, cfg, bm),
            # Anchor the filter so only this exact test runs.
            "--benchmark_filter=^%s$" % line,
            "--benchmark_out=%s.%s.%s.%s.%d.json"
            % (bm, stripped_line, cfg, name, idx),
            "--benchmark_out_format=json",
        ]
        jobs_list.append(
            jobset.JobSpec(
                cmd,
                shortname="%s %s %s %s %d/%d"
                % (bm, line, cfg, name, idx + 1, loops),
                verbose_success=True,
                cpu_cost=2,
                timeout_seconds=60 * 60,
            )
        )  # one hour
    return jobs_list


def create_jobs(name, benchmarks, loops, regex, counters=False):
    """Create the full shuffled job list for `loops` runs of `benchmarks`.

    Args:
        name: build handle matching the one passed to bm_build.py.
        benchmarks: iterable of benchmark binary names.
        loops: number of times each benchmark set is repeated.
        regex: --benchmark_filter regex restricting which tests run.
        counters: when True, also schedule the "counters" config runs in
            addition to "opt".  (The original signature omitted this
            parameter, so the 5-argument call in __main__ raised
            TypeError and --counters was silently unusable.)

    Returns:
        randomly shuffled list of jobset.JobSpec.
    """
    jobs_list = []
    for loop in range(0, loops):
        for bm in benchmarks:
            jobs_list += _collect_bm_data(bm, "opt", name, regex, loop, loops)
            if counters:
                jobs_list += _collect_bm_data(
                    bm, "counters", name, regex, loop, loops
                )
    # Shuffle so per-config/per-loop runs interleave, reducing systematic
    # noise.  The two-argument random.shuffle(x, random_fn) form was
    # removed in Python 3.11; shuffling via a SystemRandom instance is the
    # supported equivalent.
    random.SystemRandom().shuffle(jobs_list)
    return jobs_list


if __name__ == "__main__":
    args = _args()
    jobs_list = create_jobs(
        args.name, args.benchmarks, args.loops, args.regex, args.counters
    )
    jobset.run(jobs_list, maxjobs=args.jobs)