#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to run opt and counters benchmarks and save json output """

import bm_constants

import argparse
import subprocess
import multiprocessing
import random
import itertools
import sys
import os

sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
                 'python_utils'))
import jobset


def _args():
    argp = argparse.ArgumentParser(description='Runs microbenchmarks')
    argp.add_argument('-b',
                      '--benchmarks',
                      nargs='+',
                      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                      help='Benchmarks to run')
    argp.add_argument('-j',
                      '--jobs',
                      type=int,
                      default=multiprocessing.cpu_count(),
                      help='Number of CPUs to use')
    argp.add_argument(
        '-n',
        '--name',
        type=str,
        help=
        'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
    )
    argp.add_argument('-r',
                      '--regex',
                      type=str,
                      default="",
                      help='Regex to filter benchmarks run')
    argp.add_argument(
        '-l',
        '--loops',
        type=int,
        default=20,
        help=
        'Number of times to loop the benchmarks. More loops cut down on noise'
    )
    argp.add_argument('--counters', dest='counters', action='store_true')
    argp.add_argument('--no-counters', dest='counters', action='store_false')
    argp.set_defaults(counters=True)
    args = argp.parse_args()
    assert args.name
    if args.loops < 3:
        print "WARNING: This run will likely be noisy. Increase loops to at least 3."
    return args


def _collect_bm_data(bm, cfg, name, regex, idx, loops):
    """Builds one JobSpec per benchmark matching the regex for one loop index."""
    jobs_list = []
    for line in subprocess.check_output([
            'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
            '--benchmark_filter=%s' % regex
    ]).splitlines():
        # Sanitize the benchmark name so it can be used in the output file name.
        stripped_line = line.strip().replace("/", "_").replace(
            "<", "_").replace(">", "_").replace(", ", "_")
        cmd = [
            'bm_diff_%s/%s/%s' % (name, cfg, bm),
            '--benchmark_filter=^%s$' % line,
            '--benchmark_out=%s.%s.%s.%s.%d.json' %
            (bm, stripped_line, cfg, name, idx),
            '--benchmark_out_format=json',
        ]
        jobs_list.append(
            jobset.JobSpec(cmd,
                           shortname='%s %s %s %s %d/%d' %
                           (bm, line, cfg, name, idx + 1, loops),
                           verbose_success=True,
                           cpu_cost=2,
                           timeout_seconds=60 * 60))  # one hour
    return jobs_list


def create_jobs(name, benchmarks, loops, regex, counters):
    """Creates the full list of benchmark jobs across all loops and configs."""
    jobs_list = []
    for loop in range(0, loops):
        for bm in benchmarks:
            jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
            if counters:
                jobs_list += _collect_bm_data(bm, 'counters', name, regex,
                                              loop, loops)
    # Shuffle so scheduling noise is spread evenly across loops and benchmarks.
    random.shuffle(jobs_list, random.SystemRandom().random)
    return jobs_list


if __name__ == '__main__':
    args = _args()
    jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
                            args.counters)
    jobset.run(jobs_list, maxjobs=args.jobs)
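# Example invocation (a sketch, not part of the tool itself): the build name
# 'baseline' and the benchmark 'bm_fullstack_unary_ping_pong' are hypothetical;
# the name must match the handle previously passed to bm_build.py and the
# benchmark must be listed in bm_constants._AVAILABLE_BENCHMARK_TESTS:
#
#   ./bm_run.py -n baseline -b bm_fullstack_unary_ping_pong -l 5 --no-counters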