#!/usr/bin/python

# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Generate new bench expectations from results of trybots on a code review."""


import collections
import compare_codereview
import os
import re
import shutil
import subprocess
import sys


BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
CHECKOUT_PATH = os.path.realpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')


TryBuild = collections.namedtuple(
    'TryBuild', ['builder_name', 'build_number', 'is_finished'])


def find_all_builds(codereview_url):
  """Finds and returns information about trybot runs for a code review.

  Args:
    codereview_url: URL of the codereview in question.

  Returns:
    List of NamedTuples: (builder_name, build_number, is_finished)
  """
  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
  try_builds = []
  for builder, data in results.iteritems():
    if builder.startswith('Perf'):
      build_num = data.url.split('/')[-1] if data.url else None
      is_finished = (data.status not in ('pending', 'try-pending') and
                     build_num is not None)
      try_builds.append(TryBuild(builder_name=builder,
                                 build_number=build_num,
                                 is_finished=is_finished))
  return try_builds


def _all_trybots_finished(try_builds):
  """Return True iff all of the given try jobs have finished.

  Args:
    try_builds: list of TryBuild instances.

  Returns:
    True if all of the given try jobs have finished, otherwise False.
  """
  for try_build in try_builds:
    if not try_build.is_finished:
      return False
  return True


def all_trybots_finished(codereview_url):
  """Return True iff all of the try jobs on the given codereview have finished.

  Args:
    codereview_url: string; URL of the codereview.

  Returns:
    True if all of the try jobs have finished, otherwise False.
  """
  return _all_trybots_finished(find_all_builds(codereview_url))


def get_bench_data(builder, build_num, dest_dir):
  """Download the bench data for the given builder at the given build_num.

  Args:
    builder: string; name of the builder.
    build_num: string; build number.
    dest_dir: string; destination directory for the bench data.
  """
  url = BENCH_DATA_URL % (builder, build_num)
  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)


def find_revision_from_downloaded_data(dest_dir):
  """Finds the revision at which the downloaded data was generated.

  Args:
    dest_dir: string; directory holding the downloaded data.

  Returns:
    The revision (git commit hash) at which the downloaded data was
    generated, or None if no revision can be found.
  """
  for data_file in os.listdir(dest_dir):
    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
    if match:
      return match.group('revision')
  return None


class TrybotNotFinishedError(Exception):
  pass


def gen_bench_expectations_from_codereview(codereview_url,
                                           error_on_unfinished=True):
  """Generate bench expectations from a code review.

  Scans the given code review for Perf trybot runs. Downloads the results of
  finished trybots and uses them to generate new expectations for their
  waterfall counterparts.

  Args:
    codereview_url: string; URL of the code review.
    error_on_unfinished: bool; throw an error if any trybot has not finished.
  """
  try_builds = find_all_builds(codereview_url)

  # Verify that all trybots have finished running.
  if error_on_unfinished and not _all_trybots_finished(try_builds):
    raise TrybotNotFinishedError('Not all trybots have finished.')

  failed_data_pull = []
  failed_gen_expectations = []

  if os.path.isdir(TMP_BENCH_DATA_DIR):
    shutil.rmtree(TMP_BENCH_DATA_DIR)

  for try_build in try_builds:
    try_builder = try_build.builder_name
    builder = try_builder.replace('-Trybot', '')

    # Download the data.
    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
    os.makedirs(dest_dir)
    try:
      get_bench_data(try_builder, try_build.build_number, dest_dir)
    except subprocess.CalledProcessError:
      failed_data_pull.append(try_builder)
      continue

    # Find the revision at which the data was generated.
    revision = find_revision_from_downloaded_data(dest_dir)
    if not revision:
      # If we can't find a revision, then something is wrong with the data we
      # downloaded. Skip this builder.
      failed_data_pull.append(try_builder)
      continue

    # Generate new expectations.
    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                               'bench_expectations_%s.txt' % builder)
    try:
      subprocess.check_call(['python',
                             os.path.join(CHECKOUT_PATH, 'bench',
                                          'gen_bench_expectations.py'),
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])
    except subprocess.CalledProcessError:
      failed_gen_expectations.append(builder)

  failure = ''
  if failed_data_pull:
    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
  if failed_gen_expectations:
    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
        failed_gen_expectations)
  if failure:
    raise Exception(failure)


if __name__ == '__main__':
  gen_bench_expectations_from_codereview(sys.argv[1])
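
# Example invocation (a minimal sketch; the script filename and the issue URL
# below are illustrative assumptions, and it presumes gsutil and
# bench/gen_bench_expectations.py are available in the checkout):
#
#   python gen_bench_expectations_from_codereview.py \
#       https://codereview.chromium.org/123456789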