# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Uploads performance data to the performance dashboard.

Performance tests may output data that needs to be displayed on the performance
dashboard.  The autotest TKO parser invokes this module with each test
associated with a job.  If a test has performance data associated with it, it
is uploaded to the performance dashboard.  The performance dashboard is owned
by Chrome team and is available here: https://chromeperf.appspot.com/.  Users
must be logged in with an @google.com account to view chromeOS perf data there.

"""

import httplib
import json
import os
import re
import urllib
import urllib2

import common
from autotest_lib.tko import utils as tko_utils

_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
_PRESENTATION_CONFIG_FILE = os.path.join(
        _ROOT_DIR, 'perf_dashboard_config.json')
_PRESENTATION_SHADOW_CONFIG_FILE = os.path.join(
        _ROOT_DIR, 'perf_dashboard_shadow_config.json')
_DASHBOARD_UPLOAD_URL = 'https://chromeperf.appspot.com/add_point'

# Format for Chrome and Chrome OS version strings.
VERSION_REGEXP = r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$'


class PerfUploadingError(Exception):
    """Exception raised in perf_uploader"""
    pass


def _parse_config_file(config_file):
    """Parses a presentation config file and stores the info into a dict.

    The config file contains information about how to present the perf data
    on the perf dashboard.  This is required if the default presentation
    settings aren't desired for certain tests.

    @param config_file: Path to the configuration file to be parsed.

    @returns A dictionary mapping each unique autotest name to a dictionary
        of presentation config information.

    @raises PerfUploadingError if config data or master name for the test
        is missing from the config file.

    """
    json_obj = []
    # A missing config file is not an error: fall through with no entries.
    if os.path.exists(config_file):
        with open(config_file, 'r') as fp:
            json_obj = json.load(fp)
    config_dict = {}
    for entry in json_obj:
        if 'autotest_regex' in entry:
            config_dict[entry['autotest_regex']] = entry
        else:
            # Entries keyed by plain name are anchored and escaped so they
            # only match that exact autotest name.
            config_dict['^' + re.escape(entry['autotest_name']) + '$'] = entry
    return config_dict


def _gather_presentation_info(config_data, test_name):
    """Gathers presentation info from config data for the given test name.

    @param config_data: A dictionary of dashboard presentation info for all
        tests, as returned by _parse_config_file().  Info is keyed by autotest
        name.
    @param test_name: The name of an autotest.

    @return A dictionary containing presentation information extracted from
        |config_data| for the given autotest name.

    @raises PerfUploadingError if some required data is missing.
    """
    presentation_dict = None
    for regex in config_data:
        match = re.match(regex, test_name)
        if match:
            # Exactly one config entry may match a test; two matches mean
            # the config file is ambiguous.
            if presentation_dict:
                raise PerfUploadingError('Duplicate config data refer to the '
                                         'same test %s' % test_name)
            presentation_dict = config_data[regex]

    if not presentation_dict:
        raise PerfUploadingError(
                'No config data is specified for test %s in %s.' %
                (test_name, _PRESENTATION_CONFIG_FILE))
    try:
        master_name = presentation_dict['master_name']
    except KeyError:
        raise PerfUploadingError(
                'No master name is specified for test %s in %s.' %
                (test_name, _PRESENTATION_CONFIG_FILE))
    # The config may override the name under which results are displayed.
    if 'dashboard_test_name' in presentation_dict:
        test_name = presentation_dict['dashboard_test_name']
    return {'master_name': master_name, 'test_name': test_name}


def _format_for_upload(board_name, cros_version, chrome_version,
                       hardware_id, hardware_hostname, perf_data,
                       presentation_info, jobname):
    """Formats perf data suitable to upload to the perf dashboard.

    The perf dashboard expects perf data to be uploaded as a
    specially-formatted JSON string.  In particular, the JSON object must be a
    dictionary with key "data", and value being a list of dictionaries where
    each dictionary contains all the information associated with a single
    measured perf value: master name, bot name, test name, perf value, error
    value, units, and build version numbers.

    @param board_name: The string name of the image board name.
    @param cros_version: The string chromeOS version number.
    @param chrome_version: The string chrome version number.
    @param hardware_id: String that identifies the type of hardware the test
        was executed on.
    @param hardware_hostname: String that identifies the name of the device
        the test was executed on.
    @param perf_data: A dictionary of measured perf data; either raw
        client-side chart data, or a server-side dictionary that already
        contains a 'charts' section.
    @param presentation_info: A dictionary of dashboard presentation info for
        the given test, as identified by _gather_presentation_info().
    @param jobname: A string uniquely identifying the test run, this enables
        linking back from a test result to the logs of the test run.

    @return A dictionary containing the formatted information ready to upload
        to the performance dashboard.

    """
    perf_values = perf_data
    # Client side case - server side comes with its own charts data section.
    if 'charts' not in perf_values:
        perf_values = {
            'format_version': '1.0',
            'benchmark_name': presentation_info['test_name'],
            'charts': perf_data,
        }

    dash_entry = {
        'master': presentation_info['master_name'],
        'bot': 'cros-' + board_name,  # Prefix to clarify it's ChromeOS.
        'point_id': _get_id_from_version(chrome_version, cros_version),
        'versions': {
            'cros_version': cros_version,
            'chrome_version': chrome_version,
        },
        'supplemental': {
            'default_rev': 'r_cros_version',
            'hardware_identifier': hardware_id,
            'hardware_hostname': hardware_hostname,
            'jobname': jobname,
        },
        'chart_data': perf_values,
    }
    return {'data': json.dumps(dash_entry)}


def _get_version_numbers(test_attributes):
    """Gets the version numbers from the test attributes and validates them.

    @param test_attributes: The attributes property (which is a dict) of an
        autotest tko.models.test object.

    @return A pair of strings (Chrome OS version, Chrome version).

    @raises PerfUploadingError if a version isn't formatted as expected.
    """
    chrome_version = test_attributes.get('CHROME_VERSION', '')
    cros_version = test_attributes.get('CHROMEOS_RELEASE_VERSION', '')
    cros_milestone = test_attributes.get('CHROMEOS_RELEASE_CHROME_MILESTONE')
    # Use the release milestone as the milestone if present, otherwise prefix
    # the cros version with the Chrome browser milestone.
    if cros_milestone:
        cros_version = "%s.%s" % (cros_milestone, cros_version)
    else:
        cros_version = chrome_version[:chrome_version.find('.') + 1] + cros_version
    if not re.match(VERSION_REGEXP, cros_version):
        raise PerfUploadingError('CrOS version "%s" does not match expected '
                                 'format.' % cros_version)
    if not re.match(VERSION_REGEXP, chrome_version):
        raise PerfUploadingError('Chrome version "%s" does not match expected '
                                 'format.' % chrome_version)
    return (cros_version, chrome_version)


def _get_id_from_version(chrome_version, cros_version):
    """Computes the point ID to use, from Chrome and ChromeOS version numbers.

    For ChromeOS row data, data values are associated with both a Chrome
    version number and a ChromeOS version number (unlike for Chrome row data
    that is associated with a single revision number).  This function takes
    both version numbers as input, then computes a single, unique integer ID
    from them, which serves as a 'fake' revision number that can uniquely
    identify each ChromeOS data point, and which will allow ChromeOS data
    points to be sorted by Chrome version number, with ties broken by ChromeOS
    version number.

    To compute the integer ID, we take the portions of each version number that
    serve as the shortest unambiguous names for each (as described here:
    http://www.chromium.org/developers/version-numbers).  We then force each
    component of each portion to be a fixed width (padded by zeros if needed),
    concatenate all digits together (with those coming from the Chrome version
    number first), and convert the entire string of digits into an integer.
    We ensure that the total number of digits does not exceed that which is
    allowed by AppEngine NDB for an integer (64-bit signed value).

    For example:
      Chrome version: 27.0.1452.2 (shortest unambiguous name: 1452.2)
      ChromeOS version: 27.3906.0.0 (shortest unambiguous name: 3906.0.0)
      concatenated together with padding for fixed-width columns:
        ('01452' + '002') + ('03906' + '000' + '00') = '014520020390600000'
      Final integer ID: 14520020390600000

    @param chrome_version: The Chrome version number as a string.
    @param cros_version: The ChromeOS version number as a string.

    @return A unique integer ID associated with the two given version numbers,
        or None if either version is malformed or the result would overflow
        the allowed digit budget.

    """

    # Number of digits to use from each part of the version string for Chrome
    # and Chrome OS versions when building a point ID out of these two
    # versions.  A zero width drops that component entirely.
    chrome_version_col_widths = [0, 0, 5, 3]
    cros_version_col_widths = [0, 5, 3, 2]

    def get_digits_from_version(version_num, column_widths):
        # Returns the zero-padded digit string for |version_num|, or None if
        # the version does not match the expected 4-part format.
        if re.match(VERSION_REGEXP, version_num):
            computed_string = ''
            version_parts = version_num.split('.')
            for i, version_part in enumerate(version_parts):
                if column_widths[i]:
                    computed_string += version_part.zfill(column_widths[i])
            return computed_string
        else:
            return None

    chrome_digits = get_digits_from_version(
            chrome_version, chrome_version_col_widths)
    cros_digits = get_digits_from_version(
            cros_version, cros_version_col_widths)
    if not chrome_digits or not cros_digits:
        return None
    result_digits = chrome_digits + cros_digits
    max_digits = sum(chrome_version_col_widths + cros_version_col_widths)
    if len(result_digits) > max_digits:
        return None
    return int(result_digits)


def _send_to_dashboard(data_obj):
    """Sends formatted perf data to the perf dashboard.

    @param data_obj: A formatted data object as returned by
        _format_for_upload().

    @raises PerfUploadingError if an exception was raised when uploading.

    """
    encoded = urllib.urlencode(data_obj)
    req = urllib2.Request(_DASHBOARD_UPLOAD_URL, encoded)
    try:
        # Close the response to release the underlying connection; the
        # response body is not needed.
        urllib2.urlopen(req).close()
    except urllib2.HTTPError as e:
        raise PerfUploadingError('HTTPError: %d %s for JSON %s\n' % (
                e.code, e.msg, data_obj['data']))
    except urllib2.URLError as e:
        raise PerfUploadingError(
                'URLError: %s for JSON %s\n' %
                (str(e.reason), data_obj['data']))
    except httplib.HTTPException:
        raise PerfUploadingError(
                'HTTPException for JSON %s\n' % data_obj['data'])


def _get_image_board_name(platform, image):
    """Returns the board name of the tested image.

    Note that it can be different from the board name of DUTs the test was
    scheduled to.

    @param platform: The DUT platform in lab. eg. eve
    @param image: The image installed in the DUT. eg. eve-arcnext-release.
    @return: the image board name.
    """
    # This is a hacky way to resolve the mixture of reports in chromeperf
    # dashboard.  This solution is copied from our other reporting
    # pipeline.
    image_board_name = platform

    suffixes = ['-arcnext', '-ndktranslation', '-arcvm', '-kernelnext']

    for suffix in suffixes:
        # Append the suffix only when the image carries it but the platform
        # name does not already end with it.
        if not platform.endswith(suffix) and (suffix + '-') in image:
            image_board_name += suffix
    return image_board_name


def upload_test(job, test, jobname):
    """Uploads any perf data associated with a test to the perf dashboard.

    @param job: An autotest tko.models.job object that is associated with the
        given |test|.
    @param test: An autotest tko.models.test object that may or may not be
        associated with measured perf data.
    @param jobname: A string uniquely identifying the test run, this enables
        linking back from a test result to the logs of the test run.

    """

    # Format the perf data for the upload, then upload it.
    test_name = test.testname
    image_board_name = _get_image_board_name(
            job.machine_group, job.keyval_dict.get('build', job.machine_group))
    # Append the platform name with '.arc' if the suffix of the control
    # filename is '.arc'.
    if job.label and re.match(r'.*\.arc$', job.label):
        image_board_name += '.arc'
    hardware_id = test.attributes.get('hwid', '')
    hardware_hostname = test.machine
    config_data = _parse_config_file(_PRESENTATION_CONFIG_FILE)
    try:
        shadow_config_data = _parse_config_file(_PRESENTATION_SHADOW_CONFIG_FILE)
        config_data.update(shadow_config_data)
    except ValueError as e:
        # A malformed shadow config must not block the upload; log and
        # continue with the base config only.
        tko_utils.dprint('Failed to parse config file %s: %s.' %
                         (_PRESENTATION_SHADOW_CONFIG_FILE, e))
    try:
        cros_version, chrome_version = _get_version_numbers(test.attributes)
        presentation_info = _gather_presentation_info(config_data, test_name)
        formatted_data = _format_for_upload(image_board_name, cros_version,
                                            chrome_version, hardware_id,
                                            hardware_hostname, test.perf_values,
                                            presentation_info, jobname)
        _send_to_dashboard(formatted_data)
    except PerfUploadingError as e:
        tko_utils.dprint('Error when uploading perf data to the perf '
                         'dashboard for test %s: %s' % (test_name, e))
    else:
        tko_utils.dprint('Successfully uploaded perf data to the perf '
                         'dashboard for test %s.' % test_name)