1# Copyright (c) 2017 The Chromium OS Authors. All rights reserved. 2# Use of this source code is governed by a BSD-style license that can be 3# found in the LICENSE file. 4 5import collections 6import json 7import logging 8import numpy 9import operator 10import os 11import re 12import time 13import urllib 14import urllib2 15 16from autotest_lib.client.bin import utils 17from autotest_lib.client.common_lib import error 18from autotest_lib.client.common_lib import lsbrelease_utils 19from autotest_lib.client.common_lib.cros import retry 20from autotest_lib.client.cros.power import power_status 21from autotest_lib.client.cros.power import power_utils 22 23_HTML_CHART_STR = ''' 24<!DOCTYPE html> 25<html> 26<head> 27<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"> 28</script> 29<script type="text/javascript"> 30 google.charts.load('current', {{'packages':['corechart']}}); 31 google.charts.setOnLoadCallback(drawChart); 32 function drawChart() {{ 33 var data = google.visualization.arrayToDataTable([ 34{data} 35 ]); 36 var unit = '{unit}'; 37 var options = {{ 38 width: 1600, 39 height: 1200, 40 lineWidth: 1, 41 legend: {{ position: 'top', maxLines: 3 }}, 42 vAxis: {{ viewWindow: {{min: 0}}, title: '{type} ({unit})' }}, 43 hAxis: {{ viewWindow: {{min: 0}}, title: 'time (second)' }}, 44 }}; 45 var element = document.getElementById('{type}'); 46 var chart; 47 if (unit == 'percent') {{ 48 options['isStacked'] = true; 49 chart = new google.visualization.SteppedAreaChart(element); 50 }} else {{ 51 chart = new google.visualization.LineChart(element); 52 }} 53 chart.draw(data, options); 54 }} 55</script> 56</head> 57<body> 58<div id="{type}"></div> 59</body> 60</html> 61''' 62 63 64class BaseDashboard(object): 65 """Base class that implements method for prepare and upload data to power 66 dashboard. 67 """ 68 69 def __init__(self, logger, testname, start_ts=None, resultsdir=None, 70 uploadurl=None): 71 """Create BaseDashboard objects. 72 73 Args: 74 logger: object that store the log. This will get convert to 75 dictionary by self._convert() 76 testname: name of current test 77 start_ts: timestamp of when test started in seconds since epoch 78 resultsdir: directory to save the power json 79 uploadurl: url to upload power data 80 """ 81 self._logger = logger 82 self._testname = testname 83 self._start_ts = start_ts if start_ts else time.time() 84 self._resultsdir = resultsdir 85 self._uploadurl = uploadurl 86 87 def _create_powerlog_dict(self, raw_measurement): 88 """Create powerlog dictionary from raw measurement data 89 Data format in go/power-dashboard-data. 90 91 Args: 92 raw_measurement: dictionary contains raw measurement data 93 94 Returns: 95 A dictionary of powerlog 96 """ 97 powerlog_dict = { 98 'format_version': 5, 99 'timestamp': self._start_ts, 100 'test': self._testname, 101 'dut': self._create_dut_info_dict(raw_measurement['data'].keys()), 102 'power': raw_measurement, 103 } 104 105 return powerlog_dict 106 107 def _create_dut_info_dict(self, power_rails): 108 """Create a dictionary that contain information of the DUT. 109 110 MUST be implemented in subclass. 111 112 Args: 113 power_rails: list of measured power rails 114 115 Returns: 116 DUT info dictionary 117 """ 118 raise NotImplementedError 119 120 def _save_json(self, powerlog_dict, resultsdir, filename='power_log.json'): 121 """Convert powerlog dict to human readable formatted JSON and 122 append to <resultsdir>/<filename>. 123 124 Args: 125 powerlog_dict: dictionary of power data 126 resultsdir: directory to save formatted JSON object 127 filename: filename to append to 128 """ 129 if not os.path.exists(resultsdir): 130 raise error.TestError('resultsdir %s does not exist.' % resultsdir) 131 filename = os.path.join(resultsdir, filename) 132 json_str = json.dumps(powerlog_dict, indent=4, separators=(',', ': '), 133 ensure_ascii=False) 134 json_str = utils.strip_non_printable(json_str) 135 with file(filename, 'a') as f: 136 f.write(json_str) 137 138 def _save_html(self, powerlog_dict, resultsdir, filename='power_log.html'): 139 """Convert powerlog dict to chart in HTML page and append to 140 <resultsdir>/<filename>. 141 142 Note that this results in multiple HTML objects in one file but Chrome 143 can render all of it in one page. 144 145 Args: 146 powerlog_dict: dictionary of power data 147 resultsdir: directory to save HTML page 148 filename: filename to append to 149 """ 150 # Create dict from type to sorted list of rail names. 151 rail_type = collections.defaultdict(list) 152 for r, t in powerlog_dict['power']['type'].iteritems(): 153 rail_type[t].append(r) 154 for t in rail_type: 155 rail_type[t] = sorted(rail_type[t]) 156 157 html_str = '' 158 row_indent = ' ' * 12 159 for t in rail_type: 160 data_str_list = [] 161 162 # Generate rail name data string. 163 header = ['time'] + rail_type[t] 164 header_str = row_indent + "['" + "', '".join(header) + "']" 165 data_str_list.append(header_str) 166 167 # Generate measurements data string. 168 for i in range(powerlog_dict['power']['sample_count']): 169 row = [str(i * powerlog_dict['power']['sample_duration'])] 170 for r in rail_type[t]: 171 row.append(str(powerlog_dict['power']['data'][r][i])) 172 row_str = row_indent + '[' + ', '.join(row) + ']' 173 data_str_list.append(row_str) 174 175 data_str = ',\n'.join(data_str_list) 176 unit = powerlog_dict['power']['unit'][rail_type[t][0]] 177 html_str += _HTML_CHART_STR.format(data=data_str, unit=unit, type=t) 178 179 if not os.path.exists(resultsdir): 180 raise error.TestError('resultsdir %s does not exist.' % resultsdir) 181 filename = os.path.join(resultsdir, filename) 182 with file(filename, 'a') as f: 183 f.write(html_str) 184 185 def _upload(self, powerlog_dict, uploadurl): 186 """Convert powerlog dict to minimal size JSON and upload to dashboard. 187 188 Args: 189 powerlog_dict: dictionary of power data 190 uploadurl: url to upload the power data 191 """ 192 json_str = json.dumps(powerlog_dict, ensure_ascii=False) 193 data_obj = {'data': utils.strip_non_printable(json_str)} 194 encoded = urllib.urlencode(data_obj) 195 req = urllib2.Request(uploadurl, encoded) 196 197 @retry.retry(urllib2.URLError, blacklist=[urllib2.HTTPError], 198 timeout_min=5.0, delay_sec=1, backoff=2) 199 def _do_upload(): 200 urllib2.urlopen(req) 201 202 _do_upload() 203 204 def _create_checkpoint_dict(self): 205 """Create dictionary for checkpoint. 206 207 @returns a dictionary of tags to their corresponding intervals in the 208 following format: 209 { 210 tag1: [(start1, end1), (start2, end2), ...], 211 tag2: [(start3, end3), (start4, end4), ...], 212 ... 213 } 214 """ 215 raise NotImplementedError 216 217 def _tag_with_checkpoint(self, power_dict): 218 """Tag power_dict with checkpoint data. 219 220 This function translates the checkpoint intervals into a list of tags 221 for each data point. 222 223 @param power_dict: a dictionary with power data; assume this dictionary 224 has attributes 'sample_count' and 'sample_duration'. 225 """ 226 checkpoint_dict = self._create_checkpoint_dict() 227 228 # Create list of check point event tuple. 229 # Tuple format: (checkpoint_name:str, event_time:float, is_start:bool) 230 checkpoint_event_list = [] 231 for name, intervals in checkpoint_dict.iteritems(): 232 for start, finish in intervals: 233 checkpoint_event_list.append((name, start, True)) 234 checkpoint_event_list.append((name, finish, False)) 235 236 checkpoint_event_list = sorted(checkpoint_event_list, 237 key=operator.itemgetter(1)) 238 239 # Add dummy check point at 1e9 seconds. 240 checkpoint_event_list.append(('dummy', 1e9, True)) 241 242 interval_set = set() 243 event_index = 0 244 checkpoint_list = [] 245 for i in range(power_dict['sample_count']): 246 curr_time = i * power_dict['sample_duration'] 247 248 # Process every checkpoint event until current point of time 249 while checkpoint_event_list[event_index][1] <= curr_time: 250 name, _, is_start = checkpoint_event_list[event_index] 251 if is_start: 252 interval_set.add(name) 253 else: 254 interval_set.discard(name) 255 event_index += 1 256 257 checkpoint_list.append(list(interval_set)) 258 power_dict['checkpoint'] = checkpoint_list 259 260 def _convert(self): 261 """Convert data from self._logger object to raw power measurement 262 dictionary. 263 264 MUST be implemented in subclass. 265 266 Return: 267 raw measurement dictionary 268 """ 269 raise NotImplementedError 270 271 def upload(self): 272 """Upload powerlog to dashboard and save data to results directory. 273 """ 274 raw_measurement = self._convert() 275 if raw_measurement is None: 276 return 277 278 powerlog_dict = self._create_powerlog_dict(raw_measurement) 279 if self._resultsdir is not None: 280 self._save_json(powerlog_dict, self._resultsdir) 281 self._save_html(powerlog_dict, self._resultsdir) 282 if self._uploadurl is not None: 283 self._upload(powerlog_dict, self._uploadurl) 284 285 286class ClientTestDashboard(BaseDashboard): 287 """Dashboard class for autotests that run on client side. 288 """ 289 290 def __init__(self, logger, testname, start_ts=None, resultsdir=None, 291 uploadurl=None, note=''): 292 """Create BaseDashboard objects. 293 294 Args: 295 logger: object that store the log. This will get convert to 296 dictionary by self._convert() 297 testname: name of current test 298 start_ts: timestamp of when test started in seconds since epoch 299 resultsdir: directory to save the power json 300 uploadurl: url to upload power data 301 note: note for current test run 302 """ 303 super(ClientTestDashboard, self).__init__(logger, testname, start_ts, 304 resultsdir, uploadurl) 305 self._note = note 306 307 308 def _create_dut_info_dict(self, power_rails): 309 """Create a dictionary that contain information of the DUT. 310 311 Args: 312 power_rails: list of measured power rails 313 314 Returns: 315 DUT info dictionary 316 """ 317 board = utils.get_board() 318 platform = utils.get_platform() 319 320 if not platform.startswith(board): 321 board += '_' + platform 322 323 if power_utils.has_hammer(): 324 board += '_hammer' 325 326 dut_info_dict = { 327 'board': board, 328 'version': { 329 'hw': utils.get_hardware_revision(), 330 'milestone': lsbrelease_utils.get_chromeos_release_milestone(), 331 'os': lsbrelease_utils.get_chromeos_release_version(), 332 'channel': lsbrelease_utils.get_chromeos_channel(), 333 'firmware': utils.get_firmware_version(), 334 'ec': utils.get_ec_version(), 335 'kernel': utils.get_kernel_version(), 336 }, 337 'sku': { 338 'cpu': utils.get_cpu_name(), 339 'memory_size': utils.get_mem_total_gb(), 340 'storage_size': utils.get_disk_size_gb(utils.get_root_device()), 341 'display_resolution': utils.get_screen_resolution(), 342 }, 343 'ina': { 344 'version': 0, 345 'ina': power_rails, 346 }, 347 'note': self._note, 348 } 349 350 if power_utils.has_battery(): 351 status = power_status.get_status() 352 if status.battery: 353 # Round the battery size to nearest tenth because it is 354 # fluctuated for platform without battery nominal voltage data. 355 dut_info_dict['sku']['battery_size'] = round( 356 status.battery[0].energy_full_design, 1) 357 dut_info_dict['sku']['battery_shutdown_percent'] = \ 358 power_utils.get_low_battery_shutdown_percent() 359 return dut_info_dict 360 361 362class MeasurementLoggerDashboard(ClientTestDashboard): 363 """Dashboard class for power_status.MeasurementLogger. 364 """ 365 366 def __init__(self, logger, testname, resultsdir=None, uploadurl=None, 367 note=''): 368 super(MeasurementLoggerDashboard, self).__init__(logger, testname, None, 369 resultsdir, uploadurl, 370 note) 371 self._unit = None 372 self._type = None 373 self._padded_domains = None 374 375 def _create_powerlog_dict(self, raw_measurement): 376 """Create powerlog dictionary from raw measurement data 377 Data format in go/power-dashboard-data. 378 379 Args: 380 raw_measurement: dictionary contains raw measurement data 381 382 Returns: 383 A dictionary of powerlog 384 """ 385 powerlog_dict = \ 386 super(MeasurementLoggerDashboard, self)._create_powerlog_dict( 387 raw_measurement) 388 389 # Using start time of the logger as the timestamp of powerlog dict. 390 powerlog_dict['timestamp'] = self._logger.times[0] 391 392 return powerlog_dict 393 394 def _create_padded_domains(self): 395 """Pad the domains name for dashboard to make the domain name better 396 sorted in alphabetical order""" 397 pass 398 399 def _create_checkpoint_dict(self): 400 """Create dictionary for checkpoint. 401 """ 402 start_time = self._logger.times[0] 403 return self._logger._checkpoint_logger.convert_relative(start_time) 404 405 def _convert(self): 406 """Convert data from power_status.MeasurementLogger object to raw 407 power measurement dictionary. 408 409 Return: 410 raw measurement dictionary or None if no readings 411 """ 412 if len(self._logger.readings) == 0: 413 logging.warn('No readings in logger ... ignoring') 414 return None 415 416 power_dict = collections.defaultdict(dict, { 417 'sample_count': len(self._logger.readings) - 1, 418 'sample_duration': 0, 419 'average': dict(), 420 'data': dict(), 421 }) 422 if power_dict['sample_count'] > 1: 423 total_duration = self._logger.times[-1] - self._logger.times[0] 424 power_dict['sample_duration'] = \ 425 1.0 * total_duration / power_dict['sample_count'] 426 427 self._create_padded_domains() 428 for i, domain_readings in enumerate(zip(*self._logger.readings)): 429 if self._padded_domains: 430 domain = self._padded_domains[i] 431 else: 432 domain = self._logger.domains[i] 433 # Remove first item because that is the log before the test begin. 434 power_dict['data'][domain] = domain_readings[1:] 435 power_dict['average'][domain] = \ 436 numpy.average(power_dict['data'][domain]) 437 if self._unit: 438 power_dict['unit'][domain] = self._unit 439 if self._type: 440 power_dict['type'][domain] = self._type 441 442 self._tag_with_checkpoint(power_dict) 443 return power_dict 444 445 446class PowerLoggerDashboard(MeasurementLoggerDashboard): 447 """Dashboard class for power_status.PowerLogger. 448 """ 449 450 def __init__(self, logger, testname, resultsdir=None, uploadurl=None, 451 note=''): 452 if uploadurl is None: 453 uploadurl = 'http://chrome-power.appspot.com/rapl' 454 super(PowerLoggerDashboard, self).__init__(logger, testname, resultsdir, 455 uploadurl, note) 456 self._unit = 'watt' 457 self._type = 'power' 458 459 460class TempLoggerDashboard(MeasurementLoggerDashboard): 461 """Dashboard class for power_status.PowerLogger. 462 """ 463 464 def __init__(self, logger, testname, resultsdir=None, uploadurl=None, 465 note=''): 466 if uploadurl is None: 467 uploadurl = 'http://chrome-power.appspot.com/rapl' 468 super(TempLoggerDashboard, self).__init__(logger, testname, resultsdir, 469 uploadurl, note) 470 self._unit = 'celsius' 471 self._type = 'temperature' 472 473 474class SimplePowerLoggerDashboard(ClientTestDashboard): 475 """Dashboard class for simple system power measurement taken and publishing 476 it to the dashboard. 477 """ 478 479 def __init__(self, duration_secs, power_watts, testname, start_ts, 480 resultsdir=None, uploadurl=None, note=''): 481 482 if uploadurl is None: 483 uploadurl = 'http://chrome-power.appspot.com/rapl' 484 super(SimplePowerLoggerDashboard, self).__init__( 485 None, testname, start_ts, resultsdir, uploadurl, note) 486 487 self._unit = 'watt' 488 self._type = 'power' 489 self._duration_secs = duration_secs 490 self._power_watts = power_watts 491 self._testname = testname 492 493 def _convert(self): 494 """Convert vbat to raw power measurement dictionary. 495 496 Return: 497 raw measurement dictionary 498 """ 499 power_dict = { 500 'sample_count': 1, 501 'sample_duration': self._duration_secs, 502 'average': {'system': self._power_watts}, 503 'data': {'system': [self._power_watts]}, 504 'unit': {'system': self._unit}, 505 'type': {'system': self._type}, 506 'checkpoint': [[self._testname]], 507 } 508 return power_dict 509 510 511class CPUStatsLoggerDashboard(MeasurementLoggerDashboard): 512 """Dashboard class for power_status.CPUStatsLogger. 513 """ 514 515 def __init__(self, logger, testname, resultsdir=None, uploadurl=None, 516 note=''): 517 if uploadurl is None: 518 uploadurl = 'http://chrome-power.appspot.com/rapl' 519 super(CPUStatsLoggerDashboard, self).__init__( 520 logger, testname, resultsdir, uploadurl, note) 521 522 @staticmethod 523 def _split_domain(domain): 524 """Return domain_type and domain_name for given domain. 525 526 Example: Split ................... to ........... and ....... 527 cpuidle_C1E-SKL cpuidle C1E-SKL 528 cpuidle_0_3_C0 cpuidle_0_3 C0 529 cpupkg_C0_C1 cpupkg C0_C1 530 cpufreq_0_3_1512000 cpufreq_0_3 1512000 531 532 Args: 533 domain: cpu stat domain name to split 534 535 Return: 536 tuple of domain_type and domain_name 537 """ 538 # Regex explanation 539 # .*? matches type non-greedily (cpuidle) 540 # (?:_\d+)* matches cpu part, ?: makes it not a group (_0_1_2_3) 541 # .* matches name greedily (C0_C1) 542 return re.match(r'(.*?(?:_\d+)*)_(.*)', domain).groups() 543 544 def _convert(self): 545 power_dict = super(CPUStatsLoggerDashboard, self)._convert() 546 remove_rail = [] 547 for rail in power_dict['data']: 548 if rail.startswith('wavg_cpu'): 549 power_dict['type'][rail] = 'cpufreq_wavg' 550 power_dict['unit'][rail] = 'kilohertz' 551 elif rail.startswith('wavg_gpu'): 552 power_dict['type'][rail] = 'gpufreq_wavg' 553 power_dict['unit'][rail] = 'megahertz' 554 else: 555 # Remove all aggregate stats, only 'non-c0' and 'non-C0_C1' now 556 if self._split_domain(rail)[1].startswith('non'): 557 remove_rail.append(rail) 558 continue 559 power_dict['type'][rail] = self._split_domain(rail)[0] 560 power_dict['unit'][rail] = 'percent' 561 for rail in remove_rail: 562 del power_dict['data'][rail] 563 del power_dict['average'][rail] 564 return power_dict 565 566 def _create_padded_domains(self): 567 """Padded number in the domain name with dot to make it sorted 568 alphabetically. 569 570 Example: 571 cpuidle_C1-SKL, cpuidle_C1E-SKL, cpuidle_C2-SKL, cpuidle_C10-SKL 572 will be changed to 573 cpuidle_C.1-SKL, cpuidle_C.1E-SKL, cpuidle_C.2-SKL, cpuidle_C10-SKL 574 which make it in alphabetically order. 575 """ 576 longest = collections.defaultdict(int) 577 searcher = re.compile(r'\d+') 578 number_strs = [] 579 splitted_domains = \ 580 [self._split_domain(domain) for domain in self._logger.domains] 581 for domain_type, domain_name in splitted_domains: 582 result = searcher.search(domain_name) 583 if not result: 584 number_strs.append('') 585 continue 586 number_str = result.group(0) 587 number_strs.append(number_str) 588 longest[domain_type] = max(longest[domain_type], len(number_str)) 589 590 self._padded_domains = [] 591 for i in range(len(self._logger.domains)): 592 if not number_strs[i]: 593 self._padded_domains.append(self._logger.domains[i]) 594 continue 595 596 domain_type, domain_name = splitted_domains[i] 597 formatter_component = '{:.>%ds}' % longest[domain_type] 598 599 # Change "cpuidle_C1E-SKL" to "cpuidle_C{:.>2s}E-SKL" 600 formatter_str = domain_type + '_' + \ 601 searcher.sub(formatter_component, domain_name, count=1) 602 603 # Run "cpuidle_C{:_>2s}E-SKL".format("1") to get "cpuidle_C.1E-SKL" 604 self._padded_domains.append(formatter_str.format(number_strs[i])) 605