# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import contextlib
import ctypes
import fcntl
import glob
import itertools
import json
import logging
import math
import numpy
import os
import re
import struct
import threading
import time

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import autotest_enum
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib.cros import retry
from autotest_lib.client.common_lib.utils import poll_for_condition_ex
from autotest_lib.client.cros import kernel_trace
from autotest_lib.client.cros.power import power_utils

BatteryDataReportType = autotest_enum.AutotestEnum('CHARGE', 'ENERGY')

# For devices whose full capacity is significantly lower than design full
# capacity, scale down their design full capacity.
BATTERY_DESIGN_FULL_SCALE = {'jinlon': 0.95,   # b/161307060
                             'berknip': 0.94,  # b/172625511
                             }
# battery data reported at 1e6 scale
BATTERY_DATA_SCALE = 1e6
# number of times to retry reading the battery in the case of bad data
BATTERY_RETRY_COUNT = 3
# default filename when saving CheckpointLogger data to file
CHECKPOINT_LOG_DEFAULT_FNAME = 'checkpoint_log.json'


class DevStat(object):
    """
    Device power status. This class implements generic status initialization
    and parsing routines.
    """

    def __init__(self, fields, path=None):
        self.fields = fields
        self.path = path


    def reset_fields(self):
        """
        Reset all class fields to None to mark their status as unknown.
        """
        for field in self.fields.iterkeys():
            setattr(self, field, None)


    def read_val(self, file_name, field_type):
        """Read a value from file.
        """
        try:
            path = file_name
            if not file_name.startswith('/'):
                path = os.path.join(self.path, file_name)
            f = open(path, 'r')
            out = f.readline().rstrip('\n')
            val = field_type(out)
            return val

        except:
            return field_type(0)


    def read_all_vals(self):
        """Read all values.
        """
        for field, prop in self.fields.iteritems():
            if prop[0]:
                val = self.read_val(prop[0], prop[1])
                setattr(self, field, val)

    def update(self):
        """Update the DevStat.

        Needs to be implemented in a subclass.
        """
        pass

class ThermalStatACPI(DevStat):
    """
    ACPI-based thermal status.

    Fields:
    (All temperatures are in millidegrees Celsius.)

    str   enabled:            Whether thermal zone is enabled
    int   temp:               Current temperature
    str   type:               Thermal zone type
    int   num_trip_points:    Number of thermal trip points that activate
                              cooling devices
    int   num_points_tripped: Temperature is above this many trip points
    str   trip_point_N_type:  Trip point #N's type
    int   trip_point_N_temp:  Trip point #N's temperature value
    int   cdevX_trip_point:   Trip point of cooling device #X (index)
    """

    MAX_TRIP_POINTS = 20

    thermal_fields = {
        'enabled':            ['enabled', str],
        'temp':               ['temp', int],
        'type':               ['type', str],
        'num_points_tripped': ['', '']
        }
    path = '/sys/class/thermal/thermal_zone*'

    def __init__(self, path=None):
        # Browse the thermal folder for trip point fields.
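        # Illustrative sysfs names this loop picks up (not exhaustive):
        # trip_point_0_type, trip_point_0_temp, cdev0_trip_point.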
        self.num_trip_points = 0

        thermal_fields = glob.glob(path + '/*')
        for file in thermal_fields:
            field = file[len(path + '/'):]
            if field.find('trip_point') != -1:
                if field.find('temp') != -1:
                    field_type = int
                else:
                    field_type = str
                self.thermal_fields[field] = [field, field_type]

                # Count the number of trip points.
                if field.find('_type') != -1:
                    self.num_trip_points += 1

        super(ThermalStatACPI, self).__init__(self.thermal_fields, path)
        self.update()

    def update(self):
        if not os.path.exists(self.path):
            return

        self.read_all_vals()
        self.num_points_tripped = 0

        for field in self.thermal_fields:
            if field.find('trip_point_') != -1 and field.find('_temp') != -1 \
                    and self.temp > self.read_val(field, int):
                self.num_points_tripped += 1
                logging.info('Temperature trip point #%s tripped.', \
                             field[len('trip_point_'):field.rfind('_temp')])


class ThermalStatHwmon(DevStat):
    """
    hwmon-based thermal status.

    Fields:
    int <tname>_temp<num>_input: Current temperature in millidegrees Celsius
      where:
          <tname> : name of hwmon device in sysfs
          <num>   : number of temp as some hwmon devices have multiple
    """
    path = '/sys/class/hwmon'

    thermal_fields = {}
    def __init__(self, rootpath=None):
        if not rootpath:
            rootpath = self.path
        for subpath1 in glob.glob('%s/hwmon*' % rootpath):
            for subpath2 in ['', 'device/']:
                gpaths = glob.glob("%s/%stemp*_input" % (subpath1, subpath2))
                for gpath in gpaths:
                    bname = os.path.basename(gpath)
                    field_path = os.path.join(subpath1, subpath2, bname)

                    tname_path = os.path.join(os.path.dirname(gpath), "name")
                    tname = utils.read_one_line(tname_path)

                    field_key = "%s_%s" % (tname, bname)
                    self.thermal_fields[field_key] = [field_path, int]

        super(ThermalStatHwmon, self).__init__(self.thermal_fields, rootpath)
        self.update()

    def update(self):
        if not os.path.exists(self.path):
            return

        self.read_all_vals()

    def read_val(self, file_name, field_type):
        try:
            path = os.path.join(self.path, file_name)
            f = open(path, 'r')
            out = f.readline()
            return field_type(out)
        except:
            return field_type(0)


class ThermalStat(object):
    """Helper class to instantiate various thermal devices."""
    def __init__(self):
        self._thermals = []
        self.min_temp = 999999999
        self.max_temp = -999999999

        thermal_stat_types = [(ThermalStatHwmon.path, ThermalStatHwmon),
                              (ThermalStatACPI.path, ThermalStatACPI)]
        for thermal_glob_path, thermal_type in thermal_stat_types:
            try:
                thermal_path = glob.glob(thermal_glob_path)[0]
                logging.debug('Using %s for thermal info.', thermal_path)
                self._thermals.append(thermal_type(thermal_path))
            except:
                logging.debug('Could not find thermal path %s, skipping.',
                              thermal_glob_path)


    def get_temps(self):
        """Get temperature readings.

        Returns:
            string of temperature readings.
        """
        temp_str = ''
        for thermal in self._thermals:
            thermal.update()
            for kname in thermal.fields:
                if kname == 'temp' or kname.endswith('_input'):
                    val = getattr(thermal, kname)
                    temp_str += '%s:%d ' % (kname, val)
                    if val > self.max_temp:
                        self.max_temp = val
                    if val < self.min_temp:
                        self.min_temp = val

        return temp_str


class BatteryStat(DevStat):
    """
    Battery status.
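    Values are read from the battery's power_supply sysfs directory and
    normalized from micro-units (see BATTERY_DATA_SCALE) to the units below.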

    Fields:

    float charge_full:        Last full capacity reached [Ah]
    float charge_full_design: Full capacity by design [Ah]
    float charge_now:         Remaining charge [Ah]
    float current_now:        Battery discharge rate [A]
    float energy:             Current battery charge [Wh]
    float energy_full:        Last full capacity reached [Wh]
    float energy_full_design: Full capacity by design [Wh]
    float energy_rate:        Battery discharge rate [W]
    float power_now:          Battery discharge rate [W]
    float remaining_time:     Remaining discharging time [h]
    float voltage_min_design: Minimum voltage by design [V]
    float voltage_max_design: Maximum voltage by design [V]
    float voltage_now:        Voltage now [V]
    """

    battery_fields = {
        'status':             ['status', str],
        'charge_full':        ['charge_full', float],
        'charge_full_design': ['charge_full_design', float],
        'charge_now':         ['charge_now', float],
        'current_now':        ['current_now', float],
        'voltage_min_design': ['voltage_min_design', float],
        'voltage_max_design': ['voltage_max_design', float],
        'voltage_now':        ['voltage_now', float],
        'energy':             ['energy_now', float],
        'energy_full':        ['energy_full', float],
        'energy_full_design': ['energy_full_design', float],
        'power_now':          ['power_now', float],
        'present':            ['present', int],
        'energy_rate':        ['', ''],
        'remaining_time':     ['', '']
        }

    def __init__(self, path=None):
        super(BatteryStat, self).__init__(self.battery_fields, path)
        self.update()


    def update(self):
        for _ in xrange(BATTERY_RETRY_COUNT):
            try:
                self._read_battery()
                return
            except error.TestError as e:
                logging.warn(e)
                for field, prop in self.battery_fields.iteritems():
                    logging.warn(field + ': ' + repr(getattr(self, field)))
                continue
        raise error.TestError('Failed to read battery state')


    def _read_battery(self):
        self.read_all_vals()

        if self.charge_full == 0 and self.energy_full != 0:
            battery_type = BatteryDataReportType.ENERGY
        else:
            battery_type = BatteryDataReportType.CHARGE

        if self.voltage_min_design != 0:
            voltage_nominal = self.voltage_min_design
        else:
            voltage_nominal = self.voltage_now

        if voltage_nominal == 0:
            raise error.TestError('Failed to determine battery voltage')

        battery_design_full_scale = 1
        model = utils.get_platform()
        if model in BATTERY_DESIGN_FULL_SCALE:
            battery_design_full_scale = BATTERY_DESIGN_FULL_SCALE.get(model)
            logging.info(
                    'Apply %f scale to design full battery capacity for model '
                    '%s', battery_design_full_scale, model)

        # Since charge data is present, calculate parameters based upon
        # reported charge data.
        if battery_type == BatteryDataReportType.CHARGE:
            self.charge_full_design *= battery_design_full_scale

            self.charge_full = self.charge_full / BATTERY_DATA_SCALE
            self.charge_full_design = self.charge_full_design / \
                                      BATTERY_DATA_SCALE
            self.charge_now = self.charge_now / BATTERY_DATA_SCALE

            self.current_now = math.fabs(self.current_now) / \
                               BATTERY_DATA_SCALE

            self.energy = voltage_nominal * \
                          self.charge_now / \
                          BATTERY_DATA_SCALE
            self.energy_full = voltage_nominal * \
                               self.charge_full / \
                               BATTERY_DATA_SCALE
            self.energy_full_design = voltage_nominal * \
                                      self.charge_full_design / \
                                      BATTERY_DATA_SCALE

        # Charge data not present, so calculate parameters based upon
        # reported energy data.
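        # In this branch, charge [Ah] is derived as energy [Wh] / nominal
        # voltage [V], mirroring the charge-based branch above.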
        elif battery_type == BatteryDataReportType.ENERGY:
            self.energy_full_design *= battery_design_full_scale

            self.charge_full = self.energy_full / voltage_nominal
            self.charge_full_design = self.energy_full_design / \
                                      voltage_nominal
            self.charge_now = self.energy / voltage_nominal

            # TODO(shawnn): check if power_now can really be reported
            # as negative, in the same way current_now can
            self.current_now = math.fabs(self.power_now) / \
                               voltage_nominal

            self.energy = self.energy / BATTERY_DATA_SCALE
            self.energy_full = self.energy_full / BATTERY_DATA_SCALE
            self.energy_full_design = self.energy_full_design / \
                                      BATTERY_DATA_SCALE

        self.voltage_min_design = self.voltage_min_design / \
                                  BATTERY_DATA_SCALE
        self.voltage_max_design = self.voltage_max_design / \
                                  BATTERY_DATA_SCALE
        self.voltage_now = self.voltage_now / \
                           BATTERY_DATA_SCALE
        voltage_nominal = voltage_nominal / \
                          BATTERY_DATA_SCALE

        self.energy_rate = self.voltage_now * self.current_now

        self.remaining_time = 0
        if self.current_now and self.energy_rate:
            self.remaining_time = self.energy / self.energy_rate

        if self.charge_full > (self.charge_full_design * 1.5):
            raise error.TestError('Unreasonable charge_full value')
        if self.charge_now > (self.charge_full_design * 1.5):
            raise error.TestError('Unreasonable charge_now value')


class LineStatDummy(DevStat):
    """
    Dummy line stat for devices which don't provide power_supply related sysfs
    interface.
    """
    def __init__(self):
        self.online = True


    def update(self):
        pass

class LineStat(DevStat):
    """
    Power line status.

    Fields:

    bool online: Line power online
    """

    linepower_fields = {
        'is_online': ['online', int],
        'status':    ['status', str]
        }


    def __init__(self, path=None):
        super(LineStat, self).__init__(self.linepower_fields, path)
        logging.debug("line path: %s", path)
        self.update()


    def update(self):
        self.read_all_vals()
        self.online = self.is_online == 1


class SysStat(object):
    """
    System power status for a given host.

    Fields:

    battery:   A BatteryStat object.
    linepower: A list of LineStat objects.
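    thermal:   A ThermalStat object.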
    """
    psu_types = ['Mains', 'USB', 'USB_ACA', 'USB_C', 'USB_CDP', 'USB_DCP',
                 'USB_PD', 'USB_PD_DRP', 'Unknown']

    def __init__(self):
        power_supply_path = '/sys/class/power_supply/*'
        self.battery = None
        self.linepower = []
        self.thermal = None
        self.battery_path = None
        self.linepower_path = []

        power_supplies = glob.glob(power_supply_path)
        for path in power_supplies:
            type_path = os.path.join(path, 'type')
            if not os.path.exists(type_path):
                continue
            power_type = utils.read_one_line(type_path)
            if power_type == 'Battery':
                scope_path = os.path.join(path, 'scope')
                if (os.path.exists(scope_path) and
                        utils.read_one_line(scope_path) == 'Device'):
                    continue
                self.battery_path = path
            elif power_type in self.psu_types:
                self.linepower_path.append(path)

        if not self.battery_path or not self.linepower_path:
            logging.warning("System does not provide power sysfs interface")

        self.thermal = ThermalStat()
        if self.battery_path:
            self.sys_low_batt_p = float(utils.system_output(
                    'check_powerd_config --low_battery_shutdown_percent',
                    ignore_status=True) or 4.0)


    def refresh(self):
        """
        Initialize device power status objects.
        """
        self.linepower = []

        if self.battery_path:
            self.battery = BatteryStat(self.battery_path)

        for path in self.linepower_path:
            self.linepower.append(LineStat(path))
        if not self.linepower:
            self.linepower = [LineStatDummy()]

        temp_str = self.thermal.get_temps()
        if temp_str:
            logging.info('Temperature reading: %s', temp_str)
        else:
            logging.error('Could not read temperature, skipping.')


    def on_ac(self):
        """
        Returns True if the device is currently running from AC power.
        """
        on_ac = False
        for linepower in self.linepower:
            on_ac |= linepower.online

        # Butterfly can incorrectly report AC online for some time after
        # unplug. Check battery discharge state to confirm.
        if utils.get_board() == 'butterfly':
            on_ac &= (not self.battery_discharging())
        return on_ac


    def battery_charging(self):
        """
        Returns True if the battery is currently charging, False otherwise.
        """
        for linepower in self.linepower:
            if linepower.status == 'Charging':
                return True

        if not self.battery_path:
            logging.warn('Unable to determine battery charge status')
            return False

        return self.battery.status.rstrip() == 'Charging'


    def battery_discharging(self):
        """
        Returns True if the battery is currently discharging, False otherwise.
        """
        if not self.battery_path:
            logging.warn('Unable to determine battery discharge status')
            return False

        return self.battery.status.rstrip() == 'Discharging'

    def battery_full(self):
        """
        Returns True if the battery is currently full, False otherwise.
        """
        if not self.battery_path:
            logging.warn('Unable to determine battery fullness status')
            return False

        return self.battery.status.rstrip() == 'Full'


    def battery_discharge_ok_on_ac(self):
        """Returns True if battery is ok to discharge on AC presently.

        Some devices cycle between charge & discharge above a certain SoC.
        If the battery is charging and SoC > 95%, we can safely assume that
        discharging on AC is expected.
        """
        return self.battery_charging() and (self.percent_current_charge() > 95)


    def percent_current_charge(self):
        """Returns the current charge compared to design capacity, in percent.
        """
        return self.battery.charge_now * 100 / \
               self.battery.charge_full_design


    def percent_display_charge(self):
        """Returns the current display charge in percent.
        """
        keyvals = parse_power_supply_info()
        return float(keyvals['Battery']['display percentage'])


    def assert_battery_state(self, percent_initial_charge_min):
        """Check that the initial power configuration state is battery.

        Args:
            percent_initial_charge_min: float between 0 -> 1.00 of
                percentage of battery that must be remaining.
                None|0|False means check not performed.

        Raises:
            TestError: if one of the battery assertions fails
        """
        if self.on_ac():
            raise error.TestError(
                    'Running on AC power. Please remove AC power cable.')

        percent_initial_charge = self.percent_current_charge()

        if percent_initial_charge_min and percent_initial_charge < \
                percent_initial_charge_min:
            raise error.TestError('Initial charge (%f) less than min (%f)'
                                  % (percent_initial_charge,
                                     percent_initial_charge_min))

    def assert_battery_in_range(self, min_level, max_level):
        """Raise an error.TestFail if the battery level is not in range."""
        current_percent = self.percent_display_charge()
        if not (min_level <= current_percent <= max_level):
            raise error.TestFail('battery must be in range [{}, {}]'.format(
                    min_level, max_level))

    def is_low_battery(self, low_batt_margin_p=2.0):
        """Returns True if the battery's current charge is low.

        @param low_batt_margin_p: percentage of battery that would be added to
                                  the system low battery level.
        """
        return (self.battery_discharging() and
                self.percent_current_charge() < self.sys_low_batt_p +
                low_batt_margin_p)


def get_status():
    """
    Return a new power status object (SysStat). A new power status snapshot
    for a given host can be obtained by either calling this routine again and
    constructing a new SysStat object, or by using the refresh method of the
    SysStat object.
    """
    status = SysStat()
    status.refresh()
    return status


def poll_for_charging_behavior(behavior, timeout):
    """
    Wait up to |timeout| seconds for the charging behavior to become |behavior|.

    @param behavior: One of 'ON_AC_AND_CHARGING',
                            'ON_AC_AND_NOT_CHARGING',
                            'NOT_ON_AC_AND_NOT_CHARGING'.
    @param timeout: in seconds.

    @raises: error.TestFail if the behavior does not match in time, or another
             exception if something else fails along the way.
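
    Example (illustrative):
        poll_for_charging_behavior('ON_AC_AND_CHARGING', timeout=60)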
    """
    ps = get_status()

    def _verify_on_AC_and_charging():
        ps.refresh()
        if not ps.on_ac():
            raise error.TestFail('Device is not on AC, but should be')
        if not ps.battery_charging():
            raise error.TestFail('Device is not charging, but should be')
        return True

    def _verify_on_AC_and_not_charging():
        ps.refresh()
        if not ps.on_ac():
            raise error.TestFail('Device is not on AC, but should be')
        if ps.battery_charging():
            raise error.TestFail('Device is charging, but should not be')
        return True

    def _verify_not_on_AC_and_not_charging():
        ps.refresh()
        if ps.on_ac():
            raise error.TestFail('Device is on AC, but should not be')
        return True

    poll_functions = {
        'ON_AC_AND_CHARGING'        : _verify_on_AC_and_charging,
        'ON_AC_AND_NOT_CHARGING'    : _verify_on_AC_and_not_charging,
        'NOT_ON_AC_AND_NOT_CHARGING': _verify_not_on_AC_and_not_charging,
    }
    poll_for_condition_ex(poll_functions[behavior],
                          timeout=timeout,
                          sleep_interval=1)

class AbstractStats(object):
    """
    Common superclass for measurements of percentages per state over time.

    Public Attributes:
        incremental: If False, stats returned are from a single
            _read_stats. Otherwise, stats are from the difference between
            the current and last refresh.
    """

    @staticmethod
    def to_percent(stats):
        """
        Turns a dict with absolute time values into a dict with percentages.
        """
        total = sum(stats.itervalues())
        if total == 0:
            return {k: 0 for k in stats}
        return dict((k, v * 100.0 / total) for (k, v) in stats.iteritems())


    @staticmethod
    def do_diff(new, old):
        """
        Returns a dict with value deltas from two dicts with matching keys.
        """
        return dict((k, new[k] - old.get(k, 0)) for k in new.iterkeys())


    @staticmethod
    def format_results_percent(results, name, percent_stats):
        """
        Formats autotest result keys to format:
            percent_<name>_<key>_time
        """
        for key in percent_stats:
            results['percent_%s_%s_time' % (name, key)] = percent_stats[key]


    @staticmethod
    def format_results_wavg(results, name, wavg):
        """
        Adds an autotest result key with format: wavg_<name>
        """
        if wavg is not None:
            results['wavg_%s' % (name)] = wavg


    def __init__(self, name, incremental=True):
        self.name = name
        self.incremental = incremental
        self._stats = self._read_stats()
        self._first_stats = self._stats.copy()

    def refresh(self):
        """
        Returns dict mapping state names to percentage of time spent in them.
        """
        raw_stats = result = self._read_stats()
        if self.incremental:
            result = self.do_diff(result, self._stats)
        self._stats = raw_stats
        return self.to_percent(result)


    def _automatic_weighted_average(self):
        """
        Turns a dict with absolute times (or percentages) into a weighted
        average value.
        """
        stats = self._stats
        if self.incremental:
            stats = self.do_diff(stats, self._first_stats)

        total = sum(stats.itervalues())
        if total == 0:
            return None

        return sum(float(k) * v / total for k, v in stats.iteritems())

    def _supports_automatic_weighted_average(self):
        """
        Override!

        Returns True if stats collected can be automatically converted from
        percent distribution to weighted average. False otherwise.
        """
        return False


    def weighted_average(self):
        """
        Return weighted average calculated using the automated average method
        (if supported) or using a custom method defined by the stat.
        """
        if self._supports_automatic_weighted_average():
            return self._automatic_weighted_average()

        return self._weighted_avg_fn()


    def _weighted_avg_fn(self):
        """
        Override! Custom weighted average function.

        Returns weighted average as a single floating point value.
        """
        return None


    def _read_stats(self):
        """
        Override! Reads the raw data values that shall be measured into a dict.
        """
        raise NotImplementedError('Override _read_stats in the subclass!')


CPU_BASE_PATH = '/sys/devices/system/cpu/'

def count_all_cpus():
    """
    Return count of cpus on system.
    """
    path = '%s/cpu[0-9]*' % CPU_BASE_PATH
    return len(glob.glob(path))

def get_online_cpus():
    """
    Return frozenset of integer cpu numbers that are online.
    """
    return frozenset(read_cpu_set('/sys/devices/system/cpu/online'))

def get_cpus_filepaths_for_suffix(cpus, suffix):
    """
    For each cpu in |cpus| check whether |CPU_BASE_PATH|/cpu%d/|suffix| exists.
    Return tuple of two lists t:
        t[0]: all cpu ids where the condition above holds
        t[1]: all full paths where condition above holds.
    """
    available_cpus = []
    available_paths = []
    for c in cpus:
        c_file_path = os.path.join(CPU_BASE_PATH, 'cpu%d' % c, suffix)
        if os.path.exists(c_file_path):
            available_cpus.append(c)
            available_paths.append(c_file_path)
    return (available_cpus, available_paths)


class CPUFreqStatsPState(AbstractStats):
    """
    CPU Frequency statistics for intel_pstate
    """
    MSR_PLATFORM_INFO = 0xce
    MSR_IA32_MPERF = 0xe7
    MSR_IA32_APERF = 0xe8

    def __init__(self, cpus=None):
        name = 'cpufreq'
        if not cpus:
            cpus = get_online_cpus()
        self._cpus = cpus

        if len(cpus) and len(cpus) < count_all_cpus():
            name = '%s_%s' % (name, '_'.join([str(c) for c in cpus]))

        self._initial_perf = None
        self._current_perf = None

        # max_freq is supposed to be the same for all CPUs and remain
        # constant throughout. So, we set the entry only once.
        # Note that this is max non-turbo frequency, some CPU can run at
        # higher turbo frequency in some condition.
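        # Bits 15:8 of MSR_PLATFORM_INFO hold the maximum non-turbo ratio;
        # multiplying by the bus clock in kHz yields the max frequency in kHz.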
        platform_info = utils.rdmsr(self.MSR_PLATFORM_INFO)
        mul = platform_info >> 8 & 0xff
        bclk = utils.get_intel_bclk_khz()
        self._max_freq = mul * bclk

        super(CPUFreqStatsPState, self).__init__(name)

    def _read_stats(self):
        aperf = 0
        mperf = 0

        for cpu in self._cpus:
            aperf += utils.rdmsr(self.MSR_IA32_APERF, cpu)
            mperf += utils.rdmsr(self.MSR_IA32_MPERF, cpu)

        if not self._initial_perf:
            self._initial_perf = (aperf, mperf)

        self._current_perf = (aperf, mperf)

        return {}

    def _weighted_avg_fn(self):
        if (self._current_perf
                and self._current_perf[1] != self._initial_perf[1]):
            # Avg freq = max_freq * aperf_delta / mperf_delta
            return self._max_freq * \
                float(self._current_perf[0] - self._initial_perf[0]) / \
                (self._current_perf[1] - self._initial_perf[1])
        return 1.0


class CPUFreqStats(AbstractStats):
    """
    CPU Frequency statistics
    """

    def __init__(self, cpus=None):
        name = 'cpufreq'
        stats_suffix = 'cpufreq/stats/time_in_state'
        key_suffix = 'cpufreq/scaling_available_frequencies'
        if not cpus:
            cpus = get_online_cpus()
        _, self._file_paths = get_cpus_filepaths_for_suffix(cpus, stats_suffix)

        if len(cpus) and len(cpus) < count_all_cpus():
            name = '%s_%s' % (name, '_'.join([str(c) for c in cpus]))
        self._cpus = cpus
        self._available_freqs = set()

        if not self._file_paths:
            logging.debug('time_in_state file not found')

        # assumes cpufreq driver for CPU0 is the same as the others.
        _, cpufreq_key_paths = get_cpus_filepaths_for_suffix(cpus, key_suffix)
        for path in cpufreq_key_paths:
            self._available_freqs |= set(
                    int(x) for x in utils.read_file(path).split())

        super(CPUFreqStats, self).__init__(name)

    def _read_stats(self):
        stats = dict((k, 0) for k in self._available_freqs)
        for path in self._file_paths:
            if not os.path.exists(path):
                logging.debug('%s is not found', path)
                continue

            data = utils.read_file(path)
            for line in data.splitlines():
                pair = line.split()
                freq = int(pair[0])
                timeunits = int(pair[1])
                if freq in stats:
                    stats[freq] += timeunits
                else:
                    stats[freq] = timeunits
        return stats

    def _supports_automatic_weighted_average(self):
        return True


class CPUCStateStats(AbstractStats):
    """
    Base class for C-state residency statistics
    """
    def __init__(self, name, non_c0_stat=''):
        self._non_c0_stat = non_c0_stat
        super(CPUCStateStats, self).__init__(name)


    def to_percent(self, stats):
        """
        Turns a dict with absolute time values into a dict with percentages.
        Ignore the |non_c0_stat_name|, which is an aggregate stat, in the total
        count.
        """
        total = sum(v for k, v in stats.iteritems() if k != self._non_c0_stat)
        if total == 0:
            return {k: 0 for k in stats}
        return {k: v * 100.0 / total for k, v in stats.iteritems()}


class CPUIdleStats(CPUCStateStats):
    """
    CPU Idle statistics (refresh() will not work with incremental=False!)
    """
    # TODO (snanda): Handle changes in number of c-states due to events such
    # as ac <-> battery transitions.
    # TODO (snanda): Handle non-S0 states. Time spent in suspend states is
    # currently not factored out.
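    # Residencies come from /sys/devices/system/cpu/cpu*/cpuidle/state*/time;
    # C0 time is inferred in _read_stats() by subtracting all other state
    # residencies from wall-clock time.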
    def __init__(self, cpus=None):
        name = 'cpuidle'
        cpuidle_suffix = 'cpuidle'
        if not cpus:
            cpus = get_online_cpus()
        cpus, self._cpus = get_cpus_filepaths_for_suffix(cpus, cpuidle_suffix)
        if len(cpus) and len(cpus) < count_all_cpus():
            name = '%s_%s' % (name, '_'.join([str(c) for c in cpus]))
        super(CPUIdleStats, self).__init__(name=name, non_c0_stat='non-C0')


    def _read_stats(self):
        cpuidle_stats = collections.defaultdict(int)
        epoch_usecs = int(time.time() * 1000 * 1000)
        for cpu in self._cpus:
            state_path = os.path.join(cpu, 'state*')
            states = glob.glob(state_path)
            cpuidle_stats['C0'] += epoch_usecs

            for state in states:
                name = utils.read_one_line(os.path.join(state, 'name'))
                latency = utils.read_one_line(os.path.join(state, 'latency'))

                if not int(latency) and name == 'POLL':
                    # C0 state. Kernel stats aren't right, so calculate by
                    # subtracting all other states from total time (using epoch
                    # timer since we calculate differences in the end anyway).
                    # NOTE: Only x86 lists C0 under cpuidle, ARM does not.
                    continue

                usecs = int(utils.read_one_line(os.path.join(state, 'time')))
                cpuidle_stats['C0'] -= usecs

                if name == '<null>':
                    # Kernel race condition that can happen while a new C-state
                    # gets added (e.g. AC->battery). Don't know the 'name' of
                    # the state yet, but its 'time' would be 0 anyway.
                    logging.warning('Read name: <null>, time: %d from %s...'
                                    'skipping.', usecs, state)
                    continue

                cpuidle_stats[name] += usecs
                cpuidle_stats['non-C0'] += usecs

        return cpuidle_stats


class CPUPackageStats(CPUCStateStats):
    """
    Package C-state residency statistics for modern Intel CPUs.
    """

    ATOM         = {'C2': 0x3F8, 'C4': 0x3F9, 'C6': 0x3FA}
    NEHALEM      = {'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA}
    SANDY_BRIDGE = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA}
    SILVERMONT   = {'C6': 0x3FA}
    GOLDMONT     = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C10': 0x632}
    BROADWELL    = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA,
                    'C8': 0x630, 'C9': 0x631, 'C10': 0x632}

    def __init__(self):
        def _get_platform_states():
            """
            Helper to decide what set of microarchitecture-specific MSRs to use.

            Returns: dict that maps C-state name to MSR address, or None.
            """
            cpu_uarch = utils.get_intel_cpu_uarch()

            return {
                # model groups pulled from Intel SDM, volume 4
                # Group same package cstate using the older uarch name
                #
                # TODO(harry.pan): As the keys represent microarchitecture
                # names, we could consider to rename the PC state groups
                # to avoid ambiguity.
                'Airmont':      self.SILVERMONT,
                'Atom':         self.ATOM,
                'Broadwell':    self.BROADWELL,
                'Comet Lake':   self.BROADWELL,
                'Goldmont':     self.GOLDMONT,
                'Haswell':      self.SANDY_BRIDGE,
                'Ice Lake':     self.BROADWELL,
                'Ivy Bridge':   self.SANDY_BRIDGE,
                'Ivy Bridge-E': self.SANDY_BRIDGE,
                'Kaby Lake':    self.BROADWELL,
                'Nehalem':      self.NEHALEM,
                'Sandy Bridge': self.SANDY_BRIDGE,
                'Silvermont':   self.SILVERMONT,
                'Skylake':      self.BROADWELL,
                'Tiger Lake':   self.BROADWELL,
                'Tremont':      self.GOLDMONT,
                'Westmere':     self.NEHALEM,
            }.get(cpu_uarch, None)

        self._platform_states = _get_platform_states()
        super(CPUPackageStats, self).__init__(name='cpupkg',
                                              non_c0_stat='non-C0_C1')


    def _read_stats(self):
        packages = set()
        template = '/sys/devices/system/cpu/cpu%s/topology/physical_package_id'
        if not self._platform_states:
            return {}
        stats = dict((state, 0) for state in self._platform_states)
        stats['C0_C1'] = 0
        stats['non-C0_C1'] = 0

        for cpu in os.listdir('/dev/cpu'):
            if not os.path.exists(template % cpu):
                continue
            package = utils.read_one_line(template % cpu)
            if package in packages:
                continue
            packages.add(package)

            stats['C0_C1'] += utils.rdmsr(0x10, cpu)  # TSC
            for (state, msr) in self._platform_states.iteritems():
                ticks = utils.rdmsr(msr, cpu)
                stats[state] += ticks
                stats['non-C0_C1'] += ticks
                stats['C0_C1'] -= ticks

        return stats


class DevFreqStats(AbstractStats):
    """
    Devfreq device frequency stats.
    """

    _DIR = '/sys/class/devfreq'


    def __init__(self, path):
        """Constructs DevFreqStats Object that tracks frequency stats
        for the path of the given Devfreq device.

        The frequencies for devfreq devices are listed in Hz.

        Args:
            path: the path to the devfreq device

        Example:
            /sys/class/devfreq/dmc
        """
        self._path = os.path.join(self._DIR, path)
        if not os.path.exists(self._path):
            raise error.TestError(
                    'DevFreqStats: devfreq device does not exist: %s' %
                    self._path)

        fname = os.path.join(self._path, 'available_frequencies')
        af = utils.read_one_line(fname).strip()
        self._available_freqs = sorted(af.split(), key=int)

        super(DevFreqStats, self).__init__(path)

    def _read_stats(self):
        stats = dict((freq, 0) for freq in self._available_freqs)
        fname = os.path.join(self._path, 'trans_stat')

        with open(fname) as fd:
            # The lines that contain the time in each frequency start on the
            # 3rd line, so skip the first 2 lines. The last line contains the
            # number of transitions, so skip that line too.
            # The time in each frequency is at the end of the line.
            freq_pattern = re.compile(r'\d+(?=:)')
            for line in fd.readlines()[2:-1]:
                freq = freq_pattern.search(line)
                if freq and freq.group() in self._available_freqs:
                    stats[freq.group()] = int(line.strip().split()[-1])

        return stats


class GPUFreqStats(AbstractStats):
    """GPU Frequency statistics class.
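
    Tracks Mali (on pre-devfreq kernels) and Intel i915 GPU clock residency by
    parsing kernel trace events.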

    TODO(tbroch): add stats for other GPUs
    """

    _MALI_DEV = '/sys/class/misc/mali0/device'
    _MALI_EVENTS = ['mali_dvfs:mali_dvfs_set_clock']
    _MALI_TRACE_CLK_RE = \
        r'kworker.* (\d+\.\d+): mali_dvfs_set_clock: frequency=(\d+)\d{6}'

    _I915_ROOT = '/sys/kernel/debug/dri/0'
    _I915_EVENTS = ['i915:intel_gpu_freq_change']
    _I915_CLKS_FILES = ['i915_cur_delayinfo', 'i915_frequency_info']
    _I915_TRACE_CLK_RE = \
        r'kworker.* (\d+\.\d+): intel_gpu_freq_change: new_freq=(\d+)'
    _I915_CUR_FREQ_RE = r'CAGF:\s+(\d+)MHz'
    _I915_MIN_FREQ_RE = r'Lowest \(RPN\) frequency:\s+(\d+)MHz'
    _I915_MAX_FREQ_RE = r'Max non-overclocked \(RP0\) frequency:\s+(\d+)MHz'
    # There are 6 frequency steps per 100 MHz
    _I915_FREQ_STEPS = [0, 17, 33, 50, 67, 83]

    _gpu_type = None


    def _get_mali_freqs(self):
        """Get mali clocks based on kernel version.

        For 3.8-3.18:
            # cat /sys/class/misc/mali0/device/clock
            100000000
            # cat /sys/class/misc/mali0/device/available_frequencies
            100000000
            160000000
            266000000
            350000000
            400000000
            450000000
            533000000
            533000000

        For 4.4+:
            Tracked in DevFreqStats

        Returns:
            cur_mhz: string of current GPU clock in mhz
        """
        cur_mhz = None
        fqs = []

        fname = os.path.join(self._MALI_DEV, 'clock')
        if os.path.exists(fname):
            cur_mhz = str(int(int(utils.read_one_line(fname).strip()) / 1e6))
            fname = os.path.join(self._MALI_DEV, 'available_frequencies')
            with open(fname) as fd:
                for ln in fd.readlines():
                    freq = int(int(ln.strip()) / 1e6)
                    fqs.append(str(freq))
                fqs.sort()

        self._freqs = fqs
        return cur_mhz


    def __init__(self, incremental=False):

        min_mhz = None
        max_mhz = None
        cur_mhz = None
        events = None
        i915_path = None
        self._freqs = []
        self._prev_sample = None
        self._trace = None

        if os.path.exists(self._MALI_DEV) and \
           not os.path.exists(os.path.join(self._MALI_DEV, "devfreq")):
            self._set_gpu_type('mali')
        else:
            for file_name in self._I915_CLKS_FILES:
                full_path = os.path.join(self._I915_ROOT, file_name)
                if os.path.exists(full_path):
                    self._set_gpu_type('i915')
                    i915_path = full_path
                    break
            else:
                # We either don't know how to track GPU stats (yet) or the
                # stats are tracked in DevFreqStats.
                self._set_gpu_type(None)

        logging.debug("gpu_type is %s", self._gpu_type)

        if self._gpu_type == 'mali':
            events = self._MALI_EVENTS
            cur_mhz = self._get_mali_freqs()
            if self._freqs:
                min_mhz = self._freqs[0]
                max_mhz = self._freqs[-1]

        elif self._gpu_type == 'i915':
            events = self._I915_EVENTS
            with open(i915_path) as fd:
                for ln in fd.readlines():
                    logging.debug("ln = %s", ln.strip())
                    result = re.findall(self._I915_CUR_FREQ_RE, ln)
                    if result:
                        cur_mhz = result[0]
                        continue
                    result = re.findall(self._I915_MIN_FREQ_RE, ln)
                    if result:
                        min_mhz = result[0]
                        continue
                    result = re.findall(self._I915_MAX_FREQ_RE, ln)
                    if result:
                        max_mhz = result[0]
                        continue
                if min_mhz and max_mhz:
                    for i in xrange(int(min_mhz), int(max_mhz) + 1):
                        if i % 100 in self._I915_FREQ_STEPS:
                            self._freqs.append(str(i))

        logging.debug("cur_mhz = %s, min_mhz = %s, max_mhz = %s", cur_mhz,
                      min_mhz, max_mhz)

        if cur_mhz and min_mhz and max_mhz:
            self._trace = kernel_trace.KernelTrace(events=events)

        # Not all platforms or kernel versions support tracing.
        if not self._trace or not self._trace.is_tracing():
            logging.warning("GPU frequency tracing not enabled.")
        else:
            self._prev_sample = (cur_mhz, self._trace.uptime_secs())
            logging.debug("Current GPU freq: %s", cur_mhz)
            logging.debug("All GPU freqs: %s", self._freqs)

        super(GPUFreqStats, self).__init__('gpufreq', incremental=incremental)


    @classmethod
    def _set_gpu_type(cls, gpu_type):
        cls._gpu_type = gpu_type


    def _read_stats(self):
        if self._gpu_type:
            return getattr(self, "_%s_read_stats" % self._gpu_type)()
        return {}


    def _trace_read_stats(self, regexp):
        """Read GPU stats from kernel trace outputs.

        Args:
            regexp: regular expression to match trace output for frequency

        Returns:
            Dict with key string in mhz and val float in seconds.
        """
        if not self._prev_sample:
            return {}

        stats = dict((k, 0.0) for k in self._freqs)
        results = self._trace.read(regexp=regexp)
        for (tstamp_str, freq) in results:
            tstamp = float(tstamp_str)

            # do not reparse lines in trace buffer
            if tstamp <= self._prev_sample[1]:
                continue
            delta = tstamp - self._prev_sample[1]
            logging.debug("freq:%s tstamp:%f - %f delta:%f",
                          self._prev_sample[0],
                          tstamp, self._prev_sample[1],
                          delta)
            stats[self._prev_sample[0]] += delta
            self._prev_sample = (freq, tstamp)

        # Do last record
        delta = self._trace.uptime_secs() - self._prev_sample[1]
        logging.debug("freq:%s tstamp:uptime - %f delta:%f",
                      self._prev_sample[0],
                      self._prev_sample[1], delta)
        stats[self._prev_sample[0]] += delta

        logging.debug("GPU freq percents:%s", stats)
        return stats


    def _mali_read_stats(self):
        """Read Mali GPU stats

        Frequencies are reported in Hz, so use a regex that drops the last 6
        digits.

        Output in trace looks like this:

            kworker/u:24-5220 [000] .... 81060.329232: mali_dvfs_set_clock: frequency=400
            kworker/u:24-5220 [000] .... 81061.830128: mali_dvfs_set_clock: frequency=350

        Returns:
            Dict with frequency in mhz as key and float in seconds for time
            spent at that frequency.
        """
        return self._trace_read_stats(self._MALI_TRACE_CLK_RE)


    def _i915_read_stats(self):
        """Read i915 GPU stats.

        Output looks like this (kernel >= 3.8):

            kworker/u:0-28247 [000] .... 259391.579610: intel_gpu_freq_change: new_freq=400
            kworker/u:0-28247 [000] .... 259391.581797: intel_gpu_freq_change: new_freq=350

        Returns:
            Dict with frequency in mhz as key and float in seconds for time
            spent at that frequency.
        """
        return self._trace_read_stats(self._I915_TRACE_CLK_RE)


    def _supports_automatic_weighted_average(self):
        return self._gpu_type is not None


class USBSuspendStats(AbstractStats):
    """
    USB active/suspend statistics (over all devices)
    """
    # TODO (snanda): handle hot (un)plugging of USB devices
    # TODO (snanda): handle duration counters wraparound

    def __init__(self):
        usb_stats_path = '/sys/bus/usb/devices/*/power'
        self._file_paths = glob.glob(usb_stats_path)
        if not self._file_paths:
            logging.debug('USB stats path not found')
        super(USBSuspendStats, self).__init__('usb')


    def _read_stats(self):
        usb_stats = {'active': 0, 'suspended': 0}

        for path in self._file_paths:
            active_duration_path = os.path.join(path, 'active_duration')
            total_duration_path = os.path.join(path, 'connected_duration')

            if not os.path.exists(active_duration_path) or \
               not os.path.exists(total_duration_path):
                logging.debug('duration paths do not exist for: %s', path)
                continue

            active = int(utils.read_file(active_duration_path))
            total = int(utils.read_file(total_duration_path))
            logging.debug('device %s active for %.2f%%',
                          path, active * 100.0 / total)

            usb_stats['active'] += active
            usb_stats['suspended'] += total - active

        return usb_stats


def read_cpu_set(filename):
    """
    Parse data of form "0,2-4,9".

    Return a set of ints.
    """
    data = utils.read_file(filename)
    ret = set()

    for entry in data.split(','):
        entry_data = entry.split('-')
        start = end = int(entry_data[0])
        if len(entry_data) > 1:
            end = int(entry_data[1])
        ret |= set(range(start, end + 1))
    return ret


def get_cpu_sibling_groups():
    """
    Get CPU core groups in HMP systems.

    In systems with both small cores and big cores, returns groups of small
    and big sibling groups.
    """
    siblings_suffix = 'topology/core_siblings_list'
    sibling_groups = []
    cpus_processed = set()
    cpus, sibling_file_paths = get_cpus_filepaths_for_suffix(get_online_cpus(),
                                                             siblings_suffix)
    for c, siblings_path in zip(cpus, sibling_file_paths):
        if c in cpus_processed:
            # This cpu is already part of a sibling group. Skip.
            continue
        sibling_group = read_cpu_set(siblings_path)
        cpus_processed |= sibling_group
        sibling_groups.append(frozenset(sibling_group))
    return tuple(sibling_groups)


def get_available_cpu_stats():
    """Return CPUFreq/CPUIdle stats grouped by big-small sibling groups."""
    ret = [CPUPackageStats()]
    cpu_sibling_groups = get_cpu_sibling_groups()

    cpufreq_stat_class = CPUFreqStats
    # assumes cpufreq driver for CPU0 is the same as the others.
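    # On intel_pstate systems, frequency residency is derived from APERF/MPERF
    # MSRs (CPUFreqStatsPState) rather than cpufreq's time_in_state accounting
    # (CPUFreqStats).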
    cpufreq_driver = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver'
    if utils.read_one_line(cpufreq_driver) == 'intel_pstate':
        logging.debug('intel_pstate driver active')
        cpufreq_stat_class = CPUFreqStatsPState

    if not cpu_sibling_groups:
        ret.append(cpufreq_stat_class())
        ret.append(CPUIdleStats())
    for cpu_group in cpu_sibling_groups:
        ret.append(cpufreq_stat_class(cpu_group))
        ret.append(CPUIdleStats(cpu_group))
    if has_rc6_support():
        ret.append(GPURC6Stats())
    return ret


class StatoMatic(object):
    """Class to aggregate and monitor a bunch of power related statistics."""
    def __init__(self):
        self._start_uptime_secs = kernel_trace.KernelTrace.uptime_secs()
        self._astats = [USBSuspendStats(), GPUFreqStats(incremental=False)]
        self._astats.extend(get_available_cpu_stats())
        if os.path.isdir(DevFreqStats._DIR):
            self._astats.extend([DevFreqStats(f) for f in \
                                 os.listdir(DevFreqStats._DIR)])

        self._disk = DiskStateLogger()
        self._disk.start()


    def publish(self):
        """Publishes results of various statistics gathered.

        Returns:
            dict with
                key = string 'percent_<name>_<key>_time'
                value = float in percent
        """
        results = {}
        tot_secs = kernel_trace.KernelTrace.uptime_secs() - \
                   self._start_uptime_secs
        for stat_obj in self._astats:
            percent_stats = stat_obj.refresh()
            logging.debug("pstats = %s", percent_stats)
            if stat_obj.name == 'gpufreq':
                # TODO(tbroch) remove this once GPU freq stats have proved
                # reliable
                stats_secs = sum(stat_obj._stats.itervalues())
                if stats_secs < (tot_secs * 0.9) or \
                   stats_secs > (tot_secs * 1.1):
                    logging.warning('%s stats don\'t look right. Not publishing.',
                                    stat_obj.name)
                    continue
            new_res = {}
            AbstractStats.format_results_percent(new_res, stat_obj.name,
                                                 percent_stats)
            wavg = stat_obj.weighted_average()
            if wavg:
                AbstractStats.format_results_wavg(new_res, stat_obj.name, wavg)

            results.update(new_res)

        new_res = {}
        if self._disk.get_error():
            new_res['disk_logging_error'] = str(self._disk.get_error())
        else:
            AbstractStats.format_results_percent(new_res, 'disk',
                                                 self._disk.result())
        results.update(new_res)

        return results


class PowerMeasurement(object):
    """Class to measure power.

    Public attributes:
        domain: String name of the power domain being measured. Example is
            'system' for total system power

    Public methods:
        refresh: Performs any power/energy sampling and calculation and returns
            power as float in watts. This method MUST be implemented in
            subclass.
    """

    def __init__(self, domain):
        """Constructor."""
        self.domain = domain


    def refresh(self):
        """Performs any power/energy sampling and calculation.

        MUST be implemented in subclass

        Returns:
            float, power in watts.
        """
        raise NotImplementedError("'refresh' method should be implemented in "
                                  "subclass.")


def parse_power_supply_info():
    """Parses power_supply_info command output.

    Command output from power_manager ( tools/power_supply_info.cc ) looks like
    this:

    Device: Line Power
        path: /sys/class/power_supply/cros_ec-charger
        ...
    Device: Battery
        path: /sys/class/power_supply/sbs-9-000b
        ...
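
    The output is parsed into a dict of dicts keyed by device name, e.g.
    (illustrative) rv['Battery']['percentage'] holds the battery percentage
    as a string.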

    """
    rv = collections.defaultdict(dict)
    dev = None
    for ln in utils.system_output('power_supply_info').splitlines():
        logging.debug("psu: %s", ln)
        result = re.findall(r'^Device:\s+(.*)', ln)
        if result:
            dev = result[0]
            continue
        result = re.findall(r'\s+(.+):\s+(.+)', ln)
        if result and dev:
            kname = re.findall(r'(.*)\s+\(\w+\)', result[0][0])
            if kname:
                rv[dev][kname[0]] = result[0][1]
            else:
                rv[dev][result[0][0]] = result[0][1]

    return rv


class SystemPower(PowerMeasurement):
    """Class to measure system power.

    TODO(tbroch): This class provides a subset of functionality in BatteryStat
    in hopes of minimizing power draw. Investigate whether it's really
    significant and if not, deprecate.

    Private Attributes:
        _voltage_file: path to retrieve voltage in uvolts
        _current_file: path to retrieve current in uamps
    """

    def __init__(self, battery_dir):
        """Constructor.

        Args:
            battery_dir: path to dir containing the files to probe and log.
                usually something like /sys/class/power_supply/BAT0/
        """
        super(SystemPower, self).__init__('system')
        # Files to log voltage and current from
        self._voltage_file = os.path.join(battery_dir, 'voltage_now')
        self._current_file = os.path.join(battery_dir, 'current_now')


    def refresh(self):
        """refresh method.

        See superclass PowerMeasurement for details.
        """
        keyvals = parse_power_supply_info()
        return float(keyvals['Battery']['energy rate'])


class BatteryStateOfCharge(PowerMeasurement):
    """Class for logging battery state of charge."""

    def __init__(self):
        """Constructor."""
        super(BatteryStateOfCharge, self).__init__('battery_soc')

    def refresh(self):
        """refresh method.

        See superclass PowerMeasurement for details.
        """
        keyvals = parse_power_supply_info()
        return float(keyvals['Battery']['percentage'])


class CheckpointLogger(object):
    """Class to log checkpoint data.

    Public attributes:
        checkpoint_data: dictionary of (tname, tlist).
            tname: String of testname associated with these time intervals
            tlist: list of tuples. Tuple contains:
                tstart: Float of time when subtest started
                tend: Float of time when subtest ended

    Public methods:
        start: records a start timestamp
        checkpoint
        checkblock
        save_checkpoint_data
        load_checkpoint_data

    Static methods:
        load_checkpoint_data_static

    Private attributes:
        _start_time: start timestamp for checkpoint logger
    """

    def __init__(self):
        self.checkpoint_data = collections.defaultdict(list)
        self.start()

    # If multiple MeasurementLoggers call start() on the same CheckpointLogger,
    # the latest one will register start time.
    def start(self):
        """Set start time for CheckpointLogger."""
        self._start_time = time.time()

    @contextlib.contextmanager
    def checkblock(self, tname=''):
        """Check point for the following block with test tname.

        Args:
            tname: String of testname associated with this time interval
        """
        start_time = time.time()
        yield
        self.checkpoint(tname, start_time)

    def checkpoint(self, tname='', tstart=None, tend=None):
        """Check point the times in seconds associated with test tname.

        Args:
            tname: String of testname associated with this time interval
            tstart: Float in seconds of when tname test started. Should be based
                off time.time(). If None, use start timestamp for the checkpoint
                logger.
            tend: Float in seconds of when tname test ended. Should be based
                off time.time(). If None, then value computed in the method.
        """
        if not tstart and self._start_time:
            tstart = self._start_time
        if not tend:
            tend = time.time()
        self.checkpoint_data[tname].append((tstart, tend))
        logging.info('Finished test "%s" between timestamps [%s, %s]',
                     tname, tstart, tend)

    def convert_relative(self, start_time=None):
        """Convert data from power_status.CheckpointLogger object to relative
        checkpoint data dictionary. Timestamps are converted to time in seconds
        since the test started.

        Args:
            start_time: Float in seconds of the desired start time reference.
                Should be based off time.time(). If None, use start timestamp
                for the checkpoint logger.
        """
        if start_time is None:
            start_time = self._start_time

        checkpoint_dict = {}
        for tname, tlist in self.checkpoint_data.iteritems():
            checkpoint_dict[tname] = [(tstart - start_time, tend - start_time)
                                      for tstart, tend in tlist]

        return checkpoint_dict

    def save_checkpoint_data(self, resultsdir,
                             fname=CHECKPOINT_LOG_DEFAULT_FNAME):
        """Save checkpoint data.

        Args:
            resultsdir: String, directory to write results to
            fname: String, name of file to write results to
        """
        fname = os.path.join(resultsdir, fname)
        with open(fname, 'wt') as f:
            json.dump(self.checkpoint_data, f, indent=4, separators=(',', ': '))

    def load_checkpoint_data(self, resultsdir,
                             fname=CHECKPOINT_LOG_DEFAULT_FNAME):
        """Load checkpoint data.

        Args:
            resultsdir: String, directory to load results from
            fname: String, name of file to load results from
        """
        fname = os.path.join(resultsdir, fname)
        try:
            with open(fname, 'r') as f:
                self.checkpoint_data = json.load(f,
                                                 object_hook=to_checkpoint_data)
            # Set start time to the earliest start timestamp in file.
            self._start_time = min(
                    ts_pair[0] for ts_pair in itertools.chain.from_iterable(
                            self.checkpoint_data.itervalues()))
        except Exception as exc:
            logging.warning('Failed to load checkpoint data from json file %s, '
                            'see exception: %s', fname, exc)

    @staticmethod
    def load_checkpoint_data_static(resultsdir,
                                    fname=CHECKPOINT_LOG_DEFAULT_FNAME):
        """Load checkpoint data.

        Args:
            resultsdir: String, directory to load results from
            fname: String, name of file to load results from
        """
        fname = os.path.join(resultsdir, fname)
        with open(fname, 'r') as f:
            checkpoint_data = json.load(f)
        return checkpoint_data


def to_checkpoint_data(json_dict):
    """Helper method to translate a json object into CheckpointLogger format.
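
    For example (illustrative), a json dict {"login": [[1.0, 2.5]]} becomes
    defaultdict(list, {'login': [(1.0, 2.5)]}).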

    Args:
        json_dict: a json object in the format of python dict
    Returns:
        a defaultdict in CheckpointLogger data format
    """
    checkpoint_data = collections.defaultdict(list)
    for tname, tlist in json_dict.iteritems():
        checkpoint_data[tname].extend([tuple(ts_pair) for ts_pair in tlist])
    return checkpoint_data


def get_checkpoint_logger_from_file(resultsdir,
                                    fname=CHECKPOINT_LOG_DEFAULT_FNAME):
    """Create a CheckpointLogger and load checkpoint data from file.

    Args:
        resultsdir: String, directory to load results from
        fname: String, name of file to load results from
    Returns:
        CheckpointLogger with data from file
    """
    checkpoint_logger = CheckpointLogger()
    checkpoint_logger.load_checkpoint_data(resultsdir, fname)
    return checkpoint_logger


class MeasurementLogger(threading.Thread):
    """A thread that logs measurement readings.

    Example code snippet:
        my_logger = MeasurementLogger([Measurement1, Measurement2])
        my_logger.start()
        for testname in tests:
            # Option 1: use checkblock
            with my_logger.checkblock(testname):
                # run the test method for testname

            # Option 2: use checkpoint
            start_time = time.time()
            # run the test method for testname
            my_logger.checkpoint(testname, start_time)

        keyvals = my_logger.calc()

    or using CheckpointLogger:
        checkpoint_logger = CheckpointLogger()
        my_logger = MeasurementLogger([Measurement1, Measurement2],
                                      checkpoint_logger)
        my_logger.start()
        for testname in tests:
            # Option 1: use checkblock
            with checkpoint_logger.checkblock(testname):
                # run the test method for testname

            # Option 2: use checkpoint
            start_time = time.time()
            # run the test method for testname
            checkpoint_logger.checkpoint(testname, start_time)

        keyvals = my_logger.calc()

    Public attributes:
        seconds_period: float, probing interval in seconds.
        readings: list of lists of floats of measurements.
        times: list of floats of time (since Epoch) of when measurements
            occurred. len(time) == len(readings).
        done: flag to stop the logger.
        domains: list of domain strings being measured

    Public methods:
        run: launches the thread to gather measurements
        refresh: performs a data sampling for every measurement
        calc: calculates statistics (mean, max, min, std, count) per sub-test
        save_results: saves computed and raw results to file

    Private attributes:
        _measurements: list of Measurement objects to be sampled.
        _checkpoint_data: dictionary of (tname, tlist).
            tname: String of testname associated with these time intervals
            tlist: list of tuples. Tuple contains:
                tstart: Float of time when subtest started
                tend: Float of time when subtest ended
        _results: list of results tuples. Tuple contains:
            prefix: String of subtest
            mean: Float of mean in watts
            std: Float of standard deviation of measurements
            tstart: Float of time when subtest started
            tend: Float of time when subtest ended
    """
    def __init__(self, measurements, seconds_period=1.0,
                 checkpoint_logger=None):
        """Initialize a logger.

        Args:
            measurements: list of Measurement objects to be sampled.
            seconds_period: float, probing interval in seconds.
Default 1.0 1855 """ 1856 threading.Thread.__init__(self) 1857 1858 self.seconds_period = seconds_period 1859 1860 self.readings = [] 1861 self.times = [] 1862 1863 self._measurements = measurements 1864 self.domains = [meas.domain for meas in self._measurements] 1865 1866 self._checkpoint_logger = \ 1867 checkpoint_logger if checkpoint_logger else CheckpointLogger() 1868 1869 self.done = False 1870 1871 def start(self): 1872 self._checkpoint_logger.start() 1873 super(MeasurementLogger, self).start() 1874 1875 def refresh(self): 1876 """Perform data samplings for every measurements. 1877 1878 Returns: 1879 list of sampled data for every measurements. 1880 """ 1881 return [meas.refresh() for meas in self._measurements] 1882 1883 def run(self): 1884 """Threads run method.""" 1885 loop = 0 1886 start_time = time.time() 1887 while(not self.done): 1888 # TODO (dbasehore): We probably need proper locking in this file 1889 # since there have been race conditions with modifying and accessing 1890 # data. 1891 self.readings.append(self.refresh()) 1892 current_time = time.time() 1893 self.times.append(current_time) 1894 loop += 1 1895 next_measurement_time = start_time + loop * self.seconds_period 1896 time.sleep(next_measurement_time - current_time) 1897 1898 @contextlib.contextmanager 1899 def checkblock(self, tname=''): 1900 """Check point for the following block with test tname. 1901 1902 Args: 1903 tname: String of testname associated with this time interval 1904 """ 1905 start_time = time.time() 1906 yield 1907 self.checkpoint(tname, start_time) 1908 1909 def checkpoint(self, tname='', tstart=None, tend=None): 1910 """Just a thin method calling the CheckpointLogger checkpoint method. 1911 1912 Args: 1913 tname: String of testname associated with this time interval 1914 tstart: Float in seconds of when tname test started. Should be based 1915 off time.time() 1916 tend: Float in seconds of when tname test ended. Should be based 1917 off time.time(). If None, then value computed in the method. 1918 """ 1919 self._checkpoint_logger.checkpoint(tname, tstart, tend) 1920 1921 # TODO(seankao): It might be useful to pull this method to CheckpointLogger, 1922 # to allow checkpoint usage without an explicit MeasurementLogger. 1923 def calc(self, mtype=None): 1924 """Calculate average measurement during each of the sub-tests. 1925 1926 Method performs the following steps: 1927 1. Signals the thread to stop running. 1928 2. Calculates mean, max, min, count on the samples for each of the 1929 measurements. 1930 3. Stores results to be written later. 1931 4. Creates keyvals for autotest publishing. 1932 1933 Args: 1934 mtype: string of measurement type. For example: 1935 pwr == power 1936 temp == temperature 1937 Returns: 1938 dict of keyvals suitable for autotest results. 1939 """ 1940 if not mtype: 1941 mtype = 'meas' 1942 1943 t = numpy.array(self.times) 1944 keyvals = {} 1945 results = [('domain', 'mean', 'std', 'duration (s)', 'start ts', 1946 'end ts')] 1947 # TODO(coconutruben): ensure that values is meaningful i.e. go through 1948 # the Loggers and add a unit attribute to each so that the raw 1949 # data is readable. 1950 raw_results = [('domain', 'values (%s)' % mtype)] 1951 1952 if not self.done: 1953 self.done = True 1954 # times 2 the sleep time in order to allow for readings as well. 
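            # (The logging thread may be mid-sleep for up to one full period
            # and can then spend additional time finishing one more refresh(),
            # so a single-period timeout could return before the last reading
            # lands.)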
            self.join(timeout=self.seconds_period * 2)

        if not self._checkpoint_logger.checkpoint_data:
            self._checkpoint_logger.checkpoint()

        for i, domain_readings in enumerate(zip(*self.readings)):
            meas = numpy.array(domain_readings)
            try:
                domain = self.domains[i]
            except IndexError:
                # TODO (evanbenn) temp logging for b:162610351
                logging.debug('b:162610351 IndexError: %s, %d, %d, (%s)',
                              type(self).__name__,
                              len(self.readings),
                              len(self.domains),
                              ', '.join(str(len(r)) for r in self.readings))
                logging.debug('b:162610351 domains: %s',
                              ', '.join(self.domains))
                raise

            for tname, tlist in self._checkpoint_logger.checkpoint_data.iteritems():
                if tname:
                    prefix = '%s_%s' % (tname, domain)
                else:
                    prefix = domain
                keyvals[prefix + '_duration'] = 0
                # Select all readings taken between the tstart and tend
                # timestamps in tlist.
                masks = []
                for tstart, tend in tlist:
                    keyvals[prefix + '_duration'] += tend - tstart
                    # Try block just in case
                    # code.google.com/p/chromium/issues/detail?id=318892
                    # is not fixed.
                    try:
                        masks.append(numpy.logical_and(tstart < t, t < tend))
                    except ValueError as e:
                        logging.debug('Error logging measurements: %s', str(e))
                        logging.debug('timestamps %d %s', len(t), t)
                        logging.debug('timestamp start, end %f %f', tstart, tend)
                        logging.debug('measurements %d %s', len(meas), meas)
                mask = numpy.logical_or.reduce(masks)
                meas_array = meas[mask]

                # If the sub-test terminated early, avoid calculating avg, std
                # and min on an empty array.
                if not meas_array.size:
                    continue
                meas_mean = meas_array.mean()
                meas_std = meas_array.std()

                # Results list can be used for pretty printing and saving as csv.
                # TODO(seankao): new results format?
                result = (prefix, meas_mean, meas_std)
                for tstart, tend in tlist:
                    result = result + (tend - tstart, tstart, tend)
                results.append(result)
                raw_results.append((prefix,) + tuple(meas_array.tolist()))

                keyvals[prefix + '_' + mtype + '_avg'] = meas_mean
                keyvals[prefix + '_' + mtype + '_cnt'] = meas_array.size
                keyvals[prefix + '_' + mtype + '_max'] = meas_array.max()
                keyvals[prefix + '_' + mtype + '_min'] = meas_array.min()
                keyvals[prefix + '_' + mtype + '_std'] = meas_std
        self._results = results
        self._raw_results = raw_results
        return keyvals


    def save_results(self, resultsdir, fname_prefix=None):
        """Save computed results in a nice tab-separated format.
        This is useful for long manual runs.

        Args:
            resultsdir: String, directory to write results to
            fname_prefix: prefix to use for the output filenames. If provided,
                          the output files will be [fname_prefix]_[raw|summary].txt
        """
        if not fname_prefix:
            fname_prefix = 'meas_results_%.0f' % time.time()
        fname = '%s_summary.txt' % fname_prefix
        raw_fname = fname.replace('summary', 'raw')
        for name, data in [(fname, self._results),
                           (raw_fname, self._raw_results)]:
            with open(os.path.join(resultsdir, name), 'wt') as f:
                # First row contains the headers.
                f.write('%s\n' % '\t'.join(data[0]))
                for row in data[1:]:
                    # First column is the name, the rest are numbers; see calc().
                    fmt_row = [row[0]] + ['%.2f' % x for x in row[1:]]
                    f.write('%s\n' % '\t'.join(fmt_row))


class CPUStatsLogger(MeasurementLogger):
    """Class to measure CPU Frequency and CPU Idle Stats.

    CPUStatsLogger is derived from MeasurementLogger, but it overrides the data
    sampling method because MeasurementLogger assumes that samplings are
    independent of each other. Here they are not: for example, CPU time spent
    in the C0 state is measured as the time not spent in all other states.

    CPUStatsLogger also collects the weighted average over each time period if
    the underlying AbstractStats object supports a weighted-average function.

    Private attributes:
        _stats: list of CPU AbstractStats objects to be sampled.
        _refresh_count: number of times refresh() has been called.
        _last_wavg: dict of wavg values when refresh() was last called.
    """
    def __init__(self, seconds_period=1.0, checkpoint_logger=None):
        """Initialize a CPUStatsLogger.

        Args:
            seconds_period: float, probing interval in seconds. Default 1.0
        """
        # We don't pass measurements since CPU stats can't be measured
        # separately.
        super(CPUStatsLogger, self).__init__([], seconds_period, checkpoint_logger)

        self._stats = get_available_cpu_stats()
        self._stats.append(GPUFreqStats())
        self.domains = []
        for stat in self._stats:
            self.domains.extend([stat.name + '_' + str(state_name)
                                 for state_name in stat.refresh()])
            if stat.weighted_average():
                self.domains.append('wavg_' + stat.name)
        self._refresh_count = 0
        self._last_wavg = collections.defaultdict(int)

    def refresh(self):
        self._refresh_count += 1
        count = self._refresh_count
        ret = []
        for stat in self._stats:
            ret.extend(stat.refresh().values())
            wavg = stat.weighted_average()
            if wavg:
                if stat.incremental:
                    last_wavg = self._last_wavg[stat.name]
                    self._last_wavg[stat.name] = wavg
                    # Calculate the weighted average for this period from the
                    # current and previous cumulative weighted averages. The
                    # result loses some precision as the count grows, but it
                    # stays good for about 11 significant digits even if data
                    # is logged every second for a day.
                    ret.append(wavg * count - last_wavg * (count - 1))
                else:
                    ret.append(wavg)
        return ret

    def save_results(self, resultsdir, fname_prefix=None):
        if not fname_prefix:
            fname_prefix = 'cpu_results_%.0f' % time.time()
        super(CPUStatsLogger, self).save_results(resultsdir, fname_prefix)


class PowerLogger(MeasurementLogger):
    """Class to measure power consumption."""

    def __init__(self, measurements, seconds_period=1.0,
                 checkpoint_logger=None):
        if not measurements:
            measurements = self.create_measurements()
        super(PowerLogger, self).__init__(measurements, seconds_period,
                                          checkpoint_logger)

    def create_measurements(self):
        """Create power measurements based on device config."""
        # Import here to avoid import loop.
2124 from autotest_lib.client.cros.power import power_rapl 2125 2126 measurements = [] 2127 status = get_status() 2128 if status.battery: 2129 measurements.append(BatteryStateOfCharge()) 2130 if status.battery_discharging(): 2131 measurements.append(SystemPower(status.battery_path)) 2132 if power_utils.has_powercap_support(): 2133 measurements += power_rapl.create_powercap() 2134 elif power_utils.has_rapl_support(): 2135 measurements += power_rapl.create_rapl() 2136 return measurements 2137 2138 def save_results(self, resultsdir, fname_prefix=None): 2139 if not fname_prefix: 2140 fname_prefix = 'power_results_%.0f' % time.time() 2141 super(PowerLogger, self).save_results(resultsdir, fname_prefix) 2142 2143 def calc(self, mtype='pwr'): 2144 return super(PowerLogger, self).calc(mtype) 2145 2146 2147class TempMeasurement(object): 2148 """Class to measure temperature. 2149 2150 Public attributes: 2151 domain: String name of the temperature domain being measured. Example is 2152 'cpu' for cpu temperature 2153 2154 Private attributes: 2155 _path: Path to temperature file to read ( in millidegrees Celsius ) 2156 2157 Public methods: 2158 refresh: Performs any temperature sampling and calculation and returns 2159 temperature as float in degrees Celsius. 2160 """ 2161 def __init__(self, domain, path): 2162 """Constructor.""" 2163 self.domain = domain 2164 self._path = path 2165 2166 2167 def refresh(self): 2168 """Performs temperature 2169 2170 Returns: 2171 float, temperature in degrees Celsius 2172 """ 2173 return int(utils.read_one_line(self._path)) / 1000. 2174 2175 2176class BatteryTempMeasurement(TempMeasurement): 2177 """Class to measure battery temperature.""" 2178 def __init__(self): 2179 super(BatteryTempMeasurement, self).__init__('battery', 'battery_temp') 2180 2181 2182 def refresh(self): 2183 """Perform battery temperature reading. 2184 2185 Returns: 2186 float, temperature in degrees Celsius. 2187 """ 2188 result = utils.run(self._path, timeout=5, ignore_status=True) 2189 return float(result.stdout) 2190 2191 2192def has_battery_temp(): 2193 """Determine if DUT can provide battery temperature. 2194 2195 Returns: 2196 Boolean, True if battery temperature available. False otherwise. 
2197 """ 2198 if not power_utils.has_battery(): 2199 return False 2200 2201 btemp = BatteryTempMeasurement() 2202 try: 2203 btemp.refresh() 2204 except ValueError: 2205 return False 2206 2207 return True 2208 2209 2210class TempLogger(MeasurementLogger): 2211 """A thread that logs temperature readings in millidegrees Celsius.""" 2212 2213 def __init__(self, measurements, seconds_period=30.0, 2214 checkpoint_logger=None): 2215 if not measurements: 2216 measurements = self.create_measurements() 2217 super(TempLogger, self).__init__(measurements, seconds_period, 2218 checkpoint_logger) 2219 2220 def create_measurements(self): 2221 """Create measurements for TempLogger.""" 2222 domains = set() 2223 measurements = [] 2224 tstats = ThermalStatHwmon() 2225 for kname in tstats.fields: 2226 match = re.match(r'(\S+)_temp(\d+)_input', kname) 2227 if not match: 2228 continue 2229 domain = match.group(1) + '-t' + match.group(2) 2230 fpath = tstats.fields[kname][0] 2231 new_meas = TempMeasurement(domain, fpath) 2232 measurements.append(new_meas) 2233 domains.add(domain) 2234 2235 if has_battery_temp(): 2236 measurements.append(BatteryTempMeasurement()) 2237 2238 sysfs_paths = '/sys/class/thermal/thermal_zone*' 2239 paths = glob.glob(sysfs_paths) 2240 for path in paths: 2241 domain_path = os.path.join(path, 'type') 2242 temp_path = os.path.join(path, 'temp') 2243 2244 domain = utils.read_one_line(domain_path) 2245 2246 # Skip when thermal_zone and hwmon have same domain. 2247 if domain in domains: 2248 continue 2249 2250 domain = domain.replace(' ', '_') 2251 new_meas = TempMeasurement(domain, temp_path) 2252 measurements.append(new_meas) 2253 return measurements 2254 2255 def save_results(self, resultsdir, fname_prefix=None): 2256 if not fname_prefix: 2257 fname_prefix = 'temp_results_%.0f' % time.time() 2258 super(TempLogger, self).save_results(resultsdir, fname_prefix) 2259 2260 2261 def calc(self, mtype='temp'): 2262 return super(TempLogger, self).calc(mtype) 2263 2264 2265class VideoFpsLogger(MeasurementLogger): 2266 """Class to measure Video FPS.""" 2267 2268 @classmethod 2269 def time_until_ready(cls, tab, num_video=1, timeout=120): 2270 """Wait until tab is ready for VideoFpsLogger and return time used. 2271 2272 Keep polling Chrome tab until these 2 conditions are met one by one. 2273 - Number of <video> elements detected is equal to |num_video|. 2274 - All videos are played for at least 1 ms. 2275 2276 Args: 2277 tab: Chrome tab object 2278 num_video: number of expected <video> elements, default 1. 2279 timeout: timeout in seconds, default 120. 2280 2281 Returns: 2282 float, number of seconds elasped until condition met. 2283 2284 Raises: 2285 py_utils.TimeoutException if condition are not met by timeout. 2286 """ 2287 start_time = time.time() 2288 2289 # Number of <video> elements detected is equal to |num_video|. 2290 c = 'document.getElementsByTagName("video").length == %d' % num_video 2291 tab.WaitForJavaScriptCondition(c, timeout=timeout) 2292 2293 # All videos are played for at least 1 ms. 2294 c = ('Math.min(...Array.from(document.getElementsByTagName("video"))' 2295 '.map(v => v.currentTime)) >= 0.001') 2296 timeout_left = timeout - (time.time() - start_time) 2297 tab.WaitForJavaScriptCondition(c, timeout=timeout_left) 2298 return time.time() - start_time 2299 2300 def __init__(self, tab, seconds_period=1.0, checkpoint_logger=None): 2301 """Initialize a VideoFpsLogger. 
2302 2303 Args: 2304 tab: Chrome tab object 2305 """ 2306 super(VideoFpsLogger, self).__init__([], seconds_period, 2307 checkpoint_logger) 2308 self._tab = tab 2309 names = self._tab.EvaluateJavaScript( 2310 'Array.from(document.getElementsByTagName("video")).map(v => v.id)') 2311 self.domains = [n or 'video_' + str(i) for i, n in enumerate(names)] 2312 self._last = [0] * len(names) 2313 self.refresh() 2314 2315 def refresh(self): 2316 @retry.retry(Exception, timeout_min=0.5, delay_sec=0.1) 2317 def get_fps(): 2318 return self._tab.EvaluateJavaScript( 2319 'Array.from(document.getElementsByTagName("video")).map(' 2320 'v => v.webkitDecodedFrameCount)') 2321 current = get_fps() 2322 fps = [(b - a if b >= a else b) / self.seconds_period 2323 for a, b in zip(self._last , current)] 2324 self._last = current 2325 return fps 2326 2327 def save_results(self, resultsdir, fname_prefix=None): 2328 if not fname_prefix: 2329 fname_prefix = 'video_fps_results_%.0f' % time.time() 2330 super(VideoFpsLogger, self).save_results(resultsdir, fname_prefix) 2331 2332 def calc(self, mtype='fps'): 2333 return super(VideoFpsLogger, self).calc(mtype) 2334 2335 2336def get_num_fans(): 2337 """Count how many fan DUT has. 2338 2339 Returns: 2340 Integer, number of fans that DUT has. 2341 """ 2342 res = utils.run('ectool pwmgetnumfans | grep -o [0-9]', ignore_status=True) 2343 if not res or res.exit_status != 0: 2344 return 0 2345 return int(res.stdout) 2346 2347 2348def has_fan(): 2349 """Determine if DUT has fan. 2350 2351 Returns: 2352 Boolean, True if dut has fan. False otherwise. 2353 """ 2354 return get_num_fans() > 0 2355 2356 2357class FanRpmLogger(MeasurementLogger): 2358 """Class to measure Fan RPM.""" 2359 2360 def __init__(self, seconds_period=1.0, checkpoint_logger=None): 2361 """Initialize a FanRpmLogger.""" 2362 super(FanRpmLogger, self).__init__([], seconds_period, 2363 checkpoint_logger) 2364 self.domains = ['fan_' + str(i) for i in range(get_num_fans())] 2365 self.refresh() 2366 2367 def refresh(self): 2368 @retry.retry(Exception, timeout_min=0.1, delay_sec=2) 2369 def get_fan_rpm_all(): 2370 """Some example outputs from ectool 2371 2372 * Two fan system 2373 localhost ~ # ectool pwmgetfanrpm all 2374 Fan 0 RPM: 4012 2375 Fan 1 RPM: 4009 2376 2377 * One fan but its stalled 2378 localhost ~ # ectool pwmgetfanrpm all 2379 Fan 0 stalled! 2380 """ 2381 cmd = 'ectool pwmgetfanrpm all' 2382 res = utils.run(cmd, ignore_status=True, 2383 stdout_tee=utils.TEE_TO_LOGS, 2384 stderr_tee=utils.TEE_TO_LOGS) 2385 rpm_data = [] 2386 for i, ln in enumerate(res.stdout.splitlines()): 2387 if ln.find('stalled') != -1: 2388 rpm_data.append(0) 2389 else: 2390 rpm_data.append(int(ln.split(':')[1])) 2391 return rpm_data 2392 return get_fan_rpm_all() 2393 2394 def save_results(self, resultsdir, fname_prefix=None): 2395 if not fname_prefix: 2396 fname_prefix = 'fan_rpm_results_%.0f' % time.time() 2397 super(FanRpmLogger, self).save_results(resultsdir, fname_prefix) 2398 2399 def calc(self, mtype='rpm'): 2400 return super(FanRpmLogger, self).calc(mtype) 2401 2402 2403def create_measurement_loggers(seconds_period=20.0, checkpoint_logger=None): 2404 """Create loggers for power test that is not test-specific. 2405 2406 Args: 2407 seconds_period: float, probing interval in seconds. Default 20.0 2408 checkpoint_logger: CheckpointLogger class for the loggers 2409 2410 Returns: 2411 list of loggers created. 
2412 """ 2413 loggers = [ 2414 PowerLogger(None, seconds_period, checkpoint_logger), 2415 TempLogger(None, seconds_period, checkpoint_logger), 2416 CPUStatsLogger(seconds_period, checkpoint_logger), 2417 ] 2418 if has_fan(): 2419 loggers.append(FanRpmLogger(seconds_period, checkpoint_logger)) 2420 2421 return loggers 2422 2423 2424class DiskStateLogger(threading.Thread): 2425 """Records the time percentages the disk stays in its different power modes. 2426 2427 Example code snippet: 2428 mylogger = power_status.DiskStateLogger() 2429 mylogger.start() 2430 result = mylogger.result() 2431 2432 Public methods: 2433 start: Launches the thread and starts measurements. 2434 result: Stops the thread if it's still running and returns measurements. 2435 get_error: Returns the exception in _error if it exists. 2436 2437 Private functions: 2438 _get_disk_state: Returns the disk's current ATA power mode as a string. 2439 2440 Private attributes: 2441 _seconds_period: Disk polling interval in seconds. 2442 _stats: Dict that maps disk states to seconds spent in them. 2443 _running: Flag that is True as long as the logger should keep running. 2444 _time: Timestamp of last disk state reading. 2445 _device_path: The file system path of the disk's device node. 2446 _error: Contains a TestError exception if an unexpected error occured 2447 """ 2448 def __init__(self, seconds_period = 5.0, device_path = None): 2449 """Initializes a logger. 2450 2451 Args: 2452 seconds_period: Disk polling interval in seconds. Default 5.0 2453 device_path: The path of the disk's device node. Default '/dev/sda' 2454 """ 2455 threading.Thread.__init__(self) 2456 self._seconds_period = seconds_period 2457 self._device_path = device_path 2458 self._stats = {} 2459 self._running = False 2460 self._error = None 2461 2462 result = utils.system_output('rootdev -s') 2463 # TODO(tbroch) Won't work for emmc storage and will throw this error in 2464 # keyvals : 'ioctl(SG_IO) error: [Errno 22] Invalid argument' 2465 # Lets implement something complimentary for emmc 2466 if not device_path: 2467 self._device_path = \ 2468 re.sub('(sd[a-z]|mmcblk[0-9]+)p?[0-9]+', '\\1', result) 2469 logging.debug("device_path = %s", self._device_path) 2470 2471 2472 def start(self): 2473 logging.debug("inside DiskStateLogger.start") 2474 if os.path.exists(self._device_path): 2475 logging.debug("DiskStateLogger started") 2476 super(DiskStateLogger, self).start() 2477 2478 2479 def _get_disk_state(self): 2480 """Checks the disk's power mode and returns it as a string. 2481 2482 This uses the SG_IO ioctl to issue a raw SCSI command data block with 2483 the ATA-PASS-THROUGH command that allows SCSI-to-ATA translation (see 2484 T10 document 04-262r8). The ATA command issued is CHECKPOWERMODE1, 2485 which returns the device's current power mode. 2486 """ 2487 2488 def _addressof(obj): 2489 """Shortcut to return the memory address of an object as integer.""" 2490 return ctypes.cast(obj, ctypes.c_void_p).value 2491 2492 scsi_cdb = struct.pack("12B", # SCSI command data block (uint8[12]) 2493 0xa1, # SCSI opcode: ATA-PASS-THROUGH 2494 3 << 1, # protocol: Non-data 2495 1 << 5, # flags: CK_COND 2496 0, # features 2497 0, # sector count 2498 0, 0, 0, # LBA 2499 1 << 6, # flags: ATA-USING-LBA 2500 0xe5, # ATA opcode: CHECKPOWERMODE1 2501 0, # reserved 2502 0, # control (no idea what this is...) 
2503 ) 2504 scsi_sense = (ctypes.c_ubyte * 32)() # SCSI sense buffer (uint8[32]) 2505 sgio_header = struct.pack("iiBBHIPPPIIiPBBBBHHiII", # see <scsi/sg.h> 2506 83, # Interface ID magic number (int32) 2507 -1, # data transfer direction: none (int32) 2508 12, # SCSI command data block length (uint8) 2509 32, # SCSI sense data block length (uint8) 2510 0, # iovec_count (not applicable?) (uint16) 2511 0, # data transfer length (uint32) 2512 0, # data block pointer 2513 _addressof(scsi_cdb), # SCSI CDB pointer 2514 _addressof(scsi_sense), # sense buffer pointer 2515 500, # timeout in milliseconds (uint32) 2516 0, # flags (uint32) 2517 0, # pack ID (unused) (int32) 2518 0, # user data pointer (unused) 2519 0, 0, 0, 0, 0, 0, 0, 0, 0, # output params 2520 ) 2521 try: 2522 with open(self._device_path, 'r') as dev: 2523 result = fcntl.ioctl(dev, 0x2285, sgio_header) 2524 except IOError, e: 2525 raise error.TestError('ioctl(SG_IO) error: %s' % str(e)) 2526 _, _, _, _, status, host_status, driver_status = \ 2527 struct.unpack("4x4xxx2x4xPPP4x4x4xPBxxxHH4x4x4x", result) 2528 if status != 0x2: # status: CHECK_CONDITION 2529 raise error.TestError('SG_IO status: %d' % status) 2530 if host_status != 0: 2531 raise error.TestError('SG_IO host status: %d' % host_status) 2532 if driver_status != 0x8: # driver status: SENSE 2533 raise error.TestError('SG_IO driver status: %d' % driver_status) 2534 2535 if scsi_sense[0] != 0x72: # resp. code: current error, descriptor format 2536 raise error.TestError('SENSE response code: %d' % scsi_sense[0]) 2537 if scsi_sense[1] != 0: # sense key: No Sense 2538 raise error.TestError('SENSE key: %d' % scsi_sense[1]) 2539 if scsi_sense[7] < 14: # additional length (ATA status is 14 - 1 bytes) 2540 raise error.TestError('ADD. SENSE too short: %d' % scsi_sense[7]) 2541 if scsi_sense[8] != 0x9: # additional descriptor type: ATA Return Status 2542 raise error.TestError('SENSE descriptor type: %d' % scsi_sense[8]) 2543 if scsi_sense[11] != 0: # errors: none 2544 raise error.TestError('ATA error code: %d' % scsi_sense[11]) 2545 2546 if scsi_sense[13] == 0x00: 2547 return 'standby' 2548 if scsi_sense[13] == 0x80: 2549 return 'idle' 2550 if scsi_sense[13] == 0xff: 2551 return 'active' 2552 return 'unknown(%d)' % scsi_sense[13] 2553 2554 2555 def run(self): 2556 """The Thread's run method.""" 2557 try: 2558 self._time = time.time() 2559 self._running = True 2560 while(self._running): 2561 time.sleep(self._seconds_period) 2562 state = self._get_disk_state() 2563 new_time = time.time() 2564 if state in self._stats: 2565 self._stats[state] += new_time - self._time 2566 else: 2567 self._stats[state] = new_time - self._time 2568 self._time = new_time 2569 except error.TestError, e: 2570 self._error = e 2571 self._running = False 2572 2573 2574 def result(self): 2575 """Stop the logger and return dict with result percentages.""" 2576 if (self._running): 2577 self._running = False 2578 self.join(self._seconds_period * 2) 2579 return AbstractStats.to_percent(self._stats) 2580 2581 2582 def get_error(self): 2583 """Returns the _error exception... please only call after result().""" 2584 return self._error 2585 2586def parse_pmc_s0ix_residency_info(): 2587 """ 2588 Parses S0ix residency for PMC based Intel systems 2589 (skylake/kabylake/apollolake), the debugfs paths might be 2590 different from platform to platform, yet the format is 2591 unified in microseconds. 2592 2593 @returns residency in seconds. 2594 @raises error.TestNAError if the debugfs file not found. 
2595 """ 2596 info_path = None 2597 for node in ['/sys/kernel/debug/pmc_core/slp_s0_residency_usec', 2598 '/sys/kernel/debug/telemetry/s0ix_residency_usec']: 2599 if os.path.exists(node): 2600 info_path = node 2601 break 2602 if not info_path: 2603 raise error.TestNAError('S0ix residency file not found') 2604 return float(utils.read_one_line(info_path)) * 1e-6 2605 2606 2607class S0ixResidencyStats(object): 2608 """ 2609 Measures the S0ix residency of a given board over time. 2610 """ 2611 def __init__(self): 2612 self._initial_residency = parse_pmc_s0ix_residency_info() 2613 2614 def get_accumulated_residency_secs(self): 2615 """ 2616 @returns S0ix Residency since the class has been initialized. 2617 """ 2618 return parse_pmc_s0ix_residency_info() - self._initial_residency 2619 2620 2621class S2IdleStateStats(object): 2622 """ 2623 Usage stats of an s2idle state. 2624 """ 2625 2626 def __init__(self, usage, time): 2627 self.usage = usage 2628 self.time = time 2629 2630 2631def get_s2idle_path(cpu, state): 2632 path = os.path.join(CPU_BASE_PATH, 2633 "cpu{}/cpuidle/{}/s2idle".format(cpu, state)) 2634 if not os.path.exists(path): 2635 return None 2636 2637 return path 2638 2639 2640def get_s2idle_stats_for_state(cpu, state): 2641 """ 2642 Returns the s2idle stats for a given idle state of a CPU. 2643 """ 2644 s2idle_path = get_s2idle_path(cpu, state) 2645 2646 path = os.path.join(s2idle_path, 'usage') 2647 if not os.path.exists(path): 2648 raise error.TestFail("File not found: {}" % path) 2649 2650 usage = int(utils.read_one_line(path)) 2651 2652 path = os.path.join(s2idle_path, 'time') 2653 if not os.path.exists(path): 2654 raise error.TestFail("File not found: {}" % path) 2655 2656 time = int(utils.read_one_line(path)) 2657 2658 return S2IdleStateStats(usage, time) 2659 2660 2661def get_cpuidle_states(cpu): 2662 """ 2663 Returns the cpuidle states of a CPU. 2664 """ 2665 cpuidle_path = os.path.join(CPU_BASE_PATH, "cpu{}/cpuidle".format(cpu)) 2666 2667 pattern = os.path.join(cpuidle_path, 'state*') 2668 state_paths = glob.glob(pattern) 2669 2670 return [s.split('/')[-1] for s in state_paths] 2671 2672 2673def get_s2idle_stats_for_cpu(cpu): 2674 """ 2675 Returns the s2idle stats for a CPU. 2676 """ 2677 return {s: get_s2idle_stats_for_state(cpu, s) 2678 for s in get_cpuidle_states(cpu) 2679 if get_s2idle_path(cpu, s) is not None} 2680 2681 2682def get_s2idle_stats(): 2683 """ 2684 Returns the s2idle stats for all CPUs. 2685 """ 2686 return {cpu: get_s2idle_stats_for_cpu(cpu) for cpu in get_online_cpus()} 2687 2688 2689def get_s2idle_residency_total_usecs(): 2690 """ 2691 Get total s2idle residency time for all CPUs and states. 2692 """ 2693 total_usecs = 0 2694 2695 all_stats = get_s2idle_stats() 2696 for stats in all_stats.itervalues(): 2697 for st in stats.itervalues(): 2698 total_usecs += st.time 2699 2700 return total_usecs 2701 2702 2703class S2IdleResidencyStats(object): 2704 """ 2705 Measures the s2idle residency of a given board over time. 2706 """ 2707 2708 def __init__(self): 2709 self._initial_residency = get_s2idle_residency_total_usecs() 2710 2711 def get_accumulated_residency_usecs(self): 2712 """ 2713 @returns s2idle residency since the class has been initialized. 2714 """ 2715 return get_s2idle_residency_total_usecs() - self._initial_residency 2716 2717 2718class DMCFirmwareStats(object): 2719 """ 2720 Collect DMC firmware stats of Intel based system (SKL+), (APL+). 2721 """ 2722 # Intel CPUs only transition to DC6 from DC5. 
https://git.io/vppcG 2723 DC6_ENTRY_KEY = 'DC5 -> DC6 count' 2724 2725 def __init__(self): 2726 self._initial_stat = DMCFirmwareStats._parse_dmc_info() 2727 2728 def check_fw_loaded(self): 2729 """Check that DMC firmware is loaded 2730 2731 @returns boolean of DMC firmware loaded. 2732 """ 2733 return self._initial_stat['fw loaded'] 2734 2735 def is_dc6_supported(self): 2736 """Check that DMC support DC6 state.""" 2737 return self.DC6_ENTRY_KEY in self._initial_stat 2738 2739 def get_accumulated_dc6_entry(self): 2740 """Check number of DC6 state entry since the class has been initialized. 2741 2742 @returns number of DC6 state entry. 2743 """ 2744 if not self.is_dc6_supported(): 2745 return 0 2746 2747 key = self.DC6_ENTRY_KEY 2748 current_stat = DMCFirmwareStats._parse_dmc_info() 2749 return current_stat[key] - self._initial_stat[key] 2750 2751 @staticmethod 2752 def _parse_dmc_info(): 2753 """ 2754 Parses DMC firmware info for Intel based systems. 2755 2756 @returns dictionary of dmc_fw info 2757 @raises error.TestFail if the debugfs file not found. 2758 """ 2759 path = '/sys/kernel/debug/dri/0/i915_dmc_info' 2760 if not os.path.exists(path): 2761 raise error.TestFail('DMC info file not found.') 2762 2763 with open(path, 'r') as f: 2764 lines = [line.strip() for line in f.readlines()] 2765 2766 # For pre 4.16 kernel. https://git.io/vhThb 2767 if lines[0] == 'not supported': 2768 raise error.TestFail('DMC not supported.') 2769 2770 ret = dict() 2771 for line in lines: 2772 key, val = line.rsplit(': ', 1) 2773 2774 if key == 'fw loaded': 2775 val = val == 'yes' 2776 elif re.match(r'DC\d -> DC\d count', key): 2777 val = int(val) 2778 ret[key] = val 2779 return ret 2780 2781 2782class RC6ResidencyStats(object): 2783 """ 2784 Collect RC6 residency stats of Intel based system. 2785 """ 2786 def __init__(self): 2787 self._rc6_enable_checked = False 2788 self._previous_stat = self._parse_rc6_residency_info() 2789 self._accumulated_stat = 0 2790 2791 # Setup max RC6 residency count for modern chips. The table order 2792 # is in small/big-core first, follows by the uarch name. We don't 2793 # need to deal with the wraparound for devices with v4.17+ kernel 2794 # which has the commit 817cc0791823 ("drm/i915: Handle RC6 counter wrap"). 2795 cpu_uarch = utils.get_intel_cpu_uarch() 2796 self._max_counter = { 2797 # Small-core w/ GEN9 LP graphics 2798 'Airmont': 3579125, 2799 'Goldmont': 3579125, 2800 # Big-core 2801 'Broadwell': 5497558, 2802 'Haswell': 5497558, 2803 'Kaby Lake': 5497558, 2804 'Skylake': 5497558, 2805 }.get(cpu_uarch, None) 2806 2807 def get_accumulated_residency_msecs(self): 2808 """Check number of RC6 state entry since the class has been initialized. 2809 2810 @returns int of RC6 residency in milliseconds since instantiation. 2811 """ 2812 current_stat = self._parse_rc6_residency_info() 2813 2814 # The problem here is that we cannot assume the rc6_residency_ms is 2815 # monotonically increasing by current kernel i915 implementation. 2816 # 2817 # Considering different hardware has different wraparound period, 2818 # this is a mitigation plan to deal with different wraparound period 2819 # on various platforms, in order to make the test platform agnostic. 2820 # 2821 # This scarifes the accuracy of RC6 residency a bit, up on the calling 2822 # period. 
2823 # 2824 # Reference: Bug 94852 - [SKL] rc6_residency_ms unreliable 2825 # (https://bugs.freedesktop.org/show_bug.cgi?id=94852) 2826 # 2827 # However for modern processors with a known overflow count, apply 2828 # constant of RC6 max counter to improve accuracy. 2829 # 2830 # Note that the max counter is bound for sysfs overflow, while the 2831 # accumulated residency here is the diff against the first reading. 2832 if current_stat < self._previous_stat: 2833 if self._max_counter is None: 2834 logging.warning('GPU: Detect rc6_residency_ms wraparound') 2835 self._accumulated_stat += current_stat 2836 else: 2837 self._accumulated_stat += current_stat + (self._max_counter - 2838 self._previous_stat) 2839 else: 2840 self._accumulated_stat += current_stat - self._previous_stat 2841 2842 self._previous_stat = current_stat 2843 return self._accumulated_stat 2844 2845 def _is_rc6_enable(self): 2846 """ 2847 Verified that RC6 is enable. 2848 2849 @returns Boolean of RC6 enable status. 2850 @raises error.TestFail if the sysfs file not found. 2851 """ 2852 path = '/sys/class/drm/card0/power/rc6_enable' 2853 if not os.path.exists(path): 2854 raise error.TestFail('RC6 enable file not found.') 2855 2856 return (int(utils.read_one_line(path)) & 0x1) == 0x1 2857 2858 def _parse_rc6_residency_info(self): 2859 """ 2860 Parses RC6 residency info for Intel based systems. 2861 2862 @returns int of RC6 residency in millisec since boot. 2863 @raises error.TestFail if the sysfs file not found or RC6 not enabled. 2864 """ 2865 if not self._rc6_enable_checked: 2866 if not self._is_rc6_enable(): 2867 raise error.TestFail('RC6 is not enabled.') 2868 self._rc6_enable_checked = True 2869 2870 path = '/sys/class/drm/card0/power/rc6_residency_ms' 2871 if not os.path.exists(path): 2872 raise error.TestFail('RC6 residency file not found.') 2873 2874 return int(utils.read_one_line(path)) 2875 2876 2877class PCHPowergatingStats(object): 2878 """ 2879 Collect PCH powergating status of intel based system. 2880 """ 2881 PMC_CORE_PATH = '/sys/kernel/debug/pmc_core/pch_ip_power_gating_status' 2882 TELEMETRY_PATH = '/sys/kernel/debug/telemetry/soc_states' 2883 2884 def __init__(self): 2885 self._stat = {} 2886 2887 def check_s0ix_requirement(self): 2888 """ 2889 Check PCH powergating status with S0ix requirement. 2890 2891 @returns list of PCH IP block name that need to be powergated for low 2892 power consumption S0ix, empty list if none. 2893 """ 2894 # PCH IP block that is on for S0ix. Ignore these IP block. 2895 S0IX_ALLOWLIST = set([ 2896 'PMC', 'OPI-DMI', 'SPI / eSPI', 'XHCI', 'xHCI', 'FUSE', 'Fuse', 2897 'PCIE0', 'NPKVRC', 'NPKVNN', 'NPK_VNN', 'PSF1', 'PSF2', 'PSF3', 2898 'PSF4', 'SBR0', 'SBR1', 'SBR2', 'SBR4', 'SBR5', 'SBR6', 'SBR7']) 2899 2900 # PCH IP block that is on/off for S0ix depend on features enabled. 2901 # Add log when these IPs state are on. 2902 S0IX_WARNLIST = set([ 2903 'HDA-PGD0', 'HDA-PGD1', 'HDA-PGD2', 'HDA-PGD3', 'LPSS', 2904 'AVSPGD1', 'AVSPGD4']) 2905 2906 # CNV device has 0x31dc as devid . 2907 if len(utils.system_output('lspci -d :31dc')) > 0: 2908 S0IX_ALLOWLIST.add('CNV') 2909 2910 # HrP2 device has 0x02f0(CML) or 0x4df0(JSL) as devid. 
2911 if (len(utils.system_output('lspci -d :02f0')) > 0 or 2912 len(utils.system_output('lspci -d :4df0')) > 0): 2913 S0IX_ALLOWLIST.update(['CNVI', 'NPK_AON']) 2914 2915 on_ip = set(ip['name'] for ip in self._stat if ip['state']) 2916 on_ip -= S0IX_ALLOWLIST 2917 2918 if on_ip: 2919 on_ip_in_warn_list = on_ip & S0IX_WARNLIST 2920 if on_ip_in_warn_list: 2921 logging.warn('Found PCH IP that may be able to powergate: %s', 2922 ', '.join(on_ip_in_warn_list)) 2923 on_ip -= S0IX_WARNLIST 2924 2925 if on_ip: 2926 logging.error('Found PCH IP that need to powergate: %s', 2927 ', '.join(on_ip)) 2928 return on_ip 2929 return [] 2930 2931 def read_pch_powergating_info(self, sleep_seconds=1): 2932 """ 2933 Read PCH powergating status info for Intel based systems. 2934 2935 Intel currently shows powergating status in 2 different place in debugfs 2936 depend on which CPU platform. 2937 2938 @param sleep_seconds: sleep time to make DUT idle before read the data. 2939 2940 @raises error.TestFail if the debugfs file not found or parsing error. 2941 """ 2942 if os.path.exists(self.PMC_CORE_PATH): 2943 logging.info('Use PCH powergating info at %s', self.PMC_CORE_PATH) 2944 time.sleep(sleep_seconds) 2945 self._read_pcm_core_powergating_info() 2946 return 2947 2948 if os.path.exists(self.TELEMETRY_PATH): 2949 logging.info('Use PCH powergating info at %s', self.TELEMETRY_PATH) 2950 time.sleep(sleep_seconds) 2951 self._read_telemetry_powergating_info() 2952 return 2953 2954 raise error.TestFail('PCH powergating info file not found.') 2955 2956 def _read_pcm_core_powergating_info(self): 2957 """ 2958 read_pch_powergating_info() for Intel Core KBL+ 2959 2960 @raises error.TestFail if parsing error. 2961 """ 2962 with open(self.PMC_CORE_PATH, 'r') as f: 2963 lines = [line.strip() for line in f.readlines()] 2964 2965 # Example pattern to match: 2966 # PCH IP: 0 - PMC State: On 2967 # PCH IP: 1 - SATA State: Off 2968 pattern = r'PCH IP:\s+(?P<id>\d+)\s+' \ 2969 r'- (?P<name>.*\w)\s+' \ 2970 r'State: (?P<state>Off|On)' 2971 matcher = re.compile(pattern) 2972 ret = [] 2973 for i, line in enumerate(lines): 2974 match = matcher.match(line) 2975 if not match: 2976 raise error.TestFail('Can not parse PCH powergating info: ', 2977 line) 2978 2979 index = int(match.group('id')) 2980 if i != index: 2981 raise error.TestFail('Wrong index for PCH powergating info: ', 2982 line) 2983 2984 name = match.group('name') 2985 state = match.group('state') == 'On' 2986 2987 ret.append({'name': name, 'state': state}) 2988 self._stat = ret 2989 2990 def _read_telemetry_powergating_info(self): 2991 """ 2992 read_pch_powergating_info() for Intel Atom APL+ 2993 2994 @raises error.TestFail if parsing error. 
2995 """ 2996 with open(self.TELEMETRY_PATH, 'r') as f: 2997 raw_str = f.read() 2998 2999 # Example pattern to match: 3000 # -------------------------------------- 3001 # South Complex PowerGate Status 3002 # -------------------------------------- 3003 # Device PG 3004 # LPSS 1 3005 # SPI 1 3006 # FUSE 0 3007 # 3008 # --------------------------------------- 3009 trimed_pattern = r'.*South Complex PowerGate Status\n' \ 3010 r'-+\n' \ 3011 r'Device\s+PG\n' \ 3012 r'(?P<trimmed_section>(\w+\s+[0|1]\n)+)' \ 3013 r'\n-+\n.*' 3014 trimed_match = re.match(trimed_pattern, raw_str, re.DOTALL) 3015 if not trimed_match: 3016 raise error.TestFail('Can not parse PCH powergating info: ', 3017 raw_str) 3018 trimmed_str = trimed_match.group('trimmed_section').strip() 3019 lines = [line.strip() for line in trimmed_str.split('\n')] 3020 3021 matcher = re.compile(r'(?P<name>\w+)\s+(?P<state>[0|1])') 3022 ret = [] 3023 for line in lines: 3024 match = matcher.match(line) 3025 if not match: 3026 raise error.TestFail('Can not parse PCH powergating info: %s', 3027 line) 3028 3029 name = match.group('name') 3030 state = match.group('state') == '0' # 0 means on and 1 means off 3031 3032 ret.append({'name': name, 'state': state}) 3033 self._stat = ret 3034 3035def has_rc6_support(): 3036 """ 3037 Helper to examine that RC6 is enabled with residency counter. 3038 3039 @returns Boolean of RC6 support status. 3040 """ 3041 enable_path = '/sys/class/drm/card0/power/rc6_enable' 3042 residency_path = '/sys/class/drm/card0/power/rc6_residency_ms' 3043 3044 has_rc6_enabled = os.path.exists(enable_path) 3045 has_rc6_residency = False 3046 rc6_enable_mask = 0 3047 3048 if has_rc6_enabled: 3049 # TODO (harry.pan): Some old chip has RC6P and RC6PP 3050 # in the bits[1:2]; in case of that, ideally these time 3051 # slice will fall into RC0, fix it up if required. 3052 rc6_enable_mask = int(utils.read_one_line(enable_path)) 3053 has_rc6_enabled &= (rc6_enable_mask) & 0x1 == 0x1 3054 has_rc6_residency = os.path.exists(residency_path) 3055 3056 logging.debug("GPU: RC6 residency support: %s, mask: 0x%x", 3057 {True: "yes", False: "no"} [has_rc6_enabled and has_rc6_residency], 3058 rc6_enable_mask) 3059 3060 return (has_rc6_enabled and has_rc6_residency) 3061 3062class GPURC6Stats(AbstractStats): 3063 """ 3064 GPU RC6 statistics to give ratio of RC6 and RC0 residency 3065 3066 Protected Attributes: 3067 _rc6: object of RC6ResidencyStats 3068 """ 3069 def __init__(self): 3070 self._rc6 = RC6ResidencyStats() 3071 super(GPURC6Stats, self).__init__(name='gpuidle') 3072 3073 def _read_stats(self): 3074 total = int(time.time() * 1000) 3075 msecs = self._rc6.get_accumulated_residency_msecs() 3076 stats = collections.defaultdict(int) 3077 stats['RC6'] += msecs 3078 stats['RC0'] += total - msecs 3079 logging.debug("GPU: RC6 residency: %d ms", msecs) 3080 return stats 3081