1# Lint as: python2, python3 2# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 3# Use of this source code is governed by a BSD-style license that can be 4# found in the LICENSE file. 5 6from __future__ import absolute_import 7from __future__ import division 8from __future__ import print_function 9 10import collections 11import contextlib 12import ctypes 13import fcntl 14import glob 15import itertools 16import json 17import logging 18import math 19import numpy 20import os 21import re 22import struct 23import threading 24import time 25 26from autotest_lib.client.bin import utils 27from autotest_lib.client.common_lib import autotest_enum 28from autotest_lib.client.common_lib import error 29from autotest_lib.client.common_lib.cros import retry 30from autotest_lib.client.common_lib.utils import poll_for_condition_ex 31from autotest_lib.client.cros import kernel_trace 32from autotest_lib.client.cros.power import power_utils 33from collections import namedtuple 34from six.moves import range 35from six.moves import zip 36 37BatteryDataReportType = autotest_enum.AutotestEnum('CHARGE', 'ENERGY') 38 39# battery data reported at 1e6 scale 40BATTERY_DATA_SCALE = 1e6 41# number of times to retry reading the battery in the case of bad data 42BATTERY_RETRY_COUNT = 3 43# default filename when saving CheckpointLogger data to file 44CHECKPOINT_LOG_DEFAULT_FNAME = 'checkpoint_log.json' 45 46 47class DevStat(object): 48 """ 49 Device power status. This class implements generic status initialization 50 and parsing routines. 51 """ 52 53 def __init__(self, fields, path=None): 54 self.fields = fields 55 self.path = path 56 57 58 def reset_fields(self): 59 """ 60 Reset all class fields to None to mark their status as unknown. 61 """ 62 for field in self.fields.keys(): 63 setattr(self, field, None) 64 65 66 def read_val(self, file_name, field_type): 67 """Read a value from file. 68 """ 69 try: 70 path = file_name 71 if not file_name.startswith('/'): 72 path = os.path.join(self.path, file_name) 73 f = open(path, 'r') 74 out = f.readline().rstrip('\n') 75 val = field_type(out) 76 return val 77 78 except: 79 return field_type(0) 80 81 82 def read_all_vals(self): 83 """Read all values. 84 """ 85 for field, prop in self.fields.items(): 86 if prop[0]: 87 val = self.read_val(prop[0], prop[1]) 88 setattr(self, field, val) 89 90 def update(self): 91 """Update the DevStat. 92 93 Need to implement in subclass. 94 """ 95 pass 96 97class ThermalStatACPI(DevStat): 98 """ 99 ACPI-based thermal status. 100 101 Fields: 102 (All temperatures are in millidegrees Celsius.) 103 104 str enabled: Whether thermal zone is enabled 105 int temp: Current temperature 106 str type: Thermal zone type 107 int num_trip_points: Number of thermal trip points that activate 108 cooling devices 109 int num_points_tripped: Temperature is above this many trip points 110 str trip_point_N_type: Trip point #N's type 111 int trip_point_N_temp: Trip point #N's temperature value 112 int cdevX_trip_point: Trip point o cooling device #X (index) 113 """ 114 115 MAX_TRIP_POINTS = 20 116 117 thermal_fields = { 118 'enabled': ['enabled', str], 119 'temp': ['temp', int], 120 'type': ['type', str], 121 'num_points_tripped': ['', ''] 122 } 123 path = '/sys/class/thermal/thermal_zone*' 124 125 def __init__(self, path=None): 126 # Browse the thermal folder for trip point fields. 127 self.num_trip_points = 0 128 129 if path is None: 130 path = ThermalStatACPI.path 131 132 self.zones = {} 133 thermal_zones = glob.glob(path) 134 for (i, zone) in enumerate(thermal_zones): 135 desc_path = os.path.join(zone, 'device/description') 136 desc = '' 137 if os.path.exists(desc_path): 138 desc = utils.read_one_line(desc_path) 139 140 # If there's no description then use the type to create a description 141 if desc == '': 142 domain_path = os.path.join(zone, 'type') 143 domain = utils.read_one_line(domain_path) 144 desc = '%s%d' % (domain, i) 145 146 desc = desc.replace(' ', '_') 147 self.zones[desc] = os.path.join(zone, 'temp') 148 149 thermal_fields = glob.glob(path + '/*') 150 for file in thermal_fields: 151 field = file[len(path + '/'):] 152 if field.find('trip_point') != -1: 153 if field.find('temp'): 154 field_type = int 155 else: 156 field_type = str 157 self.thermal_fields[field] = [field, field_type] 158 159 # Count the number of trip points. 160 if field.find('_type') != -1: 161 self.num_trip_points += 1 162 163 super(ThermalStatACPI, self).__init__(self.thermal_fields, path) 164 self.update() 165 166 def update(self): 167 if not os.path.exists(self.path): 168 return 169 170 self.read_all_vals() 171 self.num_points_tripped = 0 172 173 for field in self.thermal_fields: 174 if field.find('trip_point_') != -1 and field.find('_temp') != -1 \ 175 and self.temp > self.read_val(field, int): 176 self.num_points_tripped += 1 177 logging.info('Temperature trip point #%s tripped.', \ 178 field[len('trip_point_'):field.rfind('_temp')]) 179 180 181class ThermalStatHwmon(DevStat): 182 """ 183 hwmon-based thermal status. Excludes overlaps with thermal zones by default 184 since thermal zones generally provide a more usable description. 185 186 Fields: 187 int <tname>_temp<num>_input: Current temperature in millidegrees Celsius 188 where: 189 <tname> : name of hwmon device in sysfs 190 <num> : number of temp as some hwmon devices have multiple 191 192 """ 193 path = '/sys/class/hwmon' 194 195 thermal_fields = {} 196 197 def __init__(self, rootpath=None, exclude_tz=True): 198 excluded_domains = set() 199 if exclude_tz: 200 thermal_zones = glob.glob('/sys/class/thermal/thermal_zone*') 201 for zone in thermal_zones: 202 domain_path = os.path.join(zone, 'type') 203 domain = utils.read_one_line(domain_path) 204 205 excluded_domains.add(domain) 206 207 if not rootpath: 208 rootpath = self.path 209 for subpath1 in glob.glob('%s/hwmon*' % rootpath): 210 for subpath2 in ['','device/']: 211 gpaths = glob.glob("%s/%stemp*_input" % (subpath1, subpath2)) 212 for gpath in gpaths: 213 bname = os.path.basename(gpath) 214 field_path = os.path.join(subpath1, subpath2, bname) 215 216 domain_path = os.path.join(os.path.dirname(gpath), "name") 217 domain = utils.read_one_line(domain_path) 218 219 if domain in excluded_domains: 220 continue 221 222 field_key = "%s_%s" % (domain, bname) 223 self.thermal_fields[field_key] = [field_path, int] 224 225 super(ThermalStatHwmon, self).__init__(self.thermal_fields, rootpath) 226 self.update() 227 228 def update(self): 229 if not os.path.exists(self.path): 230 return 231 232 self.read_all_vals() 233 234 def read_val(self, file_name, field_type): 235 try: 236 path = os.path.join(self.path, file_name) 237 f = open(path, 'r') 238 out = f.readline() 239 return field_type(out) 240 except: 241 return field_type(0) 242 243 244class ThermalStat(object): 245 """helper class to instantiate various thermal devices.""" 246 def __init__(self): 247 self._thermals = [] 248 self.min_temp = 999999999 249 self.max_temp = -999999999 250 251 thermal_stat_types = [(ThermalStatHwmon.path, ThermalStatHwmon), 252 (ThermalStatACPI.path, ThermalStatACPI)] 253 for thermal_glob_path, thermal_type in thermal_stat_types: 254 try: 255 thermal_path = glob.glob(thermal_glob_path)[0] 256 logging.debug('Using %s for thermal info.', thermal_path) 257 self._thermals.append(thermal_type(thermal_path)) 258 except: 259 logging.debug('Could not find thermal path %s, skipping.', 260 thermal_glob_path) 261 262 263 def get_temps(self): 264 """Get temperature readings. 265 266 Returns: 267 string of temperature readings. 268 """ 269 temp_str = '' 270 for thermal in self._thermals: 271 thermal.update() 272 for kname in thermal.fields: 273 if kname is 'temp' or kname.endswith('_input'): 274 val = getattr(thermal, kname) 275 temp_str += '%s:%d ' % (kname, val) 276 if val > self.max_temp: 277 self.max_temp = val 278 if val < self.min_temp: 279 self.min_temp = val 280 281 282 return temp_str 283 284 285class BatteryStat(DevStat): 286 """ 287 Battery status. 288 289 Fields: 290 291 float charge_full: Last full capacity reached [Ah] 292 float charge_full_design: Full capacity by design [Ah] 293 float charge_now: Remaining charge [Ah] 294 float current_now: Battery discharge rate [A] 295 int cycle_count: Battery cycle count 296 float energy: Current battery charge [Wh] 297 float energy_full: Last full capacity reached [Wh] 298 float energy_full_design: Full capacity by design [Wh] 299 float energy_rate: Battery discharge rate [W] 300 str manufacturer: Battery manufacturer 301 str model_name: Battery model name 302 float power_now: Battery discharge rate [W] 303 int present: Whether battery is present 304 float remaining_time: Remaining discharging time [h] 305 str serial_number: Battery serial number 306 str status: Charging status 307 float voltage_min_design: Minimum voltage by design [V] 308 float voltage_max_design: Maximum voltage by design [V] 309 float voltage_now: Voltage now [V] 310 """ 311 312 battery_fields = { 313 'status': ['status', str], 314 'charge_full': ['charge_full', float], 315 'charge_full_design': ['charge_full_design', float], 316 'charge_now': ['charge_now', float], 317 'current_now': ['current_now', float], 318 'cycle_count': ['cycle_count', int], 319 'voltage_min_design': ['voltage_min_design', float], 320 'voltage_max_design': ['voltage_max_design', float], 321 'voltage_now': ['voltage_now', float], 322 'energy': ['energy_now', float], 323 'energy_full': ['energy_full', float], 324 'energy_full_design': ['energy_full_design', float], 325 'power_now': ['power_now', float], 326 'present': ['present', int], 327 'manufacturer': ['manufacturer', str], 328 'model_name': ['model_name', str], 329 'serial_number': ['serial_number', str], 330 'energy_rate': ['', ''], 331 'remaining_time': ['', ''] 332 } 333 334 def __init__(self, path=None): 335 super(BatteryStat, self).__init__(self.battery_fields, path) 336 self.update() 337 338 339 def update(self): 340 for _ in range(BATTERY_RETRY_COUNT): 341 try: 342 self._read_battery() 343 return 344 except error.TestError as e: 345 logging.warning(e) 346 for field, prop in self.battery_fields.items(): 347 logging.warning(field + ': ' + repr(getattr(self, field))) 348 continue 349 raise error.TestError('Failed to read battery state') 350 351 352 def _read_battery(self): 353 self.read_all_vals() 354 355 if self.charge_full == 0 and self.energy_full != 0: 356 battery_type = BatteryDataReportType.ENERGY 357 else: 358 battery_type = BatteryDataReportType.CHARGE 359 360 if self.voltage_min_design != 0: 361 voltage_nominal = self.voltage_min_design 362 else: 363 voltage_nominal = self.voltage_now 364 365 if voltage_nominal == 0: 366 raise error.TestError('Failed to determine battery voltage') 367 368 battery_design_full_scale = 1 369 370 # Since charge data is present, calculate parameters based upon 371 # reported charge data. 372 if battery_type == BatteryDataReportType.CHARGE: 373 self.charge_full_design *= battery_design_full_scale 374 375 self.charge_full = self.charge_full / BATTERY_DATA_SCALE 376 self.charge_full_design = self.charge_full_design / \ 377 BATTERY_DATA_SCALE 378 self.charge_now = self.charge_now / BATTERY_DATA_SCALE 379 380 self.current_now = math.fabs(self.current_now) / \ 381 BATTERY_DATA_SCALE 382 383 self.energy = voltage_nominal * \ 384 self.charge_now / \ 385 BATTERY_DATA_SCALE 386 self.energy_full = voltage_nominal * \ 387 self.charge_full / \ 388 BATTERY_DATA_SCALE 389 self.energy_full_design = voltage_nominal * \ 390 self.charge_full_design / \ 391 BATTERY_DATA_SCALE 392 393 # Charge data not present, so calculate parameters based upon 394 # reported energy data. 395 elif battery_type == BatteryDataReportType.ENERGY: 396 self.energy_full_design *= battery_design_full_scale 397 398 self.charge_full = self.energy_full / voltage_nominal 399 self.charge_full_design = self.energy_full_design / \ 400 voltage_nominal 401 self.charge_now = self.energy / voltage_nominal 402 403 # TODO(shawnn): check if power_now can really be reported 404 # as negative, in the same way current_now can 405 self.current_now = math.fabs(self.power_now) / \ 406 voltage_nominal 407 408 self.energy = self.energy / BATTERY_DATA_SCALE 409 self.energy_full = self.energy_full / BATTERY_DATA_SCALE 410 self.energy_full_design = self.energy_full_design / \ 411 BATTERY_DATA_SCALE 412 413 self.voltage_min_design = self.voltage_min_design / \ 414 BATTERY_DATA_SCALE 415 self.voltage_max_design = self.voltage_max_design / \ 416 BATTERY_DATA_SCALE 417 self.voltage_now = self.voltage_now / \ 418 BATTERY_DATA_SCALE 419 voltage_nominal = voltage_nominal / \ 420 BATTERY_DATA_SCALE 421 422 self.energy_rate = self.voltage_now * self.current_now 423 424 self.remaining_time = 0 425 if self.current_now and self.energy_rate: 426 self.remaining_time = self.energy / self.energy_rate 427 428 if self.charge_full > (self.charge_full_design * 1.5): 429 raise error.TestError('Unreasonable charge_full value') 430 if self.charge_now > (self.charge_full_design * 1.5): 431 raise error.TestError('Unreasonable charge_now value') 432 433 434class LineStatPlaceholder(DevStat): 435 """ 436 Placeholder line stat for devices which don't provide power_supply related 437 sysfs interface. 438 """ 439 def __init__(self): 440 self.online = True 441 442 443 def update(self): 444 pass 445 446class LineStat(DevStat): 447 """ 448 Power line status. 449 450 Fields: 451 452 bool online: Line power online 453 """ 454 455 linepower_fields = { 456 'is_online': ['online', int], 457 'status': ['status', str] 458 } 459 460 461 def __init__(self, path=None): 462 super(LineStat, self).__init__(self.linepower_fields, path) 463 logging.debug("line path: %s", path) 464 self.update() 465 466 467 def update(self): 468 self.read_all_vals() 469 self.online = self.is_online == 1 470 471 472class SysStat(object): 473 """ 474 System power status for a given host. 475 476 Fields: 477 478 battery: A BatteryStat object. 479 linepower: A list of LineStat objects. 480 """ 481 psu_types = ['Mains', 'USB', 'USB_ACA', 'USB_C', 'USB_CDP', 'USB_DCP', 482 'USB_PD', 'USB_PD_DRP', 'Unknown'] 483 484 def __init__(self): 485 power_supply_path = '/sys/class/power_supply/*' 486 self.battery = None 487 self.linepower = [] 488 self.thermal = None 489 self.battery_path = None 490 self.linepower_path = [] 491 492 power_supplies = glob.glob(power_supply_path) 493 for path in power_supplies: 494 type_path = os.path.join(path,'type') 495 if not os.path.exists(type_path): 496 continue 497 power_type = utils.read_one_line(type_path) 498 if power_type == 'Battery': 499 scope_path = os.path.join(path,'scope') 500 if (os.path.exists(scope_path) and 501 utils.read_one_line(scope_path) == 'Device'): 502 continue 503 self.battery_path = path 504 elif power_type in self.psu_types: 505 self.linepower_path.append(path) 506 507 if not self.battery_path or not self.linepower_path: 508 logging.warning("System does not provide power sysfs interface") 509 510 self.thermal = ThermalStat() 511 if self.battery_path: 512 self.sys_low_batt_p = float(utils.system_output( 513 'check_powerd_config --low_battery_shutdown_percent', 514 ignore_status=True) or 4.0) 515 516 517 def refresh(self): 518 """ 519 Initialize device power status objects. 520 """ 521 self.linepower = [] 522 523 if self.battery_path: 524 self.battery = BatteryStat(self.battery_path) 525 526 for path in self.linepower_path: 527 self.linepower.append(LineStat(path)) 528 if not self.linepower: 529 self.linepower = [ LineStatPlaceholder() ] 530 531 temp_str = self.thermal.get_temps() 532 if temp_str: 533 logging.info('Temperature reading: %s', temp_str) 534 else: 535 logging.error('Could not read temperature, skipping.') 536 537 538 def on_ac(self): 539 """ 540 Returns true if device is currently running from AC power. 541 """ 542 on_ac = False 543 for linepower in self.linepower: 544 on_ac |= linepower.online 545 546 # Butterfly can incorrectly report AC online for some time after 547 # unplug. Check battery discharge state to confirm. 548 if utils.get_board() == 'butterfly': 549 on_ac &= (not self.battery_discharging()) 550 return on_ac 551 552 553 def battery_charging(self): 554 """ 555 Returns true if battery is currently charging or false otherwise. 556 """ 557 for linepower in self.linepower: 558 if linepower.status == 'Charging': 559 return True 560 561 if not self.battery_path: 562 logging.warning('Unable to determine battery charge status') 563 return False 564 565 return self.battery.status.rstrip() == 'Charging' 566 567 568 def battery_discharging(self): 569 """ 570 Returns true if battery is currently discharging or false otherwise. 571 """ 572 if not self.battery_path: 573 logging.warning('Unable to determine battery discharge status') 574 return False 575 576 return self.battery.status.rstrip() == 'Discharging' 577 578 def battery_full(self): 579 """ 580 Returns true if battery is currently full or false otherwise. 581 """ 582 if not self.battery_path: 583 logging.warning('Unable to determine battery fullness status') 584 return False 585 586 return self.battery.status.rstrip() == 'Full' 587 588 589 def battery_discharge_ok_on_ac(self): 590 """Returns True if battery is ok to discharge on AC presently. 591 592 some devices cycle between charge & discharge above a certain 593 SoC. If AC is charging and SoC > 95% we can safely assume that. 594 """ 595 return self.battery_charging() and (self.percent_current_charge() > 95) 596 597 598 def percent_current_charge(self): 599 """Returns current charge compare to design capacity in percent. 600 """ 601 return self.battery.charge_now * 100 / \ 602 self.battery.charge_full_design 603 604 605 def percent_display_charge(self): 606 """Returns current display charge in percent. 607 """ 608 keyvals = parse_power_supply_info() 609 return float(keyvals['Battery']['display percentage']) 610 611 612 def assert_battery_state(self, percent_initial_charge_min): 613 """Check initial power configuration state is battery. 614 615 Args: 616 percent_initial_charge_min: float between 0 -> 1.00 of 617 percentage of battery that must be remaining. 618 None|0|False means check not performed. 619 620 Raises: 621 TestError: if one of battery assertions fails 622 """ 623 if self.on_ac(): 624 raise error.TestError( 625 'Running on AC power. Please remove AC power cable.') 626 627 percent_initial_charge = self.percent_current_charge() 628 629 if percent_initial_charge_min and percent_initial_charge < \ 630 percent_initial_charge_min: 631 raise error.TestError('Initial charge (%f) less than min (%f)' 632 % (percent_initial_charge, percent_initial_charge_min)) 633 634 def assert_battery_in_range(self, min_level, max_level): 635 """Raise a error.TestFail if the battery level is not in range.""" 636 current_percent = self.percent_display_charge() 637 if not (min_level <= current_percent <= max_level): 638 raise error.TestFail('battery must be in range [{}, {}]'.format( 639 min_level, max_level)) 640 641 def is_low_battery(self, low_batt_margin_p=2.0): 642 """Returns True if battery current charge is low 643 644 @param low_batt_margin_p: percentage of battery that would be added to 645 system low battery level. 646 """ 647 return (self.battery_discharging() and 648 self.percent_current_charge() < self.sys_low_batt_p + 649 low_batt_margin_p) 650 651 652def get_status(): 653 """ 654 Return a new power status object (SysStat). A new power status snapshot 655 for a given host can be obtained by either calling this routine again and 656 constructing a new SysStat object, or by using the refresh method of the 657 SysStat object. 658 """ 659 status = SysStat() 660 status.refresh() 661 return status 662 663 664def poll_for_charging_behavior(behavior, timeout): 665 """ 666 Wait up to |timeout| seconds for the charging behavior to become |behavior|. 667 668 @param behavior: One of 'ON_AC_AND_CHARGING', 669 'ON_AC_AND_NOT_CHARGING', 670 'NOT_ON_AC_AND_NOT_CHARGING'. 671 @param timeout: in seconds. 672 673 @raises: error.TestFail if the behavior does not match in time, or another 674 exception if something else fails along the way. 675 """ 676 ps = get_status() 677 678 def _verify_on_AC_and_charging(): 679 ps.refresh() 680 if not ps.on_ac(): 681 raise error.TestFail('Device is not on AC, but should be') 682 if not ps.battery_charging(): 683 raise error.TestFail('Device is not charging, but should be') 684 return True 685 686 def _verify_on_AC_and_not_charging(): 687 ps.refresh() 688 if not ps.on_ac(): 689 raise error.TestFail('Device is not on AC, but should be') 690 if ps.battery_charging(): 691 raise error.TestFail('Device is charging, but should not be') 692 return True 693 694 def _verify_not_on_AC_and_not_charging(): 695 ps.refresh() 696 if ps.on_ac(): 697 raise error.TestFail('Device is on AC, but should not be') 698 return True 699 700 poll_functions = { 701 'ON_AC_AND_CHARGING' : _verify_on_AC_and_charging, 702 'ON_AC_AND_NOT_CHARGING' : _verify_on_AC_and_not_charging, 703 'NOT_ON_AC_AND_NOT_CHARGING': _verify_not_on_AC_and_not_charging, 704 } 705 poll_for_condition_ex(poll_functions[behavior], 706 timeout=timeout, 707 sleep_interval=1) 708 709class AbstractStats(object): 710 """ 711 Common superclass for measurements of percentages per state over time. 712 713 Public Attributes: 714 incremental: If False, stats returned are from a single 715 _read_stats. Otherwise, stats are from the difference between 716 the current and last refresh. 717 """ 718 719 @staticmethod 720 def to_percent(stats): 721 """ 722 Turns a dict with absolute time values into a dict with percentages. 723 """ 724 total = sum(stats.values()) 725 if total == 0: 726 return {k: 0 for k in stats} 727 return dict((k, v * 100.0 / total) for (k, v) in stats.items()) 728 729 730 @staticmethod 731 def do_diff(new, old): 732 """ 733 Returns a dict with value deltas from two dicts with matching keys. 734 """ 735 return dict((k, new[k] - old.get(k, 0)) for k in new.keys()) 736 737 738 @staticmethod 739 def format_results_percent(results, name, percent_stats): 740 """ 741 Formats autotest result keys to format: 742 percent_<name>_<key>_time 743 """ 744 for key in percent_stats: 745 results['percent_%s_%s_time' % (name, key)] = percent_stats[key] 746 747 748 @staticmethod 749 def format_results_wavg(results, name, wavg): 750 """ 751 Add an autotest result keys to format: wavg_<name> 752 """ 753 if wavg is not None: 754 results['wavg_%s' % (name)] = wavg 755 756 757 def __init__(self, name, incremental=True): 758 self.name = name 759 self.incremental = incremental 760 self._stats = self._read_stats() 761 self._first_stats = self._stats.copy() 762 763 def refresh(self): 764 """ 765 Returns dict mapping state names to percentage of time spent in them. 766 """ 767 raw_stats = result = self._read_stats() 768 if self.incremental: 769 result = self.do_diff(result, self._stats) 770 self._stats = raw_stats 771 return self.to_percent(result) 772 773 774 def _automatic_weighted_average(self): 775 """ 776 Turns a dict with absolute times (or percentages) into a weighted 777 average value. 778 """ 779 stats = self._stats 780 if self.incremental: 781 stats = self.do_diff(stats, self._first_stats) 782 783 total = sum(stats.values()) 784 if total == 0: 785 return None 786 787 return sum(float(k) * v / total for k, v in stats.items()) 788 789 def _supports_automatic_weighted_average(self): 790 """ 791 Override! 792 793 Returns True if stats collected can be automatically converted from 794 percent distribution to weighted average. False otherwise. 795 """ 796 return False 797 798 799 def weighted_average(self): 800 """ 801 Return weighted average calculated using the automated average method 802 (if supported) or using a custom method defined by the stat. 803 """ 804 if self._supports_automatic_weighted_average(): 805 return self._automatic_weighted_average() 806 807 return self._weighted_avg_fn() 808 809 810 def _weighted_avg_fn(self): 811 """ 812 Override! Custom weighted average function. 813 814 Returns weighted average as a single floating point value. 815 """ 816 return None 817 818 819 def _read_stats(self): 820 """ 821 Override! Reads the raw data values that shall be measured into a dict. 822 """ 823 raise NotImplementedError('Override _read_stats in the subclass!') 824 825 826CPU_BASE_PATH = '/sys/devices/system/cpu/' 827 828def count_all_cpus(): 829 """ 830 Return count of cpus on system. 831 """ 832 path = '%s/cpu[0-9]*' % CPU_BASE_PATH 833 return len(glob.glob(path)) 834 835def get_online_cpus(): 836 """ 837 Return frozenset of integer cpu numbers that are online. 838 """ 839 return frozenset(read_cpu_set('/sys/devices/system/cpu/online')) 840 841def get_cpus_filepaths_for_suffix(cpus, suffix): 842 """ 843 For each cpu in |cpus| check whether |CPU_BASE_PATH|/cpu%d/|suffix| exists. 844 Return tuple of two lists t: 845 t[0]: all cpu ids where the condition above holds 846 t[1]: all full paths where condition above holds. 847 """ 848 available_cpus = [] 849 available_paths = [] 850 for c in cpus: 851 c_file_path = os.path.join(CPU_BASE_PATH, 'cpu%d' % c, suffix) 852 if os.path.exists(c_file_path): 853 available_cpus.append(c) 854 available_paths.append(c_file_path) 855 return (available_cpus, available_paths) 856 857 858class CPUFreqStatsPState(AbstractStats): 859 """ 860 CPU Frequency statistics for intel_pstate 861 """ 862 MSR_PLATFORM_INFO = 0xce 863 MSR_IA32_MPERF = 0xe7 864 MSR_IA32_APERF = 0xe8 865 866 def __init__(self, cpus=None): 867 name = 'cpufreq' 868 if not cpus: 869 cpus = get_online_cpus() 870 self._cpus = cpus 871 872 if len(cpus) and len(cpus) < count_all_cpus(): 873 name = '%s_%s' % (name, '_'.join([str(c) for c in cpus])) 874 875 self._initial_perf = None 876 self._current_perf = None 877 878 # max_freq is supposed to be the same for all CPUs and remain 879 # constant throughout. So, we set the entry only once. 880 # Note that this is max non-turbo frequency, some CPU can run at 881 # higher turbo frequency in some condition. 882 platform_info = utils.rdmsr(self.MSR_PLATFORM_INFO) 883 mul = platform_info >> 8 & 0xff 884 bclk = utils.get_intel_bclk_khz() 885 self._max_freq = mul * bclk 886 887 super(CPUFreqStatsPState, self).__init__(name) 888 889 def _read_stats(self): 890 aperf = 0 891 mperf = 0 892 893 for cpu in self._cpus: 894 aperf += utils.rdmsr(self.MSR_IA32_APERF, cpu) 895 mperf += utils.rdmsr(self.MSR_IA32_MPERF, cpu) 896 897 if not self._initial_perf: 898 self._initial_perf = (aperf, mperf) 899 900 self._current_perf = (aperf, mperf) 901 902 return {} 903 904 def _weighted_avg_fn(self): 905 if (self._current_perf 906 and self._current_perf[1] != self._initial_perf[1]): 907 # Avg freq = max_freq * aperf_delta / mperf_delta 908 return self._max_freq * \ 909 float(self._current_perf[0] - self._initial_perf[0]) / \ 910 (self._current_perf[1] - self._initial_perf[1]) 911 return 1.0 912 913 914class CPUFreqStats(AbstractStats): 915 """ 916 CPU Frequency statistics 917 """ 918 919 def __init__(self, cpus=None): 920 name = 'cpufreq' 921 stats_suffix = 'cpufreq/stats/time_in_state' 922 key_suffix = 'cpufreq/scaling_available_frequencies' 923 if not cpus: 924 cpus = get_online_cpus() 925 _, self._file_paths = get_cpus_filepaths_for_suffix(cpus, stats_suffix) 926 927 if len(cpus) and len(cpus) < count_all_cpus(): 928 name = '%s_%s' % (name, '_'.join([str(c) for c in cpus])) 929 self._cpus = cpus 930 self._available_freqs = set() 931 932 if not self._file_paths: 933 logging.debug('time_in_state file not found') 934 935 # assumes cpufreq driver for CPU0 is the same as the others. 936 _, cpufreq_key_paths = get_cpus_filepaths_for_suffix(cpus, key_suffix) 937 for path in cpufreq_key_paths: 938 self._available_freqs |= set( 939 int(x) for x in utils.read_file(path).split()) 940 941 super(CPUFreqStats, self).__init__(name) 942 943 def _read_stats(self): 944 stats = dict((k, 0) for k in self._available_freqs) 945 for path in self._file_paths: 946 if not os.path.exists(path): 947 logging.debug('%s is not found', path) 948 continue 949 950 data = utils.read_file(path) 951 for line in data.splitlines(): 952 pair = line.split() 953 freq = int(pair[0]) 954 timeunits = int(pair[1]) 955 if freq in stats: 956 stats[freq] += timeunits 957 else: 958 stats[freq] = timeunits 959 return stats 960 961 def _supports_automatic_weighted_average(self): 962 return True 963 964 965class CPUCStateStats(AbstractStats): 966 """ 967 Base class for C-state residency statistics 968 """ 969 def __init__(self, name, non_c0_stat=''): 970 self._non_c0_stat = non_c0_stat 971 super(CPUCStateStats, self).__init__(name) 972 973 974 def to_percent(self, stats): 975 """ 976 Turns a dict with absolute time values into a dict with percentages. 977 Ignore the |non_c0_stat_name| which is aggegate stat in the total count. 978 """ 979 total = sum(v for k, v in stats.items() if k != self._non_c0_stat) 980 if total == 0: 981 return {k: 0 for k in stats} 982 return {k: v * 100.0 / total for k, v in stats.items()} 983 984 985class CPUIdleStats(CPUCStateStats): 986 """ 987 CPU Idle statistics (refresh() will not work with incremental=False!) 988 """ 989 # TODO (snanda): Handle changes in number of c-states due to events such 990 # as ac <-> battery transitions. 991 # TODO (snanda): Handle non-S0 states. Time spent in suspend states is 992 # currently not factored out. 993 def __init__(self, cpus=None): 994 name = 'cpuidle' 995 cpuidle_suffix = 'cpuidle' 996 if not cpus: 997 cpus = get_online_cpus() 998 cpus, self._cpus = get_cpus_filepaths_for_suffix(cpus, cpuidle_suffix) 999 if len(cpus) and len(cpus) < count_all_cpus(): 1000 name = '%s_%s' % (name, '_'.join([str(c) for c in cpus])) 1001 super(CPUIdleStats, self).__init__(name=name, non_c0_stat='non-C0') 1002 1003 1004 def _read_stats(self): 1005 cpuidle_stats = collections.defaultdict(int) 1006 epoch_usecs = int(time.time() * 1000 * 1000) 1007 for cpu in self._cpus: 1008 state_path = os.path.join(cpu, 'state*') 1009 states = glob.glob(state_path) 1010 cpuidle_stats['C0'] += epoch_usecs 1011 1012 for state in states: 1013 name = utils.read_one_line(os.path.join(state, 'name')) 1014 latency = utils.read_one_line(os.path.join(state, 'latency')) 1015 1016 if not int(latency) and name == 'POLL': 1017 # C0 state. Kernel stats aren't right, so calculate by 1018 # subtracting all other states from total time (using epoch 1019 # timer since we calculate differences in the end anyway). 1020 # NOTE: Only x86 lists C0 under cpuidle, ARM does not. 1021 continue 1022 1023 usecs = int(utils.read_one_line(os.path.join(state, 'time'))) 1024 cpuidle_stats['C0'] -= usecs 1025 1026 if name == '<null>': 1027 # Kernel race condition that can happen while a new C-state 1028 # gets added (e.g. AC->battery). Don't know the 'name' of 1029 # the state yet, but its 'time' would be 0 anyway. 1030 logging.warning('Read name: <null>, time: %d from %s...' 1031 'skipping.', usecs, state) 1032 continue 1033 1034 cpuidle_stats[name] += usecs 1035 cpuidle_stats['non-C0'] += usecs 1036 1037 return cpuidle_stats 1038 1039 1040class CPUPackageStats(CPUCStateStats): 1041 """ 1042 Package C-state residency statistics for modern Intel CPUs. 1043 """ 1044 1045 ATOM = {'C2': 0x3F8, 'C4': 0x3F9, 'C6': 0x3FA} 1046 NEHALEM = {'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA} 1047 SANDY_BRIDGE = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA} 1048 SILVERMONT = {'C6': 0x3FA} 1049 GOLDMONT = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9,'C10': 0x632} 1050 BROADWELL = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA, 1051 'C8': 0x630, 'C9': 0x631,'C10': 0x632} 1052 1053 def __init__(self): 1054 def _get_platform_states(): 1055 """ 1056 Helper to decide what set of microarchitecture-specific MSRs to use. 1057 1058 Returns: dict that maps C-state name to MSR address, or None. 1059 """ 1060 cpu_uarch = utils.get_intel_cpu_uarch() 1061 1062 return { 1063 # model groups pulled from Intel SDM, volume 4 1064 # Group same package cstate using the older uarch name 1065 # 1066 # TODO(harry.pan): As the keys represent microarchitecture 1067 # names, we could consider to rename the PC state groups 1068 # to avoid ambiguity. 1069 'Airmont': self.SILVERMONT, 1070 'Atom': self.ATOM, 1071 'Broadwell': self.BROADWELL, 1072 'Comet Lake': self.BROADWELL, 1073 'Goldmont': self.GOLDMONT, 1074 'Haswell': self.SANDY_BRIDGE, 1075 'Ice Lake': self.BROADWELL, 1076 'Ivy Bridge': self.SANDY_BRIDGE, 1077 'Ivy Bridge-E': self.SANDY_BRIDGE, 1078 'Kaby Lake': self.BROADWELL, 1079 'Nehalem': self.NEHALEM, 1080 'Sandy Bridge': self.SANDY_BRIDGE, 1081 'Silvermont': self.SILVERMONT, 1082 'Skylake': self.BROADWELL, 1083 'Tiger Lake': self.BROADWELL, 1084 'Alder Lake': self.BROADWELL, 1085 'Tremont': self.GOLDMONT, 1086 'Westmere': self.NEHALEM, 1087 }.get(cpu_uarch, None) 1088 1089 self._platform_states = _get_platform_states() 1090 super(CPUPackageStats, self).__init__(name='cpupkg', 1091 non_c0_stat='non-C0_C1') 1092 1093 1094 def _read_stats(self): 1095 packages = set() 1096 template = '/sys/devices/system/cpu/cpu%s/topology/physical_package_id' 1097 if not self._platform_states: 1098 return {} 1099 stats = dict((state, 0) for state in self._platform_states) 1100 stats['C0_C1'] = 0 1101 stats['non-C0_C1'] = 0 1102 1103 for cpu in os.listdir('/dev/cpu'): 1104 if not os.path.exists(template % cpu): 1105 continue 1106 package = utils.read_one_line(template % cpu) 1107 if package in packages: 1108 continue 1109 packages.add(package) 1110 1111 stats['C0_C1'] += utils.rdmsr(0x10, cpu) # TSC 1112 for (state, msr) in self._platform_states.items(): 1113 ticks = utils.rdmsr(msr, cpu) 1114 stats[state] += ticks 1115 stats['non-C0_C1'] += ticks 1116 stats['C0_C1'] -= ticks 1117 1118 return stats 1119 1120 1121class DevFreqStats(AbstractStats): 1122 """ 1123 Devfreq device frequency stats. 1124 """ 1125 1126 _DIR = '/sys/class/devfreq' 1127 1128 1129 def __init__(self, path): 1130 """Constructs DevFreqStats Object that track frequency stats 1131 for the path of the given Devfreq device. 1132 1133 The frequencies for devfreq devices are listed in Hz. 1134 1135 Args: 1136 path: the path to the devfreq device 1137 1138 Example: 1139 /sys/class/devfreq/dmc 1140 """ 1141 self._path = os.path.join(self._DIR, path) 1142 if not os.path.exists(self._path): 1143 raise error.TestError( 1144 'DevFreqStats: devfreq device does not exist: %s' % 1145 self._path) 1146 1147 fname = os.path.join(self._path, 'available_frequencies') 1148 af = utils.read_one_line(fname).strip() 1149 self._available_freqs = sorted(af.split(), key=int) 1150 1151 super(DevFreqStats, self).__init__(path) 1152 1153 def _read_stats(self): 1154 stats = dict((freq, 0) for freq in self._available_freqs) 1155 fname = os.path.join(self._path, 'trans_stat') 1156 1157 with open(fname) as fd: 1158 # The lines that contain the time in each frequency start on the 3rd 1159 # line, so skip the first 2 lines. The last line contains the number 1160 # of transitions, so skip that line too. 1161 # The time in each frequency is at the end of the line. 1162 freq_pattern = re.compile(r'\d+(?=:)') 1163 for line in fd.readlines()[2:-1]: 1164 freq = freq_pattern.search(line) 1165 if freq and freq.group() in self._available_freqs: 1166 stats[freq.group()] = int(line.strip().split()[-1]) 1167 1168 return stats 1169 1170 1171class GPUFreqStats(AbstractStats): 1172 """GPU Frequency statistics class. 1173 1174 TODO(tbroch): add stats for other GPUs 1175 """ 1176 1177 _MALI_DEV = '/sys/class/misc/mali0/device' 1178 _MALI_EVENTS = ['mali_dvfs:mali_dvfs_set_clock'] 1179 _MALI_TRACE_CLK_RE = \ 1180 r'kworker.* (\d+\.\d+): mali_dvfs_set_clock: frequency=(\d+)\d{6}' 1181 1182 _I915_ROOT = '/sys/kernel/debug/dri/0' 1183 _I915_EVENTS = ['i915:intel_gpu_freq_change'] 1184 _I915_CLKS_FILES = ['i915_cur_delayinfo', 'i915_frequency_info'] 1185 _I915_TRACE_CLK_RE = \ 1186 r'kworker.* (\d+\.\d+): intel_gpu_freq_change: new_freq=(\d+)' 1187 _I915_CUR_FREQ_RE = r'CAGF:\s+(\d+)MHz' 1188 _I915_MIN_FREQ_RE = r'Lowest \(RPN\) frequency:\s+(\d+)MHz' 1189 _I915_MAX_FREQ_RE = r'Max non-overclocked \(RP0\) frequency:\s+(\d+)MHz' 1190 # There are 6 frequency steps per 100 MHz 1191 _I915_FREQ_STEPS = [0, 17, 33, 50, 67, 83] 1192 1193 _gpu_type = None 1194 1195 1196 def _get_mali_freqs(self): 1197 """Get mali clocks based on kernel version. 1198 1199 For 3.8-3.18: 1200 # cat /sys/class/misc/mali0/device/clock 1201 100000000 1202 # cat /sys/class/misc/mali0/device/available_frequencies 1203 100000000 1204 160000000 1205 266000000 1206 350000000 1207 400000000 1208 450000000 1209 533000000 1210 533000000 1211 1212 For 4.4+: 1213 Tracked in DevFreqStats 1214 1215 Returns: 1216 cur_mhz: string of current GPU clock in mhz 1217 """ 1218 cur_mhz = None 1219 fqs = [] 1220 1221 fname = os.path.join(self._MALI_DEV, 'clock') 1222 if os.path.exists(fname): 1223 cur_mhz = str(int(int(utils.read_one_line(fname).strip()) / 1e6)) 1224 fname = os.path.join(self._MALI_DEV, 'available_frequencies') 1225 with open(fname) as fd: 1226 for ln in fd.readlines(): 1227 freq = int(int(ln.strip()) / 1e6) 1228 fqs.append(str(freq)) 1229 fqs.sort() 1230 1231 self._freqs = fqs 1232 return cur_mhz 1233 1234 1235 def __init__(self, incremental=False): 1236 1237 1238 min_mhz = None 1239 max_mhz = None 1240 cur_mhz = None 1241 events = None 1242 i915_path = None 1243 self._freqs = [] 1244 self._prev_sample = None 1245 self._trace = None 1246 1247 if os.path.exists(self._MALI_DEV) and \ 1248 not os.path.exists(os.path.join(self._MALI_DEV, "devfreq")): 1249 self._set_gpu_type('mali') 1250 else: 1251 for file_name in self._I915_CLKS_FILES: 1252 full_path = os.path.join(self._I915_ROOT, file_name) 1253 if os.path.exists(full_path): 1254 self._set_gpu_type('i915') 1255 i915_path = full_path 1256 break 1257 else: 1258 # We either don't know how to track GPU stats (yet) or the stats 1259 # are tracked in DevFreqStats. 1260 self._set_gpu_type(None) 1261 1262 logging.debug("gpu_type is %s", self._gpu_type) 1263 1264 if self._gpu_type is 'mali': 1265 events = self._MALI_EVENTS 1266 cur_mhz = self._get_mali_freqs() 1267 if self._freqs: 1268 min_mhz = self._freqs[0] 1269 max_mhz = self._freqs[-1] 1270 1271 elif self._gpu_type is 'i915': 1272 events = self._I915_EVENTS 1273 with open(i915_path) as fd: 1274 for ln in fd.readlines(): 1275 logging.debug("ln = %s", ln.strip()) 1276 result = re.findall(self._I915_CUR_FREQ_RE, ln) 1277 if result: 1278 cur_mhz = result[0] 1279 continue 1280 result = re.findall(self._I915_MIN_FREQ_RE, ln) 1281 if result: 1282 min_mhz = result[0] 1283 continue 1284 result = re.findall(self._I915_MAX_FREQ_RE, ln) 1285 if result: 1286 max_mhz = result[0] 1287 continue 1288 if min_mhz and max_mhz: 1289 real_min_mhz = min(int(min_mhz), int(cur_mhz)) 1290 for i in range(real_min_mhz, int(max_mhz) + 1): 1291 if i % 100 in self._I915_FREQ_STEPS: 1292 self._freqs.append(str(i)) 1293 1294 logging.debug("cur_mhz = %s, min_mhz = %s, max_mhz = %s", cur_mhz, 1295 min_mhz, max_mhz) 1296 1297 if cur_mhz and min_mhz and max_mhz: 1298 self._trace = kernel_trace.KernelTrace(events=events) 1299 1300 # Not all platforms or kernel versions support tracing. 1301 if not self._trace or not self._trace.is_tracing(): 1302 logging.warning("GPU frequency tracing not enabled.") 1303 else: 1304 self._prev_sample = (cur_mhz, self._trace.uptime_secs()) 1305 logging.debug("Current GPU freq: %s", cur_mhz) 1306 logging.debug("All GPU freqs: %s", self._freqs) 1307 1308 super(GPUFreqStats, self).__init__('gpufreq', incremental=incremental) 1309 1310 1311 @classmethod 1312 def _set_gpu_type(cls, gpu_type): 1313 cls._gpu_type = gpu_type 1314 1315 1316 def _read_stats(self): 1317 if self._gpu_type: 1318 return getattr(self, "_%s_read_stats" % self._gpu_type)() 1319 return {} 1320 1321 1322 def _trace_read_stats(self, regexp): 1323 """Read GPU stats from kernel trace outputs. 1324 1325 Args: 1326 regexp: regular expression to match trace output for frequency 1327 1328 Returns: 1329 Dict with key string in mhz and val float in seconds. 1330 """ 1331 if not self._prev_sample: 1332 return {} 1333 1334 stats = dict((k, 0.0) for k in self._freqs) 1335 results = self._trace.read(regexp=regexp) 1336 for (tstamp_str, freq) in results: 1337 tstamp = float(tstamp_str) 1338 1339 # do not reparse lines in trace buffer 1340 if tstamp <= self._prev_sample[1]: 1341 continue 1342 delta = tstamp - self._prev_sample[1] 1343 logging.debug("freq:%s tstamp:%f - %f delta:%f", 1344 self._prev_sample[0], 1345 tstamp, self._prev_sample[1], 1346 delta) 1347 stats[self._prev_sample[0]] += delta 1348 self._prev_sample = (freq, tstamp) 1349 1350 # Do last record 1351 delta = self._trace.uptime_secs() - self._prev_sample[1] 1352 logging.debug("freq:%s tstamp:uptime - %f delta:%f", 1353 self._prev_sample[0], 1354 self._prev_sample[1], delta) 1355 stats[self._prev_sample[0]] += delta 1356 1357 logging.debug("GPU freq percents:%s", stats) 1358 return stats 1359 1360 1361 def _mali_read_stats(self): 1362 """Read Mali GPU stats 1363 1364 Frequencies are reported in Hz, so use a regex that drops the last 6 1365 digits. 1366 1367 Output in trace looks like this: 1368 1369 kworker/u:24-5220 [000] .... 81060.329232: mali_dvfs_set_clock: frequency=400 1370 kworker/u:24-5220 [000] .... 81061.830128: mali_dvfs_set_clock: frequency=350 1371 1372 Returns: 1373 Dict with frequency in mhz as key and float in seconds for time 1374 spent at that frequency. 1375 """ 1376 return self._trace_read_stats(self._MALI_TRACE_CLK_RE) 1377 1378 1379 def _i915_read_stats(self): 1380 """Read i915 GPU stats. 1381 1382 Output looks like this (kernel >= 3.8): 1383 1384 kworker/u:0-28247 [000] .... 259391.579610: intel_gpu_freq_change: new_freq=400 1385 kworker/u:0-28247 [000] .... 259391.581797: intel_gpu_freq_change: new_freq=350 1386 1387 Returns: 1388 Dict with frequency in mhz as key and float in seconds for time 1389 spent at that frequency. 1390 """ 1391 return self._trace_read_stats(self._I915_TRACE_CLK_RE) 1392 1393 1394 def _supports_automatic_weighted_average(self): 1395 return self._gpu_type is not None 1396 1397 1398class USBSuspendStats(AbstractStats): 1399 """ 1400 USB active/suspend statistics (over all devices) 1401 """ 1402 # TODO (snanda): handle hot (un)plugging of USB devices 1403 # TODO (snanda): handle duration counters wraparound 1404 1405 def __init__(self): 1406 usb_stats_path = '/sys/bus/usb/devices/*/power' 1407 self._file_paths = glob.glob(usb_stats_path) 1408 if not self._file_paths: 1409 logging.debug('USB stats path not found') 1410 super(USBSuspendStats, self).__init__('usb') 1411 1412 1413 def _read_stats(self): 1414 usb_stats = {'active': 0, 'suspended': 0} 1415 1416 for path in self._file_paths: 1417 active_duration_path = os.path.join(path, 'active_duration') 1418 total_duration_path = os.path.join(path, 'connected_duration') 1419 1420 if not os.path.exists(active_duration_path) or \ 1421 not os.path.exists(total_duration_path): 1422 logging.debug('duration paths do not exist for: %s', path) 1423 continue 1424 1425 active = int(utils.read_file(active_duration_path)) 1426 total = int(utils.read_file(total_duration_path)) 1427 logging.debug('device %s active for %.2f%%', 1428 path, active * 100.0 / total) 1429 1430 usb_stats['active'] += active 1431 usb_stats['suspended'] += total - active 1432 1433 return usb_stats 1434 1435 1436def read_cpu_set(filename): 1437 """ 1438 Parse data of form "0,2-4,9" 1439 1440 Return a set of ints 1441 """ 1442 data = utils.read_file(filename) 1443 ret = set() 1444 1445 for entry in data.split(','): 1446 entry_data = entry.split('-') 1447 start = end = int(entry_data[0]) 1448 if len(entry_data) > 1: 1449 end = int(entry_data[1]) 1450 ret |= set(range(start, end + 1)) 1451 return ret 1452 1453 1454def get_cpu_sibling_groups(): 1455 """ 1456 Get CPU core groups in HMP systems. 1457 1458 In systems with both small core and big core, 1459 returns groups of small and big sibling groups. 1460 """ 1461 siblings_suffix = 'topology/core_siblings_list' 1462 sibling_groups = [] 1463 cpus_processed = set() 1464 cpus, sibling_file_paths = get_cpus_filepaths_for_suffix(get_online_cpus(), 1465 siblings_suffix) 1466 for c, siblings_path in zip(cpus, sibling_file_paths): 1467 if c in cpus_processed: 1468 # This cpu is already part of a sibling group. Skip. 1469 continue 1470 sibling_group = read_cpu_set(siblings_path) 1471 cpus_processed |= sibling_group 1472 sibling_groups.append(frozenset(sibling_group)) 1473 return tuple(sibling_groups) 1474 1475 1476def get_available_cpu_stats(): 1477 """Return CPUFreq/CPUIdleStats groups by big-small siblings groups.""" 1478 ret = [CPUPackageStats()] 1479 cpu_sibling_groups = get_cpu_sibling_groups() 1480 1481 cpufreq_stat_class = CPUFreqStats 1482 # assumes cpufreq driver for CPU0 is the same as the others. 1483 cpufreq_driver = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver' 1484 if (os.path.exists(cpufreq_driver) and 1485 utils.read_one_line(cpufreq_driver) == 'intel_pstate'): 1486 logging.debug('intel_pstate driver active') 1487 cpufreq_stat_class = CPUFreqStatsPState 1488 1489 if not cpu_sibling_groups: 1490 ret.append(cpufreq_stat_class()) 1491 ret.append(CPUIdleStats()) 1492 for cpu_group in cpu_sibling_groups: 1493 ret.append(cpufreq_stat_class(cpu_group)) 1494 ret.append(CPUIdleStats(cpu_group)) 1495 if has_rc6_support(): 1496 ret.append(GPURC6Stats()) 1497 return ret 1498 1499 1500class StatoMatic(object): 1501 """Class to aggregate and monitor a bunch of power related statistics.""" 1502 def __init__(self): 1503 self._start_uptime_secs = kernel_trace.KernelTrace.uptime_secs() 1504 self._astats = [USBSuspendStats(), GPUFreqStats(incremental=False)] 1505 self._astats.extend(get_available_cpu_stats()) 1506 if os.path.isdir(DevFreqStats._DIR): 1507 self._astats.extend([DevFreqStats(f) for f in \ 1508 os.listdir(DevFreqStats._DIR)]) 1509 1510 self._disk = DiskStateLogger() 1511 self._disk.start() 1512 1513 1514 def publish(self): 1515 """Publishes results of various statistics gathered. 1516 1517 Returns: 1518 dict with 1519 key = string 'percent_<name>_<key>_time' 1520 value = float in percent 1521 """ 1522 results = {} 1523 tot_secs = kernel_trace.KernelTrace.uptime_secs() - \ 1524 self._start_uptime_secs 1525 for stat_obj in self._astats: 1526 percent_stats = stat_obj.refresh() 1527 logging.debug("pstats = %s", percent_stats) 1528 if stat_obj.name is 'gpu': 1529 # TODO(tbroch) remove this once GPU freq stats have proved 1530 # reliable 1531 stats_secs = sum(stat_obj._stats.values()) 1532 if stats_secs < (tot_secs * 0.9) or \ 1533 stats_secs > (tot_secs * 1.1): 1534 logging.warning('%s stats dont look right. Not publishing.', 1535 stat_obj.name) 1536 continue 1537 new_res = {} 1538 AbstractStats.format_results_percent(new_res, stat_obj.name, 1539 percent_stats) 1540 wavg = stat_obj.weighted_average() 1541 if wavg: 1542 AbstractStats.format_results_wavg(new_res, stat_obj.name, wavg) 1543 1544 results.update(new_res) 1545 1546 new_res = {} 1547 if self._disk.get_error(): 1548 new_res['disk_logging_error'] = str(self._disk.get_error()) 1549 else: 1550 AbstractStats.format_results_percent(new_res, 'disk', 1551 self._disk.result()) 1552 results.update(new_res) 1553 1554 return results 1555 1556 1557class PowerMeasurement(object): 1558 """Class to measure power. 1559 1560 Public attributes: 1561 domain: String name of the power domain being measured. Example is 1562 'system' for total system power 1563 1564 Public methods: 1565 refresh: Performs any power/energy sampling and calculation and returns 1566 power as float in watts. This method MUST be implemented in 1567 subclass. 1568 """ 1569 1570 def __init__(self, domain): 1571 """Constructor.""" 1572 self.domain = domain 1573 1574 1575 def refresh(self): 1576 """Performs any power/energy sampling and calculation. 1577 1578 MUST be implemented in subclass 1579 1580 Returns: 1581 float, power in watts. 1582 """ 1583 raise NotImplementedError("'refresh' method should be implemented in " 1584 "subclass.") 1585 1586 1587def parse_power_supply_info(): 1588 """Parses power_supply_info command output. 1589 1590 Command output from power_manager ( tools/power_supply_info.cc ) looks like 1591 this: 1592 1593 Device: Line Power 1594 path: /sys/class/power_supply/cros_ec-charger 1595 ... 1596 Device: Battery 1597 path: /sys/class/power_supply/sbs-9-000b 1598 ... 1599 1600 """ 1601 rv = collections.defaultdict(dict) 1602 dev = None 1603 for ln in utils.system_output('power_supply_info').splitlines(): 1604 logging.debug("psu: %s", ln) 1605 result = re.findall(r'^Device:\s+(.*)', ln) 1606 if result: 1607 dev = result[0] 1608 continue 1609 result = re.findall(r'\s+(.+):\s+(.+)', ln) 1610 if result and dev: 1611 kname = re.findall(r'(.*)\s+\(\w+\)', result[0][0]) 1612 if kname: 1613 rv[dev][kname[0]] = result[0][1] 1614 else: 1615 rv[dev][result[0][0]] = result[0][1] 1616 1617 return rv 1618 1619 1620class SystemPower(PowerMeasurement): 1621 """Class to measure system power. 1622 1623 TODO(tbroch): This class provides a subset of functionality in BatteryStat 1624 in hopes of minimizing power draw. Investigate whether its really 1625 significant and if not, deprecate. 1626 1627 Private Attributes: 1628 _voltage_file: path to retrieve voltage in uvolts 1629 _current_file: path to retrieve current in uamps 1630 """ 1631 1632 def __init__(self, battery_dir): 1633 """Constructor. 1634 1635 Args: 1636 battery_dir: path to dir containing the files to probe and log. 1637 usually something like /sys/class/power_supply/BAT0/ 1638 """ 1639 super(SystemPower, self).__init__('system') 1640 # Files to log voltage and current from 1641 self._voltage_file = os.path.join(battery_dir, 'voltage_now') 1642 self._current_file = os.path.join(battery_dir, 'current_now') 1643 1644 1645 def refresh(self): 1646 """refresh method. 1647 1648 See superclass PowerMeasurement for details. 1649 """ 1650 keyvals = parse_power_supply_info() 1651 return float(keyvals['Battery']['energy rate']) 1652 1653 1654class BatteryStateOfCharge(PowerMeasurement): 1655 """Class for logging battery state of charge.""" 1656 1657 def __init__(self): 1658 """Constructor.""" 1659 super(BatteryStateOfCharge, self).__init__('battery_soc') 1660 1661 def refresh(self): 1662 """refresh method. 1663 1664 See superclass PowerMeasurement for details. 1665 """ 1666 keyvals = parse_power_supply_info() 1667 return float(keyvals['Battery']['percentage']) 1668 1669 1670class CheckpointLogger(object): 1671 """Class to log checkpoint data. 1672 1673 Public attributes: 1674 checkpoint_data: dictionary of (tname, tlist). 1675 tname: String of testname associated with these time intervals 1676 tlist: list of tuples. Tuple contains: 1677 tstart: Float of time when subtest started 1678 tend: Float of time when subtest ended 1679 1680 Public methods: 1681 start: records a start timestamp 1682 checkpoint 1683 checkblock 1684 save_checkpoint_data 1685 load_checkpoint_data 1686 1687 Static methods: 1688 load_checkpoint_data_static 1689 1690 Private attributes: 1691 _start_time: start timestamp for checkpoint logger 1692 """ 1693 1694 def __init__(self): 1695 self.checkpoint_data = collections.defaultdict(list) 1696 self.start() 1697 1698 # If multiple MeasurementLoggers call start() on the same CheckpointLogger, 1699 # the latest one will register start time. 1700 def start(self): 1701 """Set start time for CheckpointLogger.""" 1702 self._start_time = time.time() 1703 1704 @contextlib.contextmanager 1705 def checkblock(self, tname=''): 1706 """Check point for the following block with test tname. 1707 1708 Args: 1709 tname: String of testname associated with this time interval 1710 """ 1711 start_time = time.time() 1712 yield 1713 self.checkpoint(tname, start_time) 1714 1715 def checkpoint(self, tname='', tstart=None, tend=None): 1716 """Check point the times in seconds associated with test tname. 1717 1718 Args: 1719 tname: String of testname associated with this time interval 1720 tstart: Float in seconds of when tname test started. Should be based 1721 off time.time(). If None, use start timestamp for the checkpoint 1722 logger. 1723 tend: Float in seconds of when tname test ended. Should be based 1724 off time.time(). If None, then value computed in the method. 1725 """ 1726 if not tstart and self._start_time: 1727 tstart = self._start_time 1728 if not tend: 1729 tend = time.time() 1730 self.checkpoint_data[tname].append((tstart, tend)) 1731 logging.info('Finished test "%s" between timestamps [%s, %s]', 1732 tname, tstart, tend) 1733 1734 def convert_relative(self, start_time=None): 1735 """Convert data from power_status.CheckpointLogger object to relative 1736 checkpoint data dictionary. Timestamps are converted to time in seconds 1737 since the test started. 1738 1739 Args: 1740 start_time: Float in seconds of the desired start time reference. 1741 Should be based off time.time(). If None, use start timestamp 1742 for the checkpoint logger. 1743 """ 1744 if start_time is None: 1745 start_time = self._start_time 1746 1747 checkpoint_dict = {} 1748 for tname, tlist in self.checkpoint_data.items(): 1749 checkpoint_dict[tname] = [(tstart - start_time, tend - start_time) 1750 for tstart, tend in tlist] 1751 1752 return checkpoint_dict 1753 1754 def save_checkpoint_data(self, resultsdir, fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1755 """Save checkpoint data. 1756 1757 Args: 1758 resultsdir: String, directory to write results to 1759 fname: String, name of file to write results to 1760 """ 1761 fname = os.path.join(resultsdir, fname) 1762 with open(fname, 'wt') as f: 1763 json.dump(self.checkpoint_data, f, indent=4, separators=(',', ': ')) 1764 1765 def load_checkpoint_data(self, resultsdir, 1766 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1767 """Load checkpoint data. 1768 1769 Args: 1770 resultsdir: String, directory to load results from 1771 fname: String, name of file to load results from 1772 """ 1773 fname = os.path.join(resultsdir, fname) 1774 try: 1775 with open(fname, 'r') as f: 1776 self.checkpoint_data = json.load(f, 1777 object_hook=to_checkpoint_data) 1778 # Set start time to the earliest start timestamp in file. 1779 self._start_time = min( 1780 ts_pair[0] 1781 for ts_pair in itertools.chain.from_iterable( 1782 list(self.checkpoint_data.values()))) 1783 except Exception as exc: 1784 logging.warning('Failed to load checkpoint data from json file %s, ' 1785 'see exception: %s', fname, exc) 1786 1787 @staticmethod 1788 def load_checkpoint_data_static(resultsdir, 1789 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1790 """Load checkpoint data. 1791 1792 Args: 1793 resultsdir: String, directory to load results from 1794 fname: String, name of file to load results from 1795 """ 1796 fname = os.path.join(resultsdir, fname) 1797 with open(fname, 'r') as f: 1798 checkpoint_data = json.load(f) 1799 return checkpoint_data 1800 1801 1802def to_checkpoint_data(json_dict): 1803 """Helper method to translate json object into CheckpointLogger format. 1804 1805 Args: 1806 json_dict: a json object in the format of python dict 1807 Returns: 1808 a defaultdict in CheckpointLogger data format 1809 """ 1810 checkpoint_data = collections.defaultdict(list) 1811 for tname, tlist in json_dict.items(): 1812 checkpoint_data[tname].extend([tuple(ts_pair) for ts_pair in tlist]) 1813 return checkpoint_data 1814 1815 1816def get_checkpoint_logger_from_file(resultsdir, 1817 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1818 """Create a CheckpointLogger and load checkpoint data from file. 1819 1820 Args: 1821 resultsdir: String, directory to load results from 1822 fname: String, name of file to load results from 1823 Returns: 1824 CheckpointLogger with data from file 1825 """ 1826 checkpoint_logger = CheckpointLogger() 1827 checkpoint_logger.load_checkpoint_data(resultsdir, fname) 1828 return checkpoint_logger 1829 1830 1831class MeasurementLogger(threading.Thread): 1832 """A thread that logs measurement readings. 1833 1834 Example code snippet: 1835 my_logger = MeasurementLogger([Measurement1, Measurement2]) 1836 my_logger.start() 1837 for testname in tests: 1838 # Option 1: use checkblock 1839 with my_logger.checkblock(testname): 1840 # run the test method for testname 1841 1842 # Option 2: use checkpoint 1843 start_time = time.time() 1844 # run the test method for testname 1845 my_logger.checkpoint(testname, start_time) 1846 1847 keyvals = my_logger.calc() 1848 1849 or using CheckpointLogger: 1850 checkpoint_logger = CheckpointLogger() 1851 my_logger = MeasurementLogger([Measurement1, Measurement2], 1852 checkpoint_logger) 1853 my_logger.start() 1854 for testname in tests: 1855 # Option 1: use checkblock 1856 with checkpoint_logger.checkblock(testname): 1857 # run the test method for testname 1858 1859 # Option 2: use checkpoint 1860 start_time = time.time() 1861 # run the test method for testname 1862 checkpoint_logger.checkpoint(testname, start_time) 1863 1864 keyvals = my_logger.calc() 1865 1866 Public attributes: 1867 seconds_period: float, probing interval in seconds. 1868 readings: list of lists of floats of measurements. 1869 times: list of floats of time (since Epoch) of when measurements 1870 occurred. len(time) == len(readings). 1871 done: flag to stop the logger. 1872 domains: list of domain strings being measured 1873 1874 Public methods: 1875 run: launches the thread to gather measurements 1876 refresh: perform data samplings for every measurements 1877 calc: calculates 1878 save_results: 1879 1880 Private attributes: 1881 _measurements: list of Measurement objects to be sampled. 1882 _checkpoint_data: dictionary of (tname, tlist). 1883 tname: String of testname associated with these time intervals 1884 tlist: list of tuples. Tuple contains: 1885 tstart: Float of time when subtest started 1886 tend: Float of time when subtest ended 1887 _results: list of results tuples. Tuple contains: 1888 prefix: String of subtest 1889 mean: Float of mean in watts 1890 std: Float of standard deviation of measurements 1891 tstart: Float of time when subtest started 1892 tend: Float of time when subtest ended 1893 """ 1894 def __init__(self, measurements, seconds_period=1.0, checkpoint_logger=None): 1895 """Initialize a logger. 1896 1897 Args: 1898 _measurements: list of Measurement objects to be sampled. 1899 seconds_period: float, probing interval in seconds. Default 1.0 1900 """ 1901 threading.Thread.__init__(self) 1902 1903 self.seconds_period = seconds_period 1904 1905 self.readings = [] 1906 self.times = [] 1907 1908 self._measurements = measurements 1909 self.domains = [meas.domain for meas in self._measurements] 1910 1911 self._checkpoint_logger = \ 1912 checkpoint_logger if checkpoint_logger else CheckpointLogger() 1913 1914 self.done = False 1915 1916 def start(self): 1917 self._checkpoint_logger.start() 1918 super(MeasurementLogger, self).start() 1919 1920 def refresh(self): 1921 """Perform data samplings for every measurements. 1922 1923 Returns: 1924 list of sampled data for every measurements. 1925 """ 1926 return [meas.refresh() for meas in self._measurements] 1927 1928 def run(self): 1929 """Threads run method.""" 1930 loop = 1 1931 start_time = time.time() 1932 time.sleep(self.seconds_period) 1933 while(not self.done): 1934 # TODO (dbasehore): We probably need proper locking in this file 1935 # since there have been race conditions with modifying and accessing 1936 # data. 1937 self.readings.append(self.refresh()) 1938 current_time = time.time() 1939 self.times.append(current_time) 1940 loop += 1 1941 next_measurement_time = start_time + loop * self.seconds_period 1942 time.sleep(next_measurement_time - current_time) 1943 1944 @contextlib.contextmanager 1945 def checkblock(self, tname=''): 1946 """Check point for the following block with test tname. 1947 1948 Args: 1949 tname: String of testname associated with this time interval 1950 """ 1951 start_time = time.time() 1952 yield 1953 self.checkpoint(tname, start_time) 1954 1955 def checkpoint(self, tname='', tstart=None, tend=None): 1956 """Just a thin method calling the CheckpointLogger checkpoint method. 1957 1958 Args: 1959 tname: String of testname associated with this time interval 1960 tstart: Float in seconds of when tname test started. Should be based 1961 off time.time() 1962 tend: Float in seconds of when tname test ended. Should be based 1963 off time.time(). If None, then value computed in the method. 1964 """ 1965 self._checkpoint_logger.checkpoint(tname, tstart, tend) 1966 1967 # TODO(seankao): It might be useful to pull this method to CheckpointLogger, 1968 # to allow checkpoint usage without an explicit MeasurementLogger. 1969 def calc(self, mtype=None): 1970 """Calculate average measurement during each of the sub-tests. 1971 1972 Method performs the following steps: 1973 1. Signals the thread to stop running. 1974 2. Calculates mean, max, min, count on the samples for each of the 1975 measurements. 1976 3. Stores results to be written later. 1977 4. Creates keyvals for autotest publishing. 1978 1979 Args: 1980 mtype: string of measurement type. For example: 1981 pwr == power 1982 temp == temperature 1983 Returns: 1984 dict of keyvals suitable for autotest results. 1985 """ 1986 if not mtype: 1987 mtype = 'meas' 1988 1989 t = numpy.array(self.times) 1990 keyvals = {} 1991 results = [('domain', 'mean', 'std', 'duration (s)', 'start ts', 1992 'end ts')] 1993 # TODO(coconutruben): ensure that values is meaningful i.e. go through 1994 # the Loggers and add a unit attribute to each so that the raw 1995 # data is readable. 1996 raw_results = [('domain', 'values (%s)' % mtype)] 1997 1998 if not self.done: 1999 self.done = True 2000 # times 2 the sleep time in order to allow for readings as well. 2001 self.join(timeout=self.seconds_period * 2) 2002 2003 if not self._checkpoint_logger.checkpoint_data: 2004 self._checkpoint_logger.checkpoint() 2005 2006 for i, domain_readings in enumerate(zip(*self.readings)): 2007 meas = numpy.array(domain_readings) 2008 try: 2009 domain = self.domains[i] 2010 except IndexError: 2011 # TODO (evanbenn) temp logging for b:162610351 2012 logging.debug('b:162610351 IndexError: %s, %d, %d, (%s)', 2013 type(self).__name__, 2014 len(self.readings), 2015 len(self.domains), 2016 ', '.join(str(len(r)) for r in self.readings)) 2017 logging.debug('b:162610351 domains: %s', 2018 ', '.join(self.domains)) 2019 raise 2020 2021 for tname, tlist in \ 2022 self._checkpoint_logger.checkpoint_data.items(): 2023 if tname: 2024 prefix = '%s_%s' % (tname, domain) 2025 else: 2026 prefix = domain 2027 keyvals[prefix+'_duration'] = 0 2028 # Select all readings taken between tstart and tend 2029 # timestamps in tlist. 2030 masks = [] 2031 for tstart, tend in tlist: 2032 keyvals[prefix+'_duration'] += tend - tstart 2033 # Try block just in case 2034 # code.google.com/p/chromium/issues/detail?id=318892 2035 # is not fixed. 2036 try: 2037 masks.append(numpy.logical_and(tstart < t, t < tend)) 2038 except ValueError as e: 2039 logging.debug('Error logging measurements: %s', str(e)) 2040 logging.debug('timestamps %d %s', t.len, t) 2041 logging.debug('timestamp start, end %f %f', tstart, tend) 2042 logging.debug('measurements %d %s', meas.len, meas) 2043 mask = numpy.logical_or.reduce(masks) 2044 meas_array = meas[mask] 2045 2046 # If sub-test terminated early, avoid calculating avg, std and 2047 # min 2048 if not meas_array.size: 2049 continue 2050 meas_mean = meas_array.mean() 2051 meas_std = meas_array.std() 2052 2053 # Results list can be used for pretty printing and saving as csv 2054 # TODO(seankao): new results format? 2055 result = (prefix, meas_mean, meas_std) 2056 for tstart, tend in tlist: 2057 result = result + (tend - tstart, tstart, tend) 2058 results.append(result) 2059 raw_results.append((prefix,) + tuple(meas_array.tolist())) 2060 2061 keyvals[prefix + '_' + mtype + '_avg'] = meas_mean 2062 keyvals[prefix + '_' + mtype + '_cnt'] = meas_array.size 2063 keyvals[prefix + '_' + mtype + '_max'] = meas_array.max() 2064 keyvals[prefix + '_' + mtype + '_min'] = meas_array.min() 2065 keyvals[prefix + '_' + mtype + '_std'] = meas_std 2066 self._results = results 2067 self._raw_results = raw_results 2068 return keyvals 2069 2070 2071 def save_results(self, resultsdir, fname_prefix=None): 2072 """Save computed results in a nice tab-separated format. 2073 This is useful for long manual runs. 2074 2075 Args: 2076 resultsdir: String, directory to write results to 2077 fname_prefix: prefix to use for fname. If provided outfiles 2078 will be [fname]_[raw|summary].txt 2079 """ 2080 if not fname_prefix: 2081 fname_prefix = 'meas_results_%.0f' % time.time() 2082 fname = '%s_summary.txt' % fname_prefix 2083 raw_fname = fname.replace('summary', 'raw') 2084 for name, data in [(fname, self._results), 2085 (raw_fname, self._raw_results)]: 2086 with open(os.path.join(resultsdir, name), 'wt') as f: 2087 # First row contains the headers 2088 f.write('%s\n' % '\t'.join(data[0])) 2089 for row in data[1:]: 2090 # First column name, rest are numbers. See _calc_power() 2091 fmt_row = [row[0]] + ['%.2f' % x for x in row[1:]] 2092 f.write('%s\n' % '\t'.join(fmt_row)) 2093 2094 2095class CPUStatsLogger(MeasurementLogger): 2096 """Class to measure CPU Frequency and CPU Idle Stats. 2097 2098 CPUStatsLogger derived from MeasurementLogger class but overload data 2099 samplings method because MeasurementLogger assumed that each sampling is 2100 independent to each other. However, in this case it is not. For example, 2101 CPU time spent in C0 state is measure by time not spent in all other states. 2102 2103 CPUStatsLogger also collects the weight average in each time period if the 2104 underlying AbstractStats support weight average function. 2105 2106 Private attributes: 2107 _stats: list of CPU AbstractStats objects to be sampled. 2108 _refresh_count: number of times refresh() has been called. 2109 _last_wavg: dict of wavg when refresh() was last called. 2110 """ 2111 def __init__(self, seconds_period=1.0, checkpoint_logger=None): 2112 """Initialize a CPUStatsLogger. 2113 2114 Args: 2115 seconds_period: float, probing interval in seconds. Default 1.0 2116 """ 2117 # We don't use measurements since CPU stats can't measure separately. 2118 super(CPUStatsLogger, self).__init__([], seconds_period, checkpoint_logger) 2119 2120 self._stats = get_available_cpu_stats() 2121 self._stats.append(GPUFreqStats()) 2122 self.domains = [] 2123 self._refresh_count = 0 2124 self._last_wavg = collections.defaultdict(int) 2125 2126 def _set_domains(self): 2127 self.domains = [] 2128 for stat in self._stats: 2129 self.domains.extend([stat.name + '_' + str(state_name) 2130 for state_name in stat.refresh()]) 2131 if stat.weighted_average(): 2132 self.domains.append('wavg_' + stat.name) 2133 2134 def refresh(self): 2135 self._refresh_count += 1 2136 count = self._refresh_count 2137 ret = [] 2138 for stat in self._stats: 2139 ret.extend(list(stat.refresh().values())) 2140 wavg = stat.weighted_average() 2141 if wavg: 2142 if stat.incremental: 2143 last_wavg = self._last_wavg[stat.name] 2144 self._last_wavg[stat.name] = wavg 2145 # Calculate weight average in this period using current 2146 # total weight average and last total weight average. 2147 # The result will lose some precision with higher number of 2148 # count but still good enough for 11 significant digits even 2149 # if we logged the data every 1 second for a day. 2150 ret.append(wavg * count - last_wavg * (count - 1)) 2151 else: 2152 ret.append(wavg) 2153 if not self.domains: 2154 self._set_domains() 2155 elif len(self.domains) != len(ret): 2156 # This would make data jumble but better than IndexError. 2157 # Add the log to help detecting the root cause. 2158 logging.warning('b:162610351 len(self.domains) != len(ret)') 2159 logging.warning('old_domains: (%s)', ', '.join(self.domains)) 2160 self._set_domains() 2161 logging.warning('new_domains: (%s)', ', '.join(self.domains)) 2162 return ret 2163 2164 def save_results(self, resultsdir, fname_prefix=None): 2165 if not fname_prefix: 2166 fname_prefix = 'cpu_results_%.0f' % time.time() 2167 super(CPUStatsLogger, self).save_results(resultsdir, fname_prefix) 2168 2169 2170class PowerLogger(MeasurementLogger): 2171 """Class to measure power consumption.""" 2172 2173 def __init__(self, measurements, seconds_period=1.0, 2174 checkpoint_logger=None): 2175 if not measurements: 2176 measurements = self.create_measurements() 2177 super(PowerLogger, self).__init__(measurements, seconds_period, 2178 checkpoint_logger) 2179 2180 def create_measurements(self): 2181 """Create power measurements based on device config.""" 2182 # Import here to avoid import loop. 2183 from autotest_lib.client.cros.power import power_rapl 2184 2185 measurements = [] 2186 status = get_status() 2187 if status.battery: 2188 measurements.append(BatteryStateOfCharge()) 2189 if status.battery_discharging(): 2190 measurements.append(SystemPower(status.battery_path)) 2191 if power_utils.has_powercap_support(): 2192 measurements += power_rapl.create_powercap() 2193 elif power_utils.has_rapl_support(): 2194 measurements += power_rapl.create_rapl() 2195 return measurements 2196 2197 def save_results(self, resultsdir, fname_prefix=None): 2198 if not fname_prefix: 2199 fname_prefix = 'power_results_%.0f' % time.time() 2200 super(PowerLogger, self).save_results(resultsdir, fname_prefix) 2201 2202 def calc(self, mtype='pwr'): 2203 return super(PowerLogger, self).calc(mtype) 2204 2205 2206class TempMeasurement(object): 2207 """Class to measure temperature. 2208 2209 Public attributes: 2210 domain: String name of the temperature domain being measured. Example is 2211 'cpu' for cpu temperature 2212 2213 Private attributes: 2214 _path: Path to temperature file to read ( in millidegrees Celsius ) 2215 2216 Public methods: 2217 refresh: Performs any temperature sampling and calculation and returns 2218 temperature as float in degrees Celsius. 2219 """ 2220 def __init__(self, domain, path): 2221 """Constructor.""" 2222 self.domain = domain 2223 self._path = path 2224 2225 2226 def refresh(self): 2227 """Performs temperature 2228 2229 Returns: 2230 float, temperature in degrees Celsius 2231 """ 2232 return int(utils.read_one_line(self._path)) / 1000. 2233 2234 2235class BatteryTempMeasurement(TempMeasurement): 2236 """Class to measure battery temperature.""" 2237 def __init__(self): 2238 super(BatteryTempMeasurement, self).__init__('battery', 'battery_temp') 2239 2240 2241 def refresh(self): 2242 """Perform battery temperature reading. 2243 2244 Returns: 2245 float, temperature in degrees Celsius. 2246 """ 2247 result = utils.run(self._path, timeout=5, ignore_status=True) 2248 2249 value = float(result.stdout) 2250 2251 # `battery_temp` return in celsius unit. 2252 if 0 < value < 100: 2253 return round(value, 1) 2254 2255 # `battery_temp` return in kelvin unit. 2256 if 273 < value < 373: 2257 return round(value - 273.15, 1) 2258 2259 # `battery_temp` return in millicelsius unit. 2260 if 1000 < value < 100000: 2261 return round(value / 1000., 1) 2262 2263 # The command return value in millikelvin unit. 2264 if 273150 < value < 373150: 2265 return round(value / 1000. - 273.15, 1) 2266 2267 raise ValueError 2268 2269 2270def has_battery_temp(): 2271 """Determine if DUT can provide battery temperature. 2272 2273 Returns: 2274 Boolean, True if battery temperature available. False otherwise. 2275 """ 2276 if not power_utils.has_battery(): 2277 return False 2278 2279 btemp = BatteryTempMeasurement() 2280 try: 2281 btemp.refresh() 2282 except ValueError: 2283 return False 2284 2285 return True 2286 2287 2288class TempLogger(MeasurementLogger): 2289 """A thread that logs temperature readings in millidegrees Celsius.""" 2290 2291 def __init__(self, measurements, seconds_period=30.0, 2292 checkpoint_logger=None): 2293 if not measurements: 2294 measurements = self.create_measurements() 2295 super(TempLogger, self).__init__(measurements, seconds_period, 2296 checkpoint_logger) 2297 2298 def create_measurements(self): 2299 """Create measurements for TempLogger.""" 2300 measurements = [] 2301 2302 zstats = ThermalStatACPI() 2303 for desc, fpath in zstats.zones.items(): 2304 new_meas = TempMeasurement(desc, fpath) 2305 measurements.append(new_meas) 2306 2307 tstats = ThermalStatHwmon() 2308 for kname in tstats.fields: 2309 match = re.match(r'(\S+)_temp(\d+)_input', kname) 2310 if not match: 2311 continue 2312 desc = match.group(1) + '-t' + match.group(2) 2313 fpath = tstats.fields[kname][0] 2314 new_meas = TempMeasurement(desc, fpath) 2315 measurements.append(new_meas) 2316 2317 if has_battery_temp(): 2318 measurements.append(BatteryTempMeasurement()) 2319 2320 return measurements 2321 2322 def save_results(self, resultsdir, fname_prefix=None): 2323 if not fname_prefix: 2324 fname_prefix = 'temp_results_%.0f' % time.time() 2325 super(TempLogger, self).save_results(resultsdir, fname_prefix) 2326 2327 2328 def calc(self, mtype='temp'): 2329 return super(TempLogger, self).calc(mtype) 2330 2331 2332class VideoFpsLogger(MeasurementLogger): 2333 """Class to measure Video FPS.""" 2334 2335 @classmethod 2336 def time_until_ready(cls, tab, num_video=1, timeout=120): 2337 """Wait until tab is ready for VideoFpsLogger and return time used. 2338 2339 Keep polling Chrome tab until these 2 conditions are met one by one. 2340 - Number of <video> elements detected is equal to |num_video|. 2341 - All videos are played for at least 1 ms. 2342 2343 Args: 2344 tab: Chrome tab object 2345 num_video: number of expected <video> elements, default 1. 2346 timeout: timeout in seconds, default 120. 2347 2348 Returns: 2349 float, number of seconds elasped until condition met. 2350 2351 Raises: 2352 error.TestFail if condition are not met by timeout. 2353 """ 2354 start_time = time.time() 2355 2356 # Number of <video> elements detected is equal to |num_video|. 2357 c = 'document.getElementsByTagName("video").length == %d' % num_video 2358 tab.WaitForJavaScriptCondition(c, timeout=timeout) 2359 2360 # All videos are played for at least 1 ms. 2361 c = ('Math.min(...Array.from(document.getElementsByTagName("video"))' 2362 '.map(v => v.currentTime)) >= 0.001') 2363 timeout_left = timeout - (time.time() - start_time) 2364 try: 2365 tab.WaitForJavaScriptCondition(c, timeout=timeout_left) 2366 # Broad exception because py_utils.TimeoutException require libchrome 2367 except Exception: 2368 times = tab.EvaluateJavaScript( 2369 'Array.from(document.getElementsByTagName("video"))' 2370 '.map(v => v.currentTime)') 2371 # Not timeout exception, re-raise original exception 2372 if min(times) > 0.001: 2373 raise 2374 videos = tab.EvaluateJavaScript( 2375 'Array.from(document.getElementsByTagName("video"))' 2376 '.map(v => v.id)') 2377 failed_videos = [v for v, t in zip(videos, times) if t < 0.001] 2378 raise error.TestFail('Media playback failed: %s' % failed_videos) 2379 return time.time() - start_time 2380 2381 def __init__(self, tab, seconds_period=1.0, checkpoint_logger=None): 2382 """Initialize a VideoFpsLogger. 2383 2384 Args: 2385 tab: Chrome tab object 2386 """ 2387 super(VideoFpsLogger, self).__init__([], seconds_period, 2388 checkpoint_logger) 2389 self._tab = tab 2390 names = self._tab.EvaluateJavaScript( 2391 'Array.from(document.getElementsByTagName("video")).map(v => v.id)') 2392 self.domains = [n or 'video_' + str(i) for i, n in enumerate(names)] 2393 self._last = [0] * len(names) 2394 self.refresh() 2395 2396 def refresh(self): 2397 @retry.retry(Exception, timeout_min=0.5, delay_sec=0.1) 2398 def get_fps(): 2399 return self._tab.EvaluateJavaScript( 2400 'Array.from(document.getElementsByTagName("video")).map(' 2401 'v => v.webkitDecodedFrameCount)') 2402 current = get_fps() 2403 fps = [(b - a if b >= a else b) / self.seconds_period 2404 for a, b in zip(self._last , current)] 2405 self._last = current 2406 return fps 2407 2408 def save_results(self, resultsdir, fname_prefix=None): 2409 if not fname_prefix: 2410 fname_prefix = 'video_fps_results_%.0f' % time.time() 2411 super(VideoFpsLogger, self).save_results(resultsdir, fname_prefix) 2412 2413 def calc(self, mtype='fps'): 2414 return super(VideoFpsLogger, self).calc(mtype) 2415 2416 2417def get_num_fans(): 2418 """Count how many fan DUT has. 2419 2420 Returns: 2421 Integer, number of fans that DUT has. 2422 """ 2423 res = utils.run('ectool pwmgetnumfans | grep -o [0-9]', ignore_status=True) 2424 if not res or res.exit_status != 0: 2425 return 0 2426 return int(res.stdout) 2427 2428 2429def has_fan(): 2430 """Determine if DUT has fan. 2431 2432 Returns: 2433 Boolean, True if dut has fan. False otherwise. 2434 """ 2435 return get_num_fans() > 0 2436 2437 2438class FanRpmLogger(MeasurementLogger): 2439 """Class to measure Fan RPM.""" 2440 2441 def __init__(self, seconds_period=1.0, checkpoint_logger=None): 2442 """Initialize a FanRpmLogger.""" 2443 super(FanRpmLogger, self).__init__([], seconds_period, 2444 checkpoint_logger) 2445 self.domains = ['fan_' + str(i) for i in range(get_num_fans())] 2446 self.refresh() 2447 2448 def refresh(self): 2449 @retry.retry(Exception, timeout_min=0.1, delay_sec=2) 2450 def get_fan_rpm_all(): 2451 """Some example outputs from ectool 2452 2453 * Two fan system 2454 localhost ~ # ectool pwmgetfanrpm all 2455 Fan 0 RPM: 4012 2456 Fan 1 RPM: 4009 2457 2458 * One fan but its stalled 2459 localhost ~ # ectool pwmgetfanrpm all 2460 Fan 0 stalled! 2461 """ 2462 cmd = 'ectool pwmgetfanrpm all' 2463 res = utils.run(cmd, ignore_status=True, 2464 stdout_tee=utils.TEE_TO_LOGS, 2465 stderr_tee=utils.TEE_TO_LOGS) 2466 rpm_data = [] 2467 for i, ln in enumerate(res.stdout.splitlines()): 2468 if ln.find('stalled') != -1: 2469 rpm_data.append(0) 2470 else: 2471 rpm_data.append(int(ln.split(':')[1])) 2472 return rpm_data 2473 return get_fan_rpm_all() 2474 2475 def save_results(self, resultsdir, fname_prefix=None): 2476 if not fname_prefix: 2477 fname_prefix = 'fan_rpm_results_%.0f' % time.time() 2478 super(FanRpmLogger, self).save_results(resultsdir, fname_prefix) 2479 2480 def calc(self, mtype='rpm'): 2481 return super(FanRpmLogger, self).calc(mtype) 2482 2483 2484class FreeMemoryLogger(MeasurementLogger): 2485 """Class to measure free memory from /proc/meminfo in KB unit.""" 2486 2487 def __init__(self, seconds_period=1.0, checkpoint_logger=None): 2488 """Initialize a FreeMemoryLogger.""" 2489 super(FreeMemoryLogger, self).__init__([], seconds_period, 2490 checkpoint_logger) 2491 self.domains = ['MemFree', 'MemAvailable'] 2492 self.refresh() 2493 2494 def refresh(self): 2495 return [ 2496 utils.read_from_meminfo('MemFree'), 2497 utils.read_from_meminfo('MemAvailable') 2498 ] 2499 2500 def save_results(self, resultsdir, fname_prefix=None): 2501 if not fname_prefix: 2502 fname_prefix = 'free_memory_results_%.0f' % time.time() 2503 super(FreeMemoryLogger, self).save_results(resultsdir, fname_prefix) 2504 2505 def calc(self, mtype='kB'): 2506 return super(FreeMemoryLogger, self).calc(mtype) 2507 2508 2509def create_measurement_loggers(seconds_period=20.0, checkpoint_logger=None): 2510 """Create loggers for power test that is not test-specific. 2511 2512 Args: 2513 seconds_period: float, probing interval in seconds. Default 20.0 2514 checkpoint_logger: CheckpointLogger class for the loggers 2515 2516 Returns: 2517 list of loggers created. 2518 """ 2519 loggers = [ 2520 PowerLogger(None, seconds_period, checkpoint_logger), 2521 TempLogger(None, seconds_period, checkpoint_logger), 2522 CPUStatsLogger(seconds_period, checkpoint_logger), 2523 FreeMemoryLogger(seconds_period, checkpoint_logger), 2524 ] 2525 if has_fan(): 2526 loggers.append(FanRpmLogger(seconds_period, checkpoint_logger)) 2527 2528 return loggers 2529 2530 2531class DiskStateLogger(threading.Thread): 2532 """Records the time percentages the disk stays in its different power modes. 2533 2534 Example code snippet: 2535 mylogger = power_status.DiskStateLogger() 2536 mylogger.start() 2537 result = mylogger.result() 2538 2539 Public methods: 2540 start: Launches the thread and starts measurements. 2541 result: Stops the thread if it's still running and returns measurements. 2542 get_error: Returns the exception in _error if it exists. 2543 2544 Private functions: 2545 _get_disk_state: Returns the disk's current ATA power mode as a string. 2546 2547 Private attributes: 2548 _seconds_period: Disk polling interval in seconds. 2549 _stats: Dict that maps disk states to seconds spent in them. 2550 _running: Flag that is True as long as the logger should keep running. 2551 _time: Timestamp of last disk state reading. 2552 _device_path: The file system path of the disk's device node. 2553 _error: Contains a TestError exception if an unexpected error occured 2554 """ 2555 def __init__(self, seconds_period = 5.0, device_path = None): 2556 """Initializes a logger. 2557 2558 Args: 2559 seconds_period: Disk polling interval in seconds. Default 5.0 2560 device_path: The path of the disk's device node. Default '/dev/sda' 2561 """ 2562 threading.Thread.__init__(self) 2563 self._seconds_period = seconds_period 2564 self._device_path = device_path 2565 self._stats = {} 2566 self._running = False 2567 self._error = None 2568 2569 result = utils.system_output('rootdev -s') 2570 # TODO(tbroch) Won't work for emmc storage and will throw this error in 2571 # keyvals : 'ioctl(SG_IO) error: [Errno 22] Invalid argument' 2572 # Lets implement something complimentary for emmc 2573 if not device_path: 2574 self._device_path = \ 2575 re.sub('(sd[a-z]|mmcblk[0-9]+)p?[0-9]+', '\\1', result) 2576 logging.debug("device_path = %s", self._device_path) 2577 2578 2579 def start(self): 2580 logging.debug("inside DiskStateLogger.start") 2581 if os.path.exists(self._device_path): 2582 logging.debug("DiskStateLogger started") 2583 super(DiskStateLogger, self).start() 2584 2585 2586 def _get_disk_state(self): 2587 """Checks the disk's power mode and returns it as a string. 2588 2589 This uses the SG_IO ioctl to issue a raw SCSI command data block with 2590 the ATA-PASS-THROUGH command that allows SCSI-to-ATA translation (see 2591 T10 document 04-262r8). The ATA command issued is CHECKPOWERMODE1, 2592 which returns the device's current power mode. 2593 """ 2594 2595 def _addressof(obj): 2596 """Shortcut to return the memory address of an object as integer.""" 2597 return ctypes.cast(obj, ctypes.c_void_p).value 2598 2599 scsi_cdb = struct.pack("12B", # SCSI command data block (uint8[12]) 2600 0xa1, # SCSI opcode: ATA-PASS-THROUGH 2601 3 << 1, # protocol: Non-data 2602 1 << 5, # flags: CK_COND 2603 0, # features 2604 0, # sector count 2605 0, 0, 0, # LBA 2606 1 << 6, # flags: ATA-USING-LBA 2607 0xe5, # ATA opcode: CHECKPOWERMODE1 2608 0, # reserved 2609 0, # control (no idea what this is...) 2610 ) 2611 scsi_sense = (ctypes.c_ubyte * 32)() # SCSI sense buffer (uint8[32]) 2612 sgio_header = struct.pack("iiBBHIPPPIIiPBBBBHHiII", # see <scsi/sg.h> 2613 83, # Interface ID magic number (int32) 2614 -1, # data transfer direction: none (int32) 2615 12, # SCSI command data block length (uint8) 2616 32, # SCSI sense data block length (uint8) 2617 0, # iovec_count (not applicable?) (uint16) 2618 0, # data transfer length (uint32) 2619 0, # data block pointer 2620 _addressof(scsi_cdb), # SCSI CDB pointer 2621 _addressof(scsi_sense), # sense buffer pointer 2622 500, # timeout in milliseconds (uint32) 2623 0, # flags (uint32) 2624 0, # pack ID (unused) (int32) 2625 0, # user data pointer (unused) 2626 0, 0, 0, 0, 0, 0, 0, 0, 0, # output params 2627 ) 2628 try: 2629 with open(self._device_path, 'r') as dev: 2630 result = fcntl.ioctl(dev, 0x2285, sgio_header) 2631 except IOError as e: 2632 raise error.TestError('ioctl(SG_IO) error: %s' % str(e)) 2633 _, _, _, _, status, host_status, driver_status = \ 2634 struct.unpack("4x4xxx2x4xPPP4x4x4xPBxxxHH4x4x4x", result) 2635 if status != 0x2: # status: CHECK_CONDITION 2636 raise error.TestError('SG_IO status: %d' % status) 2637 if host_status != 0: 2638 raise error.TestError('SG_IO host status: %d' % host_status) 2639 if driver_status != 0x8: # driver status: SENSE 2640 raise error.TestError('SG_IO driver status: %d' % driver_status) 2641 2642 if scsi_sense[0] != 0x72: # resp. code: current error, descriptor format 2643 raise error.TestError('SENSE response code: %d' % scsi_sense[0]) 2644 if scsi_sense[1] != 0: # sense key: No Sense 2645 raise error.TestError('SENSE key: %d' % scsi_sense[1]) 2646 if scsi_sense[7] < 14: # additional length (ATA status is 14 - 1 bytes) 2647 raise error.TestError('ADD. SENSE too short: %d' % scsi_sense[7]) 2648 if scsi_sense[8] != 0x9: # additional descriptor type: ATA Return Status 2649 raise error.TestError('SENSE descriptor type: %d' % scsi_sense[8]) 2650 if scsi_sense[11] != 0: # errors: none 2651 raise error.TestError('ATA error code: %d' % scsi_sense[11]) 2652 2653 if scsi_sense[13] == 0x00: 2654 return 'standby' 2655 if scsi_sense[13] == 0x80: 2656 return 'idle' 2657 if scsi_sense[13] == 0xff: 2658 return 'active' 2659 return 'unknown(%d)' % scsi_sense[13] 2660 2661 2662 def run(self): 2663 """The Thread's run method.""" 2664 try: 2665 self._time = time.time() 2666 self._running = True 2667 while(self._running): 2668 time.sleep(self._seconds_period) 2669 state = self._get_disk_state() 2670 new_time = time.time() 2671 if state in self._stats: 2672 self._stats[state] += new_time - self._time 2673 else: 2674 self._stats[state] = new_time - self._time 2675 self._time = new_time 2676 except error.TestError as e: 2677 self._error = e 2678 self._running = False 2679 2680 2681 def result(self): 2682 """Stop the logger and return dict with result percentages.""" 2683 if (self._running): 2684 self._running = False 2685 self.join(self._seconds_period * 2) 2686 return AbstractStats.to_percent(self._stats) 2687 2688 2689 def get_error(self): 2690 """Returns the _error exception... please only call after result().""" 2691 return self._error 2692 2693S0ixAmdStats = namedtuple('S0ixAmdStats',['entry','exit','residency']) 2694 2695def parse_amd_pmc_s0ix_residency_info(): 2696 """ 2697 Parses S0ix residency for AMD systems 2698 2699 @returns S0ixAmdStats 2700 @raises error.TestNAError if the debugfs file not found. 2701 """ 2702 s = [] 2703 with open('/sys/kernel/debug/amd_pmc/s0ix_stats',"r") as f: 2704 for line in f: 2705 if ':' in line: 2706 val = line.split(": ") 2707 s.append(int(val[1])) 2708 stat = S0ixAmdStats(entry=s[0], exit=s[1], residency=s[2]) 2709 return stat 2710 raise error.TestNAError('AMD S0ix residency not supported') 2711 2712def parse_pmc_s0ix_residency_info(): 2713 """ 2714 Parses S0ix residency for PMC based Intel systems 2715 (skylake/kabylake/apollolake), the debugfs paths might be 2716 different from platform to platform, yet the format is 2717 unified in microseconds. 2718 2719 @returns residency in seconds. 2720 @raises error.TestNAError if the debugfs file not found. 2721 """ 2722 info_path = None 2723 for node in ['/sys/kernel/debug/pmc_core/slp_s0_residency_usec', 2724 '/sys/kernel/debug/telemetry/s0ix_residency_usec']: 2725 if os.path.exists(node): 2726 info_path = node 2727 break 2728 if not info_path: 2729 raise error.TestNAError('S0ix residency file not found') 2730 return float(utils.read_one_line(info_path)) * 1e-6 2731 2732 2733class S0ixResidencyStats(object): 2734 """ 2735 Measures the S0ix residency of a given board over time. 2736 """ 2737 def __init__(self): 2738 if "amd" in utils.get_cpu_soc_family(): 2739 self._initial_residency = parse_amd_pmc_s0ix_residency_info() 2740 else: 2741 self._initial_residency = parse_pmc_s0ix_residency_info() 2742 2743 def get_accumulated_residency_secs(self): 2744 """ 2745 @returns S0ix Residency since the class has been initialized. 2746 """ 2747 if "amd" in utils.get_cpu_soc_family(): 2748 s0ix = parse_amd_pmc_s0ix_residency_info() 2749 if s0ix != self._initial_residency: 2750 return s0ix.residency 2751 else: 2752 return 0 2753 else: 2754 return parse_pmc_s0ix_residency_info() - self._initial_residency 2755 2756 2757class S2IdleStateStats(object): 2758 """ 2759 Usage stats of an s2idle state. 2760 """ 2761 2762 def __init__(self, usage, time): 2763 self.usage = usage 2764 self.time = time 2765 2766 2767def get_s2idle_path(cpu, state): 2768 path = os.path.join(CPU_BASE_PATH, 2769 "cpu{}/cpuidle/{}/s2idle".format(cpu, state)) 2770 if not os.path.exists(path): 2771 return None 2772 2773 return path 2774 2775 2776def get_s2idle_stats_for_state(cpu, state): 2777 """ 2778 Returns the s2idle stats for a given idle state of a CPU. 2779 """ 2780 s2idle_path = get_s2idle_path(cpu, state) 2781 2782 path = os.path.join(s2idle_path, 'usage') 2783 if not os.path.exists(path): 2784 raise error.TestFail("File not found: {}" % path) 2785 2786 usage = int(utils.read_one_line(path)) 2787 2788 path = os.path.join(s2idle_path, 'time') 2789 if not os.path.exists(path): 2790 raise error.TestFail("File not found: {}" % path) 2791 2792 time = int(utils.read_one_line(path)) 2793 2794 return S2IdleStateStats(usage, time) 2795 2796 2797def get_cpuidle_states(cpu): 2798 """ 2799 Returns the cpuidle states of a CPU. 2800 """ 2801 cpuidle_path = os.path.join(CPU_BASE_PATH, "cpu{}/cpuidle".format(cpu)) 2802 2803 pattern = os.path.join(cpuidle_path, 'state*') 2804 state_paths = glob.glob(pattern) 2805 2806 return [s.split('/')[-1] for s in state_paths] 2807 2808 2809def get_s2idle_stats_for_cpu(cpu): 2810 """ 2811 Returns the s2idle stats for a CPU. 2812 """ 2813 return {s: get_s2idle_stats_for_state(cpu, s) 2814 for s in get_cpuidle_states(cpu) 2815 if get_s2idle_path(cpu, s) is not None} 2816 2817 2818def get_s2idle_stats(): 2819 """ 2820 Returns the s2idle stats for all CPUs. 2821 """ 2822 return {cpu: get_s2idle_stats_for_cpu(cpu) for cpu in get_online_cpus()} 2823 2824 2825def get_s2idle_residency_total_usecs(): 2826 """ 2827 Get total s2idle residency time for all CPUs and states. 2828 """ 2829 total_usecs = 0 2830 2831 all_stats = get_s2idle_stats() 2832 for stats in all_stats.values(): 2833 for st in stats.values(): 2834 total_usecs += st.time 2835 2836 return total_usecs 2837 2838 2839class S2IdleResidencyStats(object): 2840 """ 2841 Measures the s2idle residency of a given board over time. 2842 """ 2843 2844 def __init__(self): 2845 self._initial_residency = get_s2idle_residency_total_usecs() 2846 2847 def get_accumulated_residency_usecs(self): 2848 """ 2849 @returns s2idle residency since the class has been initialized. 2850 """ 2851 return get_s2idle_residency_total_usecs() - self._initial_residency 2852 2853 2854class DMCFirmwareStats(object): 2855 """ 2856 Collect DMC firmware stats of Intel based system (SKL+), (APL+). 2857 """ 2858 # Intel CPUs only transition to DC6 from DC5. https://git.io/vppcG 2859 DC6_ENTRY_KEY = 'DC5 -> DC6 count' 2860 2861 def __init__(self): 2862 self._initial_stat = DMCFirmwareStats._parse_dmc_info() 2863 2864 def check_fw_loaded(self): 2865 """Check that DMC firmware is loaded 2866 2867 @returns boolean of DMC firmware loaded. 2868 """ 2869 return self._initial_stat['fw loaded'] 2870 2871 def is_dc6_supported(self): 2872 """Check that DMC support DC6 state.""" 2873 return self.DC6_ENTRY_KEY in self._initial_stat 2874 2875 def get_accumulated_dc6_entry(self): 2876 """Check number of DC6 state entry since the class has been initialized. 2877 2878 @returns number of DC6 state entry. 2879 """ 2880 if not self.is_dc6_supported(): 2881 return 0 2882 2883 key = self.DC6_ENTRY_KEY 2884 current_stat = DMCFirmwareStats._parse_dmc_info() 2885 return current_stat[key] - self._initial_stat[key] 2886 2887 @staticmethod 2888 def _parse_dmc_info(): 2889 """ 2890 Parses DMC firmware info for Intel based systems. 2891 2892 @returns dictionary of dmc_fw info 2893 @raises error.TestFail if the debugfs file not found. 2894 """ 2895 path = '/sys/kernel/debug/dri/0/i915_dmc_info' 2896 if not os.path.exists(path): 2897 raise error.TestFail('DMC info file not found.') 2898 2899 with open(path, 'r') as f: 2900 lines = [line.strip() for line in f.readlines()] 2901 2902 # For pre 4.16 kernel. https://git.io/vhThb 2903 if lines[0] == 'not supported': 2904 raise error.TestFail('DMC not supported.') 2905 2906 ret = dict() 2907 for line in lines: 2908 key, val = line.rsplit(': ', 1) 2909 2910 if key == 'fw loaded': 2911 val = val == 'yes' 2912 elif re.match(r'DC\d -> DC\d count', key): 2913 val = int(val) 2914 ret[key] = val 2915 return ret 2916 2917 2918class RC6ResidencyStats(object): 2919 """ 2920 Collect RC6 residency stats of Intel based system. 2921 """ 2922 def __init__(self): 2923 self._rc6_enable_checked = False 2924 self._previous_stat = self._parse_rc6_residency_info() 2925 self._accumulated_stat = 0 2926 2927 # Setup max RC6 residency count for modern chips. The table order 2928 # is in small/big-core first, follows by the uarch name. We don't 2929 # need to deal with the wraparound for devices with v4.17+ kernel 2930 # which has the commit 817cc0791823 ("drm/i915: Handle RC6 counter wrap"). 2931 cpu_uarch = utils.get_intel_cpu_uarch() 2932 self._max_counter = { 2933 # Small-core w/ GEN9 LP graphics 2934 'Airmont': 3579125, 2935 'Goldmont': 3579125, 2936 # Big-core 2937 'Broadwell': 5497558, 2938 'Haswell': 5497558, 2939 'Kaby Lake': 5497558, 2940 'Skylake': 5497558, 2941 }.get(cpu_uarch, None) 2942 2943 def get_accumulated_residency_msecs(self): 2944 """Check number of RC6 state entry since the class has been initialized. 2945 2946 @returns int of RC6 residency in milliseconds since instantiation. 2947 """ 2948 current_stat = self._parse_rc6_residency_info() 2949 2950 # The problem here is that we cannot assume the rc6_residency_ms is 2951 # monotonically increasing by current kernel i915 implementation. 2952 # 2953 # Considering different hardware has different wraparound period, 2954 # this is a mitigation plan to deal with different wraparound period 2955 # on various platforms, in order to make the test platform agnostic. 2956 # 2957 # This scarifes the accuracy of RC6 residency a bit, up on the calling 2958 # period. 2959 # 2960 # Reference: Bug 94852 - [SKL] rc6_residency_ms unreliable 2961 # (https://bugs.freedesktop.org/show_bug.cgi?id=94852) 2962 # 2963 # However for modern processors with a known overflow count, apply 2964 # constant of RC6 max counter to improve accuracy. 2965 # 2966 # Note that the max counter is bound for sysfs overflow, while the 2967 # accumulated residency here is the diff against the first reading. 2968 if current_stat < self._previous_stat: 2969 if self._max_counter is None: 2970 logging.warning('GPU: Detect rc6_residency_ms wraparound') 2971 self._accumulated_stat += current_stat 2972 else: 2973 self._accumulated_stat += current_stat + (self._max_counter - 2974 self._previous_stat) 2975 else: 2976 self._accumulated_stat += current_stat - self._previous_stat 2977 2978 self._previous_stat = current_stat 2979 return self._accumulated_stat 2980 2981 def _is_rc6_enable(self): 2982 """ 2983 Verified that RC6 is enable. 2984 2985 @returns Boolean of RC6 enable status. 2986 @raises error.TestFail if the sysfs file not found. 2987 """ 2988 path = '/sys/class/drm/card0/power/rc6_enable' 2989 if not os.path.exists(path): 2990 raise error.TestFail('RC6 enable file not found.') 2991 2992 return (int(utils.read_one_line(path)) & 0x1) == 0x1 2993 2994 def _parse_rc6_residency_info(self): 2995 """ 2996 Parses RC6 residency info for Intel based systems. 2997 2998 @returns int of RC6 residency in millisec since boot. 2999 @raises error.TestFail if the sysfs file not found or RC6 not enabled. 3000 """ 3001 if not self._rc6_enable_checked: 3002 if not self._is_rc6_enable(): 3003 raise error.TestFail('RC6 is not enabled.') 3004 self._rc6_enable_checked = True 3005 3006 path = '/sys/class/drm/card0/power/rc6_residency_ms' 3007 if not os.path.exists(path): 3008 raise error.TestFail('RC6 residency file not found.') 3009 3010 return int(utils.read_one_line(path)) 3011 3012 3013class PCHPowergatingStats(object): 3014 """ 3015 Collect PCH powergating status of intel based system. 3016 """ 3017 PMC_CORE_PATH = '/sys/kernel/debug/pmc_core/pch_ip_power_gating_status' 3018 TELEMETRY_PATH = '/sys/kernel/debug/telemetry/soc_states' 3019 3020 def __init__(self): 3021 self._stat = {} 3022 3023 def check_s0ix_requirement(self): 3024 """ 3025 Check PCH powergating status with S0ix requirement. 3026 3027 @returns list of PCH IP block name that need to be powergated for low 3028 power consumption S0ix, empty list if none. 3029 """ 3030 # PCH IP block that is on for S0ix. Ignore these IP block. 3031 S0IX_ALLOWLIST = set([ 3032 'PMC', 'OPI-DMI', 'SPI / eSPI', 'XHCI', 'xHCI', 'FUSE', 'Fuse', 3033 'PCIE0', 'NPKVRC', 'NPKVNN', 'NPK_VNN', 'PSF1', 'PSF2', 'PSF3', 3034 'PSF4', 'SBR0', 'SBR1', 'SBR2', 'SBR4', 'SBR5', 'SBR6', 'SBR7']) 3035 3036 # PCH IP block that is on/off for S0ix depend on features enabled. 3037 # Add log when these IPs state are on. 3038 S0IX_WARNLIST = set([ 3039 'HDA-PGD0', 'HDA-PGD1', 'HDA-PGD2', 'HDA-PGD3', 'LPSS', 3040 'AVSPGD1', 'AVSPGD4']) 3041 3042 # CNV device has 0x31dc as devid . 3043 if len(utils.system_output('lspci -d :31dc')) > 0: 3044 S0IX_ALLOWLIST.add('CNV') 3045 3046 # HrP2 device has 0x02f0(CML) or 0x4df0(JSL) as devid. 3047 if (len(utils.system_output('lspci -d :02f0')) > 0 or 3048 len(utils.system_output('lspci -d :4df0')) > 0): 3049 S0IX_ALLOWLIST.update(['CNVI', 'NPK_AON']) 3050 3051 on_ip = set(ip['name'] for ip in self._stat if ip['state']) 3052 on_ip -= S0IX_ALLOWLIST 3053 3054 if on_ip: 3055 on_ip_in_warn_list = on_ip & S0IX_WARNLIST 3056 if on_ip_in_warn_list: 3057 logging.warning('Found PCH IP that may be able to powergate: %s', 3058 ', '.join(on_ip_in_warn_list)) 3059 on_ip -= S0IX_WARNLIST 3060 3061 if on_ip: 3062 logging.error('Found PCH IP that need to powergate: %s', 3063 ', '.join(on_ip)) 3064 return on_ip 3065 return [] 3066 3067 def read_pch_powergating_info(self, sleep_seconds=1): 3068 """ 3069 Read PCH powergating status info for Intel based systems. 3070 3071 Intel currently shows powergating status in 2 different place in debugfs 3072 depend on which CPU platform. 3073 3074 @param sleep_seconds: sleep time to make DUT idle before read the data. 3075 3076 @raises error.TestFail if the debugfs file not found or parsing error. 3077 """ 3078 if os.path.exists(self.PMC_CORE_PATH): 3079 logging.info('Use PCH powergating info at %s', self.PMC_CORE_PATH) 3080 time.sleep(sleep_seconds) 3081 self._read_pcm_core_powergating_info() 3082 return 3083 3084 if os.path.exists(self.TELEMETRY_PATH): 3085 logging.info('Use PCH powergating info at %s', self.TELEMETRY_PATH) 3086 time.sleep(sleep_seconds) 3087 self._read_telemetry_powergating_info() 3088 return 3089 3090 raise error.TestFail('PCH powergating info file not found.') 3091 3092 def _read_pcm_core_powergating_info(self): 3093 """ 3094 read_pch_powergating_info() for Intel Core KBL+ 3095 3096 @raises error.TestFail if parsing error. 3097 """ 3098 with open(self.PMC_CORE_PATH, 'r') as f: 3099 lines = [line.strip() for line in f.readlines()] 3100 3101 # Example pattern to match: 3102 # PCH IP: 0 - PMC State: On 3103 # PCH IP: 1 - SATA State: Off 3104 pattern = r'PCH IP:\s+(?P<id>\d+)\s+' \ 3105 r'- (?P<name>.*\w)\s+' \ 3106 r'State: (?P<state>Off|On)' 3107 matcher = re.compile(pattern) 3108 ret = [] 3109 for i, line in enumerate(lines): 3110 match = matcher.match(line) 3111 if not match: 3112 raise error.TestFail('Can not parse PCH powergating info: ', 3113 line) 3114 3115 index = int(match.group('id')) 3116 if i != index: 3117 raise error.TestFail('Wrong index for PCH powergating info: ', 3118 line) 3119 3120 name = match.group('name') 3121 state = match.group('state') == 'On' 3122 3123 ret.append({'name': name, 'state': state}) 3124 self._stat = ret 3125 3126 def _read_telemetry_powergating_info(self): 3127 """ 3128 read_pch_powergating_info() for Intel Atom APL+ 3129 3130 @raises error.TestFail if parsing error. 3131 """ 3132 with open(self.TELEMETRY_PATH, 'r') as f: 3133 raw_str = f.read() 3134 3135 # Example pattern to match: 3136 # -------------------------------------- 3137 # South Complex PowerGate Status 3138 # -------------------------------------- 3139 # Device PG 3140 # LPSS 1 3141 # SPI 1 3142 # FUSE 0 3143 # 3144 # --------------------------------------- 3145 trimed_pattern = r'.*South Complex PowerGate Status\n' \ 3146 r'-+\n' \ 3147 r'Device\s+PG\n' \ 3148 r'(?P<trimmed_section>(\w+\s+[0|1]\n)+)' \ 3149 r'\n-+\n.*' 3150 trimed_match = re.match(trimed_pattern, raw_str, re.DOTALL) 3151 if not trimed_match: 3152 raise error.TestFail('Can not parse PCH powergating info: ', 3153 raw_str) 3154 trimmed_str = trimed_match.group('trimmed_section').strip() 3155 lines = [line.strip() for line in trimmed_str.split('\n')] 3156 3157 matcher = re.compile(r'(?P<name>\w+)\s+(?P<state>[0|1])') 3158 ret = [] 3159 for line in lines: 3160 match = matcher.match(line) 3161 if not match: 3162 raise error.TestFail('Can not parse PCH powergating info: %s', 3163 line) 3164 3165 name = match.group('name') 3166 state = match.group('state') == '0' # 0 means on and 1 means off 3167 3168 ret.append({'name': name, 'state': state}) 3169 self._stat = ret 3170 3171def has_rc6_support(): 3172 """ 3173 Helper to examine that RC6 is enabled with residency counter. 3174 3175 @returns Boolean of RC6 support status. 3176 """ 3177 enable_path = '/sys/class/drm/card0/power/rc6_enable' 3178 residency_path = '/sys/class/drm/card0/power/rc6_residency_ms' 3179 3180 has_rc6_enabled = os.path.exists(enable_path) 3181 has_rc6_residency = False 3182 rc6_enable_mask = 0 3183 3184 if has_rc6_enabled: 3185 # TODO (harry.pan): Some old chip has RC6P and RC6PP 3186 # in the bits[1:2]; in case of that, ideally these time 3187 # slice will fall into RC0, fix it up if required. 3188 rc6_enable_mask = int(utils.read_one_line(enable_path)) 3189 has_rc6_enabled &= (rc6_enable_mask) & 0x1 == 0x1 3190 has_rc6_residency = os.path.exists(residency_path) 3191 3192 logging.debug("GPU: RC6 residency support: %s, mask: 0x%x", 3193 {True: "yes", False: "no"} [has_rc6_enabled and has_rc6_residency], 3194 rc6_enable_mask) 3195 3196 return (has_rc6_enabled and has_rc6_residency) 3197 3198class GPURC6Stats(AbstractStats): 3199 """ 3200 GPU RC6 statistics to give ratio of RC6 and RC0 residency 3201 3202 Protected Attributes: 3203 _rc6: object of RC6ResidencyStats 3204 """ 3205 def __init__(self): 3206 self._rc6 = RC6ResidencyStats() 3207 super(GPURC6Stats, self).__init__(name='gpuidle') 3208 3209 def _read_stats(self): 3210 total = int(time.time() * 1000) 3211 msecs = self._rc6.get_accumulated_residency_msecs() 3212 stats = collections.defaultdict(int) 3213 stats['RC6'] += msecs 3214 stats['RC0'] += total - msecs 3215 logging.debug("GPU: RC6 residency: %d ms", msecs) 3216 return stats 3217