1# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 2# Use of this source code is governed by a BSD-style license that can be 3# found in the LICENSE file. 4 5import collections 6import contextlib 7import ctypes 8import fcntl 9import glob 10import itertools 11import json 12import logging 13import math 14import numpy 15import os 16import re 17import struct 18import threading 19import time 20 21from autotest_lib.client.bin import utils 22from autotest_lib.client.common_lib import error, enum 23from autotest_lib.client.cros import kernel_trace 24from autotest_lib.client.cros.power import power_utils 25 26BatteryDataReportType = enum.Enum('CHARGE', 'ENERGY') 27 28# battery data reported at 1e6 scale 29BATTERY_DATA_SCALE = 1e6 30# number of times to retry reading the battery in the case of bad data 31BATTERY_RETRY_COUNT = 3 32# default filename when saving CheckpointLogger data to file 33CHECKPOINT_LOG_DEFAULT_FNAME = 'checkpoint_log.json' 34 35 36class DevStat(object): 37 """ 38 Device power status. This class implements generic status initialization 39 and parsing routines. 40 """ 41 42 def __init__(self, fields, path=None): 43 self.fields = fields 44 self.path = path 45 46 47 def reset_fields(self): 48 """ 49 Reset all class fields to None to mark their status as unknown. 50 """ 51 for field in self.fields.iterkeys(): 52 setattr(self, field, None) 53 54 55 def read_val(self, file_name, field_type): 56 """Read a value from file. 57 """ 58 try: 59 path = file_name 60 if not file_name.startswith('/'): 61 path = os.path.join(self.path, file_name) 62 f = open(path, 'r') 63 out = f.readline().rstrip('\n') 64 val = field_type(out) 65 return val 66 67 except: 68 return field_type(0) 69 70 71 def read_all_vals(self): 72 """Read all values. 73 """ 74 for field, prop in self.fields.iteritems(): 75 if prop[0]: 76 val = self.read_val(prop[0], prop[1]) 77 setattr(self, field, val) 78 79 def update(self): 80 """Update the DevStat. 81 82 Need to implement in subclass. 83 """ 84 pass 85 86class ThermalStatACPI(DevStat): 87 """ 88 ACPI-based thermal status. 89 90 Fields: 91 (All temperatures are in millidegrees Celsius.) 92 93 str enabled: Whether thermal zone is enabled 94 int temp: Current temperature 95 str type: Thermal zone type 96 int num_trip_points: Number of thermal trip points that activate 97 cooling devices 98 int num_points_tripped: Temperature is above this many trip points 99 str trip_point_N_type: Trip point #N's type 100 int trip_point_N_temp: Trip point #N's temperature value 101 int cdevX_trip_point: Trip point o cooling device #X (index) 102 """ 103 104 MAX_TRIP_POINTS = 20 105 106 thermal_fields = { 107 'enabled': ['enabled', str], 108 'temp': ['temp', int], 109 'type': ['type', str], 110 'num_points_tripped': ['', ''] 111 } 112 path = '/sys/class/thermal/thermal_zone*' 113 114 def __init__(self, path=None): 115 # Browse the thermal folder for trip point fields. 116 self.num_trip_points = 0 117 118 thermal_fields = glob.glob(path + '/*') 119 for file in thermal_fields: 120 field = file[len(path + '/'):] 121 if field.find('trip_point') != -1: 122 if field.find('temp'): 123 field_type = int 124 else: 125 field_type = str 126 self.thermal_fields[field] = [field, field_type] 127 128 # Count the number of trip points. 129 if field.find('_type') != -1: 130 self.num_trip_points += 1 131 132 super(ThermalStatACPI, self).__init__(self.thermal_fields, path) 133 self.update() 134 135 def update(self): 136 if not os.path.exists(self.path): 137 return 138 139 self.read_all_vals() 140 self.num_points_tripped = 0 141 142 for field in self.thermal_fields: 143 if field.find('trip_point_') != -1 and field.find('_temp') != -1 \ 144 and self.temp > self.read_val(field, int): 145 self.num_points_tripped += 1 146 logging.info('Temperature trip point #' + \ 147 field[len('trip_point_'):field.rfind('_temp')] + \ 148 ' tripped.') 149 150 151class ThermalStatHwmon(DevStat): 152 """ 153 hwmon-based thermal status. 154 155 Fields: 156 int <tname>_temp<num>_input: Current temperature in millidegrees Celsius 157 where: 158 <tname> : name of hwmon device in sysfs 159 <num> : number of temp as some hwmon devices have multiple 160 161 """ 162 path = '/sys/class/hwmon' 163 164 thermal_fields = {} 165 def __init__(self, rootpath=None): 166 if not rootpath: 167 rootpath = self.path 168 for subpath1 in glob.glob('%s/hwmon*' % rootpath): 169 for subpath2 in ['','device/']: 170 gpaths = glob.glob("%s/%stemp*_input" % (subpath1, subpath2)) 171 for gpath in gpaths: 172 bname = os.path.basename(gpath) 173 field_path = os.path.join(subpath1, subpath2, bname) 174 175 tname_path = os.path.join(os.path.dirname(gpath), "name") 176 tname = utils.read_one_line(tname_path) 177 178 field_key = "%s_%s" % (tname, bname) 179 self.thermal_fields[field_key] = [field_path, int] 180 181 super(ThermalStatHwmon, self).__init__(self.thermal_fields, rootpath) 182 self.update() 183 184 def update(self): 185 if not os.path.exists(self.path): 186 return 187 188 self.read_all_vals() 189 190 def read_val(self, file_name, field_type): 191 try: 192 path = os.path.join(self.path, file_name) 193 f = open(path, 'r') 194 out = f.readline() 195 return field_type(out) 196 except: 197 return field_type(0) 198 199 200class ThermalStat(object): 201 """helper class to instantiate various thermal devices.""" 202 def __init__(self): 203 self._thermals = [] 204 self.min_temp = 999999999 205 self.max_temp = -999999999 206 207 thermal_stat_types = [(ThermalStatHwmon.path, ThermalStatHwmon), 208 (ThermalStatACPI.path, ThermalStatACPI)] 209 for thermal_glob_path, thermal_type in thermal_stat_types: 210 try: 211 thermal_path = glob.glob(thermal_glob_path)[0] 212 logging.debug('Using %s for thermal info.', thermal_path) 213 self._thermals.append(thermal_type(thermal_path)) 214 except: 215 logging.debug('Could not find thermal path %s, skipping.', 216 thermal_glob_path) 217 218 219 def get_temps(self): 220 """Get temperature readings. 221 222 Returns: 223 string of temperature readings. 224 """ 225 temp_str = '' 226 for thermal in self._thermals: 227 thermal.update() 228 for kname in thermal.fields: 229 if kname is 'temp' or kname.endswith('_input'): 230 val = getattr(thermal, kname) 231 temp_str += '%s:%d ' % (kname, val) 232 if val > self.max_temp: 233 self.max_temp = val 234 if val < self.min_temp: 235 self.min_temp = val 236 237 238 return temp_str 239 240 241class BatteryStat(DevStat): 242 """ 243 Battery status. 244 245 Fields: 246 247 float charge_full: Last full capacity reached [Ah] 248 float charge_full_design: Full capacity by design [Ah] 249 float charge_now: Remaining charge [Ah] 250 float current_now: Battery discharge rate [A] 251 float energy: Current battery charge [Wh] 252 float energy_full: Last full capacity reached [Wh] 253 float energy_full_design: Full capacity by design [Wh] 254 float energy_rate: Battery discharge rate [W] 255 float power_now: Battery discharge rate [W] 256 float remaining_time: Remaining discharging time [h] 257 float voltage_min_design: Minimum voltage by design [V] 258 float voltage_max_design: Maximum voltage by design [V] 259 float voltage_now: Voltage now [V] 260 """ 261 262 battery_fields = { 263 'status': ['status', str], 264 'charge_full': ['charge_full', float], 265 'charge_full_design': ['charge_full_design', float], 266 'charge_now': ['charge_now', float], 267 'current_now': ['current_now', float], 268 'voltage_min_design': ['voltage_min_design', float], 269 'voltage_max_design': ['voltage_max_design', float], 270 'voltage_now': ['voltage_now', float], 271 'energy': ['energy_now', float], 272 'energy_full': ['energy_full', float], 273 'energy_full_design': ['energy_full_design', float], 274 'power_now': ['power_now', float], 275 'present': ['present', int], 276 'energy_rate': ['', ''], 277 'remaining_time': ['', ''] 278 } 279 280 def __init__(self, path=None): 281 super(BatteryStat, self).__init__(self.battery_fields, path) 282 self.update() 283 284 285 def update(self): 286 for _ in xrange(BATTERY_RETRY_COUNT): 287 try: 288 self._read_battery() 289 return 290 except error.TestError as e: 291 logging.warn(e) 292 for field, prop in self.battery_fields.iteritems(): 293 logging.warn(field + ': ' + repr(getattr(self, field))) 294 continue 295 raise error.TestError('Failed to read battery state') 296 297 298 def _read_battery(self): 299 self.read_all_vals() 300 301 if self.charge_full == 0 and self.energy_full != 0: 302 battery_type = BatteryDataReportType.ENERGY 303 else: 304 battery_type = BatteryDataReportType.CHARGE 305 306 if self.voltage_min_design != 0: 307 voltage_nominal = self.voltage_min_design 308 else: 309 voltage_nominal = self.voltage_now 310 311 if voltage_nominal == 0: 312 raise error.TestError('Failed to determine battery voltage') 313 314 # Since charge data is present, calculate parameters based upon 315 # reported charge data. 316 if battery_type == BatteryDataReportType.CHARGE: 317 self.charge_full = self.charge_full / BATTERY_DATA_SCALE 318 self.charge_full_design = self.charge_full_design / \ 319 BATTERY_DATA_SCALE 320 self.charge_now = self.charge_now / BATTERY_DATA_SCALE 321 322 self.current_now = math.fabs(self.current_now) / \ 323 BATTERY_DATA_SCALE 324 325 self.energy = voltage_nominal * \ 326 self.charge_now / \ 327 BATTERY_DATA_SCALE 328 self.energy_full = voltage_nominal * \ 329 self.charge_full / \ 330 BATTERY_DATA_SCALE 331 self.energy_full_design = voltage_nominal * \ 332 self.charge_full_design / \ 333 BATTERY_DATA_SCALE 334 335 # Charge data not present, so calculate parameters based upon 336 # reported energy data. 337 elif battery_type == BatteryDataReportType.ENERGY: 338 self.charge_full = self.energy_full / voltage_nominal 339 self.charge_full_design = self.energy_full_design / \ 340 voltage_nominal 341 self.charge_now = self.energy / voltage_nominal 342 343 # TODO(shawnn): check if power_now can really be reported 344 # as negative, in the same way current_now can 345 self.current_now = math.fabs(self.power_now) / \ 346 voltage_nominal 347 348 self.energy = self.energy / BATTERY_DATA_SCALE 349 self.energy_full = self.energy_full / BATTERY_DATA_SCALE 350 self.energy_full_design = self.energy_full_design / \ 351 BATTERY_DATA_SCALE 352 353 self.voltage_min_design = self.voltage_min_design / \ 354 BATTERY_DATA_SCALE 355 self.voltage_max_design = self.voltage_max_design / \ 356 BATTERY_DATA_SCALE 357 self.voltage_now = self.voltage_now / \ 358 BATTERY_DATA_SCALE 359 voltage_nominal = voltage_nominal / \ 360 BATTERY_DATA_SCALE 361 362 self.energy_rate = self.voltage_now * self.current_now 363 364 self.remaining_time = 0 365 if self.current_now and self.energy_rate: 366 self.remaining_time = self.energy / self.energy_rate 367 368 if self.charge_full > (self.charge_full_design * 1.5): 369 raise error.TestError('Unreasonable charge_full value') 370 if self.charge_now > (self.charge_full_design * 1.5): 371 raise error.TestError('Unreasonable charge_now value') 372 373 374class LineStatDummy(DevStat): 375 """ 376 Dummy line stat for devices which don't provide power_supply related sysfs 377 interface. 378 """ 379 def __init__(self): 380 self.online = True 381 382 383 def update(self): 384 pass 385 386class LineStat(DevStat): 387 """ 388 Power line status. 389 390 Fields: 391 392 bool online: Line power online 393 """ 394 395 linepower_fields = { 396 'is_online': ['online', int], 397 'status': ['status', str] 398 } 399 400 401 def __init__(self, path=None): 402 super(LineStat, self).__init__(self.linepower_fields, path) 403 logging.debug("line path: %s", path) 404 self.update() 405 406 407 def update(self): 408 self.read_all_vals() 409 self.online = self.is_online == 1 410 411 412class SysStat(object): 413 """ 414 System power status for a given host. 415 416 Fields: 417 418 battery: A list of BatteryStat objects. 419 linepower: A list of LineStat objects. 420 """ 421 psu_types = ['Mains', 'USB', 'USB_ACA', 'USB_C', 'USB_CDP', 'USB_DCP', 422 'USB_PD', 'USB_PD_DRP', 'Unknown'] 423 424 def __init__(self): 425 power_supply_path = '/sys/class/power_supply/*' 426 self.battery = None 427 self.linepower = [] 428 self.thermal = None 429 self.battery_path = None 430 self.linepower_path = [] 431 432 power_supplies = glob.glob(power_supply_path) 433 for path in power_supplies: 434 type_path = os.path.join(path,'type') 435 if not os.path.exists(type_path): 436 continue 437 power_type = utils.read_one_line(type_path) 438 if power_type == 'Battery': 439 scope_path = os.path.join(path,'scope') 440 if (os.path.exists(scope_path) and 441 utils.read_one_line(scope_path) == 'Device'): 442 continue 443 self.battery_path = path 444 elif power_type in self.psu_types: 445 self.linepower_path.append(path) 446 447 if not self.battery_path or not self.linepower_path: 448 logging.warning("System does not provide power sysfs interface") 449 450 self.thermal = ThermalStat() 451 452 453 def refresh(self): 454 """ 455 Initialize device power status objects. 456 """ 457 self.linepower = [] 458 459 if self.battery_path: 460 self.battery = [ BatteryStat(self.battery_path) ] 461 462 for path in self.linepower_path: 463 self.linepower.append(LineStat(path)) 464 if not self.linepower: 465 self.linepower = [ LineStatDummy() ] 466 467 temp_str = self.thermal.get_temps() 468 if temp_str: 469 logging.info('Temperature reading: ' + temp_str) 470 else: 471 logging.error('Could not read temperature, skipping.') 472 473 474 def on_ac(self): 475 """ 476 Returns true if device is currently running from AC power. 477 """ 478 on_ac = False 479 for linepower in self.linepower: 480 on_ac |= linepower.online 481 482 # Butterfly can incorrectly report AC online for some time after 483 # unplug. Check battery discharge state to confirm. 484 if utils.get_board() == 'butterfly': 485 on_ac &= (not self.battery_discharging()) 486 return on_ac 487 488 489 def ac_charging(self): 490 """ 491 Returns true if device is currently charging from AC power. 492 """ 493 charging = False 494 for linepower in self.linepower: 495 charging |= (linepower.status == 'Charging') 496 return charging 497 498 499 def battery_discharging(self): 500 """ 501 Returns true if battery is currently discharging or false otherwise. 502 """ 503 if not self.battery_path: 504 logging.warn('Unable to determine battery discharge status') 505 return False 506 507 return(self.battery[0].status.rstrip() == 'Discharging') 508 509 510 def battery_discharge_ok_on_ac(self): 511 """Returns True if battery is ok to discharge on AC presently. 512 513 some devices cycle between charge & discharge above a certain 514 SoC. If AC is charging and SoC > 95% we can safely assume that. 515 """ 516 return self.ac_charging() and (self.percent_current_charge() > 95) 517 518 519 def percent_current_charge(self): 520 """Returns current charge compare to design capacity in percent. 521 """ 522 return self.battery[0].charge_now * 100 / \ 523 self.battery[0].charge_full_design 524 525 526 def assert_battery_state(self, percent_initial_charge_min): 527 """Check initial power configuration state is battery. 528 529 Args: 530 percent_initial_charge_min: float between 0 -> 1.00 of 531 percentage of battery that must be remaining. 532 None|0|False means check not performed. 533 534 Raises: 535 TestError: if one of battery assertions fails 536 """ 537 if self.on_ac(): 538 raise error.TestError( 539 'Running on AC power. Please remove AC power cable.') 540 541 percent_initial_charge = self.percent_current_charge() 542 543 if percent_initial_charge_min and percent_initial_charge < \ 544 percent_initial_charge_min: 545 raise error.TestError('Initial charge (%f) less than min (%f)' 546 % (percent_initial_charge, percent_initial_charge_min)) 547 548 549def get_status(): 550 """ 551 Return a new power status object (SysStat). A new power status snapshot 552 for a given host can be obtained by either calling this routine again and 553 constructing a new SysStat object, or by using the refresh method of the 554 SysStat object. 555 """ 556 status = SysStat() 557 status.refresh() 558 return status 559 560 561class AbstractStats(object): 562 """ 563 Common superclass for measurements of percentages per state over time. 564 565 Public Attributes: 566 incremental: If False, stats returned are from a single 567 _read_stats. Otherwise, stats are from the difference between 568 the current and last refresh. 569 """ 570 571 @staticmethod 572 def to_percent(stats): 573 """ 574 Turns a dict with absolute time values into a dict with percentages. 575 """ 576 total = sum(stats.itervalues()) 577 if total == 0: 578 return {k: 0 for k in stats} 579 return dict((k, v * 100.0 / total) for (k, v) in stats.iteritems()) 580 581 582 @staticmethod 583 def do_diff(new, old): 584 """ 585 Returns a dict with value deltas from two dicts with matching keys. 586 """ 587 return dict((k, new[k] - old.get(k, 0)) for k in new.iterkeys()) 588 589 590 @staticmethod 591 def format_results_percent(results, name, percent_stats): 592 """ 593 Formats autotest result keys to format: 594 percent_<name>_<key>_time 595 """ 596 for key in percent_stats: 597 results['percent_%s_%s_time' % (name, key)] = percent_stats[key] 598 599 600 @staticmethod 601 def format_results_wavg(results, name, wavg): 602 """ 603 Add an autotest result keys to format: wavg_<name> 604 """ 605 if wavg is not None: 606 results['wavg_%s' % (name)] = wavg 607 608 609 def __init__(self, name=None, incremental=True): 610 if not name: 611 error.TestFail("Need to name AbstractStats instance please.") 612 self.name = name 613 self.incremental = incremental 614 self._stats = self._read_stats() 615 616 617 def refresh(self): 618 """ 619 Returns dict mapping state names to percentage of time spent in them. 620 """ 621 raw_stats = result = self._read_stats() 622 if self.incremental: 623 result = self.do_diff(result, self._stats) 624 self._stats = raw_stats 625 return self.to_percent(result) 626 627 628 def _automatic_weighted_average(self): 629 """ 630 Turns a dict with absolute times (or percentages) into a weighted 631 average value. 632 """ 633 total = sum(self._stats.itervalues()) 634 if total == 0: 635 return None 636 637 return sum((float(k)*v) / total for (k, v) in self._stats.iteritems()) 638 639 640 def _supports_automatic_weighted_average(self): 641 """ 642 Override! 643 644 Returns True if stats collected can be automatically converted from 645 percent distribution to weighted average. False otherwise. 646 """ 647 return False 648 649 650 def weighted_average(self): 651 """ 652 Return weighted average calculated using the automated average method 653 (if supported) or using a custom method defined by the stat. 654 """ 655 if self._supports_automatic_weighted_average(): 656 return self._automatic_weighted_average() 657 658 return self._weighted_avg_fn() 659 660 661 def _weighted_avg_fn(self): 662 """ 663 Override! Custom weighted average function. 664 665 Returns weighted average as a single floating point value. 666 """ 667 return None 668 669 670 def _read_stats(self): 671 """ 672 Override! Reads the raw data values that shall be measured into a dict. 673 """ 674 raise NotImplementedError('Override _read_stats in the subclass!') 675 676 677CPU_BASE_PATH = '/sys/devices/system/cpu/' 678 679def get_all_cpus(): 680 """ 681 Retrieve all numbers of 'cpu\d+' files under |CPU_BASE_PATH|. 682 """ 683 cpu_entry_re = re.compile(r'cpu(\d+)') 684 cpus = [] 685 for f in os.listdir(CPU_BASE_PATH): 686 match = cpu_entry_re.match(f) 687 if match: 688 cpus.append(int(match.groups()[0])) 689 return frozenset(cpus) 690 691def get_cpus_filepaths_for_suffix(cpus, suffix): 692 """ 693 For each cpu in |cpus| check whether |CPU_BASE_PATH|/cpu%d/|suffix| exists. 694 Return tuple of two lists t: 695 t[0]: all cpu ids where the condition above holds 696 t[1]: all full paths where condition above holds. 697 """ 698 available_cpus = [] 699 available_paths = [] 700 for c in cpus: 701 c_file_path = os.path.join(CPU_BASE_PATH, 'cpu%d' % c, suffix) 702 if os.path.exists(c_file_path): 703 available_cpus.append(c) 704 available_paths.append(c_file_path) 705 return (available_cpus, available_paths) 706 707class CPUFreqStats(AbstractStats): 708 """ 709 CPU Frequency statistics 710 """ 711 MSR_PLATFORM_INFO = 0xce 712 MSR_IA32_MPERF = 0xe7 713 MSR_IA32_APERF = 0xe8 714 715 def __init__(self, cpus=None): 716 name = 'cpufreq' 717 stats_suffix = 'cpufreq/stats/time_in_state' 718 key_suffix = 'cpufreq/scaling_available_frequencies' 719 intel_pstate_msr_path = '/dev/cpu/*/msr' 720 all_cpus = get_all_cpus() 721 if not cpus: 722 cpus = all_cpus 723 cpus, self._file_paths = get_cpus_filepaths_for_suffix(cpus, 724 stats_suffix) 725 if len(cpus) and len(cpus) < len(all_cpus): 726 name = '%s_%s' % (name, '_'.join([str(c) for c in cpus])) 727 _, cpufreq_key_paths = get_cpus_filepaths_for_suffix(cpus, key_suffix) 728 intel_pstate_msr_paths = glob.glob(intel_pstate_msr_path) 729 self._running_intel_pstate = False 730 self._initial_perf = None 731 self._current_perf = None 732 self._max_freq = 0 733 if not self._file_paths: 734 logging.debug('time_in_state file not found') 735 if intel_pstate_msr_paths: 736 logging.debug('intel_pstate msr file found') 737 self._num_cpus = len(intel_pstate_msr_paths) 738 self._running_intel_pstate = True 739 740 self._available_freqs = set() 741 for path in cpufreq_key_paths: 742 self._available_freqs |= set(int(x) for x in 743 utils.read_file(path).split()) 744 745 super(CPUFreqStats, self).__init__(name=name) 746 747 748 def _read_stats(self): 749 if self._running_intel_pstate: 750 aperf = 0 751 mperf = 0 752 753 for cpu in range(self._num_cpus): 754 aperf += utils.rdmsr(self.MSR_IA32_APERF, cpu) 755 mperf += utils.rdmsr(self.MSR_IA32_MPERF, cpu) 756 757 # max_freq is supposed to be the same for all CPUs and remain 758 # constant throughout. So, we set the entry only once. 759 # Note that this is max non-turbo frequency, some CPU can run at 760 # higher turbo frequency in some condition. 761 if not self._max_freq: 762 platform_info = utils.rdmsr(self.MSR_PLATFORM_INFO) 763 mul = platform_info >> 8 & 0xff 764 bclk = utils.get_intel_bclk_khz() 765 self._max_freq = mul * bclk 766 767 if not self._initial_perf: 768 self._initial_perf = (aperf, mperf) 769 770 self._current_perf = (aperf, mperf) 771 772 stats = dict((k, 0) for k in self._available_freqs) 773 for path in self._file_paths: 774 if not os.path.exists(path): 775 logging.debug('%s is not found', path) 776 continue 777 778 data = utils.read_file(path) 779 for line in data.splitlines(): 780 pair = line.split() 781 freq = int(pair[0]) 782 timeunits = int(pair[1]) 783 if freq in stats: 784 stats[freq] += timeunits 785 else: 786 stats[freq] = timeunits 787 return stats 788 789 790 def _supports_automatic_weighted_average(self): 791 return not self._running_intel_pstate 792 793 794 def _weighted_avg_fn(self): 795 if not self._running_intel_pstate: 796 return None 797 798 if self._current_perf[1] != self._initial_perf[1]: 799 # Avg freq = max_freq * aperf_delta / mperf_delta 800 return self._max_freq * \ 801 float(self._current_perf[0] - self._initial_perf[0]) / \ 802 (self._current_perf[1] - self._initial_perf[1]) 803 return 1.0 804 805 806class CPUCStateStats(AbstractStats): 807 """ 808 Base class for C-state residency statistics 809 """ 810 def __init__(self, name, non_c0_stat=''): 811 self._non_c0_stat = non_c0_stat 812 super(CPUCStateStats, self).__init__(name=name) 813 814 815 def to_percent(self, stats): 816 """ 817 Turns a dict with absolute time values into a dict with percentages. 818 Ignore the |non_c0_stat_name| which is aggegate stat in the total count. 819 """ 820 total = sum(v for k, v in stats.iteritems() if k != self._non_c0_stat) 821 if total == 0: 822 return {k: 0 for k in stats} 823 return {k: v * 100.0 / total for k, v in stats.iteritems()} 824 825 826class CPUIdleStats(CPUCStateStats): 827 """ 828 CPU Idle statistics (refresh() will not work with incremental=False!) 829 """ 830 # TODO (snanda): Handle changes in number of c-states due to events such 831 # as ac <-> battery transitions. 832 # TODO (snanda): Handle non-S0 states. Time spent in suspend states is 833 # currently not factored out. 834 def __init__(self, cpus=None): 835 name = 'cpuidle' 836 cpuidle_suffix = 'cpuidle' 837 all_cpus = get_all_cpus() 838 if not cpus: 839 cpus = all_cpus 840 cpus, self._cpus = get_cpus_filepaths_for_suffix(cpus, cpuidle_suffix) 841 if len(cpus) and len(cpus) < len(all_cpus): 842 name = '%s_%s' % (name, '_'.join([str(c) for c in cpus])) 843 super(CPUIdleStats, self).__init__(name=name, non_c0_stat='non-C0') 844 845 846 def _read_stats(self): 847 cpuidle_stats = collections.defaultdict(int) 848 epoch_usecs = int(time.time() * 1000 * 1000) 849 for cpu in self._cpus: 850 state_path = os.path.join(cpu, 'state*') 851 states = glob.glob(state_path) 852 cpuidle_stats['C0'] += epoch_usecs 853 854 for state in states: 855 name = utils.read_one_line(os.path.join(state, 'name')) 856 latency = utils.read_one_line(os.path.join(state, 'latency')) 857 858 if not int(latency) and name == 'POLL': 859 # C0 state. Kernel stats aren't right, so calculate by 860 # subtracting all other states from total time (using epoch 861 # timer since we calculate differences in the end anyway). 862 # NOTE: Only x86 lists C0 under cpuidle, ARM does not. 863 continue 864 865 usecs = int(utils.read_one_line(os.path.join(state, 'time'))) 866 cpuidle_stats['C0'] -= usecs 867 868 if name == '<null>': 869 # Kernel race condition that can happen while a new C-state 870 # gets added (e.g. AC->battery). Don't know the 'name' of 871 # the state yet, but its 'time' would be 0 anyway. 872 logging.warning('Read name: <null>, time: %d from %s' 873 % (usecs, state) + '... skipping.') 874 continue 875 876 cpuidle_stats[name] += usecs 877 cpuidle_stats['non-C0'] += usecs 878 879 return cpuidle_stats 880 881 882class CPUPackageStats(CPUCStateStats): 883 """ 884 Package C-state residency statistics for modern Intel CPUs. 885 """ 886 887 ATOM = {'C2': 0x3F8, 'C4': 0x3F9, 'C6': 0x3FA} 888 NEHALEM = {'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA} 889 SANDY_BRIDGE = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA} 890 SILVERMONT = {'C6': 0x3FA} 891 GOLDMONT = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9,'C10': 0x632} 892 BROADWELL = {'C2': 0x60D, 'C3': 0x3F8, 'C6': 0x3F9, 'C7': 0x3FA, 893 'C8': 0x630, 'C9': 0x631,'C10': 0x632} 894 895 def __init__(self): 896 def _get_platform_states(): 897 """ 898 Helper to decide what set of microarchitecture-specific MSRs to use. 899 900 Returns: dict that maps C-state name to MSR address, or None. 901 """ 902 cpu_uarch = utils.get_intel_cpu_uarch() 903 904 return { 905 # model groups pulled from Intel SDM, volume 4 906 # Group same package cstate using the older uarch name 907 # 908 # TODO(harry.pan): As the keys represent microarchitecture 909 # names, we could consider to rename the PC state groups 910 # to avoid ambiguity. 911 'Airmont': self.SILVERMONT, 912 'Atom': self.ATOM, 913 'Broadwell': self.BROADWELL, 914 'Goldmont': self.GOLDMONT, 915 'Haswell': self.SANDY_BRIDGE, 916 'Ivy Bridge': self.SANDY_BRIDGE, 917 'Ivy Bridge-E': self.SANDY_BRIDGE, 918 'Kaby Lake': self.BROADWELL, 919 'Nehalem': self.NEHALEM, 920 'Sandy Bridge': self.SANDY_BRIDGE, 921 'Silvermont': self.SILVERMONT, 922 'Skylake': self.BROADWELL, 923 'Westmere': self.NEHALEM, 924 }.get(cpu_uarch, None) 925 926 self._platform_states = _get_platform_states() 927 super(CPUPackageStats, self).__init__(name='cpupkg', 928 non_c0_stat='non-C0_C1') 929 930 931 def _read_stats(self): 932 packages = set() 933 template = '/sys/devices/system/cpu/cpu%s/topology/physical_package_id' 934 if not self._platform_states: 935 return {} 936 stats = dict((state, 0) for state in self._platform_states) 937 stats['C0_C1'] = 0 938 stats['non-C0_C1'] = 0 939 940 for cpu in os.listdir('/dev/cpu'): 941 if not os.path.exists(template % cpu): 942 continue 943 package = utils.read_one_line(template % cpu) 944 if package in packages: 945 continue 946 packages.add(package) 947 948 stats['C0_C1'] += utils.rdmsr(0x10, cpu) # TSC 949 for (state, msr) in self._platform_states.iteritems(): 950 ticks = utils.rdmsr(msr, cpu) 951 stats[state] += ticks 952 stats['non-C0_C1'] += ticks 953 stats['C0_C1'] -= ticks 954 955 return stats 956 957 958class DevFreqStats(AbstractStats): 959 """ 960 Devfreq device frequency stats. 961 """ 962 963 _DIR = '/sys/class/devfreq' 964 965 966 def __init__(self, f): 967 """Constructs DevFreqStats Object that track frequency stats 968 for the path of the given Devfreq device. 969 970 The frequencies for devfreq devices are listed in Hz. 971 972 Args: 973 path: the path to the devfreq device 974 975 Example: 976 /sys/class/devfreq/dmc 977 """ 978 self._path = os.path.join(self._DIR, f) 979 if not os.path.exists(self._path): 980 raise error.TestError('DevFreqStats: devfreq device does not exist') 981 982 fname = os.path.join(self._path, 'available_frequencies') 983 af = utils.read_one_line(fname).strip() 984 self._available_freqs = sorted(af.split(), key=int) 985 986 super(DevFreqStats, self).__init__(name=f) 987 988 def _read_stats(self): 989 stats = dict((freq, 0) for freq in self._available_freqs) 990 fname = os.path.join(self._path, 'trans_stat') 991 992 with open(fname) as fd: 993 # The lines that contain the time in each frequency start on the 3rd 994 # line, so skip the first 2 lines. The last line contains the number 995 # of transitions, so skip that line too. 996 # The time in each frequency is at the end of the line. 997 freq_pattern = re.compile(r'\d+(?=:)') 998 for line in fd.readlines()[2:-1]: 999 freq = freq_pattern.search(line) 1000 if freq and freq.group() in self._available_freqs: 1001 stats[freq.group()] = int(line.strip().split()[-1]) 1002 1003 return stats 1004 1005 1006class GPUFreqStats(AbstractStats): 1007 """GPU Frequency statistics class. 1008 1009 TODO(tbroch): add stats for other GPUs 1010 """ 1011 1012 _MALI_DEV = '/sys/class/misc/mali0/device' 1013 _MALI_EVENTS = ['mali_dvfs:mali_dvfs_set_clock'] 1014 _MALI_TRACE_CLK_RE = \ 1015 r'kworker.* (\d+\.\d+): mali_dvfs_set_clock: frequency=(\d+)\d{6}' 1016 1017 _I915_ROOT = '/sys/kernel/debug/dri/0' 1018 _I915_EVENTS = ['i915:intel_gpu_freq_change'] 1019 _I915_CLKS_FILES = ['i915_cur_delayinfo', 'i915_frequency_info'] 1020 _I915_TRACE_CLK_RE = \ 1021 r'kworker.* (\d+\.\d+): intel_gpu_freq_change: new_freq=(\d+)' 1022 _I915_CUR_FREQ_RE = r'CAGF:\s+(\d+)MHz' 1023 _I915_MIN_FREQ_RE = r'Lowest \(RPN\) frequency:\s+(\d+)MHz' 1024 _I915_MAX_FREQ_RE = r'Max non-overclocked \(RP0\) frequency:\s+(\d+)MHz' 1025 # There are 6 frequency steps per 100 MHz 1026 _I915_FREQ_STEPS = [0, 17, 33, 50, 67, 83] 1027 1028 _gpu_type = None 1029 1030 1031 def _get_mali_freqs(self): 1032 """Get mali clocks based on kernel version. 1033 1034 For 3.8-3.18: 1035 # cat /sys/class/misc/mali0/device/clock 1036 100000000 1037 # cat /sys/class/misc/mali0/device/available_frequencies 1038 100000000 1039 160000000 1040 266000000 1041 350000000 1042 400000000 1043 450000000 1044 533000000 1045 533000000 1046 1047 For 4.4+: 1048 Tracked in DevFreqStats 1049 1050 Returns: 1051 cur_mhz: string of current GPU clock in mhz 1052 """ 1053 cur_mhz = None 1054 fqs = [] 1055 1056 fname = os.path.join(self._MALI_DEV, 'clock') 1057 if os.path.exists(fname): 1058 cur_mhz = str(int(int(utils.read_one_line(fname).strip()) / 1e6)) 1059 fname = os.path.join(self._MALI_DEV, 'available_frequencies') 1060 with open(fname) as fd: 1061 for ln in fd.readlines(): 1062 freq = int(int(ln.strip()) / 1e6) 1063 fqs.append(str(freq)) 1064 fqs.sort() 1065 1066 self._freqs = fqs 1067 return cur_mhz 1068 1069 1070 def __init__(self, incremental=False): 1071 1072 1073 min_mhz = None 1074 max_mhz = None 1075 cur_mhz = None 1076 events = None 1077 i915_path = None 1078 self._freqs = [] 1079 self._prev_sample = None 1080 self._trace = None 1081 1082 if os.path.exists(self._MALI_DEV) and \ 1083 not os.path.exists(os.path.join(self._MALI_DEV, "devfreq")): 1084 self._set_gpu_type('mali') 1085 else: 1086 for file_name in self._I915_CLKS_FILES: 1087 full_path = os.path.join(self._I915_ROOT, file_name) 1088 if os.path.exists(full_path): 1089 self._set_gpu_type('i915') 1090 i915_path = full_path 1091 break 1092 else: 1093 # We either don't know how to track GPU stats (yet) or the stats 1094 # are tracked in DevFreqStats. 1095 self._set_gpu_type(None) 1096 1097 logging.debug("gpu_type is %s", self._gpu_type) 1098 1099 if self._gpu_type is 'mali': 1100 events = self._MALI_EVENTS 1101 cur_mhz = self._get_mali_freqs() 1102 if self._freqs: 1103 min_mhz = self._freqs[0] 1104 max_mhz = self._freqs[-1] 1105 1106 elif self._gpu_type is 'i915': 1107 events = self._I915_EVENTS 1108 with open(i915_path) as fd: 1109 for ln in fd.readlines(): 1110 logging.debug("ln = %s", ln) 1111 result = re.findall(self._I915_CUR_FREQ_RE, ln) 1112 if result: 1113 cur_mhz = result[0] 1114 continue 1115 result = re.findall(self._I915_MIN_FREQ_RE, ln) 1116 if result: 1117 min_mhz = result[0] 1118 continue 1119 result = re.findall(self._I915_MAX_FREQ_RE, ln) 1120 if result: 1121 max_mhz = result[0] 1122 continue 1123 if min_mhz and max_mhz: 1124 for i in xrange(int(min_mhz), int(max_mhz) + 1): 1125 if i % 100 in self._I915_FREQ_STEPS: 1126 self._freqs.append(str(i)) 1127 1128 logging.debug("cur_mhz = %s, min_mhz = %s, max_mhz = %s", cur_mhz, 1129 min_mhz, max_mhz) 1130 1131 if cur_mhz and min_mhz and max_mhz: 1132 self._trace = kernel_trace.KernelTrace(events=events) 1133 1134 # Not all platforms or kernel versions support tracing. 1135 if not self._trace or not self._trace.is_tracing(): 1136 logging.warning("GPU frequency tracing not enabled.") 1137 else: 1138 self._prev_sample = (cur_mhz, self._trace.uptime_secs()) 1139 logging.debug("Current GPU freq: %s", cur_mhz) 1140 logging.debug("All GPU freqs: %s", self._freqs) 1141 1142 super(GPUFreqStats, self).__init__(name='gpu', incremental=incremental) 1143 1144 1145 @classmethod 1146 def _set_gpu_type(cls, gpu_type): 1147 cls._gpu_type = gpu_type 1148 1149 1150 def _read_stats(self): 1151 if self._gpu_type: 1152 return getattr(self, "_%s_read_stats" % self._gpu_type)() 1153 return {} 1154 1155 1156 def _trace_read_stats(self, regexp): 1157 """Read GPU stats from kernel trace outputs. 1158 1159 Args: 1160 regexp: regular expression to match trace output for frequency 1161 1162 Returns: 1163 Dict with key string in mhz and val float in seconds. 1164 """ 1165 if not self._prev_sample: 1166 return {} 1167 1168 stats = dict((k, 0.0) for k in self._freqs) 1169 results = self._trace.read(regexp=regexp) 1170 for (tstamp_str, freq) in results: 1171 tstamp = float(tstamp_str) 1172 1173 # do not reparse lines in trace buffer 1174 if tstamp <= self._prev_sample[1]: 1175 continue 1176 delta = tstamp - self._prev_sample[1] 1177 logging.debug("freq:%s tstamp:%f - %f delta:%f", 1178 self._prev_sample[0], 1179 tstamp, self._prev_sample[1], 1180 delta) 1181 stats[self._prev_sample[0]] += delta 1182 self._prev_sample = (freq, tstamp) 1183 1184 # Do last record 1185 delta = self._trace.uptime_secs() - self._prev_sample[1] 1186 logging.debug("freq:%s tstamp:uptime - %f delta:%f", 1187 self._prev_sample[0], 1188 self._prev_sample[1], delta) 1189 stats[self._prev_sample[0]] += delta 1190 1191 logging.debug("GPU freq percents:%s", stats) 1192 return stats 1193 1194 1195 def _mali_read_stats(self): 1196 """Read Mali GPU stats 1197 1198 Frequencies are reported in Hz, so use a regex that drops the last 6 1199 digits. 1200 1201 Output in trace looks like this: 1202 1203 kworker/u:24-5220 [000] .... 81060.329232: mali_dvfs_set_clock: frequency=400 1204 kworker/u:24-5220 [000] .... 81061.830128: mali_dvfs_set_clock: frequency=350 1205 1206 Returns: 1207 Dict with frequency in mhz as key and float in seconds for time 1208 spent at that frequency. 1209 """ 1210 return self._trace_read_stats(self._MALI_TRACE_CLK_RE) 1211 1212 1213 def _i915_read_stats(self): 1214 """Read i915 GPU stats. 1215 1216 Output looks like this (kernel >= 3.8): 1217 1218 kworker/u:0-28247 [000] .... 259391.579610: intel_gpu_freq_change: new_freq=400 1219 kworker/u:0-28247 [000] .... 259391.581797: intel_gpu_freq_change: new_freq=350 1220 1221 Returns: 1222 Dict with frequency in mhz as key and float in seconds for time 1223 spent at that frequency. 1224 """ 1225 return self._trace_read_stats(self._I915_TRACE_CLK_RE) 1226 1227 1228 def _supports_automatic_weighted_average(self): 1229 return self._gpu_type is not None 1230 1231 1232class USBSuspendStats(AbstractStats): 1233 """ 1234 USB active/suspend statistics (over all devices) 1235 """ 1236 # TODO (snanda): handle hot (un)plugging of USB devices 1237 # TODO (snanda): handle duration counters wraparound 1238 1239 def __init__(self): 1240 usb_stats_path = '/sys/bus/usb/devices/*/power' 1241 self._file_paths = glob.glob(usb_stats_path) 1242 if not self._file_paths: 1243 logging.debug('USB stats path not found') 1244 super(USBSuspendStats, self).__init__(name='usb') 1245 1246 1247 def _read_stats(self): 1248 usb_stats = {'active': 0, 'suspended': 0} 1249 1250 for path in self._file_paths: 1251 active_duration_path = os.path.join(path, 'active_duration') 1252 total_duration_path = os.path.join(path, 'connected_duration') 1253 1254 if not os.path.exists(active_duration_path) or \ 1255 not os.path.exists(total_duration_path): 1256 logging.debug('duration paths do not exist for: %s', path) 1257 continue 1258 1259 active = int(utils.read_file(active_duration_path)) 1260 total = int(utils.read_file(total_duration_path)) 1261 logging.debug('device %s active for %.2f%%', 1262 path, active * 100.0 / total) 1263 1264 usb_stats['active'] += active 1265 usb_stats['suspended'] += total - active 1266 1267 return usb_stats 1268 1269 1270def get_cpu_sibling_groups(): 1271 """ 1272 Get CPU core groups in HMP systems. 1273 1274 In systems with both small core and big core, 1275 returns groups of small and big sibling groups. 1276 """ 1277 siblings_suffix = 'topology/core_siblings_list' 1278 sibling_groups = [] 1279 cpus_processed = set() 1280 cpus, sibling_file_paths = get_cpus_filepaths_for_suffix(get_all_cpus(), 1281 siblings_suffix) 1282 for c, siblings_path in zip(cpus, sibling_file_paths): 1283 if c in cpus_processed: 1284 # This cpu is already part of a sibling group. Skip. 1285 continue 1286 siblings_data = utils.read_file(siblings_path) 1287 sibling_group = set() 1288 for sibling_entry in siblings_data.split(','): 1289 entry_data = sibling_entry.split('-') 1290 sibling_start = sibling_end = int(entry_data[0]) 1291 if len(entry_data) > 1: 1292 sibling_end = int(entry_data[1]) 1293 siblings = set(range(sibling_start, sibling_end + 1)) 1294 sibling_group |= siblings 1295 cpus_processed |= sibling_group 1296 sibling_groups.append(frozenset(sibling_group)) 1297 return tuple(sibling_groups) 1298 1299 1300def get_available_cpu_stats(): 1301 """Return CPUFreq/CPUIdleStats groups by big-small siblings groups.""" 1302 ret = [CPUPackageStats()] 1303 cpu_sibling_groups = get_cpu_sibling_groups() 1304 if not cpu_sibling_groups: 1305 ret.append(CPUFreqStats()) 1306 ret.append(CPUIdleStats()) 1307 for cpu_group in cpu_sibling_groups: 1308 ret.append(CPUFreqStats(cpu_group)) 1309 ret.append(CPUIdleStats(cpu_group)) 1310 return ret 1311 1312 1313class StatoMatic(object): 1314 """Class to aggregate and monitor a bunch of power related statistics.""" 1315 def __init__(self): 1316 self._start_uptime_secs = kernel_trace.KernelTrace.uptime_secs() 1317 self._astats = [USBSuspendStats(), GPUFreqStats(incremental=False)] 1318 self._astats.extend(get_available_cpu_stats()) 1319 if os.path.isdir(DevFreqStats._DIR): 1320 self._astats.extend([DevFreqStats(f) for f in \ 1321 os.listdir(DevFreqStats._DIR)]) 1322 1323 self._disk = DiskStateLogger() 1324 self._disk.start() 1325 1326 1327 def publish(self): 1328 """Publishes results of various statistics gathered. 1329 1330 Returns: 1331 dict with 1332 key = string 'percent_<name>_<key>_time' 1333 value = float in percent 1334 """ 1335 results = {} 1336 tot_secs = kernel_trace.KernelTrace.uptime_secs() - \ 1337 self._start_uptime_secs 1338 for stat_obj in self._astats: 1339 percent_stats = stat_obj.refresh() 1340 logging.debug("pstats = %s", percent_stats) 1341 if stat_obj.name is 'gpu': 1342 # TODO(tbroch) remove this once GPU freq stats have proved 1343 # reliable 1344 stats_secs = sum(stat_obj._stats.itervalues()) 1345 if stats_secs < (tot_secs * 0.9) or \ 1346 stats_secs > (tot_secs * 1.1): 1347 logging.warning('%s stats dont look right. Not publishing.', 1348 stat_obj.name) 1349 continue 1350 new_res = {} 1351 AbstractStats.format_results_percent(new_res, stat_obj.name, 1352 percent_stats) 1353 wavg = stat_obj.weighted_average() 1354 if wavg: 1355 AbstractStats.format_results_wavg(new_res, stat_obj.name, wavg) 1356 1357 results.update(new_res) 1358 1359 new_res = {} 1360 if self._disk.get_error(): 1361 new_res['disk_logging_error'] = str(self._disk.get_error()) 1362 else: 1363 AbstractStats.format_results_percent(new_res, 'disk', 1364 self._disk.result()) 1365 results.update(new_res) 1366 1367 return results 1368 1369 1370class PowerMeasurement(object): 1371 """Class to measure power. 1372 1373 Public attributes: 1374 domain: String name of the power domain being measured. Example is 1375 'system' for total system power 1376 1377 Public methods: 1378 refresh: Performs any power/energy sampling and calculation and returns 1379 power as float in watts. This method MUST be implemented in 1380 subclass. 1381 """ 1382 1383 def __init__(self, domain): 1384 """Constructor.""" 1385 self.domain = domain 1386 1387 1388 def refresh(self): 1389 """Performs any power/energy sampling and calculation. 1390 1391 MUST be implemented in subclass 1392 1393 Returns: 1394 float, power in watts. 1395 """ 1396 raise NotImplementedError("'refresh' method should be implemented in " 1397 "subclass.") 1398 1399 1400def parse_power_supply_info(): 1401 """Parses power_supply_info command output. 1402 1403 Command output from power_manager ( tools/power_supply_info.cc ) looks like 1404 this: 1405 1406 Device: Line Power 1407 path: /sys/class/power_supply/cros_ec-charger 1408 ... 1409 Device: Battery 1410 path: /sys/class/power_supply/sbs-9-000b 1411 ... 1412 1413 """ 1414 rv = collections.defaultdict(dict) 1415 dev = None 1416 for ln in utils.system_output('power_supply_info').splitlines(): 1417 logging.debug("psu: %s", ln) 1418 result = re.findall(r'^Device:\s+(.*)', ln) 1419 if result: 1420 dev = result[0] 1421 continue 1422 result = re.findall(r'\s+(.+):\s+(.+)', ln) 1423 if result and dev: 1424 kname = re.findall(r'(.*)\s+\(\w+\)', result[0][0]) 1425 if kname: 1426 rv[dev][kname[0]] = result[0][1] 1427 else: 1428 rv[dev][result[0][0]] = result[0][1] 1429 1430 return rv 1431 1432 1433class SystemPower(PowerMeasurement): 1434 """Class to measure system power. 1435 1436 TODO(tbroch): This class provides a subset of functionality in BatteryStat 1437 in hopes of minimizing power draw. Investigate whether its really 1438 significant and if not, deprecate. 1439 1440 Private Attributes: 1441 _voltage_file: path to retrieve voltage in uvolts 1442 _current_file: path to retrieve current in uamps 1443 """ 1444 1445 def __init__(self, battery_dir): 1446 """Constructor. 1447 1448 Args: 1449 battery_dir: path to dir containing the files to probe and log. 1450 usually something like /sys/class/power_supply/BAT0/ 1451 """ 1452 super(SystemPower, self).__init__('system') 1453 # Files to log voltage and current from 1454 self._voltage_file = os.path.join(battery_dir, 'voltage_now') 1455 self._current_file = os.path.join(battery_dir, 'current_now') 1456 1457 1458 def refresh(self): 1459 """refresh method. 1460 1461 See superclass PowerMeasurement for details. 1462 """ 1463 keyvals = parse_power_supply_info() 1464 return float(keyvals['Battery']['energy rate']) 1465 1466 1467class CheckpointLogger(object): 1468 """Class to log checkpoint data. 1469 1470 Public attributes: 1471 checkpoint_data: dictionary of (tname, tlist). 1472 tname: String of testname associated with these time intervals 1473 tlist: list of tuples. Tuple contains: 1474 tstart: Float of time when subtest started 1475 tend: Float of time when subtest ended 1476 1477 Public methods: 1478 start: records a start timestamp 1479 checkpoint 1480 checkblock 1481 save_checkpoint_data 1482 load_checkpoint_data 1483 1484 Static methods: 1485 load_checkpoint_data_static 1486 1487 Private attributes: 1488 _start_time: start timestamp for checkpoint logger 1489 """ 1490 1491 def __init__(self): 1492 self.checkpoint_data = collections.defaultdict(list) 1493 self.start() 1494 1495 # If multiple MeasurementLoggers call start() on the same CheckpointLogger, 1496 # the latest one will register start time. 1497 def start(self): 1498 """Set start time for CheckpointLogger.""" 1499 self._start_time = time.time() 1500 1501 @contextlib.contextmanager 1502 def checkblock(self, tname=''): 1503 """Check point for the following block with test tname. 1504 1505 Args: 1506 tname: String of testname associated with this time interval 1507 """ 1508 start_time = time.time() 1509 yield 1510 self.checkpoint(tname, start_time) 1511 1512 def checkpoint(self, tname='', tstart=None, tend=None): 1513 """Check point the times in seconds associated with test tname. 1514 1515 Args: 1516 tname: String of testname associated with this time interval 1517 tstart: Float in seconds of when tname test started. Should be based 1518 off time.time(). If None, use start timestamp for the checkpoint 1519 logger. 1520 tend: Float in seconds of when tname test ended. Should be based 1521 off time.time(). If None, then value computed in the method. 1522 """ 1523 if not tstart and self._start_time: 1524 tstart = self._start_time 1525 if not tend: 1526 tend = time.time() 1527 self.checkpoint_data[tname].append((tstart, tend)) 1528 logging.info('Finished test "%s" between timestamps [%s, %s]', 1529 tname, tstart, tend) 1530 1531 def convert_relative(self, start_time=None): 1532 """Convert data from power_status.CheckpointLogger object to relative 1533 checkpoint data dictionary. Timestamps are converted to time in seconds 1534 since the test started. 1535 1536 Args: 1537 start_time: Float in seconds of the desired start time reference. 1538 Should be based off time.time(). If None, use start timestamp 1539 for the checkpoint logger. 1540 """ 1541 if start_time is None: 1542 start_time = self._start_time 1543 1544 checkpoint_dict = {} 1545 for tname, tlist in self.checkpoint_data.iteritems(): 1546 checkpoint_dict[tname] = [(tstart - start_time, tend - start_time) 1547 for tstart, tend in tlist] 1548 1549 return checkpoint_dict 1550 1551 def save_checkpoint_data(self, resultsdir, fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1552 """Save checkpoint data. 1553 1554 Args: 1555 resultsdir: String, directory to write results to 1556 fname: String, name of file to write results to 1557 """ 1558 fname = os.path.join(resultsdir, fname) 1559 with file(fname, 'wt') as f: 1560 json.dump(self.checkpoint_data, f, indent=4, separators=(',', ': ')) 1561 1562 def load_checkpoint_data(self, resultsdir, 1563 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1564 """Load checkpoint data. 1565 1566 Args: 1567 resultsdir: String, directory to load results from 1568 fname: String, name of file to load results from 1569 """ 1570 fname = os.path.join(resultsdir, fname) 1571 try: 1572 with open(fname, 'r') as f: 1573 self.checkpoint_data = json.load(f, 1574 object_hook=to_checkpoint_data) 1575 # Set start time to the earliest start timestamp in file. 1576 self._start_time = min( 1577 ts_pair[0] for ts_pair in itertools.chain.from_iterable( 1578 self.checkpoint_data.itervalues())) 1579 except Exception as exc: 1580 logging.warning('Failed to load checkpoint data from json file %s, ' 1581 'see exception: %s', fname, exc) 1582 1583 @staticmethod 1584 def load_checkpoint_data_static(resultsdir, 1585 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1586 """Load checkpoint data. 1587 1588 Args: 1589 resultsdir: String, directory to load results from 1590 fname: String, name of file to load results from 1591 """ 1592 fname = os.path.join(resultsdir, fname) 1593 with file(fname, 'r') as f: 1594 checkpoint_data = json.load(f) 1595 return checkpoint_data 1596 1597 1598def to_checkpoint_data(json_dict): 1599 """Helper method to translate json object into CheckpointLogger format. 1600 1601 Args: 1602 json_dict: a json object in the format of python dict 1603 Returns: 1604 a defaultdict in CheckpointLogger data format 1605 """ 1606 checkpoint_data = collections.defaultdict(list) 1607 for tname, tlist in json_dict.iteritems(): 1608 checkpoint_data[tname].extend([tuple(ts_pair) for ts_pair in tlist]) 1609 return checkpoint_data 1610 1611 1612def get_checkpoint_logger_from_file(resultsdir, 1613 fname=CHECKPOINT_LOG_DEFAULT_FNAME): 1614 """Create a CheckpointLogger and load checkpoint data from file. 1615 1616 Args: 1617 resultsdir: String, directory to load results from 1618 fname: String, name of file to load results from 1619 Returns: 1620 CheckpointLogger with data from file 1621 """ 1622 checkpoint_logger = CheckpointLogger() 1623 checkpoint_logger.load_checkpoint_data(resultsdir, fname) 1624 return checkpoint_logger 1625 1626 1627class MeasurementLogger(threading.Thread): 1628 """A thread that logs measurement readings. 1629 1630 Example code snippet: 1631 my_logger = MeasurementLogger([Measurement1, Measurement2]) 1632 my_logger.start() 1633 for testname in tests: 1634 # Option 1: use checkblock 1635 with my_logger.checkblock(testname): 1636 # run the test method for testname 1637 1638 # Option 2: use checkpoint 1639 start_time = time.time() 1640 # run the test method for testname 1641 my_logger.checkpoint(testname, start_time) 1642 1643 keyvals = my_logger.calc() 1644 1645 or using CheckpointLogger: 1646 checkpoint_logger = CheckpointLogger() 1647 my_logger = MeasurementLogger([Measurement1, Measurement2], 1648 checkpoint_logger) 1649 my_logger.start() 1650 for testname in tests: 1651 # Option 1: use checkblock 1652 with checkpoint_logger.checkblock(testname): 1653 # run the test method for testname 1654 1655 # Option 2: use checkpoint 1656 start_time = time.time() 1657 # run the test method for testname 1658 checkpoint_logger.checkpoint(testname, start_time) 1659 1660 keyvals = my_logger.calc() 1661 1662 Public attributes: 1663 seconds_period: float, probing interval in seconds. 1664 readings: list of lists of floats of measurements. 1665 times: list of floats of time (since Epoch) of when measurements 1666 occurred. len(time) == len(readings). 1667 done: flag to stop the logger. 1668 domains: list of domain strings being measured 1669 1670 Public methods: 1671 run: launches the thread to gather measurements 1672 refresh: perform data samplings for every measurements 1673 calc: calculates 1674 save_results: 1675 1676 Private attributes: 1677 _measurements: list of Measurement objects to be sampled. 1678 _checkpoint_data: dictionary of (tname, tlist). 1679 tname: String of testname associated with these time intervals 1680 tlist: list of tuples. Tuple contains: 1681 tstart: Float of time when subtest started 1682 tend: Float of time when subtest ended 1683 _results: list of results tuples. Tuple contains: 1684 prefix: String of subtest 1685 mean: Float of mean in watts 1686 std: Float of standard deviation of measurements 1687 tstart: Float of time when subtest started 1688 tend: Float of time when subtest ended 1689 """ 1690 def __init__(self, measurements, seconds_period=1.0, checkpoint_logger=None): 1691 """Initialize a logger. 1692 1693 Args: 1694 _measurements: list of Measurement objects to be sampled. 1695 seconds_period: float, probing interval in seconds. Default 1.0 1696 """ 1697 threading.Thread.__init__(self) 1698 1699 self.seconds_period = seconds_period 1700 1701 self.readings = [] 1702 self.times = [] 1703 1704 self.domains = [] 1705 self._measurements = measurements 1706 for meas in self._measurements: 1707 self.domains.append(meas.domain) 1708 1709 self._checkpoint_logger = \ 1710 checkpoint_logger if checkpoint_logger else CheckpointLogger() 1711 1712 self.done = False 1713 1714 def start(self): 1715 self._checkpoint_logger.start() 1716 super(MeasurementLogger, self).start() 1717 1718 def refresh(self): 1719 """Perform data samplings for every measurements. 1720 1721 Returns: 1722 list of sampled data for every measurements. 1723 """ 1724 readings = [] 1725 for meas in self._measurements: 1726 readings.append(meas.refresh()) 1727 return readings 1728 1729 def run(self): 1730 """Threads run method.""" 1731 loop = 0 1732 start_time = time.time() 1733 while(not self.done): 1734 # TODO (dbasehore): We probably need proper locking in this file 1735 # since there have been race conditions with modifying and accessing 1736 # data. 1737 self.readings.append(self.refresh()) 1738 current_time = time.time() 1739 self.times.append(current_time) 1740 loop += 1 1741 next_measurement_time = start_time + loop * self.seconds_period 1742 time.sleep(next_measurement_time - current_time) 1743 1744 @contextlib.contextmanager 1745 def checkblock(self, tname=''): 1746 """Check point for the following block with test tname. 1747 1748 Args: 1749 tname: String of testname associated with this time interval 1750 """ 1751 start_time = time.time() 1752 yield 1753 self.checkpoint(tname, start_time) 1754 1755 def checkpoint(self, tname='', tstart=None, tend=None): 1756 """Just a thin method calling the CheckpointLogger checkpoint method. 1757 1758 Args: 1759 tname: String of testname associated with this time interval 1760 tstart: Float in seconds of when tname test started. Should be based 1761 off time.time() 1762 tend: Float in seconds of when tname test ended. Should be based 1763 off time.time(). If None, then value computed in the method. 1764 """ 1765 self._checkpoint_logger.checkpoint(tname, tstart, tend) 1766 1767 # TODO(seankao): It might be useful to pull this method to CheckpointLogger, 1768 # to allow checkpoint usage without an explicit MeasurementLogger. 1769 def calc(self, mtype=None): 1770 """Calculate average measurement during each of the sub-tests. 1771 1772 Method performs the following steps: 1773 1. Signals the thread to stop running. 1774 2. Calculates mean, max, min, count on the samples for each of the 1775 measurements. 1776 3. Stores results to be written later. 1777 4. Creates keyvals for autotest publishing. 1778 1779 Args: 1780 mtype: string of measurement type. For example: 1781 pwr == power 1782 temp == temperature 1783 Returns: 1784 dict of keyvals suitable for autotest results. 1785 """ 1786 if not mtype: 1787 mtype = 'meas' 1788 1789 t = numpy.array(self.times) 1790 keyvals = {} 1791 results = [('domain', 'mean', 'std', 'duration (s)', 'start ts', 1792 'end ts')] 1793 # TODO(coconutruben): ensure that values is meaningful i.e. go through 1794 # the Loggers and add a unit attribute to each so that the raw 1795 # data is readable. 1796 raw_results = [('domain', 'values (%s)' % mtype)] 1797 1798 if not self.done: 1799 self.done = True 1800 # times 2 the sleep time in order to allow for readings as well. 1801 self.join(timeout=self.seconds_period * 2) 1802 1803 if not self._checkpoint_logger.checkpoint_data: 1804 self._checkpoint_logger.checkpoint() 1805 1806 for i, domain_readings in enumerate(zip(*self.readings)): 1807 meas = numpy.array(domain_readings) 1808 domain = self.domains[i] 1809 1810 for tname, tlist in self._checkpoint_logger.checkpoint_data.iteritems(): 1811 if tname: 1812 prefix = '%s_%s' % (tname, domain) 1813 else: 1814 prefix = domain 1815 keyvals[prefix+'_duration'] = 0 1816 # Select all readings taken between tstart and tend 1817 # timestamps in tlist. 1818 masks = [] 1819 for tstart, tend in tlist: 1820 keyvals[prefix+'_duration'] += tend - tstart 1821 # Try block just in case 1822 # code.google.com/p/chromium/issues/detail?id=318892 1823 # is not fixed. 1824 try: 1825 masks.append(numpy.logical_and(tstart < t, t < tend)) 1826 except ValueError, e: 1827 logging.debug('Error logging measurements: %s', str(e)) 1828 logging.debug('timestamps %d %s', t.len, t) 1829 logging.debug('timestamp start, end %f %f', tstart, tend) 1830 logging.debug('measurements %d %s', meas.len, meas) 1831 mask = numpy.logical_or.reduce(masks) 1832 meas_array = meas[mask] 1833 1834 # If sub-test terminated early, avoid calculating avg, std and 1835 # min 1836 if not meas_array.size: 1837 continue 1838 meas_mean = meas_array.mean() 1839 meas_std = meas_array.std() 1840 1841 # Results list can be used for pretty printing and saving as csv 1842 # TODO(seankao): new results format? 1843 result = (prefix, meas_mean, meas_std) 1844 for tstart, tend in tlist: 1845 result = result + (tend - tstart, tstart, tend) 1846 results.append(result) 1847 raw_results.append((prefix,) + tuple(meas_array.tolist())) 1848 1849 keyvals[prefix + '_' + mtype + '_avg'] = meas_mean 1850 keyvals[prefix + '_' + mtype + '_cnt'] = meas_array.size 1851 keyvals[prefix + '_' + mtype + '_max'] = meas_array.max() 1852 keyvals[prefix + '_' + mtype + '_min'] = meas_array.min() 1853 keyvals[prefix + '_' + mtype + '_std'] = meas_std 1854 self._results = results 1855 self._raw_results = raw_results 1856 return keyvals 1857 1858 1859 def save_results(self, resultsdir, fname_prefix=None): 1860 """Save computed results in a nice tab-separated format. 1861 This is useful for long manual runs. 1862 1863 Args: 1864 resultsdir: String, directory to write results to 1865 fname_prefix: prefix to use for fname. If provided outfiles 1866 will be [fname]_[raw|summary].txt 1867 """ 1868 if not fname_prefix: 1869 fname_prefix = 'meas_results_%.0f' % time.time() 1870 fname = '%s_summary.txt' % fname_prefix 1871 raw_fname = fname.replace('summary', 'raw') 1872 for name, data in [(fname, self._results), 1873 (raw_fname, self._raw_results)]: 1874 with open(os.path.join(resultsdir, name), 'wt') as f: 1875 # First row contains the headers 1876 f.write('%s\n' % '\t'.join(data[0])) 1877 for row in data[1:]: 1878 # First column name, rest are numbers. See _calc_power() 1879 fmt_row = [row[0]] + ['%.2f' % x for x in row[1:]] 1880 f.write('%s\n' % '\t'.join(fmt_row)) 1881 1882 1883class CPUStatsLogger(MeasurementLogger): 1884 """Class to measure CPU Frequency and CPU Idle Stats. 1885 1886 CPUStatsLogger derived from MeasurementLogger class but overload data 1887 samplings method because MeasurementLogger assumed that each sampling is 1888 independent to each other. However, in this case it is not. For example, 1889 CPU time spent in C0 state is measure by time not spent in all other states. 1890 1891 CPUStatsLogger also collects the weight average in each time period if the 1892 underlying AbstractStats support weight average function. 1893 1894 Private attributes: 1895 _stats: list of CPU AbstractStats objects to be sampled. 1896 _refresh_count: number of times refresh() has been called. 1897 _last_wavg: dict of wavg when refresh() was last called. 1898 """ 1899 def __init__(self, seconds_period=1.0, checkpoint_logger=None): 1900 """Initialize a CPUStatsLogger. 1901 1902 Args: 1903 seconds_period: float, probing interval in seconds. Default 1.0 1904 """ 1905 # We don't use measurements since CPU stats can't measure separately. 1906 super(CPUStatsLogger, self).__init__([], seconds_period, checkpoint_logger) 1907 1908 self._stats = get_available_cpu_stats() 1909 self._stats.append(GPUFreqStats()) 1910 self.domains = [] 1911 for stat in self._stats: 1912 self.domains.extend([stat.name + '_' + str(state_name) 1913 for state_name in stat.refresh()]) 1914 if stat.weighted_average(): 1915 self.domains.append('wavg_' + stat.name) 1916 self._refresh_count = 0 1917 self._last_wavg = collections.defaultdict(int) 1918 1919 def refresh(self): 1920 self._refresh_count += 1 1921 count = self._refresh_count 1922 ret = [] 1923 for stat in self._stats: 1924 ret.extend(stat.refresh().values()) 1925 wavg = stat.weighted_average() 1926 if wavg: 1927 if stat.incremental: 1928 last_wavg = self._last_wavg[stat.name] 1929 self._last_wavg[stat.name] = wavg 1930 # Calculate weight average in this period using current 1931 # total weight average and last total weight average. 1932 # The result will lose some precision with higher number of 1933 # count but still good enough for 11 significant digits even 1934 # if we logged the data every 1 second for a day. 1935 ret.append(wavg * count - last_wavg * (count - 1)) 1936 else: 1937 ret.append(wavg) 1938 return ret 1939 1940 def save_results(self, resultsdir, fname_prefix=None): 1941 if not fname_prefix: 1942 fname_prefix = 'cpu_results_%.0f' % time.time() 1943 super(CPUStatsLogger, self).save_results(resultsdir, fname_prefix) 1944 1945 1946class PowerLogger(MeasurementLogger): 1947 """Class to measure power consumption. 1948 """ 1949 def save_results(self, resultsdir, fname_prefix=None): 1950 if not fname_prefix: 1951 fname_prefix = 'power_results_%.0f' % time.time() 1952 super(PowerLogger, self).save_results(resultsdir, fname_prefix) 1953 1954 1955 def calc(self, mtype='pwr'): 1956 return super(PowerLogger, self).calc(mtype) 1957 1958 1959class TempMeasurement(object): 1960 """Class to measure temperature. 1961 1962 Public attributes: 1963 domain: String name of the temperature domain being measured. Example is 1964 'cpu' for cpu temperature 1965 1966 Private attributes: 1967 _path: Path to temperature file to read ( in millidegrees Celsius ) 1968 1969 Public methods: 1970 refresh: Performs any temperature sampling and calculation and returns 1971 temperature as float in degrees Celsius. 1972 """ 1973 def __init__(self, domain, path): 1974 """Constructor.""" 1975 self.domain = domain 1976 self._path = path 1977 1978 1979 def refresh(self): 1980 """Performs temperature 1981 1982 Returns: 1983 float, temperature in degrees Celsius 1984 """ 1985 return int(utils.read_one_line(self._path)) / 1000. 1986 1987 1988class BatteryTempMeasurement(TempMeasurement): 1989 """Class to measure battery temperature.""" 1990 def __init__(self): 1991 super(BatteryTempMeasurement, self).__init__('battery', 'battery_temp') 1992 1993 1994 def refresh(self): 1995 """Perform battery temperature reading. 1996 1997 Returns: 1998 float, temperature in degrees Celsius. 1999 """ 2000 result = utils.run(self._path, timeout=5, ignore_status=True) 2001 return float(result.stdout) 2002 2003 2004def has_battery_temp(): 2005 """Determine if DUT can provide battery temperature. 2006 2007 Returns: 2008 Boolean, True if battery temperature available. False otherwise. 2009 """ 2010 if not power_utils.has_battery(): 2011 return False 2012 2013 btemp = BatteryTempMeasurement() 2014 try: 2015 btemp.refresh() 2016 except ValueError: 2017 return False 2018 2019 return True 2020 2021 2022class TempLogger(MeasurementLogger): 2023 """A thread that logs temperature readings in millidegrees Celsius.""" 2024 def __init__(self, measurements, seconds_period=30.0, checkpoint_logger=None): 2025 if not measurements: 2026 domains = set() 2027 measurements = [] 2028 tstats = ThermalStatHwmon() 2029 for kname in tstats.fields: 2030 match = re.match(r'(\S+)_temp(\d+)_input', kname) 2031 if not match: 2032 continue 2033 domain = match.group(1) + '-t' + match.group(2) 2034 fpath = tstats.fields[kname][0] 2035 new_meas = TempMeasurement(domain, fpath) 2036 measurements.append(new_meas) 2037 domains.add(domain) 2038 2039 if has_battery_temp(): 2040 measurements.append(BatteryTempMeasurement()) 2041 2042 sysfs_paths = '/sys/class/thermal/thermal_zone*' 2043 paths = glob.glob(sysfs_paths) 2044 for path in paths: 2045 domain_path = os.path.join(path, 'type') 2046 temp_path = os.path.join(path, 'temp') 2047 2048 domain = utils.read_one_line(domain_path) 2049 2050 # Skip when thermal_zone and hwmon have same domain. 2051 if domain in domains: 2052 continue 2053 2054 domain = domain.replace(' ', '_') 2055 new_meas = TempMeasurement(domain, temp_path) 2056 measurements.append(new_meas) 2057 2058 super(TempLogger, self).__init__(measurements, seconds_period, checkpoint_logger) 2059 2060 2061 def save_results(self, resultsdir, fname_prefix=None): 2062 if not fname_prefix: 2063 fname_prefix = 'temp_results_%.0f' % time.time() 2064 super(TempLogger, self).save_results(resultsdir, fname_prefix) 2065 2066 2067 def calc(self, mtype='temp'): 2068 return super(TempLogger, self).calc(mtype) 2069 2070 2071class DiskStateLogger(threading.Thread): 2072 """Records the time percentages the disk stays in its different power modes. 2073 2074 Example code snippet: 2075 mylogger = power_status.DiskStateLogger() 2076 mylogger.start() 2077 result = mylogger.result() 2078 2079 Public methods: 2080 start: Launches the thread and starts measurements. 2081 result: Stops the thread if it's still running and returns measurements. 2082 get_error: Returns the exception in _error if it exists. 2083 2084 Private functions: 2085 _get_disk_state: Returns the disk's current ATA power mode as a string. 2086 2087 Private attributes: 2088 _seconds_period: Disk polling interval in seconds. 2089 _stats: Dict that maps disk states to seconds spent in them. 2090 _running: Flag that is True as long as the logger should keep running. 2091 _time: Timestamp of last disk state reading. 2092 _device_path: The file system path of the disk's device node. 2093 _error: Contains a TestError exception if an unexpected error occured 2094 """ 2095 def __init__(self, seconds_period = 5.0, device_path = None): 2096 """Initializes a logger. 2097 2098 Args: 2099 seconds_period: Disk polling interval in seconds. Default 5.0 2100 device_path: The path of the disk's device node. Default '/dev/sda' 2101 """ 2102 threading.Thread.__init__(self) 2103 self._seconds_period = seconds_period 2104 self._device_path = device_path 2105 self._stats = {} 2106 self._running = False 2107 self._error = None 2108 2109 result = utils.system_output('rootdev -s') 2110 # TODO(tbroch) Won't work for emmc storage and will throw this error in 2111 # keyvals : 'ioctl(SG_IO) error: [Errno 22] Invalid argument' 2112 # Lets implement something complimentary for emmc 2113 if not device_path: 2114 self._device_path = \ 2115 re.sub('(sd[a-z]|mmcblk[0-9]+)p?[0-9]+', '\\1', result) 2116 logging.debug("device_path = %s", self._device_path) 2117 2118 2119 def start(self): 2120 logging.debug("inside DiskStateLogger.start") 2121 if os.path.exists(self._device_path): 2122 logging.debug("DiskStateLogger started") 2123 super(DiskStateLogger, self).start() 2124 2125 2126 def _get_disk_state(self): 2127 """Checks the disk's power mode and returns it as a string. 2128 2129 This uses the SG_IO ioctl to issue a raw SCSI command data block with 2130 the ATA-PASS-THROUGH command that allows SCSI-to-ATA translation (see 2131 T10 document 04-262r8). The ATA command issued is CHECKPOWERMODE1, 2132 which returns the device's current power mode. 2133 """ 2134 2135 def _addressof(obj): 2136 """Shortcut to return the memory address of an object as integer.""" 2137 return ctypes.cast(obj, ctypes.c_void_p).value 2138 2139 scsi_cdb = struct.pack("12B", # SCSI command data block (uint8[12]) 2140 0xa1, # SCSI opcode: ATA-PASS-THROUGH 2141 3 << 1, # protocol: Non-data 2142 1 << 5, # flags: CK_COND 2143 0, # features 2144 0, # sector count 2145 0, 0, 0, # LBA 2146 1 << 6, # flags: ATA-USING-LBA 2147 0xe5, # ATA opcode: CHECKPOWERMODE1 2148 0, # reserved 2149 0, # control (no idea what this is...) 2150 ) 2151 scsi_sense = (ctypes.c_ubyte * 32)() # SCSI sense buffer (uint8[32]) 2152 sgio_header = struct.pack("iiBBHIPPPIIiPBBBBHHiII", # see <scsi/sg.h> 2153 83, # Interface ID magic number (int32) 2154 -1, # data transfer direction: none (int32) 2155 12, # SCSI command data block length (uint8) 2156 32, # SCSI sense data block length (uint8) 2157 0, # iovec_count (not applicable?) (uint16) 2158 0, # data transfer length (uint32) 2159 0, # data block pointer 2160 _addressof(scsi_cdb), # SCSI CDB pointer 2161 _addressof(scsi_sense), # sense buffer pointer 2162 500, # timeout in milliseconds (uint32) 2163 0, # flags (uint32) 2164 0, # pack ID (unused) (int32) 2165 0, # user data pointer (unused) 2166 0, 0, 0, 0, 0, 0, 0, 0, 0, # output params 2167 ) 2168 try: 2169 with open(self._device_path, 'r') as dev: 2170 result = fcntl.ioctl(dev, 0x2285, sgio_header) 2171 except IOError, e: 2172 raise error.TestError('ioctl(SG_IO) error: %s' % str(e)) 2173 _, _, _, _, status, host_status, driver_status = \ 2174 struct.unpack("4x4xxx2x4xPPP4x4x4xPBxxxHH4x4x4x", result) 2175 if status != 0x2: # status: CHECK_CONDITION 2176 raise error.TestError('SG_IO status: %d' % status) 2177 if host_status != 0: 2178 raise error.TestError('SG_IO host status: %d' % host_status) 2179 if driver_status != 0x8: # driver status: SENSE 2180 raise error.TestError('SG_IO driver status: %d' % driver_status) 2181 2182 if scsi_sense[0] != 0x72: # resp. code: current error, descriptor format 2183 raise error.TestError('SENSE response code: %d' % scsi_sense[0]) 2184 if scsi_sense[1] != 0: # sense key: No Sense 2185 raise error.TestError('SENSE key: %d' % scsi_sense[1]) 2186 if scsi_sense[7] < 14: # additional length (ATA status is 14 - 1 bytes) 2187 raise error.TestError('ADD. SENSE too short: %d' % scsi_sense[7]) 2188 if scsi_sense[8] != 0x9: # additional descriptor type: ATA Return Status 2189 raise error.TestError('SENSE descriptor type: %d' % scsi_sense[8]) 2190 if scsi_sense[11] != 0: # errors: none 2191 raise error.TestError('ATA error code: %d' % scsi_sense[11]) 2192 2193 if scsi_sense[13] == 0x00: 2194 return 'standby' 2195 if scsi_sense[13] == 0x80: 2196 return 'idle' 2197 if scsi_sense[13] == 0xff: 2198 return 'active' 2199 return 'unknown(%d)' % scsi_sense[13] 2200 2201 2202 def run(self): 2203 """The Thread's run method.""" 2204 try: 2205 self._time = time.time() 2206 self._running = True 2207 while(self._running): 2208 time.sleep(self._seconds_period) 2209 state = self._get_disk_state() 2210 new_time = time.time() 2211 if state in self._stats: 2212 self._stats[state] += new_time - self._time 2213 else: 2214 self._stats[state] = new_time - self._time 2215 self._time = new_time 2216 except error.TestError, e: 2217 self._error = e 2218 self._running = False 2219 2220 2221 def result(self): 2222 """Stop the logger and return dict with result percentages.""" 2223 if (self._running): 2224 self._running = False 2225 self.join(self._seconds_period * 2) 2226 return AbstractStats.to_percent(self._stats) 2227 2228 2229 def get_error(self): 2230 """Returns the _error exception... please only call after result().""" 2231 return self._error 2232 2233def parse_pmc_s0ix_residency_info(): 2234 """ 2235 Parses S0ix residency for PMC based Intel systems 2236 (skylake/kabylake/apollolake), the debugfs paths might be 2237 different from platform to platform, yet the format is 2238 unified in microseconds. 2239 2240 @returns residency in seconds. 2241 @raises error.TestNAError if the debugfs file not found. 2242 """ 2243 info_path = None 2244 for node in ['/sys/kernel/debug/pmc_core/slp_s0_residency_usec', 2245 '/sys/kernel/debug/telemetry/s0ix_residency_usec']: 2246 if os.path.exists(node): 2247 info_path = node 2248 break 2249 if not info_path: 2250 raise error.TestNAError('S0ix residency file not found') 2251 return float(utils.read_one_line(info_path)) * 1e-6 2252 2253 2254class S0ixResidencyStats(object): 2255 """ 2256 Measures the S0ix residency of a given board over time. 2257 """ 2258 def __init__(self): 2259 self._initial_residency = parse_pmc_s0ix_residency_info() 2260 2261 def get_accumulated_residency_secs(self): 2262 """ 2263 @returns S0ix Residency since the class has been initialized. 2264 """ 2265 return parse_pmc_s0ix_residency_info() - self._initial_residency 2266 2267 2268class DMCFirmwareStats(object): 2269 """ 2270 Collect DMC firmware stats of Intel based system (SKL+), (APL+). 2271 """ 2272 # Intel CPUs only transition to DC6 from DC5. https://git.io/vppcG 2273 DC6_ENTRY_KEY = 'DC5 -> DC6 count' 2274 2275 def __init__(self): 2276 self._initial_stat = DMCFirmwareStats._parse_dmc_info() 2277 2278 def check_fw_loaded(self): 2279 """Check that DMC firmware is loaded 2280 2281 @returns boolean of DMC firmware loaded. 2282 """ 2283 return self._initial_stat['fw loaded'] 2284 2285 def is_dc6_supported(self): 2286 """Check that DMC support DC6 state.""" 2287 return self.DC6_ENTRY_KEY in self._initial_stat 2288 2289 def get_accumulated_dc6_entry(self): 2290 """Check number of DC6 state entry since the class has been initialized. 2291 2292 @returns number of DC6 state entry. 2293 """ 2294 if not self.is_dc6_supported(): 2295 return 0 2296 2297 key = self.DC6_ENTRY_KEY 2298 current_stat = DMCFirmwareStats._parse_dmc_info() 2299 return current_stat[key] - self._initial_stat[key] 2300 2301 @staticmethod 2302 def _parse_dmc_info(): 2303 """ 2304 Parses DMC firmware info for Intel based systems. 2305 2306 @returns dictionary of dmc_fw info 2307 @raises error.TestFail if the debugfs file not found. 2308 """ 2309 path = '/sys/kernel/debug/dri/0/i915_dmc_info' 2310 if not os.path.exists(path): 2311 raise error.TestFail('DMC info file not found.') 2312 2313 with open(path, 'r') as f: 2314 lines = [line.strip() for line in f.readlines()] 2315 2316 # For pre 4.16 kernel. https://git.io/vhThb 2317 if lines[0] == 'not supported': 2318 raise error.TestFail('DMC not supported.') 2319 2320 ret = dict() 2321 for line in lines: 2322 key, val = line.rsplit(': ', 1) 2323 2324 if key == 'fw loaded': 2325 val = val == 'yes' 2326 elif re.match(r'DC\d -> DC\d count', key): 2327 val = int(val) 2328 ret[key] = val 2329 return ret 2330 2331 2332class RC6ResidencyStats(object): 2333 """ 2334 Collect RC6 residency stats of Intel based system. 2335 """ 2336 def __init__(self): 2337 self._rc6_enable_checked = False 2338 self._initial_stat = self._parse_rc6_residency_info() 2339 2340 def get_accumulated_residency_secs(self): 2341 """Check number of RC6 state entry since the class has been initialized. 2342 2343 @returns int of RC6 residency in seconds since instantiation. 2344 """ 2345 current_stat = self._parse_rc6_residency_info() 2346 return (current_stat - self._initial_stat) * 1e-3 2347 2348 def _is_rc6_enable(self): 2349 """ 2350 Verified that RC6 is enable. 2351 2352 @returns Boolean of RC6 enable status. 2353 @raises error.TestFail if the sysfs file not found. 2354 """ 2355 path = '/sys/class/drm/card0/power/rc6_enable' 2356 if not os.path.exists(path): 2357 raise error.TestFail('RC6 enable file not found.') 2358 2359 return int(utils.read_one_line(path)) == 1 2360 2361 def _parse_rc6_residency_info(self): 2362 """ 2363 Parses RC6 residency info for Intel based systems. 2364 2365 @returns int of RC6 residency in millisec since boot. 2366 @raises error.TestFail if the sysfs file not found or RC6 not enabled. 2367 """ 2368 if not self._rc6_enable_checked: 2369 if not self._is_rc6_enable(): 2370 raise error.TestFail('RC6 is not enabled.') 2371 self._rc6_enable_checked = True 2372 2373 path = '/sys/class/drm/card0/power/rc6_residency_ms' 2374 if not os.path.exists(path): 2375 raise error.TestFail('RC6 residency file not found.') 2376 2377 return int(utils.read_one_line(path)) 2378 2379 2380class PCHPowergatingStats(object): 2381 """ 2382 Collect PCH powergating status of intel based system. 2383 """ 2384 PMC_CORE_PATH = '/sys/kernel/debug/pmc_core/pch_ip_power_gating_status' 2385 TELEMETRY_PATH = '/sys/kernel/debug/telemetry/soc_states' 2386 2387 def __init__(self): 2388 self._stat = {} 2389 2390 def check_s0ix_requirement(self): 2391 """ 2392 Check PCH powergating status with S0ix requirement. 2393 2394 @returns list of PCH IP block name that need to be powergated for low 2395 power consumption S0ix, empty list if none. 2396 """ 2397 # PCH IP block that is on for S0ix. Ignore these IP block. 2398 S0IX_WHITELIST = set([ 2399 'PMC', 'OPI-DMI', 'SPI / eSPI', 'XHCI', 'xHCI', 'FUSE', 'PCIE0', 2400 'NPKVRC', 'NPKVNN']) 2401 2402 # PCH IP block that is on/off for S0ix depend on features enabled. 2403 # Add log when these IPs state are on. 2404 S0IX_WARNLIST = set([ 2405 'HDA-PGD0', 'HDA-PGD1', 'HDA-PGD2', 'HDA-PGD3', 'LPSS', 2406 'AVSPGD1', 'AVSPGD4']) 2407 2408 # CNV device has 0x31dc as devid . 2409 if len(utils.system_output('lspci -d :31dc')) > 0: 2410 S0IX_WHITELIST.add('CNV') 2411 2412 on_ip = set(ip['name'] for ip in self._stat if ip['state']) 2413 on_ip -= S0IX_WHITELIST 2414 2415 if on_ip: 2416 on_ip_in_warn_list = on_ip & S0IX_WARNLIST 2417 if on_ip_in_warn_list: 2418 logging.warn('Found PCH IP that may be able to powergate: %s', 2419 ', '.join(on_ip_in_warn_list)) 2420 on_ip -= S0IX_WARNLIST 2421 2422 if on_ip: 2423 logging.error('Found PCH IP that need to powergate: %s', 2424 ', '.join(on_ip)) 2425 return False 2426 return True 2427 2428 def read_pch_powergating_info(self, sleep_seconds=1): 2429 """ 2430 Read PCH powergating status info for Intel based systems. 2431 2432 Intel currently shows powergating status in 2 different place in debugfs 2433 depend on which CPU platform. 2434 2435 @param sleep_seconds: sleep time to make DUT idle before read the data. 2436 2437 @raises error.TestFail if the debugfs file not found or parsing error. 2438 """ 2439 if os.path.exists(self.PMC_CORE_PATH): 2440 logging.info('Use PCH powergating info at %s', self.PMC_CORE_PATH) 2441 time.sleep(sleep_seconds) 2442 self._read_pcm_core_powergating_info() 2443 return 2444 2445 if os.path.exists(self.TELEMETRY_PATH): 2446 logging.info('Use PCH powergating info at %s', self.TELEMETRY_PATH) 2447 time.sleep(sleep_seconds) 2448 self._read_telemetry_powergating_info() 2449 return 2450 2451 raise error.TestFail('PCH powergating info file not found.') 2452 2453 def _read_pcm_core_powergating_info(self): 2454 """ 2455 read_pch_powergating_info() for Intel Core KBL+ 2456 2457 @raises error.TestFail if parsing error. 2458 """ 2459 with open(self.PMC_CORE_PATH, 'r') as f: 2460 lines = [line.strip() for line in f.readlines()] 2461 2462 # Example pattern to match: 2463 # PCH IP: 0 - PMC State: On 2464 # PCH IP: 1 - SATA State: Off 2465 pattern = r'PCH IP:\s+(?P<id>\d+)\s+' \ 2466 r'- (?P<name>.*\w)\s+' \ 2467 r'State: (?P<state>Off|On)' 2468 matcher = re.compile(pattern) 2469 ret = [] 2470 for i, line in enumerate(lines): 2471 match = matcher.match(line) 2472 if not match: 2473 raise error.TestFail('Can not parse PCH powergating info: ', 2474 line) 2475 2476 index = int(match.group('id')) 2477 if i != index: 2478 raise error.TestFail('Wrong index for PCH powergating info: ', 2479 line) 2480 2481 name = match.group('name') 2482 state = match.group('state') == 'On' 2483 2484 ret.append({'name': name, 'state': state}) 2485 self._stat = ret 2486 2487 def _read_telemetry_powergating_info(self): 2488 """ 2489 read_pch_powergating_info() for Intel Atom APL+ 2490 2491 @raises error.TestFail if parsing error. 2492 """ 2493 with open(self.TELEMETRY_PATH, 'r') as f: 2494 raw_str = f.read() 2495 2496 # Example pattern to match: 2497 # -------------------------------------- 2498 # South Complex PowerGate Status 2499 # -------------------------------------- 2500 # Device PG 2501 # LPSS 1 2502 # SPI 1 2503 # FUSE 0 2504 # 2505 # --------------------------------------- 2506 trimed_pattern = r'.*South Complex PowerGate Status\n' \ 2507 r'-+\n' \ 2508 r'Device\s+PG\n' \ 2509 r'(?P<trimmed_section>(\w+\s+[0|1]\n)+)' \ 2510 r'\n-+\n.*' 2511 trimed_match = re.match(trimed_pattern, raw_str, re.DOTALL) 2512 if not trimed_match: 2513 raise error.TestFail('Can not parse PCH powergating info: ', 2514 raw_str) 2515 trimmed_str = trimed_match.group('trimmed_section').strip() 2516 lines = [line.strip() for line in trimmed_str.split('\n')] 2517 2518 matcher = re.compile(r'(?P<name>\w+)\s+(?P<state>[0|1])') 2519 ret = [] 2520 for line in lines: 2521 match = matcher.match(line) 2522 if not match: 2523 raise error.TestFail('Can not parse PCH powergating info: %s', 2524 line) 2525 2526 name = match.group('name') 2527 state = match.group('state') == '0' # 0 means on and 1 means off 2528 2529 ret.append({'name': name, 'state': state}) 2530 self._stat = ret 2531