• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# -*- coding: utf-8 -*-
2# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Module to deal with result cache."""
7
8from __future__ import division
9from __future__ import print_function
10
11import collections
12import glob
13import hashlib
14import heapq
15import json
16import os
17import pickle
18import re
19import tempfile
20
21from cros_utils import command_executer
22from cros_utils import misc
23
24from image_checksummer import ImageChecksummer
25
26import results_report
27import test_flag
28
29SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
30RESULTS_FILE = 'results.txt'
31MACHINE_FILE = 'machine.txt'
32AUTOTEST_TARBALL = 'autotest.tbz2'
33RESULTS_TARBALL = 'results.tbz2'
34PERF_RESULTS_FILE = 'perf-results.txt'
35CACHE_KEYS_FILE = 'cache_keys.txt'
36
37
class PidVerificationError(Exception):
  """Raised when perf PID verification fails in per-process mode."""
41
class PerfDataReadError(Exception):
  """Raised when a perf.data header cannot be read."""
45
46class Result(object):
47  """Class for holding the results of a single test run.
48
49  This class manages what exactly is stored inside the cache without knowing
50  what the key of the cache is. For runs with perf, it stores perf.data,
51  perf.report, etc. The key generation is handled by the ResultsCache class.
52  """
53
54  def __init__(self, logger, label, log_level, machine, cmd_exec=None):
55    self.chromeos_root = label.chromeos_root
56    self._logger = logger
57    self.ce = cmd_exec or command_executer.GetCommandExecuter(
58        self._logger, log_level=log_level)
59    self.temp_dir = None
60    self.label = label
61    self.results_dir = None
62    self.log_level = log_level
63    self.machine = machine
64    self.perf_data_files = []
65    self.perf_report_files = []
66    self.results_file = []
67    self.turbostat_log_file = ''
68    self.cpustats_log_file = ''
69    self.cpuinfo_file = ''
70    self.top_log_file = ''
71    self.wait_time_log_file = ''
72    self.chrome_version = ''
73    self.err = None
74    self.chroot_results_dir = ''
75    self.test_name = ''
76    self.keyvals = None
77    self.board = None
78    self.suite = None
79    self.cwp_dso = ''
80    self.retval = None
81    self.out = None
82    self.top_cmds = []
83
84  def GetTopCmds(self):
85    """Get the list of top commands consuming CPU on the machine."""
86    return self.top_cmds
87
88  def FormatStringTopCommands(self):
89    """Get formatted string of top commands.
90
91    Get the formatted string with top commands consuming CPU on DUT machine.
92    Number of "non-chrome" processes in the list is limited to 5.
93    """
94    format_list = [
95        'Top commands with highest CPU usage:',
96        # Header.
97        '%20s %9s %6s   %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
98        '-' * 50,
99    ]
100    if self.top_cmds:
101      # After switching to top processes we have to expand the list since there
102      # will be a lot of 'chrome' processes (up to 10, sometimes more) in the
103      # top.
104      # Let's limit the list size by the number of non-chrome processes.
105      limit_of_non_chrome_procs = 5
106      num_of_non_chrome_procs = 0
107      for topcmd in self.top_cmds:
108        print_line = '%20s %9.2f %6s   %s' % (
109            topcmd['cmd'], topcmd['cpu_use_avg'], topcmd['count'],
110            topcmd['top5_cpu_use'])
111        format_list.append(print_line)
112        if not topcmd['cmd'].startswith('chrome'):
113          num_of_non_chrome_procs += 1
114          if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
115            break
116    else:
117      format_list.append('[NO DATA FROM THE TOP LOG]')
118    format_list.append('-' * 50)
119    return '\n'.join(format_list)
120
121  def CopyFilesTo(self, dest_dir, files_to_copy):
122    file_index = 0
123    for file_to_copy in files_to_copy:
124      if not os.path.isdir(dest_dir):
125        command = 'mkdir -p %s' % dest_dir
126        self.ce.RunCommand(command)
127      dest_file = os.path.join(
128          dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
129      ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
130      if ret:
131        raise IOError('Could not copy results file: %s' % file_to_copy)
132
133  def CopyResultsTo(self, dest_dir):
134    self.CopyFilesTo(dest_dir, self.results_file)
135    self.CopyFilesTo(dest_dir, self.perf_data_files)
136    self.CopyFilesTo(dest_dir, self.perf_report_files)
137    extra_files = []
138    if self.top_log_file:
139      extra_files.append(self.top_log_file)
140    if self.cpuinfo_file:
141      extra_files.append(self.cpuinfo_file)
142    if extra_files:
143      self.CopyFilesTo(dest_dir, extra_files)
144    if self.results_file or self.perf_data_files or self.perf_report_files:
145      self._logger.LogOutput('Results files stored in %s.' % dest_dir)
146
147  def CompressResultsTo(self, dest_dir):
148    tarball = os.path.join(self.results_dir, RESULTS_TARBALL)
149    # Test_that runs hold all output under TEST_NAME_HASHTAG/results/,
150    # while tast runs hold output under TEST_NAME/.
151    # Both ensure to be unique.
152    result_dir_name = self.test_name if self.suite == 'tast' else 'results'
153    results_dir = self.FindFilesInResultsDir('-name %s' %
154                                             result_dir_name).split('\n')[0]
155
156    if not results_dir:
157      self._logger.LogOutput('WARNING: No results dir matching %r found' %
158                             result_dir_name)
159      return
160
161    self.CreateTarball(results_dir, tarball)
162    self.CopyFilesTo(dest_dir, [tarball])
163    if results_dir:
164      self._logger.LogOutput('Results files compressed into %s.' % dest_dir)
165
  def GetNewKeyvals(self, keyvals_dict):
    """Augment keyvals_dict with measurements parsed from results files.

    Reads every file returned by GetDataMeasurementsFiles(). Files with
    '.json' in the name are parsed as a telemetry chart dict (one entry per
    chart/trace pair); any other file is treated as one JSON record per
    line with 'graph', 'description', 'value' and 'units' fields.

    Args:
      keyvals_dict: Dict of key -> value to extend in place.

    Returns:
      Tuple (keyvals_dict, units_dict) where units_dict maps each key to
      its unit string ('' when unknown).
    """
    # Initialize 'units' dictionary.
    units_dict = {}
    for k in keyvals_dict:
      units_dict[k] = ''
    results_files = self.GetDataMeasurementsFiles()
    for f in results_files:
      # Make sure we can find the results file
      if os.path.exists(f):
        data_filename = f
      else:
        # Otherwise get the base filename and create the correct
        # path for it.
        _, f_base = misc.GetRoot(f)
        data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
                                     self.temp_dir, f_base)
      if data_filename.find('.json') > 0:
        raw_dict = dict()
        if os.path.exists(data_filename):
          with open(data_filename, 'r') as data_file:
            raw_dict = json.load(data_file)

        # Telemetry 'dashboard' output nests everything under 'charts'.
        if 'charts' in raw_dict:
          raw_dict = raw_dict['charts']
        for k1 in raw_dict:
          field_dict = raw_dict[k1]
          for k2 in field_dict:
            result_dict = field_dict[k2]
            key = k1 + '__' + k2
            if 'value' in result_dict:
              keyvals_dict[key] = result_dict['value']
            elif 'values' in result_dict:
              values = result_dict['values']
              # Average lists of scalars; keep any other list verbatim.
              # NOTE(review): the values != 'null' guard looks intended for
              # JSON null, but an actual null decodes to None — confirm.
              if ('type' in result_dict and
                  result_dict['type'] == 'list_of_scalar_values' and values and
                  values != 'null'):
                keyvals_dict[key] = sum(values) / float(len(values))
              else:
                keyvals_dict[key] = values
            units_dict[key] = result_dict['units']
      else:
        # Non-JSON filename: newline-delimited JSON records.
        if os.path.exists(data_filename):
          with open(data_filename, 'r') as data_file:
            lines = data_file.readlines()
            for line in lines:
              tmp_dict = json.loads(line)
              graph_name = tmp_dict['graph']
              graph_str = (graph_name + '__') if graph_name else ''
              key = graph_str + tmp_dict['description']
              keyvals_dict[key] = tmp_dict['value']
              units_dict[key] = tmp_dict['units']

    return keyvals_dict, units_dict
219
220  def AppendTelemetryUnits(self, keyvals_dict, units_dict):
221    """keyvals_dict is the dict of key-value used to generate Crosperf reports.
222
223    units_dict is a dictionary of the units for the return values in
224    keyvals_dict.  We need to associate the units with the return values,
225    for Telemetry tests, so that we can include the units in the reports.
226    This function takes each value in keyvals_dict, finds the corresponding
227    unit in the units_dict, and replaces the old value with a list of the
228    old value and the units.  This later gets properly parsed in the
229    ResultOrganizer class, for generating the reports.
230    """
231
232    results_dict = {}
233    for k in keyvals_dict:
234      # We don't want these lines in our reports; they add no useful data.
235      if not k or k == 'telemetry_Crosperf':
236        continue
237      val = keyvals_dict[k]
238      units = units_dict[k]
239      new_val = [val, units]
240      results_dict[k] = new_val
241    return results_dict
242
  def GetKeyvals(self):
    """Generate the key-value dict for this run via generate_test_report.

    Copies the results into a temp dir under the chroot's /tmp (once),
    runs generate_test_report inside the chroot, parses its CSV output,
    then merges in any perf_measurements/chart data — with units appended
    for telemetry_Crosperf runs.

    Returns:
      Dict mapping result keys to values (or [value, units] lists for
      telemetry_Crosperf runs).
    """
    results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
    if not self.temp_dir:
      self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
      command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
      self.ce.RunCommand(command, print_to_console=False)

    command = ('./generate_test_report --no-color --csv %s' %
               (os.path.join('/tmp', os.path.basename(self.temp_dir))))
    _, out, _ = self.ce.ChrootRunCommandWOutput(
        self.chromeos_root, command, print_to_console=False)
    keyvals_dict = {}
    tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
                                                 self.temp_dir)
    # Each report line ends in '<key>=<value>' (fields may be comma
    # separated); strip the temp dir prefix from the key.
    for line in out.splitlines():
      tokens = re.split('=|,', line)
      key = tokens[-2]
      if key.startswith(tmp_dir_in_chroot):
        key = key[len(tmp_dir_in_chroot) + 1:]
      value = tokens[-1]
      keyvals_dict[key] = value

    # Check to see if there is a perf_measurements file and get the
    # data from it if so.
    keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
    if self.suite == 'telemetry_Crosperf':
      # For telemtry_Crosperf results, append the units to the return
      # results, for use in generating the reports.
      keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
    return keyvals_dict
273
274  def GetSamples(self):
275    samples = 0
276    for perf_data_file in self.perf_data_files:
277      chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
278                                                       perf_data_file)
279      perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
280      perf_file = '/usr/sbin/perf'
281      if os.path.exists(perf_path):
282        perf_file = '/usr/bin/perf'
283
284      # For each perf.data, we want to collect sample count for specific DSO.
285      # We specify exact match for known DSO type, and every sample for `all`.
286      exact_match = ''
287      if self.cwp_dso == 'all':
288        exact_match = '""'
289      elif self.cwp_dso == 'chrome':
290        exact_match = '" chrome "'
291      elif self.cwp_dso == 'kallsyms':
292        exact_match = '"[kernel.kallsyms]"'
293      else:
294        # This will need to be updated once there are more DSO types supported,
295        # if user want an exact match for the field they want.
296        exact_match = '"%s"' % self.cwp_dso
297
298      command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' %
299                 (perf_file, chroot_perf_data_file, exact_match))
300      _, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
301                                                     command)
302      # Accumulate the sample count for all matched fields.
303      # Each line looks like this:
304      #     45.42%        237210  chrome
305      # And we want the second number which is the sample count.
306      sample = 0
307      try:
308        for line in result.split('\n'):
309          attr = line.split()
310          if len(attr) == 3 and '%' in attr[0]:
311            sample += int(attr[1])
312      except:
313        raise RuntimeError('Cannot parse perf dso result')
314
315      samples += sample
316    return [samples, u'samples']
317
318  def GetResultsDir(self):
319    if self.suite == 'tast':
320      mo = re.search(r'Writing results to (\S+)', self.out)
321    else:
322      mo = re.search(r'Results placed in (\S+)', self.out)
323    if mo:
324      result = mo.group(1)
325      return result
326    raise RuntimeError('Could not find results directory.')
327
328  def FindFilesInResultsDir(self, find_args):
329    if not self.results_dir:
330      return ''
331
332    command = 'find %s %s' % (self.results_dir, find_args)
333    ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
334    if ret:
335      raise RuntimeError('Could not run find command!')
336    return out
337
338  def GetResultsFile(self):
339    if self.suite == 'telemetry_Crosperf':
340      return self.FindFilesInResultsDir('-name histograms.json').splitlines()
341    return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
342
343  def GetPerfDataFiles(self):
344    return self.FindFilesInResultsDir('-name perf.data').splitlines()
345
346  def GetPerfReportFiles(self):
347    return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
348
349  def GetDataMeasurementsFiles(self):
350    result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
351    if not result:
352      if self.suite == 'telemetry_Crosperf':
353        result = \
354            self.FindFilesInResultsDir('-name histograms.json').splitlines()
355      else:
356        result = \
357            self.FindFilesInResultsDir('-name results-chart.json').splitlines()
358    return result
359
360  def GetTurbostatFile(self):
361    """Get turbostat log path string."""
362    return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0]
363
364  def GetCpustatsFile(self):
365    """Get cpustats log path string."""
366    return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0]
367
368  def GetCpuinfoFile(self):
369    """Get cpustats log path string."""
370    return self.FindFilesInResultsDir('-name cpuinfo.log').split('\n')[0]
371
372  def GetTopFile(self):
373    """Get cpustats log path string."""
374    return self.FindFilesInResultsDir('-name top.log').split('\n')[0]
375
376  def GetWaitTimeFile(self):
377    """Get wait time log path string."""
378    return self.FindFilesInResultsDir('-name wait_time.log').split('\n')[0]
379
380  def _CheckDebugPath(self, option, path):
381    relative_path = path[1:]
382    out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
383    if os.path.exists(out_chroot_path):
384      if option == 'kallsyms':
385        path = os.path.join(path, 'System.map-*')
386      return '--' + option + ' ' + path
387    else:
388      print('** WARNING **: --%s option not applied, %s does not exist' %
389            (option, out_chroot_path))
390      return ''
391
  def GeneratePerfReportFiles(self):
    """Run `perf report` for every collected perf.data file.

    Each report is written next to its perf.data file as perf.data.report,
    using debug info from the label's debug path when available, otherwise
    from the local /build/<board> tree.

    Returns:
      List of outside-chroot paths to the generated report files.

    Raises:
      RuntimeError: If a report already exists or perf report fails.
    """
    perf_report_files = []
    for perf_data_file in self.perf_data_files:
      # Generate a perf.report and store it side-by-side with the perf.data
      # file.
      chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
                                                       perf_data_file)
      perf_report_file = '%s.report' % perf_data_file
      if os.path.exists(perf_report_file):
        raise RuntimeError('Perf report file already exists: %s' %
                           perf_report_file)
      chroot_perf_report_file = misc.GetInsideChrootPath(
          self.chromeos_root, perf_report_file)
      perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')

      # Prefer the chroot's /usr/bin/perf when present.
      perf_file = '/usr/sbin/perf'
      if os.path.exists(perf_path):
        perf_file = '/usr/bin/perf'

      debug_path = self.label.debug_path

      if debug_path:
        symfs = '--symfs ' + debug_path
        vmlinux = '--vmlinux ' + os.path.join(debug_path, 'boot', 'vmlinux')
        kallsyms = ''
        print('** WARNING **: --kallsyms option not applied, no System.map-* '
              'for downloaded image.')
      else:
        # No dedicated debug path: fall back to the local build tree, each
        # flag applied only if its path exists (see _CheckDebugPath).
        if self.label.image_type != 'local':
          print('** WARNING **: Using local debug info in /build, this may '
                'not match the downloaded image.')
        build_path = os.path.join('/build', self.board)
        symfs = self._CheckDebugPath('symfs', build_path)
        vmlinux_path = os.path.join(build_path, 'usr/lib/debug/boot/vmlinux')
        vmlinux = self._CheckDebugPath('vmlinux', vmlinux_path)
        kallsyms_path = os.path.join(build_path, 'boot')
        kallsyms = self._CheckDebugPath('kallsyms', kallsyms_path)

      command = ('%s report -n %s %s %s -i %s --stdio > %s' %
                 (perf_file, symfs, vmlinux, kallsyms, chroot_perf_data_file,
                  chroot_perf_report_file))
      if self.log_level != 'verbose':
        self._logger.LogOutput('Generating perf report...\nCMD: %s' % command)
      exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
      if exit_code == 0:
        if self.log_level != 'verbose':
          self._logger.LogOutput('Perf report generated successfully.')
      else:
        raise RuntimeError('Perf report not generated correctly. CMD: %s' %
                           command)

      # Add a keyval to the dictionary for the events captured.
      perf_report_files.append(
          misc.GetOutsideChrootPath(self.chromeos_root,
                                    chroot_perf_report_file))
    return perf_report_files
448
449  def GatherPerfResults(self):
450    report_id = 0
451    for perf_report_file in self.perf_report_files:
452      with open(perf_report_file, 'r') as f:
453        report_contents = f.read()
454        for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
455          num_events = group[0]
456          event_name = group[1]
457          key = 'perf_%s_%s' % (report_id, event_name)
458          value = str(misc.UnitToNumber(num_events))
459          self.keyvals[key] = value
460
  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    """Fill in this Result from a freshly completed test run.

    Args:
      out: Stdout of the test runner (parsed for the results dir).
      err: Stderr of the test runner.
      retval: Runner exit status.
      test: Test (benchmark) name.
      suite: Suite name (e.g. 'tast', 'telemetry_Crosperf').
      cwp_dso: DSO name for CWP sample counting ('' when unused).
    """
    self.board = self.label.board
    self.out = out
    self.err = err
    self.retval = retval
    self.test_name = test
    self.suite = suite
    self.cwp_dso = cwp_dso
    # Locate the results dir announced in the runner output and map it to
    # its outside-chroot equivalent.
    self.chroot_results_dir = self.GetResultsDir()
    self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
                                                 self.chroot_results_dir)
    self.results_file = self.GetResultsFile()
    self.perf_data_files = self.GetPerfDataFiles()
    # Include all perf.report data in table.
    self.perf_report_files = self.GeneratePerfReportFiles()
    self.turbostat_log_file = self.GetTurbostatFile()
    self.cpustats_log_file = self.GetCpustatsFile()
    self.cpuinfo_file = self.GetCpuinfoFile()
    self.top_log_file = self.GetTopFile()
    self.wait_time_log_file = self.GetWaitTimeFile()
    # TODO(asharif): Do something similar with perf stat.

    # Grab keyvals from the directory.
    self.ProcessResults()
485
486  def ProcessChartResults(self):
487    # Open and parse the json results file generated by telemetry/test_that.
488    if not self.results_file:
489      raise IOError('No results file found.')
490    filename = self.results_file[0]
491    if not filename.endswith('.json'):
492      raise IOError('Attempt to call json on non-json file: %s' % filename)
493    if not os.path.exists(filename):
494      raise IOError('%s does not exist' % filename)
495
496    keyvals = {}
497    with open(filename, 'r') as f:
498      raw_dict = json.load(f)
499      if 'charts' in raw_dict:
500        raw_dict = raw_dict['charts']
501      for k, field_dict in raw_dict.items():
502        for item in field_dict:
503          keyname = k + '__' + item
504          value_dict = field_dict[item]
505          if 'value' in value_dict:
506            result = value_dict['value']
507          elif 'values' in value_dict:
508            values = value_dict['values']
509            if not values:
510              continue
511            if ('type' in value_dict and
512                value_dict['type'] == 'list_of_scalar_values' and
513                values != 'null'):
514              result = sum(values) / float(len(values))
515            else:
516              result = values
517          else:
518            continue
519          units = value_dict['units']
520          new_value = [result, units]
521          keyvals[keyname] = new_value
522    return keyvals
523
524  def ProcessTurbostatResults(self):
525    """Given turbostat_log_file non-null parse cpu stats from file.
526
527    Returns:
528      Dictionary of 'cpufreq', 'cputemp' where each
529      includes dictionary 'all': [list_of_values]
530
531    Example of the output of turbostat_log.
532    ----------------------
533    CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
534    -       329     12.13   2723    2393    10975   77
535    0       336     12.41   2715    2393    6328    77
536    2       323     11.86   2731    2393    4647    69
537    CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
538    -       1940    67.46   2884    2393    39920   83
539    0       1827    63.70   2877    2393    21184   83
540    """
541    cpustats = {}
542    read_data = ''
543    with open(self.turbostat_log_file) as f:
544      read_data = f.readlines()
545
546    if not read_data:
547      self._logger.LogOutput('WARNING: Turbostat output file is empty.')
548      return {}
549
550    # First line always contains the header.
551    stats = read_data[0].split()
552
553    # Mandatory parameters.
554    if 'CPU' not in stats:
555      self._logger.LogOutput(
556          'WARNING: Missing data for CPU# in Turbostat output.')
557      return {}
558    if 'Bzy_MHz' not in stats:
559      self._logger.LogOutput(
560          'WARNING: Missing data for Bzy_MHz in Turbostat output.')
561      return {}
562    cpu_index = stats.index('CPU')
563    cpufreq_index = stats.index('Bzy_MHz')
564    cpufreq = cpustats.setdefault('cpufreq', {'all': []})
565
566    # Optional parameters.
567    cputemp_index = -1
568    if 'CoreTmp' in stats:
569      cputemp_index = stats.index('CoreTmp')
570      cputemp = cpustats.setdefault('cputemp', {'all': []})
571
572    # Parse data starting from the second line ignoring repeating headers.
573    for st in read_data[1:]:
574      # Data represented by int or float separated by spaces.
575      numbers = st.split()
576      if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]):
577        # Skip the line if data mismatch.
578        continue
579      if numbers[cpu_index] != '-':
580        # Ignore Core-specific statistics which starts with Core number.
581        # Combined statistics for all core has "-" CPU identifier.
582        continue
583
584      cpufreq['all'].append(int(numbers[cpufreq_index]))
585      if cputemp_index != -1:
586        cputemp['all'].append(int(numbers[cputemp_index]))
587    return cpustats
588
  def ProcessTopResults(self):
    """Given self.top_log_file process top log data.

    Returns:
      List of dictionaries with the following keyvals:
       'cmd': command name (string),
       'cpu_use_avg': average cpu usage (float),
       'count': number of occurrences (int),
       'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)

    Example of the top log:
      PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
     4102 chronos   12  -8 3454472 238300 118188 R  41.8   6.1   0:08.37 chrome
      375 root       0 -20       0      0      0 S   5.9   0.0   0:00.17 kworker
      617 syslog    20   0   25332   8372   7888 S   5.9   0.2   0:00.77 systemd

      PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
     5745 chronos   20   0 5438580 139328  67988 R 122.8   3.6   0:04.26 chrome
      912 root     -51   0       0      0      0 S   2.0   0.0   0:01.04 irq/cro
      121 root      20   0       0      0      0 S   1.0   0.0   0:00.45 spi5
    """
    all_data = ''
    with open(self.top_log_file) as f:
      all_data = f.read()

    if not all_data:
      self._logger.LogOutput('WARNING: Top log file is empty.')
      return []

    # Matches one process row of `top` output; named groups pick out the
    # PID, %CPU and command columns.
    top_line_regex = re.compile(
        r"""
        ^\s*(?P<pid>\d+)\s+         # Group 1: PID
        \S+\s+\S+\s+-?\d+\s+        # Ignore: user, prio, nice
        \d+\s+\d+\s+\d+\s+          # Ignore: virt/res/shared mem
        \S+\s+                      # Ignore: state
        (?P<cpu_use>\d+\.\d+)\s+    # Group 2: CPU usage
        \d+\.\d+\s+\d+:\d+\.\d+\s+  # Ignore: mem usage, time
        (?P<cmd>\S+)$               # Group 3: command
        """, re.VERBOSE)
    # Page represents top log data per one measurement within time interval
    # 'top_interval'.
    # Pages separated by empty line.
    pages = all_data.split('\n\n')
    # Snapshots are structured representation of the pages.
    snapshots = []
    for page in pages:
      if not page:
        continue

      # Snapshot list will contain all processes (command duplicates are
      # allowed).
      snapshot = []
      for line in page.splitlines():
        match = top_line_regex.match(line)
        if match:
          # Top line is valid, collect data.
          process = {
              # NOTE: One command may be represented by multiple processes.
              'cmd': match.group('cmd'),
              'pid': match.group('pid'),
              'cpu_use': float(match.group('cpu_use')),
          }

          # Filter out processes with 0 CPU usage and top command.
          if process['cpu_use'] > 0 and process['cmd'] != 'top':
            snapshot.append(process)

      # If page contained meaningful data add snapshot to the list.
      if snapshot:
        snapshots.append(snapshot)

    # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
    # running.
    # Ideally it should be 100% but it will be hardly reachable with 1 core.
    # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and
    # 90% equally occurs in 72-74% of all top log snapshots.
    # Further decreasing of load threshold leads to a shifting percent of
    # "high load" snapshots which might include snapshots when benchmark is
    # not running.
    # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
    # of snapshots accordingly.
    # Threshold of "high load" is reduced to 70% (from 90) when we switched to
    # topstats per process. From experiment data the rest 20% are distributed
    # among other chrome processes.
    CHROME_HIGH_CPU_LOAD = 70
    # Number of snapshots where chrome is heavily used.
    high_load_snapshots = 0
    # Total CPU use per process in ALL active snapshots.
    cmd_total_cpu_use = collections.defaultdict(float)
    # Top CPU usages per command.
    cmd_top5_cpu_use = collections.defaultdict(list)
    # List of Top Commands to be returned.
    topcmds = []

    for snapshot_processes in snapshots:
      # CPU usage per command, per PID in one snapshot.
      cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
      for process in snapshot_processes:
        cmd = process['cmd']
        cpu_use = process['cpu_use']
        pid = process['pid']
        cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use

      # Chrome processes, pid: cpu_usage.
      chrome_processes = cmd_cpu_use_per_snapshot.get('chrome', {})
      chrome_cpu_use_list = chrome_processes.values()

      if chrome_cpu_use_list and max(
          chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD:
        # CPU usage of any of the "chrome" processes exceeds "High load"
        # threshold which means DUT is busy running a benchmark.
        high_load_snapshots += 1
        for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
          for pid, cpu_use in cpu_use_per_pid.items():
            # Append PID to the name of the command.
            cmd_with_pid = cmd + '-' + pid
            cmd_total_cpu_use[cmd_with_pid] += cpu_use

            # Push cpu_use onto the command's usage heap; heapq keeps a
            # min-heap and nlargest() below extracts the top 5.
            heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))

    # Build the result rows in descending order of total CPU usage. Note:
    # if no snapshot was "high load", cmd_total_cpu_use is empty and the
    # division below is never reached.
    for consumer, usage in sorted(
        cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
      # Iterate through commands by descending order of total CPU usage.
      topcmd = {
          'cmd': consumer,
          'cpu_use_avg': usage / high_load_snapshots,
          'count': len(cmd_top5_cpu_use[consumer]),
          'top5_cpu_use': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
      }
      topcmds.append(topcmd)

    return topcmds
723
724  def ProcessCpustatsResults(self):
725    """Given cpustats_log_file non-null parse cpu data from file.
726
727    Returns:
728      Dictionary of 'cpufreq', 'cputemp' where each
729      includes dictionary of parameter: [list_of_values]
730
731    Example of cpustats.log output.
732    ----------------------
733    /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
734    /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
735    little-cpu 41234
736    big-cpu 51234
737
738    If cores share the same policy their frequencies may always match
739    on some devices.
740    To make report concise we should eliminate redundancy in the output.
741    Function removes cpuN data if it duplicates data from other cores.
742    """
743
744    cpustats = {}
745    read_data = ''
746    with open(self.cpustats_log_file) as f:
747      read_data = f.readlines()
748
749    if not read_data:
750      self._logger.LogOutput('WARNING: Cpustats output file is empty.')
751      return {}
752
753    cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$')
754    cputemp_regex = re.compile(r'^([^/\s]+)\s+(\d+)$')
755
756    for st in read_data:
757      match = cpufreq_regex.match(st)
758      if match:
759        cpu = match.group(1)
760        # CPU frequency comes in kHz.
761        freq_khz = int(match.group(2))
762        freq_mhz = freq_khz / 1000
763        # cpufreq represents a dictionary with CPU frequency-related
764        # data from cpustats.log.
765        cpufreq = cpustats.setdefault('cpufreq', {})
766        cpu_n_freq = cpufreq.setdefault(cpu, [])
767        cpu_n_freq.append(freq_mhz)
768      else:
769        match = cputemp_regex.match(st)
770        if match:
771          therm_type = match.group(1)
772          # The value is int, uCelsius unit.
773          temp_uc = float(match.group(2))
774          # Round to XX.X float.
775          temp_c = round(temp_uc / 1000, 1)
776          # cputemp represents a dictionary with temperature measurements
777          # from cpustats.log.
778          cputemp = cpustats.setdefault('cputemp', {})
779          therm_type = cputemp.setdefault(therm_type, [])
780          therm_type.append(temp_c)
781
782    # Remove duplicate statistics from cpustats.
783    pruned_stats = {}
784    for cpukey, cpuparam in cpustats.items():
785      # Copy 'cpufreq' and 'cputemp'.
786      pruned_params = pruned_stats.setdefault(cpukey, {})
787      for paramkey, paramvalue in sorted(cpuparam.items()):
788        # paramvalue is list of all measured data.
789        if paramvalue not in pruned_params.values():
790          pruned_params[paramkey] = paramvalue
791
792    return pruned_stats
793
794  def ProcessHistogramsResults(self):
795    # Open and parse the json results file generated by telemetry/test_that.
796    if not self.results_file:
797      raise IOError('No results file found.')
798    filename = self.results_file[0]
799    if not filename.endswith('.json'):
800      raise IOError('Attempt to call json on non-json file: %s' % filename)
801    if not os.path.exists(filename):
802      raise IOError('%s does not exist' % filename)
803
804    keyvals = {}
805    with open(filename) as f:
806      histograms = json.load(f)
807      value_map = {}
808      # Gets generic set values.
809      for obj in histograms:
810        if 'type' in obj and obj['type'] == 'GenericSet':
811          value_map[obj['guid']] = obj['values']
812
813      for obj in histograms:
814        if 'name' not in obj or 'sampleValues' not in obj:
815          continue
816        metric_name = obj['name']
817        vals = obj['sampleValues']
818        if isinstance(vals, list):
819          # Remove None elements from the list
820          vals = [val for val in vals if val is not None]
821          if vals:
822            result = float(sum(vals)) / len(vals)
823          else:
824            result = 0
825        else:
826          result = vals
827        unit = obj['unit']
828        diagnostics = obj['diagnostics']
829        # for summaries of benchmarks
830        key = metric_name
831        if key not in keyvals:
832          keyvals[key] = [[result], unit]
833        else:
834          keyvals[key][0].append(result)
835        # TODO: do we need summaries of stories?
836        # for summaries of story tags
837        if 'storyTags' in diagnostics:
838          guid = diagnostics['storyTags']
839          if guid not in value_map:
840            raise RuntimeError('Unrecognized storyTags in %s ' % (obj))
841          for story_tag in value_map[guid]:
842            key = metric_name + '__' + story_tag
843            if key not in keyvals:
844              keyvals[key] = [[result], unit]
845            else:
846              keyvals[key][0].append(result)
847    # calculate summary
848    for key in keyvals:
849      vals = keyvals[key][0]
850      unit = keyvals[key][1]
851      result = float(sum(vals)) / len(vals)
852      keyvals[key] = [result, unit]
853    return keyvals
854
855  def ReadPidFromPerfData(self):
856    """Read PIDs from perf.data files.
857
858    Extract PID from perf.data if "perf record" was running per process,
859    i.e. with "-p <PID>" and no "-a".
860
861    Returns:
862      pids: list of PIDs.
863
864    Raises:
865      PerfDataReadError when perf.data header reading fails.
866    """
867    cmd = ['/usr/bin/perf', 'report', '--header-only', '-i']
868    pids = []
869
870    for perf_data_path in self.perf_data_files:
871      perf_data_path_in_chroot = misc.GetInsideChrootPath(
872          self.chromeos_root, perf_data_path)
873      path_str = ' '.join(cmd + [perf_data_path_in_chroot])
874      status, output, _ = self.ce.ChrootRunCommandWOutput(
875          self.chromeos_root, path_str)
876      if status:
877        # Error of reading a perf.data profile is fatal.
878        raise PerfDataReadError(f'Failed to read perf.data profile: {path_str}')
879
880      # Pattern to search a line with "perf record" command line:
881      # # cmdline : /usr/bin/perf record -e instructions -p 123"
882      cmdline_regex = re.compile(
883          r'^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$')
884      # Pattern to search PID in a command line.
885      pid_regex = re.compile(r'^.*\s-p\s(?P<pid>\d+)\s*.*$')
886      for line in output.splitlines():
887        cmd_match = cmdline_regex.match(line)
888        if cmd_match:
889          # Found a perf command line.
890          cmdline = cmd_match.group('cmd')
891          # '-a' is a system-wide mode argument.
892          if '-a' not in cmdline.split():
893            # It can be that perf was attached to PID and was still running in
894            # system-wide mode.
895            # We filter out this case here since it's not per-process.
896            pid_match = pid_regex.match(cmdline)
897            if pid_match:
898              pids.append(pid_match.group('pid'))
899          # Stop the search and move to the next perf.data file.
900          break
901      else:
902        # cmdline wasn't found in the header. It's a fatal error.
903        raise PerfDataReadError(f'Perf command line is not found in {path_str}')
904    return pids
905
906  def VerifyPerfDataPID(self):
907    """Verify PIDs in per-process perf.data profiles.
908
909    Check that at list one top process is profiled if perf was running in
910    per-process mode.
911
912    Raises:
913      PidVerificationError if PID verification of per-process perf.data profiles
914      fail.
915    """
916    perf_data_pids = self.ReadPidFromPerfData()
917    if not perf_data_pids:
918      # In system-wide mode there are no PIDs.
919      self._logger.LogOutput('System-wide perf mode. Skip verification.')
920      return
921
922    # PIDs will be present only in per-process profiles.
923    # In this case we need to verify that profiles are collected on the
924    # hottest processes.
925    top_processes = [top_cmd['cmd'] for top_cmd in self.top_cmds]
926    # top_process structure: <cmd>-<pid>
927    top_pids = [top_process.split('-')[-1] for top_process in top_processes]
928    for top_pid in top_pids:
929      if top_pid in perf_data_pids:
930        self._logger.LogOutput('PID verification passed! '
931                               f'Top process {top_pid} is profiled.')
932        return
933    raise PidVerificationError(
934        f'top processes {top_processes} are missing in perf.data traces with'
935        f' PID: {perf_data_pids}.')
936
  def ProcessResults(self, use_cache=False):
    """Parse raw run output into self.keyvals plus auxiliary statistics.

    Chooses the parser by suite/result-file type, then folds in samples
    (CWP mode), perf reports, cpu statistics, top output and cooldown
    wait time.

    Args:
      use_cache: True when results were restored from the cache; only
        suppresses the deprecated-output warning below.
    """
    # Note that this function doesn't know anything about whether there is a
    # cache hit or miss. It should process results agnostic of the cache hit
    # state.
    if (self.results_file and self.suite == 'telemetry_Crosperf' and
        'histograms.json' in self.results_file[0]):
      self.keyvals = self.ProcessHistogramsResults()
    elif (self.results_file and self.suite != 'telemetry_Crosperf' and
          'results-chart.json' in self.results_file[0]):
      self.keyvals = self.ProcessChartResults()
    else:
      if not use_cache:
        print('\n ** WARNING **: Had to use deprecated output-method to '
              'collect results.\n')
      self.keyvals = self.GetKeyvals()
    self.keyvals['retval'] = self.retval
    # If we are in CWP approximation mode, we want to collect DSO samples
    # for each perf.data file
    if self.cwp_dso and self.retval == 0:
      self.keyvals['samples'] = self.GetSamples()
      # If the samples count collected from perf file is 0, we will treat
      # it as a failed run.
      if self.keyvals['samples'][0] == 0:
        del self.keyvals['samples']
        self.keyvals['retval'] = 1
    # Generate report from all perf.data files.
    # Now parse all perf report files and include them in keyvals.
    self.GatherPerfResults()

    cpustats = {}
    # Turbostat output has higher priority of processing.
    if self.turbostat_log_file:
      cpustats = self.ProcessTurbostatResults()
    # Process cpustats output only if turbostat has no data.
    if not cpustats and self.cpustats_log_file:
      cpustats = self.ProcessCpustatsResults()
    if self.top_log_file:
      self.top_cmds = self.ProcessTopResults()
    # Verify that PID in non system-wide perf.data and top_cmds are matching.
    if self.perf_data_files and self.top_cmds:
      self.VerifyPerfDataPID()
    if self.wait_time_log_file:
      with open(self.wait_time_log_file) as f:
        wait_time = f.readline().strip()
        try:
          wait_time = float(wait_time)
        except ValueError:
          raise ValueError('Wait time in log file is not a number.')
      # This is for accumulating wait time for telemtry_Crosperf runs only,
      # for test_that runs, please refer to suite_runner.
      self.machine.AddCooldownWaitTime(wait_time)

    # Flatten cpu statistics into keyvals as <key>_<type>_{avg,min,max}.
    for param_key, param in cpustats.items():
      for param_type, param_values in param.items():
        val_avg = sum(param_values) / len(param_values)
        val_min = min(param_values)
        val_max = max(param_values)
        # Average data is always included.
        self.keyvals['_'.join([param_key, param_type, 'avg'])] = val_avg
        # Insert min/max results only if they deviate
        # from average.
        if val_min != val_avg:
          self.keyvals['_'.join([param_key, param_type, 'min'])] = val_min
        if val_max != val_avg:
          self.keyvals['_'.join([param_key, param_type, 'max'])] = val_max
1002
1003  def GetChromeVersionFromCache(self, cache_dir):
1004    # Read chrome_version from keys file, if present.
1005    chrome_version = ''
1006    keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
1007    if os.path.exists(keys_file):
1008      with open(keys_file, 'r') as f:
1009        lines = f.readlines()
1010        for l in lines:
1011          if l.startswith('Google Chrome '):
1012            chrome_version = l
1013            if chrome_version.endswith('\n'):
1014              chrome_version = chrome_version[:-1]
1015            break
1016    return chrome_version
1017
1018  def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
1019    self.test_name = test
1020    self.suite = suite
1021    self.cwp_dso = cwp_dso
1022    # Read in everything from the cache directory.
1023    with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
1024      self.out = pickle.load(f)
1025      self.err = pickle.load(f)
1026      self.retval = pickle.load(f)
1027
1028    # Untar the tarball to a temporary directory
1029    self.temp_dir = tempfile.mkdtemp(
1030        dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
1031
1032    command = ('cd %s && tar xf %s' %
1033               (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
1034    ret = self.ce.RunCommand(command, print_to_console=False)
1035    if ret:
1036      raise RuntimeError('Could not untar cached tarball')
1037    self.results_dir = self.temp_dir
1038    self.results_file = self.GetDataMeasurementsFiles()
1039    self.perf_data_files = self.GetPerfDataFiles()
1040    self.perf_report_files = self.GetPerfReportFiles()
1041    self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
1042    self.ProcessResults(use_cache=True)
1043
1044  def CleanUp(self, rm_chroot_tmp):
1045    if rm_chroot_tmp and self.results_dir:
1046      dirname, basename = misc.GetRoot(self.results_dir)
1047      if basename.find('test_that_results_') != -1:
1048        command = 'rm -rf %s' % self.results_dir
1049      else:
1050        command = 'rm -rf %s' % dirname
1051      self.ce.RunCommand(command)
1052    if self.temp_dir:
1053      command = 'rm -rf %s' % self.temp_dir
1054      self.ce.RunCommand(command)
1055
1056  def CreateTarball(self, results_dir, tarball):
1057    if not results_dir.strip():
1058      raise ValueError('Refusing to `tar` an empty results_dir: %r' %
1059                       results_dir)
1060
1061    ret = self.ce.RunCommand('cd %s && '
1062                             'tar '
1063                             '--exclude=var/spool '
1064                             '--exclude=var/log '
1065                             '-cjf %s .' % (results_dir, tarball))
1066    if ret:
1067      raise RuntimeError("Couldn't compress test output directory.")
1068
1069  def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
1070    # Create the dir if it doesn't exist.
1071    temp_dir = tempfile.mkdtemp()
1072
1073    # Store to the temp directory.
1074    with open(os.path.join(temp_dir, RESULTS_FILE), 'wb') as f:
1075      pickle.dump(self.out, f)
1076      pickle.dump(self.err, f)
1077      pickle.dump(self.retval, f)
1078
1079    if not test_flag.GetTestMode():
1080      with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
1081        f.write('%s\n' % self.label.name)
1082        f.write('%s\n' % self.label.chrome_version)
1083        f.write('%s\n' % self.machine.checksum_string)
1084        for k in key_list:
1085          f.write(k)
1086          f.write('\n')
1087
1088    if self.results_dir:
1089      tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
1090      self.CreateTarball(self.results_dir, tarball)
1091
1092    # Store machine info.
1093    # TODO(asharif): Make machine_manager a singleton, and don't pass it into
1094    # this function.
1095    with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
1096      f.write(machine_manager.machine_checksum_string[self.label.name])
1097
1098    if os.path.exists(cache_dir):
1099      command = 'rm -rf {0}'.format(cache_dir)
1100      self.ce.RunCommand(command)
1101
1102    command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
1103    command += 'chmod g+x {0} && '.format(temp_dir)
1104    command += 'mv {0} {1}'.format(temp_dir, cache_dir)
1105    ret = self.ce.RunCommand(command)
1106    if ret:
1107      command = 'rm -rf {0}'.format(temp_dir)
1108      self.ce.RunCommand(command)
1109      raise RuntimeError('Could not move dir %s to dir %s' %
1110                         (temp_dir, cache_dir))
1111
1112  @classmethod
1113  def CreateFromRun(cls,
1114                    logger,
1115                    log_level,
1116                    label,
1117                    machine,
1118                    out,
1119                    err,
1120                    retval,
1121                    test,
1122                    suite='telemetry_Crosperf',
1123                    cwp_dso=''):
1124    if suite == 'telemetry':
1125      result = TelemetryResult(logger, label, log_level, machine)
1126    else:
1127      result = cls(logger, label, log_level, machine)
1128    result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
1129    return result
1130
1131  @classmethod
1132  def CreateFromCacheHit(cls,
1133                         logger,
1134                         log_level,
1135                         label,
1136                         machine,
1137                         cache_dir,
1138                         test,
1139                         suite='telemetry_Crosperf',
1140                         cwp_dso=''):
1141    if suite == 'telemetry':
1142      result = TelemetryResult(logger, label, log_level, machine)
1143    else:
1144      result = cls(logger, label, log_level, machine)
1145    try:
1146      result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
1147
1148    except RuntimeError as e:
1149      logger.LogError('Exception while using cache: %s' % e)
1150      return None
1151    return result
1152
1153
class TelemetryResult(Result):
  """Class to hold the results of a single Telemetry run."""

  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    self.out = out
    self.err = err
    self.retval = retval
    self.ProcessResults()

  # pylint: disable=arguments-differ
  def ProcessResults(self):
    """Convert telemetry's CSV-style stdout into self.keyvals.

    The output looks like:
      url,average_commit_time (ms),...
      www.google.com,33.4,21.2,...
    and becomes:
      {"www.google.com average_commit_time (ms)": "33.4", ...}

    Occasionally the first line is
    "JSON.stringify(window.automation.GetResults())"; it is skipped.
    """
    self.keyvals = {}
    lines = self.out.splitlines()

    if lines and lines[0].startswith('JSON.stringify'):
      lines = lines[1:]
    if not lines:
      return

    header = lines[0].split(',')
    for row in lines[1:]:
      fields = row.split(',')
      # Skip rows that do not match the header width.
      if len(fields) != len(header):
        continue
      page = fields[0]
      for column, value in zip(header[1:], fields[1:]):
        self.keyvals['%s %s' % (page, column)] = value
    self.keyvals['retval'] = self.retval

  def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
    """Restore pickled outputs from cache_dir and reparse them."""
    self.test_name = test
    self.suite = suite
    self.cwp_dso = cwp_dso
    with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
      self.out = pickle.load(f)
      self.err = pickle.load(f)
      self.retval = pickle.load(f)

    self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
    self.ProcessResults()
1209
1210
class CacheConditions(object):
  """Various Cache condition values, for export."""

  # Cache hit only if the result file exists.
  CACHE_FILE_EXISTS = 0

  # Cache hit if the checksum of cpuinfo and totalmem of
  # the cached result and the new run match.
  MACHINES_MATCH = 1

  # Cache hit if the image checksum of the cached result and the new run match.
  CHECKSUMS_MATCH = 2

  # Cache hit only if the cached result was successful.
  RUN_SUCCEEDED = 3

  # Never a cache hit.
  FALSE = 4

  # Cache hit if the image path matches the cached image path.
  IMAGE_PATH_MATCH = 5

  # Cache hit if the uuid of the hard disk matches the cached one.
  SAME_MACHINE_MATCH = 6
1236
1237
class ResultsCache(object):
  """Class to handle the cache for storing/retrieving test run results.

  This class manages the key of the cached runs without worrying about what
  is exactly stored (value). The value generation is handled by the Results
  class.
  """
  # Bump to invalidate all existing cache entries when the format changes.
  CACHE_VERSION = 6

  def __init__(self):
    # Proper initialization happens in the Init function below.
    self.chromeos_image = None
    self.chromeos_root = None
    self.test_name = None
    self.iteration = None
    self.test_args = None
    self.profiler_args = None
    self.board = None
    self.cache_conditions = None
    self.machine_manager = None
    self.machine = None
    self._logger = None
    self.ce = None
    self.label = None
    self.share_cache = None
    self.suite = None
    self.log_level = None
    self.show_all = None
    self.run_local = None
    self.cwp_dso = None

  def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
           profiler_args, machine_manager, machine, board, cache_conditions,
           logger_to_use, log_level, label, share_cache, suite,
           show_all_results, run_local, cwp_dso):
    """Set all fields that participate in cache key computation."""
    self.chromeos_image = chromeos_image
    self.chromeos_root = chromeos_root
    self.test_name = test_name
    self.iteration = iteration
    self.test_args = test_args
    self.profiler_args = profiler_args
    self.board = board
    self.cache_conditions = cache_conditions
    self.machine_manager = machine_manager
    self.machine = machine
    self._logger = logger_to_use
    self.ce = command_executer.GetCommandExecuter(
        self._logger, log_level=log_level)
    self.label = label
    self.share_cache = share_cache
    self.suite = suite
    self.log_level = log_level
    self.show_all = show_all_results
    self.run_local = run_local
    self.cwp_dso = cwp_dso

  def GetCacheDirForRead(self):
    """Return the first existing cache dir matching the read key, or None.

    Read keys may contain '*' wildcards (see GetCacheKeyList), hence the
    glob expansion here.
    """
    matching_dirs = []
    for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
      matching_dirs += glob.glob(glob_path)

    if matching_dirs:
      # Cache file found.
      return matching_dirs[0]
    return None

  def GetCacheDirForWrite(self, get_keylist=False):
    """Return the exact (wildcard-free) cache path for storing results.

    Args:
      get_keylist: if True, also return the human-readable key components
        that get written to the cache keys file.
    """
    cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
    if get_keylist:
      args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
                               self.run_local)
      version, image = results_report.ParseChromeosImage(
          self.label.chromeos_image)
      keylist = [
          version, image, self.label.board, self.machine.name, self.test_name,
          str(self.iteration), args_str
      ]
      return cache_path, keylist
    return cache_path

  def FormCacheDir(self, list_of_strings):
    """Turn key components into a list of candidate cache directories.

    The first entry is the primary cache location; any share_cache paths
    are appended as additional candidates.
    """
    cache_key = ' '.join(list_of_strings)
    cache_dir = misc.GetFilenameFromString(cache_key)
    if self.label.cache_dir:
      cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
      cache_path = [os.path.join(cache_home, cache_dir)]
    else:
      cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]

    if self.share_cache:
      for path in [x.strip() for x in self.share_cache.split(',')]:
        if os.path.exists(path):
          cache_path.append(os.path.join(path, cache_dir))
        else:
          self._logger.LogFatal('Unable to find shared cache: %s' % path)

    return cache_path

  def GetCacheKeyList(self, read):
    """Return the tuple of components that identify this run in the cache.

    Args:
      read: True when forming a lookup key; components whose cache
        condition is relaxed are replaced with a '*' glob wildcard.
    """
    if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
      machine_checksum = '*'
    else:
      machine_checksum = self.machine_manager.machine_checksum[self.label.name]
    if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
      checksum = '*'
    elif self.label.image_type == 'trybot':
      # Trybot images are keyed by their path rather than image contents.
      checksum = hashlib.md5(
          self.label.chromeos_image.encode('utf-8')).hexdigest()
    elif self.label.image_type == 'official':
      checksum = '*'
    else:
      checksum = ImageChecksummer().Checksum(self.label, self.log_level)

    if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
      image_path_checksum = '*'
    else:
      image_path_checksum = hashlib.md5(
          self.chromeos_image.encode('utf-8')).hexdigest()

    machine_id_checksum = ''
    if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
      machine_id_checksum = '*'
    else:
      if self.machine and self.machine.name in self.label.remote:
        machine_id_checksum = self.machine.machine_id_checksum
      else:
        # Fall back to the first remote machine known to the manager.
        for machine in self.machine_manager.GetMachines(self.label):
          if machine.name == self.label.remote[0]:
            machine_id_checksum = machine.machine_id_checksum
            break

    temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
                                   self.run_local)
    test_args_checksum = hashlib.md5(temp_test_args.encode('utf-8')).hexdigest()
    return (image_path_checksum, self.test_name, str(self.iteration),
            test_args_checksum, checksum, machine_checksum, machine_id_checksum,
            str(self.CACHE_VERSION))

  def ReadResult(self):
    """Return a cached Result for this run, or None on a cache miss.

    CacheConditions.FALSE forces a miss and also wipes the write-side
    cache dir; RUN_SUCCEEDED restricts hits to successful cached runs.
    """
    if CacheConditions.FALSE in self.cache_conditions:
      cache_dir = self.GetCacheDirForWrite()
      command = 'rm -rf %s' % (cache_dir,)
      self.ce.RunCommand(command)
      return None
    cache_dir = self.GetCacheDirForRead()

    if not cache_dir:
      return None

    if not os.path.isdir(cache_dir):
      return None

    if self.log_level == 'verbose':
      self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
    result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
                                       self.machine, cache_dir, self.test_name,
                                       self.suite, self.cwp_dso)
    if not result:
      return None

    if (result.retval == 0 or
        CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
      return result

    return None

  def StoreResult(self, result):
    """Store a Result into this run's write-side cache directory."""
    cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
    result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
1407
1408
class MockResultsCache(ResultsCache):
  """Class for mock testing, corresponding to ResultsCache class."""

  # FIXME: pylint complains about this mock init method, we should probably
  # replace all Mock classes in Crosperf with simple Mock.mock().
  # pylint: disable=arguments-differ
  def Init(self, *args):
    # No-op: the mock cache needs no configuration.
    pass

  def ReadResult(self):
    # Always a cache miss.
    return None

  def StoreResult(self, result):
    # No-op: nothing is persisted in mock mode.
    pass
1423
1424
class MockResult(Result):
  """Class for mock testing, corresponding to Result class."""

  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    # Store the raw outputs without any of the real parsing/processing.
    self.out = out
    self.err = err
    self.retval = retval
1432