• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# -*- coding: utf-8 -*-
2# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Module to deal with result cache."""
7
8from __future__ import division
9from __future__ import print_function
10
11import collections
12import glob
13import hashlib
14import heapq
15import json
16import os
17import pickle
18import re
19import tempfile
20
21from cros_utils import command_executer
22from cros_utils import misc
23
24from image_checksummer import ImageChecksummer
25
26import results_report
27import test_flag
28
29SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
30RESULTS_FILE = 'results.txt'
31MACHINE_FILE = 'machine.txt'
32AUTOTEST_TARBALL = 'autotest.tbz2'
33PERF_RESULTS_FILE = 'perf-results.txt'
34CACHE_KEYS_FILE = 'cache_keys.txt'
35
36
37class Result(object):
38  """Class for holding the results of a single test run.
39
40  This class manages what exactly is stored inside the cache without knowing
41  what the key of the cache is. For runs with perf, it stores perf.data,
42  perf.report, etc. The key generation is handled by the ResultsCache class.
43  """
44
45  def __init__(self, logger, label, log_level, machine, cmd_exec=None):
46    self.chromeos_root = label.chromeos_root
47    self._logger = logger
48    self.ce = cmd_exec or command_executer.GetCommandExecuter(
49        self._logger, log_level=log_level)
50    self.temp_dir = None
51    self.label = label
52    self.results_dir = None
53    self.log_level = log_level
54    self.machine = machine
55    self.perf_data_files = []
56    self.perf_report_files = []
57    self.results_file = []
58    self.turbostat_log_file = ''
59    self.cpustats_log_file = ''
60    self.top_log_file = ''
61    self.wait_time_log_file = ''
62    self.chrome_version = ''
63    self.err = None
64    self.chroot_results_dir = ''
65    self.test_name = ''
66    self.keyvals = None
67    self.board = None
68    self.suite = None
69    self.cwp_dso = ''
70    self.retval = None
71    self.out = None
72    self.top_cmds = []
73
  def GetTopCmds(self):
    """Get the list of top commands consuming CPU on the machine.

    Returns:
      The list of per-command dicts built by ProcessTopResults; empty if
      no top log data was collected for this run.
    """
    return self.top_cmds
77
78  def FormatStringTop5(self):
79    """Get formatted top5 string.
80
81    Get the formatted string with top5 commands consuming CPU on DUT machine.
82    """
83    format_list = [
84        'Top 5 commands with highest CPU usage:',
85        # Header.
86        '%20s %9s %6s   %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
87        '-' * 50,
88    ]
89    if self.top_cmds:
90      for topcmd in self.top_cmds[:5]:
91        print_line = '%20s %9.2f %6s   %s' % (topcmd['cmd'], topcmd['cpu_avg'],
92                                              topcmd['count'], topcmd['top5'])
93        format_list.append(print_line)
94    else:
95      format_list.append('[NO DATA FROM THE TOP LOG]')
96    format_list.append('-' * 50)
97    return '\n'.join(format_list)
98
99  def CopyFilesTo(self, dest_dir, files_to_copy):
100    file_index = 0
101    for file_to_copy in files_to_copy:
102      if not os.path.isdir(dest_dir):
103        command = 'mkdir -p %s' % dest_dir
104        self.ce.RunCommand(command)
105      dest_file = os.path.join(
106          dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
107      ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
108      if ret:
109        raise IOError('Could not copy results file: %s' % file_to_copy)
110
111  def CopyResultsTo(self, dest_dir):
112    self.CopyFilesTo(dest_dir, self.perf_data_files)
113    self.CopyFilesTo(dest_dir, self.perf_report_files)
114    if self.perf_data_files or self.perf_report_files:
115      self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
116
  def GetNewKeyvals(self, keyvals_dict):
    """Merge data from the measurement files into keyvals_dict.

    Reads every file returned by GetDataMeasurementsFiles(). Files whose
    path contains '.json' are parsed as chart dicts ('charts' wrapper is
    unwrapped) contributing '<chart>__<trace>' keys; lists of scalar
    values are averaged. Any other existing file is parsed as one json
    record per line with 'graph'/'description'/'value'/'units' fields.

    Args:
      keyvals_dict: dict to be augmented in place (also returned).

    Returns:
      Tuple (keyvals_dict, units_dict) where units_dict maps each key to
      its unit string ('' when no unit is known).
    """
    # Initialize 'units' dictionary.
    units_dict = {}
    for k in keyvals_dict:
      units_dict[k] = ''
    results_files = self.GetDataMeasurementsFiles()
    for f in results_files:
      # Make sure we can find the results file
      if os.path.exists(f):
        data_filename = f
      else:
        # Otherwise get the base filename and create the correct
        # path for it.
        _, f_base = misc.GetRoot(f)
        data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
                                     self.temp_dir, f_base)
      if data_filename.find('.json') > 0:
        raw_dict = dict()
        if os.path.exists(data_filename):
          with open(data_filename, 'r') as data_file:
            raw_dict = json.load(data_file)

        if 'charts' in raw_dict:
          raw_dict = raw_dict['charts']
        for k1 in raw_dict:
          field_dict = raw_dict[k1]
          for k2 in field_dict:
            result_dict = field_dict[k2]
            key = k1 + '__' + k2
            if 'value' in result_dict:
              keyvals_dict[key] = result_dict['value']
            elif 'values' in result_dict:
              values = result_dict['values']
              if ('type' in result_dict and
                  result_dict['type'] == 'list_of_scalar_values' and values and
                  values != 'null'):
                keyvals_dict[key] = sum(values) / float(len(values))
              else:
                keyvals_dict[key] = values
            # NOTE(review): 'units' is read unconditionally here — a chart
            # entry without a 'units' field would raise KeyError; confirm
            # the producer always emits it.
            units_dict[key] = result_dict['units']
      else:
        if os.path.exists(data_filename):
          with open(data_filename, 'r') as data_file:
            lines = data_file.readlines()
            for line in lines:
              tmp_dict = json.loads(line)
              graph_name = tmp_dict['graph']
              graph_str = (graph_name + '__') if graph_name else ''
              key = graph_str + tmp_dict['description']
              keyvals_dict[key] = tmp_dict['value']
              units_dict[key] = tmp_dict['units']

    return keyvals_dict, units_dict
170
171  def AppendTelemetryUnits(self, keyvals_dict, units_dict):
172    """keyvals_dict is the dict of key-value used to generate Crosperf reports.
173
174    units_dict is a dictionary of the units for the return values in
175    keyvals_dict.  We need to associate the units with the return values,
176    for Telemetry tests, so that we can include the units in the reports.
177    This function takes each value in keyvals_dict, finds the corresponding
178    unit in the units_dict, and replaces the old value with a list of the
179    old value and the units.  This later gets properly parsed in the
180    ResultOrganizer class, for generating the reports.
181    """
182
183    results_dict = {}
184    for k in keyvals_dict:
185      # We don't want these lines in our reports; they add no useful data.
186      if not k or k == 'telemetry_Crosperf':
187        continue
188      val = keyvals_dict[k]
189      units = units_dict[k]
190      new_val = [val, units]
191      results_dict[k] = new_val
192    return results_dict
193
  def GetKeyvals(self):
    """Collect key-value results for this run via generate_test_report.

    Copies the results directory into a temp dir under the chroot's
    /tmp (first call only), runs generate_test_report over it inside the
    chroot, parses its CSV output, then merges in data from the
    measurement files via GetNewKeyvals. For telemetry_Crosperf runs the
    values are paired with their units.

    Returns:
      Dict of key -> value (or key -> [value, units] for
      telemetry_Crosperf suites).
    """
    results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
    if not self.temp_dir:
      self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
      command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
      self.ce.RunCommand(command, print_to_console=False)

    command = ('./generate_test_report --no-color --csv %s' % (os.path.join(
        '/tmp', os.path.basename(self.temp_dir))))
    _, out, _ = self.ce.ChrootRunCommandWOutput(
        self.chromeos_root, command, print_to_console=False)
    keyvals_dict = {}
    tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
                                                 self.temp_dir)
    # Each CSV line ends with ',<key>=<value>'; strip the temp-dir prefix
    # from the key so cache keys are stable across runs.
    for line in out.splitlines():
      tokens = re.split('=|,', line)
      key = tokens[-2]
      if key.startswith(tmp_dir_in_chroot):
        key = key[len(tmp_dir_in_chroot) + 1:]
      value = tokens[-1]
      keyvals_dict[key] = value

    # Check to see if there is a perf_measurements file and get the
    # data from it if so.
    keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
    if self.suite == 'telemetry_Crosperf':
      # For telemtry_Crosperf results, append the units to the return
      # results, for use in generating the reports.
      keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
    return keyvals_dict
224
225  def GetSamples(self):
226    samples = 0
227    for perf_data_file in self.perf_data_files:
228      chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
229                                                       perf_data_file)
230      perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
231      perf_file = '/usr/sbin/perf'
232      if os.path.exists(perf_path):
233        perf_file = '/usr/bin/perf'
234
235      # For each perf.data, we want to collect sample count for specific DSO.
236      # We specify exact match for known DSO type, and every sample for `all`.
237      exact_match = ''
238      if self.cwp_dso == 'all':
239        exact_match = '""'
240      elif self.cwp_dso == 'chrome':
241        exact_match = '" chrome "'
242      elif self.cwp_dso == 'kallsyms':
243        exact_match = '"[kernel.kallsyms]"'
244      else:
245        # This will need to be updated once there are more DSO types supported,
246        # if user want an exact match for the field they want.
247        exact_match = '"%s"' % self.cwp_dso
248
249      command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' %
250                 (perf_file, chroot_perf_data_file, exact_match))
251      _, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
252                                                     command)
253      # Accumulate the sample count for all matched fields.
254      # Each line looks like this:
255      #     45.42%        237210  chrome
256      # And we want the second number which is the sample count.
257      sample = 0
258      try:
259        for line in result.split('\n'):
260          attr = line.split()
261          if len(attr) == 3 and '%' in attr[0]:
262            sample += int(attr[1])
263      except:
264        raise RuntimeError('Cannot parse perf dso result')
265
266      samples += sample
267    return [samples, u'samples']
268
269  def GetResultsDir(self):
270    mo = re.search(r'Results placed in (\S+)', self.out)
271    if mo:
272      result = mo.group(1)
273      return result
274    raise RuntimeError('Could not find results directory.')
275
276  def FindFilesInResultsDir(self, find_args):
277    if not self.results_dir:
278      return ''
279
280    command = 'find %s %s' % (self.results_dir, find_args)
281    ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
282    if ret:
283      raise RuntimeError('Could not run find command!')
284    return out
285
286  def GetResultsFile(self):
287    if self.suite == 'telemetry_Crosperf':
288      return self.FindFilesInResultsDir('-name histograms.json').splitlines()
289    return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
290
  def GetPerfDataFiles(self):
    """Find all perf.data files under the results directory."""
    return self.FindFilesInResultsDir('-name perf.data').splitlines()
293
  def GetPerfReportFiles(self):
    """Find all perf.data.report files under the results directory."""
    return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
296
297  def GetDataMeasurementsFiles(self):
298    result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
299    if not result:
300      if self.suite == 'telemetry_Crosperf':
301        result = \
302            self.FindFilesInResultsDir('-name histograms.json').splitlines()
303      else:
304        result = \
305            self.FindFilesInResultsDir('-name results-chart.json').splitlines()
306    return result
307
  def GetTurbostatFile(self):
    """Get turbostat log path string (first match, '' when absent)."""
    return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0]
311
  def GetCpustatsFile(self):
    """Get cpustats log path string (first match, '' when absent)."""
    return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0]
315
  def GetTopFile(self):
    """Get top log path string (first match, '' when absent)."""
    return self.FindFilesInResultsDir('-name top.log').split('\n')[0]
319
  def GetWaitTimeFile(self):
    """Get wait time log path string (first match, '' when absent)."""
    return self.FindFilesInResultsDir('-name wait_time.log').split('\n')[0]
323
324  def _CheckDebugPath(self, option, path):
325    relative_path = path[1:]
326    out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
327    if os.path.exists(out_chroot_path):
328      if option == 'kallsyms':
329        path = os.path.join(path, 'System.map-*')
330      return '--' + option + ' ' + path
331    else:
332      print('** WARNING **: --%s option not applied, %s does not exist' %
333            (option, out_chroot_path))
334      return ''
335
  def GeneratePerfReportFiles(self):
    """Run `perf report` inside the chroot for each perf.data file.

    Each report is written side-by-side with its perf.data file as
    '<perf.data>.report'. Debug-info options (--symfs/--vmlinux) come
    from the label's debug path when set; otherwise they (and
    --kallsyms) are probed from /build/<board> via _CheckDebugPath.

    Returns:
      List of report file paths (outside-chroot).

    Raises:
      RuntimeError: if a report file already exists or perf fails.
    """
    perf_report_files = []
    for perf_data_file in self.perf_data_files:
      # Generate a perf.report and store it side-by-side with the perf.data
      # file.
      chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
                                                       perf_data_file)
      perf_report_file = '%s.report' % perf_data_file
      if os.path.exists(perf_report_file):
        raise RuntimeError(
            'Perf report file already exists: %s' % perf_report_file)
      chroot_perf_report_file = misc.GetInsideChrootPath(
          self.chromeos_root, perf_report_file)
      perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')

      # Prefer the perf binary installed in the chroot when present.
      perf_file = '/usr/sbin/perf'
      if os.path.exists(perf_path):
        perf_file = '/usr/bin/perf'

      debug_path = self.label.debug_path

      if debug_path:
        symfs = '--symfs ' + debug_path
        vmlinux = '--vmlinux ' + os.path.join(debug_path, 'boot', 'vmlinux')
        kallsyms = ''
        print('** WARNING **: --kallsyms option not applied, no System.map-* '
              'for downloaded image.')
      else:
        if self.label.image_type != 'local':
          print('** WARNING **: Using local debug info in /build, this may '
                'not match the downloaded image.')
        build_path = os.path.join('/build', self.board)
        symfs = self._CheckDebugPath('symfs', build_path)
        vmlinux_path = os.path.join(build_path, 'usr/lib/debug/boot/vmlinux')
        vmlinux = self._CheckDebugPath('vmlinux', vmlinux_path)
        kallsyms_path = os.path.join(build_path, 'boot')
        kallsyms = self._CheckDebugPath('kallsyms', kallsyms_path)

      command = ('%s report -n %s %s %s -i %s --stdio > %s' %
                 (perf_file, symfs, vmlinux, kallsyms, chroot_perf_data_file,
                  chroot_perf_report_file))
      if self.log_level != 'verbose':
        self._logger.LogOutput('Generating perf report...\nCMD: %s' % command)
      exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
      if exit_code == 0:
        if self.log_level != 'verbose':
          self._logger.LogOutput('Perf report generated successfully.')
      else:
        raise RuntimeError(
            'Perf report not generated correctly. CMD: %s' % command)

      # Add a keyval to the dictionary for the events captured.
      perf_report_files.append(
          misc.GetOutsideChrootPath(self.chromeos_root,
                                    chroot_perf_report_file))
    return perf_report_files
392
393  def GatherPerfResults(self):
394    report_id = 0
395    for perf_report_file in self.perf_report_files:
396      with open(perf_report_file, 'r') as f:
397        report_contents = f.read()
398        for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
399          num_events = group[0]
400          event_name = group[1]
401          key = 'perf_%s_%s' % (report_id, event_name)
402          value = str(misc.UnitToNumber(num_events))
403          self.keyvals[key] = value
404
  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    """Record a fresh benchmark run and collect all of its artifacts.

    Stores the raw run output, locates the results directory and the
    perf/turbostat/cpustats/top/wait-time logs it produced, generates
    perf reports, then parses everything into self.keyvals via
    ProcessResults.

    Args:
      out: stdout of the run (must contain the 'Results placed in' line).
      err: stderr of the run.
      retval: exit status of the run.
      test: test (benchmark) name.
      suite: test suite name ('telemetry_Crosperf' for telemetry runs).
      cwp_dso: DSO name for CWP approximation mode ('' when disabled).
    """
    self.board = self.label.board
    self.out = out
    self.err = err
    self.retval = retval
    self.test_name = test
    self.suite = suite
    self.cwp_dso = cwp_dso
    self.chroot_results_dir = self.GetResultsDir()
    self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
                                                 self.chroot_results_dir)
    self.results_file = self.GetResultsFile()
    self.perf_data_files = self.GetPerfDataFiles()
    # Include all perf.report data in table.
    self.perf_report_files = self.GeneratePerfReportFiles()
    self.turbostat_log_file = self.GetTurbostatFile()
    self.cpustats_log_file = self.GetCpustatsFile()
    self.top_log_file = self.GetTopFile()
    self.wait_time_log_file = self.GetWaitTimeFile()
    # TODO(asharif): Do something similar with perf stat.

    # Grab keyvals from the directory.
    self.ProcessResults()
428
429  def ProcessChartResults(self):
430    # Open and parse the json results file generated by telemetry/test_that.
431    if not self.results_file:
432      raise IOError('No results file found.')
433    filename = self.results_file[0]
434    if not filename.endswith('.json'):
435      raise IOError('Attempt to call json on non-json file: %s' % filename)
436    if not os.path.exists(filename):
437      raise IOError('%s does not exist' % filename)
438
439    keyvals = {}
440    with open(filename, 'r') as f:
441      raw_dict = json.load(f)
442      if 'charts' in raw_dict:
443        raw_dict = raw_dict['charts']
444      for k, field_dict in raw_dict.items():
445        for item in field_dict:
446          keyname = k + '__' + item
447          value_dict = field_dict[item]
448          if 'value' in value_dict:
449            result = value_dict['value']
450          elif 'values' in value_dict:
451            values = value_dict['values']
452            if not values:
453              continue
454            if ('type' in value_dict and
455                value_dict['type'] == 'list_of_scalar_values' and
456                values != 'null'):
457              result = sum(values) / float(len(values))
458            else:
459              result = values
460          else:
461            continue
462          units = value_dict['units']
463          new_value = [result, units]
464          keyvals[keyname] = new_value
465    return keyvals
466
467  def ProcessTurbostatResults(self):
468    """Given turbostat_log_file non-null parse cpu stats from file.
469
470    Returns:
471      Dictionary of 'cpufreq', 'cputemp' where each
472      includes dictionary 'all': [list_of_values]
473
474    Example of the output of turbostat_log.
475    ----------------------
476    CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
477    -       329     12.13   2723    2393    10975   77
478    0       336     12.41   2715    2393    6328    77
479    2       323     11.86   2731    2393    4647    69
480    CPU     Avg_MHz Busy%   Bzy_MHz TSC_MHz IRQ     CoreTmp
481    -       1940    67.46   2884    2393    39920   83
482    0       1827    63.70   2877    2393    21184   83
483    """
484    cpustats = {}
485    read_data = ''
486    with open(self.turbostat_log_file) as f:
487      read_data = f.readlines()
488
489    if not read_data:
490      self._logger.LogOutput('WARNING: Turbostat output file is empty.')
491      return {}
492
493    # First line always contains the header.
494    stats = read_data[0].split()
495
496    # Mandatory parameters.
497    if 'CPU' not in stats:
498      self._logger.LogOutput(
499          'WARNING: Missing data for CPU# in Turbostat output.')
500      return {}
501    if 'Bzy_MHz' not in stats:
502      self._logger.LogOutput(
503          'WARNING: Missing data for Bzy_MHz in Turbostat output.')
504      return {}
505    cpu_index = stats.index('CPU')
506    cpufreq_index = stats.index('Bzy_MHz')
507    cpufreq = cpustats.setdefault('cpufreq', {'all': []})
508
509    # Optional parameters.
510    cputemp_index = -1
511    if 'CoreTmp' in stats:
512      cputemp_index = stats.index('CoreTmp')
513      cputemp = cpustats.setdefault('cputemp', {'all': []})
514
515    # Parse data starting from the second line ignoring repeating headers.
516    for st in read_data[1:]:
517      # Data represented by int or float separated by spaces.
518      numbers = st.split()
519      if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]):
520        # Skip the line if data mismatch.
521        continue
522      if numbers[cpu_index] != '-':
523        # Ignore Core-specific statistics which starts with Core number.
524        # Combined statistics for all core has "-" CPU identifier.
525        continue
526
527      cpufreq['all'].append(int(numbers[cpufreq_index]))
528      if cputemp_index != -1:
529        cputemp['all'].append(int(numbers[cputemp_index]))
530    return cpustats
531
  def ProcessTopResults(self):
    """Given self.top_log_file process top log data.

    Returns:
      List of dictionaries with the following keyvals:
       'cmd': command name (string),
       'cpu_avg': average cpu usage (float),
       'count': number of occurrences (int),
       'top5': up to 5 highest cpu usages (descending list of floats)

    Example of the top log:
      PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
     4102 chronos   12  -8 3454472 238300 118188 R  41.8   6.1   0:08.37 chrome
      375 root       0 -20       0      0      0 S   5.9   0.0   0:00.17 kworker
      617 syslog    20   0   25332   8372   7888 S   5.9   0.2   0:00.77 systemd

      PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
     5745 chronos   20   0 5438580 139328  67988 R 122.8   3.6   0:04.26 chrome
      912 root     -51   0       0      0      0 S   2.0   0.0   0:01.04 irq/cro
      121 root      20   0       0      0      0 S   1.0   0.0   0:00.45 spi5
    """
    all_data = ''
    with open(self.top_log_file) as f:
      all_data = f.read()

    if not all_data:
      self._logger.LogOutput('WARNING: Top log file is empty.')
      return []

    top_line_regex = re.compile(
        r"""
        ^\s*(?P<pid>\d+)\s+         # Group 1: PID
        \S+\s+\S+\s+-?\d+\s+        # Ignore: user, prio, nice
        \d+\s+\d+\s+\d+\s+          # Ignore: virt/res/shared mem
        \S+\s+                      # Ignore: state
        (?P<cpu_use>\d+\.\d+)\s+    # Group 2: CPU usage
        \d+\.\d+\s+\d+:\d+\.\d+\s+  # Ignore: mem usage, time
        (?P<cmd>\S+)$               # Group 3: command
        """, re.VERBOSE)
    # Page represents top log data per one measurement within time interval
    # 'top_interval'.
    # Pages separated by empty line.
    pages = all_data.split('\n\n')
    # Snapshots are structured representation of the pages.
    snapshots = []
    for page in pages:
      if not page:
        continue

      # Snapshot list will contain all processes (command duplicates are
      # allowed).
      snapshot = []
      for line in page.splitlines():
        match = top_line_regex.match(line)
        if match:
          # Top line is valid, collect data.
          process = {
              # NOTE: One command may be represented by multiple processes.
              'cmd': match.group('cmd'),
              'pid': int(match.group('pid')),
              'cpu_use': float(match.group('cpu_use')),
          }

          # Filter out processes with 0 CPU usage and top command.
          if process['cpu_use'] > 0 and process['cmd'] != 'top':
            snapshot.append(process)

      # If page contained meaningful data add snapshot to the list.
      if snapshot:
        snapshots.append(snapshot)

    # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
    # running.
    # Ideally it should be 100% but it will be hardly reachable with 1 core.
    # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and
    # 90% equally occurs in 72-74% of all top log snapshots.
    # Further decreasing of load threshold leads to a shifting percent of
    # "high load" snapshots which might include snapshots when benchmark is
    # not running.
    # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
    # of snapshots accordingly.
    CHROME_HIGH_CPU_LOAD = 90
    # Number of snapshots where chrome is heavily used.
    high_load_snapshots = 0
    # Total CPU use per process in ALL active snapshots.
    cmd_total_cpu_use = collections.defaultdict(float)
    # Top CPU usages per command.
    cmd_top5_cpu_use = collections.defaultdict(list)
    # List of Top Commands to be returned.
    topcmds = []

    for snapshot_processes in snapshots:
      # CPU usage per command in one snapshot.
      cmd_cpu_use_per_snapshot = collections.defaultdict(float)
      for process in snapshot_processes:
        cmd = process['cmd']
        cpu_use = process['cpu_use']

        # Collect CPU usage per command.
        cmd_cpu_use_per_snapshot[cmd] += cpu_use

      if cmd_cpu_use_per_snapshot.setdefault('chrome',
                                             0.0) > CHROME_HIGH_CPU_LOAD:
        # Combined CPU usage of "chrome" command exceeds "High load" threshold
        # which means DUT is busy running a benchmark.
        high_load_snapshots += 1
        for cmd, cpu_use in cmd_cpu_use_per_snapshot.items():
          # Update total CPU usage.
          cmd_total_cpu_use[cmd] += cpu_use

          # Add cpu_use into command top cpu usages, sorted in descending order.
          heapq.heappush(cmd_top5_cpu_use[cmd], round(cpu_use, 1))

    # NOTE: cmd_total_cpu_use is only populated when high_load_snapshots was
    # incremented, so the division below cannot divide by zero.
    for consumer, usage in sorted(
        cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
      # Iterate through commands by descending order of total CPU usage.
      topcmd = {
          'cmd': consumer,
          # Average over high-load snapshots only; 'count' is the number of
          # high-load snapshots in which the command appeared.
          'cpu_avg': usage / high_load_snapshots,
          'count': len(cmd_top5_cpu_use[consumer]),
          'top5': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
      }
      topcmds.append(topcmd)

    return topcmds
657
658  def ProcessCpustatsResults(self):
659    """Given cpustats_log_file non-null parse cpu data from file.
660
661    Returns:
662      Dictionary of 'cpufreq', 'cputemp' where each
663      includes dictionary of parameter: [list_of_values]
664
665    Example of cpustats.log output.
666    ----------------------
667    /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
668    /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
669    little-cpu 41234
670    big-cpu 51234
671
672    If cores share the same policy their frequencies may always match
673    on some devices.
674    To make report concise we should eliminate redundancy in the output.
675    Function removes cpuN data if it duplicates data from other cores.
676    """
677
678    cpustats = {}
679    read_data = ''
680    with open(self.cpustats_log_file) as f:
681      read_data = f.readlines()
682
683    if not read_data:
684      self._logger.LogOutput('WARNING: Cpustats output file is empty.')
685      return {}
686
687    cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$')
688    cputemp_regex = re.compile(r'^([^/\s]+)\s+(\d+)$')
689
690    for st in read_data:
691      match = cpufreq_regex.match(st)
692      if match:
693        cpu = match.group(1)
694        # CPU frequency comes in kHz.
695        freq_khz = int(match.group(2))
696        freq_mhz = freq_khz / 1000
697        # cpufreq represents a dictionary with CPU frequency-related
698        # data from cpustats.log.
699        cpufreq = cpustats.setdefault('cpufreq', {})
700        cpu_n_freq = cpufreq.setdefault(cpu, [])
701        cpu_n_freq.append(freq_mhz)
702      else:
703        match = cputemp_regex.match(st)
704        if match:
705          therm_type = match.group(1)
706          # The value is int, uCelsius unit.
707          temp_uc = float(match.group(2))
708          # Round to XX.X float.
709          temp_c = round(temp_uc / 1000, 1)
710          # cputemp represents a dictionary with temperature measurements
711          # from cpustats.log.
712          cputemp = cpustats.setdefault('cputemp', {})
713          therm_type = cputemp.setdefault(therm_type, [])
714          therm_type.append(temp_c)
715
716    # Remove duplicate statistics from cpustats.
717    pruned_stats = {}
718    for cpukey, cpuparam in cpustats.items():
719      # Copy 'cpufreq' and 'cputemp'.
720      pruned_params = pruned_stats.setdefault(cpukey, {})
721      for paramkey, paramvalue in sorted(cpuparam.items()):
722        # paramvalue is list of all measured data.
723        if paramvalue not in pruned_params.values():
724          pruned_params[paramkey] = paramvalue
725
726    return pruned_stats
727
  def ProcessHistogramsResults(self):
    """Parse the histograms.json file generated by telemetry_Crosperf.

    First pass collects GenericSet objects into a guid -> values map;
    second pass averages each histogram's sampleValues and aggregates
    them per metric name, plus per 'metric__storytag' using the
    storyTags diagnostics guid.

    Returns:
      Dict mapping metric name (and 'metric__storytag') to
      [mean_of_results, unit].

    Raises:
      IOError: if the results file is missing or not a json file.
      RuntimeError: if a storyTags guid has no matching GenericSet.
    """
    # Open and parse the json results file generated by telemetry/test_that.
    if not self.results_file:
      raise IOError('No results file found.')
    filename = self.results_file[0]
    if not filename.endswith('.json'):
      raise IOError('Attempt to call json on non-json file: %s' % filename)
    if not os.path.exists(filename):
      raise IOError('%s does not exist' % filename)

    keyvals = {}
    with open(filename) as f:
      histograms = json.load(f)
      value_map = {}
      # Gets generic set values.
      for obj in histograms:
        if 'type' in obj and obj['type'] == 'GenericSet':
          value_map[obj['guid']] = obj['values']

      for obj in histograms:
        if 'name' not in obj or 'sampleValues' not in obj:
          continue
        metric_name = obj['name']
        vals = obj['sampleValues']
        if isinstance(vals, list):
          # Remove None elements from the list
          vals = [val for val in vals if val is not None]
          if vals:
            result = float(sum(vals)) / len(vals)
          else:
            result = 0
        else:
          result = vals
        unit = obj['unit']
        diagnostics = obj['diagnostics']
        # for summaries of benchmarks
        key = metric_name
        if key not in keyvals:
          keyvals[key] = [[result], unit]
        else:
          keyvals[key][0].append(result)
        # TODO: do we need summaries of stories?
        # for summaries of story tags
        if 'storyTags' in diagnostics:
          guid = diagnostics['storyTags']
          if guid not in value_map:
            raise RuntimeError('Unrecognized storyTags in %s ' % (obj))
          for story_tag in value_map[guid]:
            key = metric_name + '__' + story_tag
            if key not in keyvals:
              keyvals[key] = [[result], unit]
            else:
              keyvals[key][0].append(result)
    # calculate summary
    # Collapse each accumulated [values_list, unit] into [mean, unit].
    for key in keyvals:
      vals = keyvals[key][0]
      unit = keyvals[key][1]
      result = float(sum(vals)) / len(vals)
      keyvals[key] = [result, unit]
    return keyvals
788
  def ProcessResults(self, use_cache=False):
    """Populate self.keyvals with metrics parsed from the run's outputs.

    Picks a parser based on the suite and the layout of the first results
    file: histograms.json for telemetry_Crosperf, results-chart.json for
    other suites, falling back to the deprecated raw-keyvals path.  Then
    folds in CWP perf samples, perf reports, turbostat/cpustats data, top
    output, and the recorded cooldown wait time.

    Args:
      use_cache: True when called on a cache hit; only suppresses the
        deprecated-output warning, parsing is otherwise identical.

    Raises:
      ValueError: if the wait-time log does not contain a number.
    """
    # Note that this function doesn't know anything about whether there is a
    # cache hit or miss. It should process results agnostic of the cache hit
    # state.
    if (self.results_file and self.suite == 'telemetry_Crosperf' and
        'histograms.json' in self.results_file[0]):
      self.keyvals = self.ProcessHistogramsResults()
    elif (self.results_file and self.suite != 'telemetry_Crosperf' and
          'results-chart.json' in self.results_file[0]):
      self.keyvals = self.ProcessChartResults()
    else:
      if not use_cache:
        print('\n ** WARNING **: Had to use deprecated output-method to '
              'collect results.\n')
      self.keyvals = self.GetKeyvals()
    self.keyvals['retval'] = self.retval
    # If we are in CWP approximation mode, we want to collect DSO samples
    # for each perf.data file
    if self.cwp_dso and self.retval == 0:
      self.keyvals['samples'] = self.GetSamples()
      # If the samples count collected from perf file is 0, we will treat
      # it as a failed run.
      if self.keyvals['samples'][0] == 0:
        del self.keyvals['samples']
        self.keyvals['retval'] = 1
    # Generate report from all perf.data files.
    # Now parse all perf report files and include them in keyvals.
    self.GatherPerfResults()

    cpustats = {}
    # Turbostat output has higher priority of processing.
    if self.turbostat_log_file:
      cpustats = self.ProcessTurbostatResults()
    # Process cpustats output only if turbostat has no data.
    if not cpustats and self.cpustats_log_file:
      cpustats = self.ProcessCpustatsResults()
    if self.top_log_file:
      self.top_cmds = self.ProcessTopResults()
    if self.wait_time_log_file:
      # The log's first line is expected to hold a single float.
      with open(self.wait_time_log_file) as f:
        wait_time = f.readline().strip()
        try:
          wait_time = float(wait_time)
        except ValueError:
          raise ValueError('Wait time in log file is not a number.')
      # This is for accumulating wait time for telemtry_Crosperf runs only,
      # for test_that runs, please refer to suite_runner.
      self.machine.AddCooldownWaitTime(wait_time)

    # Flatten cpustats into keyvals as <key>_<type>_{avg,min,max}.
    for param_key, param in cpustats.items():
      for param_type, param_values in param.items():
        val_avg = sum(param_values) / len(param_values)
        val_min = min(param_values)
        val_max = max(param_values)
        # Average data is always included.
        self.keyvals['_'.join([param_key, param_type, 'avg'])] = val_avg
        # Insert min/max results only if they deviate
        # from average.
        if val_min != val_avg:
          self.keyvals['_'.join([param_key, param_type, 'min'])] = val_min
        if val_max != val_avg:
          self.keyvals['_'.join([param_key, param_type, 'max'])] = val_max
851
852  def GetChromeVersionFromCache(self, cache_dir):
853    # Read chrome_version from keys file, if present.
854    chrome_version = ''
855    keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
856    if os.path.exists(keys_file):
857      with open(keys_file, 'r') as f:
858        lines = f.readlines()
859        for l in lines:
860          if l.startswith('Google Chrome '):
861            chrome_version = l
862            if chrome_version.endswith('\n'):
863              chrome_version = chrome_version[:-1]
864            break
865    return chrome_version
866
867  def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
868    self.test_name = test
869    self.suite = suite
870    self.cwp_dso = cwp_dso
871    # Read in everything from the cache directory.
872    with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
873      self.out = pickle.load(f)
874      self.err = pickle.load(f)
875      self.retval = pickle.load(f)
876
877    # Untar the tarball to a temporary directory
878    self.temp_dir = tempfile.mkdtemp(
879        dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
880
881    command = ('cd %s && tar xf %s' %
882               (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
883    ret = self.ce.RunCommand(command, print_to_console=False)
884    if ret:
885      raise RuntimeError('Could not untar cached tarball')
886    self.results_dir = self.temp_dir
887    self.results_file = self.GetDataMeasurementsFiles()
888    self.perf_data_files = self.GetPerfDataFiles()
889    self.perf_report_files = self.GetPerfReportFiles()
890    self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
891    self.ProcessResults(use_cache=True)
892
893  def CleanUp(self, rm_chroot_tmp):
894    if rm_chroot_tmp and self.results_dir:
895      dirname, basename = misc.GetRoot(self.results_dir)
896      if basename.find('test_that_results_') != -1:
897        command = 'rm -rf %s' % self.results_dir
898      else:
899        command = 'rm -rf %s' % dirname
900      self.ce.RunCommand(command)
901    if self.temp_dir:
902      command = 'rm -rf %s' % self.temp_dir
903      self.ce.RunCommand(command)
904
  def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
    """Persist this result into cache_dir via a temp dir and atomic move.

    Everything is written into a private temp directory first and then
    moved into place, so concurrent readers never see a half-written
    cache entry.  Pickles are dumped in the order PopulateFromCacheDir
    loads them back: out, err, retval.

    Args:
      cache_dir: final cache directory path (replaced if it exists).
      machine_manager: supplies the machine checksum string to store.
      key_list: human-readable cache key strings recorded for debugging.

    Raises:
      RuntimeError: if tarring the results or moving the temp directory
        into place fails.
    """
    # Create the dir if it doesn't exist.
    temp_dir = tempfile.mkdtemp()

    # Store to the temp directory.
    with open(os.path.join(temp_dir, RESULTS_FILE), 'wb') as f:
      pickle.dump(self.out, f)
      pickle.dump(self.err, f)
      pickle.dump(self.retval, f)

    if not test_flag.GetTestMode():
      # Record readable key material (label, Chrome version, machine
      # checksum, then the caller's keys) for later inspection.
      with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
        f.write('%s\n' % self.label.name)
        f.write('%s\n' % self.label.chrome_version)
        f.write('%s\n' % self.machine.checksum_string)
        for k in key_list:
          f.write(k)
          f.write('\n')

    if self.results_dir:
      tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
      # Exclude bulky, uninteresting autotest subtrees from the tarball.
      command = ('cd %s && '
                 'tar '
                 '--exclude=var/spool '
                 '--exclude=var/log '
                 '-cjf %s .' % (self.results_dir, tarball))
      ret = self.ce.RunCommand(command)
      if ret:
        raise RuntimeError("Couldn't store autotest output directory.")
    # Store machine info.
    # TODO(asharif): Make machine_manager a singleton, and don't pass it into
    # this function.
    with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
      f.write(machine_manager.machine_checksum_string[self.label.name])

    # Replace any existing cache entry wholesale.
    if os.path.exists(cache_dir):
      command = 'rm -rf {0}'.format(cache_dir)
      self.ce.RunCommand(command)

    # Move the fully-written temp dir into its final location; on failure,
    # clean up the temp dir so nothing leaks.
    command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
    command += 'chmod g+x {0} && '.format(temp_dir)
    command += 'mv {0} {1}'.format(temp_dir, cache_dir)
    ret = self.ce.RunCommand(command)
    if ret:
      command = 'rm -rf {0}'.format(temp_dir)
      self.ce.RunCommand(command)
      raise RuntimeError(
          'Could not move dir %s to dir %s' % (temp_dir, cache_dir))
953
954  @classmethod
955  def CreateFromRun(cls,
956                    logger,
957                    log_level,
958                    label,
959                    machine,
960                    out,
961                    err,
962                    retval,
963                    test,
964                    suite='telemetry_Crosperf',
965                    cwp_dso=''):
966    if suite == 'telemetry':
967      result = TelemetryResult(logger, label, log_level, machine)
968    else:
969      result = cls(logger, label, log_level, machine)
970    result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
971    return result
972
973  @classmethod
974  def CreateFromCacheHit(cls,
975                         logger,
976                         log_level,
977                         label,
978                         machine,
979                         cache_dir,
980                         test,
981                         suite='telemetry_Crosperf',
982                         cwp_dso=''):
983    if suite == 'telemetry':
984      result = TelemetryResult(logger, label, log_level, machine)
985    else:
986      result = cls(logger, label, log_level, machine)
987    try:
988      result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
989
990    except RuntimeError as e:
991      logger.LogError('Exception while using cache: %s' % e)
992      return None
993    return result
994
995
class TelemetryResult(Result):
  """Class to hold the results of a single Telemetry run."""

  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    """Record raw run output and parse it into keyvals."""
    self.out = out
    self.err = err
    self.retval = retval

    self.ProcessResults()

  # pylint: disable=arguments-differ
  def ProcessResults(self):
    """Parse CSV-style telemetry stdout into self.keyvals.

    Expected output shape:
      url,average_commit_time (ms),...
      www.google.com,33.4,21.2,...
    which becomes:
      {'www.google.com average_commit_time (ms)': '33.4', ...}
    Occasionally a 'JSON.stringify(window.automation.GetResults())' line
    precedes the table; it is skipped.
    """
    lines = self.out.splitlines()
    self.keyvals = {}

    if lines and lines[0].startswith('JSON.stringify'):
      lines = lines[1:]

    if not lines:
      return
    header = lines[0].split(',')
    for row in lines[1:]:
      fields = row.split(',')
      # Skip malformed rows that don't match the header width.
      if len(fields) != len(header):
        continue
      page = fields[0]
      for metric, value in zip(header[1:], fields[1:]):
        self.keyvals['%s %s' % (page, metric)] = value
    self.keyvals['retval'] = self.retval

  def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
    """Load pickled run output from cache_dir and re-parse it."""
    self.test_name = test
    self.suite = suite
    self.cwp_dso = cwp_dso
    # Same pickle order as Result.StoreToCacheDir: out, err, retval.
    with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as cached:
      self.out = pickle.load(cached)
      self.err = pickle.load(cached)
      self.retval = pickle.load(cached)

    # GetChromeVersionFromCache is inherited unchanged from Result.
    self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
    self.ProcessResults()
1051
1052
class CacheConditions(object):
  """Various Cache condition values, for export.

  These flags select which components of the cache key must match for a
  stored result to count as a hit (see ResultsCache.GetCacheKeyList).
  """

  # Cache hit only if the result file exists.
  CACHE_FILE_EXISTS = 0

  # Cache hit if the checksum of cpuinfo and totalmem of
  # the cached result and the new run match.
  MACHINES_MATCH = 1

  # Cache hit if the image checksum of the cached result and the new run match.
  CHECKSUMS_MATCH = 2

  # Cache hit only if the cached result was successful.
  RUN_SUCCEEDED = 3

  # Never a cache hit.
  FALSE = 4

  # Cache hit if the image path matches the cached image path.
  IMAGE_PATH_MATCH = 5

  # Cache hit if the uuid of hard disk matches the cached one.
  SAME_MACHINE_MATCH = 6
1078
1079
class ResultsCache(object):
  """Class to handle the cache for storing/retrieving test run results.

  This class manages the key of the cached runs without worrying about what
  is exactly stored (value). The value generation is handled by the Results
  class.
  """
  # Part of the cache key: bump it whenever the key format or stored layout
  # changes so stale entries stop matching.
  CACHE_VERSION = 6

  def __init__(self):
    # Proper initialization happens in the Init function below.
    self.chromeos_image = None
    self.chromeos_root = None
    self.test_name = None
    self.iteration = None
    self.test_args = None
    self.profiler_args = None
    self.board = None
    self.cache_conditions = None
    self.machine_manager = None
    self.machine = None
    self._logger = None
    self.ce = None
    self.label = None
    self.share_cache = None
    self.suite = None
    self.log_level = None
    self.show_all = None
    self.run_local = None
    self.cwp_dso = None

  def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
           profiler_args, machine_manager, machine, board, cache_conditions,
           logger_to_use, log_level, label, share_cache, suite,
           show_all_results, run_local, cwp_dso):
    """Record every run parameter that participates in cache-key generation."""
    self.chromeos_image = chromeos_image
    self.chromeos_root = chromeos_root
    self.test_name = test_name
    self.iteration = iteration
    self.test_args = test_args
    self.profiler_args = profiler_args
    self.board = board
    self.cache_conditions = cache_conditions
    self.machine_manager = machine_manager
    self.machine = machine
    self._logger = logger_to_use
    self.ce = command_executer.GetCommandExecuter(
        self._logger, log_level=log_level)
    self.label = label
    self.share_cache = share_cache
    self.suite = suite
    self.log_level = log_level
    self.show_all = show_all_results
    self.run_local = run_local
    self.cwp_dso = cwp_dso

  def GetCacheDirForRead(self):
    """Return the first existing cache dir matching the read-key globs.

    Returns None when no matching directory exists.
    """
    matching_dirs = []
    for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
      matching_dirs += glob.glob(glob_path)

    if matching_dirs:
      # Cache file found.
      return matching_dirs[0]
    return None

  def GetCacheDirForWrite(self, get_keylist=False):
    """Return the local (writable) cache path for this run.

    Args:
      get_keylist: when True, also return the human-readable key strings
        that are stored alongside the results for later inspection.
    """
    cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
    if get_keylist:
      args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
                               self.run_local)
      version, image = results_report.ParseChromeosImage(
          self.label.chromeos_image)
      keylist = [
          version, image, self.label.board, self.machine.name, self.test_name,
          str(self.iteration), args_str
      ]
      return cache_path, keylist
    return cache_path

  def FormCacheDir(self, list_of_strings):
    """Map a list of key components to candidate cache directory paths.

    The first entry is the local (writable) cache location; any configured
    shared caches are appended as additional read locations.
    """
    cache_key = ' '.join(list_of_strings)
    cache_dir = misc.GetFilenameFromString(cache_key)
    if self.label.cache_dir:
      cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
      cache_path = [os.path.join(cache_home, cache_dir)]
    else:
      cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]

    if self.share_cache:
      # share_cache is a comma-separated list of extra cache roots.
      for path in [x.strip() for x in self.share_cache.split(',')]:
        if os.path.exists(path):
          cache_path.append(os.path.join(path, cache_dir))
        else:
          self._logger.LogFatal('Unable to find shared cache: %s' % path)

    return cache_path

  def GetCacheKeyList(self, read):
    """Build the tuple of key components identifying this run in the cache.

    Args:
      read: when True, components whose matching condition is absent from
        self.cache_conditions are replaced with '*' so they glob-match
        any stored value.
    """
    if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
      machine_checksum = '*'
    else:
      machine_checksum = self.machine_manager.machine_checksum[self.label.name]
    if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
      checksum = '*'
    elif self.label.image_type == 'trybot':
      # Trybot images have no stable checksum; hash the path instead.
      checksum = hashlib.md5(
          self.label.chromeos_image.encode('utf-8')).hexdigest()
    elif self.label.image_type == 'official':
      checksum = '*'
    else:
      checksum = ImageChecksummer().Checksum(self.label, self.log_level)

    if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
      image_path_checksum = '*'
    else:
      image_path_checksum = hashlib.md5(
          self.chromeos_image.encode('utf-8')).hexdigest()

    machine_id_checksum = ''
    if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
      machine_id_checksum = '*'
    else:
      if self.machine and self.machine.name in self.label.remote:
        machine_id_checksum = self.machine.machine_id_checksum
      else:
        # Fall back to the checksum of the label's first remote machine.
        for machine in self.machine_manager.GetMachines(self.label):
          if machine.name == self.label.remote[0]:
            machine_id_checksum = machine.machine_id_checksum
            break

    temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
                                   self.run_local)
    test_args_checksum = hashlib.md5(temp_test_args.encode('utf-8')).hexdigest()
    return (image_path_checksum, self.test_name, str(self.iteration),
            test_args_checksum, checksum, machine_checksum, machine_id_checksum,
            str(self.CACHE_VERSION))

  def ReadResult(self):
    """Return a cached Result for this run, or None on a cache miss.

    With CacheConditions.FALSE, the write-path cache entry is purged
    first so a fresh run is forced.  With RUN_SUCCEEDED, failed cached
    runs are also treated as misses.
    """
    if CacheConditions.FALSE in self.cache_conditions:
      cache_dir = self.GetCacheDirForWrite()
      command = 'rm -rf %s' % (cache_dir,)
      self.ce.RunCommand(command)
      return None
    cache_dir = self.GetCacheDirForRead()

    if not cache_dir:
      return None

    if not os.path.isdir(cache_dir):
      return None

    if self.log_level == 'verbose':
      self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
    result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
                                       self.machine, cache_dir, self.test_name,
                                       self.suite, self.cwp_dso)
    if not result:
      return None

    if (result.retval == 0 or
        CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
      return result

    return None

  def StoreResult(self, result):
    """Persist result under this run's write-path cache directory."""
    cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
    result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
1249
1250
class MockResultsCache(ResultsCache):
  """Class for mock testing, corresponding to ResultsCache class."""

  # FIXME: pylint complains about this mock init method, we should probably
  # replace all Mock classes in Crosperf with simple Mock.mock().
  # pylint: disable=arguments-differ
  def Init(self, *args):
    """No-op: the mock cache needs no configuration."""
    pass

  def ReadResult(self):
    """Always report a cache miss."""
    return None

  def StoreResult(self, result):
    """No-op: the mock cache discards results."""
    pass
1265
1266
class MockResult(Result):
  """Class for mock testing, corresponding to Result class."""

  def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
    """Store raw run output without any processing.

    test, suite and cwp_dso are accepted for interface compatibility but
    ignored by the mock.
    """
    self.out = out
    self.err = err
    self.retval = retval
1274