# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to handle the report format."""
from __future__ import print_function

import datetime
import functools
import itertools
import json
import os
import re
import time

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import Format
from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import OrganizeResults

import results_report_templates as templates


def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .<datetime> suffix in the first case); and also
  the 'chromeos_image', which is the entire path in the first case, but only
  the part after '/chroot/tmp' in the second case.

  Args:
    chromeos_image: string containing the path to the chromeos_image that
    crosperf used for the test.

  Returns:
    version, image: The results of parsing the input string, as explained
    above.
  """
  # Find the Chromeos Version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    full_version = chromeos_image.split('/')[-2]
    # Strip the date and time off of local builds (which have the format
    # "R43-2345.0.0.date-and-time").
    version, _ = os.path.splitext(full_version)
  else:
    version = ''

  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  official_image_path = '/chroot/tmp'
  if official_image_path in chromeos_image:
    image = chromeos_image.split(official_image_path, 1)[1]
  else:
    image = chromeos_image
  return version, image

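
# A minimal usage sketch (added for illustration; not part of the original
# module). The paths below are hypothetical and only mirror the two formats
# described in the ParseChromeosImage docstring above.
def _ExampleParseChromeosImage():
  # Format 1: a local build; the trailing .<datetime> chunk is stripped from
  # the version and the whole path is kept as the image.
  local = ('/home/user/chromeos/src/build/images/lumpy/'
           'R45-2345.0.0.2015_06_01_1200/chromiumos_test_image.bin')
  version, image = ParseChromeosImage(local)
  print(version)  # 'R45-2345.0.0'
  print(image == local)  # True
  # Format 2: an official image under .../chroot/tmp/...; only the part after
  # '/chroot/tmp' is kept as the image.
  official = ('/home/user/chromeos/chroot/tmp/lumpy-release/'
              'R45-2345.0.0/chromiumos_test_image.bin')
  _, image = ParseChromeosImage(official)
  print(image)  # '/lumpy-release/R45-2345.0.0/chromiumos_test_image.bin'
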

def _AppendUntilLengthIs(gen, the_list, target_len):
  """Appends to `the_list` until it is `target_len` elements long.

  Uses `gen` to generate elements.
  """
  the_list.extend(gen() for _ in range(target_len - len(the_list)))
  return the_list

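
# A brief illustration (added here, not in the original module) of how
# _AppendUntilLengthIs is used below: `gen` is typically the `list` or `dict`
# constructor, so the padding elements are fresh empty containers.
def _ExampleAppendUntilLengthIs():
  data = [{'a': 1}]
  padded = _AppendUntilLengthIs(dict, data, 3)
  print(padded)          # [{'a': 1}, {}, {}]
  print(padded is data)  # True; the list is extended in place and returned.
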

def _FilterPerfReport(event_threshold, report):
  """Filters out entries with `< event_threshold` percent in a perf report."""

  def filter_dict(m):
    return {
        fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
    }

  return {event: filter_dict(m) for event, m in report.items()}

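
# A small, hypothetical example (added for illustration) of the filtering done
# by _FilterPerfReport: per-function percentages below the threshold are
# dropped, independently for each perf event.
def _ExampleFilterPerfReport():
  report = {'cycles': {'main': 12.5, 'helper': 0.4},
            'instructions': {'main': 9.0}}
  print(_FilterPerfReport(1.0, report))
  # {'cycles': {'main': 12.5}, 'instructions': {'main': 9.0}}
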

class _PerfTable(object):
  """Generates dicts from a perf table.

  Dicts look like:
  {'benchmark_name': {'perf_event_name': [LabelData]}}
  where LabelData is a list of perf dicts, each perf dict coming from the same
  label.
  Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
  percentage of time spent in function_name).
  """

  def __init__(self,
               benchmark_names_and_iterations,
               label_names,
               read_perf_report,
               event_threshold=None):
    """Constructor.

    read_perf_report is a function that takes a label name, benchmark name, and
    benchmark iteration, and returns a dictionary describing the perf output for
    that given run.
    """
    self.event_threshold = event_threshold
    self._label_indices = {name: i for i, name in enumerate(label_names)}
    self.perf_data = {}
    for label in label_names:
      for bench_name, bench_iterations in benchmark_names_and_iterations:
        for i in range(bench_iterations):
          report = read_perf_report(label, bench_name, i)
          self._ProcessPerfReport(report, label, bench_name, i)

  def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
    """Add the data from one run to the dict."""
    perf_of_run = perf_report
    if self.event_threshold is not None:
      perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
    if benchmark_name not in self.perf_data:
      self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
    ben_data = self.perf_data[benchmark_name]
    label_index = self._label_indices[label]
    for event in ben_data:
      _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
      data_for_label = ben_data[event][label_index]
      _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
      data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}

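
# A rough illustration (not part of the original module) of the perf_data
# shape documented in the _PerfTable docstring, using a stubbed
# read_perf_report and made-up numbers.
def _ExamplePerfTable():
  def fake_read_perf_report(_label, _bench, _iteration):
    return {'cycles': {'main': 90.0, 'idle': 0.5}}

  p_table = _PerfTable(
      benchmark_names_and_iterations=[('octane', 2)],
      label_names=['image1', 'image2'],
      read_perf_report=fake_read_perf_report)
  # perf_data maps benchmark -> event -> per-label list of per-iteration dicts:
  # {'octane': {'cycles': [[<iter 0 dict>, <iter 1 dict>],    # label 'image1'
  #                        [<iter 0 dict>, <iter 1 dict>]]}}  # label 'image2'
  print(p_table.perf_data)
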

def _GetResultsTableHeader(ben_name, iterations):
  benchmark_info = ('Benchmark:  {0};  Iterations: {1}'.format(
      ben_name, iterations))
  cell = Cell()
  cell.string_value = benchmark_info
  cell.header = True
  return [[cell]]


def _GetDSOHeader(cwp_dso):
  info = 'CWP_DSO: %s' % cwp_dso
  cell = Cell()
  cell.string_value = info
  cell.header = False
  return [[cell]]


def _ParseColumn(columns, iteration):
  new_column = []
  for column in columns:
    if column.result.__class__.__name__ != 'RawResult':
      new_column.append(column)
    else:
      new_column.extend(
          Column(LiteralResult(i), Format(), str(i + 1))
          for i in range(iteration))
  return new_column

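
# An illustrative sketch (added; not in the original module): _ParseColumn
# leaves ordinary columns alone and expands a RawResult column into one
# LiteralResult column per iteration, named '1'..'N'.
def _ExampleParseColumn():
  columns = [Column(RawResult(), Format()),
             Column(AmeanResult(), Format())]
  parsed = _ParseColumn(columns, iteration=3)
  # parsed now holds three per-iteration LiteralResult columns followed by the
  # untouched AmeanResult column.
  print(len(parsed))  # 4
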

def _GetTables(benchmark_results, columns, table_type):
  iter_counts = benchmark_results.iter_counts
  result = benchmark_results.run_keyvals
  tables = []
  for bench_name, runs in result.items():
    iterations = iter_counts[bench_name]
    ben_table = _GetResultsTableHeader(bench_name, iterations)

    all_runs_empty = all(not run for label in runs for run in label)
    if all_runs_empty:
      cell = Cell()
      cell.string_value = ('This benchmark contains no result.'
                           ' Is the benchmark name valid?')
      cell_table = [[cell]]
    else:
      table = TableGenerator(runs, benchmark_results.label_names).GetTable()
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      cell_table = tf.GetCellTable(table_type)
    tables.append(ben_table)
    tables.append(cell_table)
  return tables


def _GetPerfTables(benchmark_results, columns, table_type):
  p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
                       benchmark_results.label_names,
                       benchmark_results.read_perf_report)

  tables = []
  for benchmark in p_table.perf_data:
    iterations = benchmark_results.iter_counts[benchmark]
    ben_table = _GetResultsTableHeader(benchmark, iterations)
    tables.append(ben_table)
    benchmark_data = p_table.perf_data[benchmark]
    table = []
    for event in benchmark_data:
      tg = TableGenerator(
          benchmark_data[event],
          benchmark_results.label_names,
          sort=TableGenerator.SORT_BY_VALUES_DESC)
      table = tg.GetTable(ResultsReport.PERF_ROWS)
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      tf.GenerateCellTable(table_type)
      tf.AddColumnName()
      tf.AddLabelName()
      tf.AddHeader(str(event))
      table = tf.GetCellTable(table_type, headers=False)
      tables.append(table)
  return tables


def _GetSamplesTables(benchmark_results, columns, table_type):
  tables = []
  dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
  tables.append(dso_header_table)
  (table, new_keyvals, iter_counts) = SamplesTableGenerator(
      benchmark_results.run_keyvals, benchmark_results.label_names,
      benchmark_results.iter_counts, benchmark_results.weights).GetTable()
  parsed_columns = _ParseColumn(columns, 1)
  tf = TableFormatter(table, parsed_columns, samples_table=True)
  cell_table = tf.GetCellTable(table_type)
  tables.append(cell_table)
  return (tables, new_keyvals, iter_counts)


class ResultsReport(object):
  """Class to handle the report format."""
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, results):
    self.benchmark_results = results

  def _GetTablesWithColumns(self, columns, table_type, summary_type):
    if summary_type == 'perf':
      get_tables = _GetPerfTables
    elif summary_type == 'samples':
      get_tables = _GetSamplesTables
    else:
      get_tables = _GetTables
    ret = get_tables(self.benchmark_results, columns, table_type)
    # If we are generating a samples summary table, the return value of
    # get_tables will be a tuple. In that case, update benchmark_results with
    # the composite benchmark's keyvals so that the full table can use them.
    if isinstance(ret, tuple):
      self.benchmark_results.run_keyvals = ret[1]
      self.benchmark_results.iter_counts = ret[2]
      ret = ret[0]
    return ret

  def GetFullTables(self, perf=False):
    ignore_min_max = self.benchmark_results.ignore_min_max
    columns = [
        Column(RawResult(), Format()),
        Column(MinResult(), Format()),
        Column(MaxResult(), Format()),
        Column(AmeanResult(ignore_min_max), Format()),
        Column(StdResult(ignore_min_max), Format(), 'StdDev'),
        Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value')
    ]
    return self._GetTablesWithColumns(columns, 'full', perf)

  def GetSummaryTables(self, summary_type=''):
    ignore_min_max = self.benchmark_results.ignore_min_max
    columns = []
    if summary_type == 'samples':
      columns += [Column(IterationResult(), Format(), 'Iterations [Pass:Fail]')]
    columns += [
        Column(
            AmeanResult(ignore_min_max), Format(),
            'Weighted Samples Amean' if summary_type == 'samples' else ''),
        Column(StdResult(ignore_min_max), Format(), 'StdDev'),
        Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value')
    ]
    return self._GetTablesWithColumns(columns, 'summary', summary_type)


def _PrintTable(tables, out_to):
  # tables may be None.
  if not tables:
    return ''

  if out_to == 'HTML':
    out_type = TablePrinter.HTML
  elif out_to == 'PLAIN':
    out_type = TablePrinter.PLAIN
  elif out_to == 'CONSOLE':
    out_type = TablePrinter.CONSOLE
  elif out_to == 'TSV':
    out_type = TablePrinter.TSV
  elif out_to == 'EMAIL':
    out_type = TablePrinter.EMAIL
  else:
    raise ValueError('Invalid out_to value: %s' % (out_to,))

  printers = (TablePrinter(table, out_type) for table in tables)
  return ''.join(printer.Print() for printer in printers)


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""

  H1_STR = '==========================================='
  H2_STR = '-------------------------------------------'

  def __init__(self, results, email=False, experiment=None):
    super(TextResultsReport, self).__init__(results)
    self.email = email
    self.experiment = experiment

  @staticmethod
  def _MakeTitle(title):
    header_line = TextResultsReport.H1_STR
    # '' at the end gives one newline.
    return '\n'.join([header_line, title, header_line, ''])

  @staticmethod
  def _MakeSection(title, body):
    header_line = TextResultsReport.H2_STR
    # '\n' at the end gives us two newlines.
    return '\n'.join([header_line, title, header_line, body, '\n'])

  @staticmethod
  def FromExperiment(experiment, email=False):
    results = BenchmarkResults.FromExperiment(experiment)
    return TextResultsReport(results, email, experiment)

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    table = [['', '']]
    columns = [
        Column(LiteralResult(iteration=0), Format(), 'Status'),
        Column(LiteralResult(iteration=1), Format(), 'Failing Reason')
    ]

    for benchmark_run in self.experiment.benchmark_runs:
      status = [
          benchmark_run.name,
          [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
      ]
      table.append(status)
    cell_table = TableFormatter(table, columns).GetCellTable('status')
    return [cell_table]

  def GetTotalWaitCooldownTime(self):
    """Get cooldown wait time in seconds from experiment benchmark runs.

    Returns:
      Dictionary {'dut': int(wait_time_in_seconds)}
    """
    waittime_dict = {}
    for dut in self.experiment.machine_manager.GetMachines():
      waittime_dict[dut.name] = dut.GetCooldownWaitTime()
    return waittime_dict

  def GetReport(self):
    """Generate the report for email and console."""
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    experiment = self.experiment

    sections = []
    if experiment is not None:
      title_contents = "Results report for '%s'" % (experiment.name,)
    else:
      title_contents = 'Results report'
    sections.append(self._MakeTitle(title_contents))

    if not self.benchmark_results.cwp_dso:
      summary_table = _PrintTable(self.GetSummaryTables(), output_type)
    else:
      summary_table = _PrintTable(
          self.GetSummaryTables(summary_type='samples'), output_type)
    sections.append(self._MakeSection('Summary', summary_table))

    if experiment is not None:
      table = _PrintTable(self.GetStatusTable(), output_type)
      sections.append(self._MakeSection('Benchmark Run Status', table))

    if not self.benchmark_results.cwp_dso:
      perf_table = _PrintTable(
          self.GetSummaryTables(summary_type='perf'), output_type)
      sections.append(self._MakeSection('Perf Data', perf_table))

    if experiment is not None:
      experiment_file = experiment.experiment_file
      sections.append(self._MakeSection('Experiment File', experiment_file))

      cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
      sections.append(self._MakeSection('CPUInfo', cpu_info))

      totaltime = (time.time() -
                   experiment.start_time) if experiment.start_time else 0
      totaltime_str = 'Total experiment time:\n%d min' % (totaltime // 60)
      cooldown_waittime_list = ['Cooldown wait time:']
      # When running an experiment on multiple DUTs, the cooldown wait time may
      # vary between devices. In addition, the combined time may exceed the
      # total experiment time, which looks odd but is expected. For that
      # reason, print the cooldown time per DUT.
      for dut, waittime in sorted(self.GetTotalWaitCooldownTime().items()):
        cooldown_waittime_list.append('DUT %s: %d min' % (dut, waittime // 60))
      cooldown_waittime_str = '\n'.join(cooldown_waittime_list)
      sections.append(
          self._MakeSection('Duration',
                            '\n\n'.join([totaltime_str,
                                         cooldown_waittime_str])))

    return '\n'.join(sections)

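
# A rough usage sketch (added for illustration; not in the original module):
# build a BenchmarkResults by hand and render a console report. The keyvals
# below are made up, and rendering relies on cros_utils.tabulator behaving as
# the classes above expect, so treat this as a sketch rather than a test.
def _ExampleTextReport():
  results = BenchmarkResults(
      label_names=['image1', 'image2'],
      benchmark_names_and_iterations=[('octane', 1)],
      run_keyvals={'octane': [[{'retval': 0, 'score': 1200.0}],
                              [{'retval': 0, 'score': 1250.0}]]})
  print(TextResultsReport(results).GetReport())
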

def _GetHTMLCharts(label_names, test_results):
  charts = []
  for item, runs in test_results.items():
    # Fun fact: label_names is actually *entirely* useless as a param, since we
    # never add headers. We still need to pass it anyway.
    table = TableGenerator(runs, label_names).GetTable()
    columns = [
        Column(AmeanResult(), Format()),
        Column(MinResult(), Format()),
        Column(MaxResult(), Format())
    ]
    tf = TableFormatter(table, columns)
    data_table = tf.GetCellTable('full', headers=False)

    for cur_row_data in data_table:
      test_key = cur_row_data[0].string_value
      title = '{0}: {1}'.format(item, test_key.replace('/', ''))
      chart = ColumnChart(title, 300, 200)
      chart.AddColumn('Label', 'string')
      chart.AddColumn('Average', 'number')
      chart.AddColumn('Min', 'number')
      chart.AddColumn('Max', 'number')
      chart.AddSeries('Min', 'line', 'black')
      chart.AddSeries('Max', 'line', 'black')
      cur_index = 1
      for label in label_names:
        chart.AddRow([
            label, cur_row_data[cur_index].value,
            cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
        ])
        if isinstance(cur_row_data[cur_index].value, str):
          chart = None
          break
        cur_index += 3
      if chart:
        charts.append(chart)
  return charts


class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  def __init__(self, benchmark_results, experiment=None):
    super(HTMLResultsReport, self).__init__(benchmark_results)
    self.experiment = experiment

  @staticmethod
  def FromExperiment(experiment):
    return HTMLResultsReport(
        BenchmarkResults.FromExperiment(experiment), experiment=experiment)

  def GetReport(self):
    label_names = self.benchmark_results.label_names
    test_results = self.benchmark_results.run_keyvals
    charts = _GetHTMLCharts(label_names, test_results)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    if not self.benchmark_results.cwp_dso:
      summary_table = self.GetSummaryTables()
      perf_table = self.GetSummaryTables(summary_type='perf')
    else:
      summary_table = self.GetSummaryTables(summary_type='samples')
      perf_table = None
    full_table = self.GetFullTables()

    experiment_file = ''
    if self.experiment is not None:
      experiment_file = self.experiment.experiment_file
    # Use kwargs for code readability, and so that testing is a bit easier.
    return templates.GenerateHTMLPage(
        perf_table=perf_table,
        chart_js=chart_javascript,
        summary_table=summary_table,
        print_table=_PrintTable,
        chart_divs=chart_divs,
        full_table=full_table,
        experiment_file=experiment_file)


def ParseStandardPerfReport(report_data):
  """Parses the output of `perf report`.

  It'll parse the following:
  {{garbage}}
  # Samples: 1234M of event 'foo'

  1.23% command shared_object location function::name

  1.22% command shared_object location function2::name

  # Samples: 999K of event 'bar'

  0.23% command shared_object location function3::name
  {{etc.}}

  Into:
    {'foo': {'function::name': 1.23, 'function2::name': 1.22},
     'bar': {'function3::name': 0.23, etc.}}
  """
  # This function fails silently if it's handed a string (as opposed to a
  # list of lines). So, auto-split if we do happen to get a string.
  if isinstance(report_data, str):
    report_data = report_data.splitlines()
  # When switching to Python 3, catch the case where bytes are passed.
  elif isinstance(report_data, bytes):
    raise TypeError()

  # Samples: N{K,M,G} of event 'event-name'
  samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")

  # We expect lines like:
  # N.NN%  samples  command  shared_object  [location] symbol
  #
  # Note that we're looking at stripped lines, so there is no space at the
  # start.
  perf_regex = re.compile(r'^(\d+(?:.\d*)?)%'  # N.NN%
                          r'\s*\d+'  # samples count (ignored)
                          r'\s*\S+'  # command (ignored)
                          r'\s*\S+'  # shared_object (ignored)
                          r'\s*\[.\]'  # location (ignored)
                          r'\s*(\S.+)'  # function
                         )

  stripped_lines = (l.strip() for l in report_data)
  nonempty_lines = (l for l in stripped_lines if l)
  # Ignore all lines before we see samples_regex
  interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
                                          nonempty_lines)

  first_sample_line = next(interesting_lines, None)
  # Went through the entire file without finding a 'samples' header. Quit.
  if first_sample_line is None:
    return {}

  sample_name = samples_regex.match(first_sample_line).group(1)
  current_result = {}
  results = {sample_name: current_result}
  for line in interesting_lines:
    samples_match = samples_regex.match(line)
    if samples_match:
      sample_name = samples_match.group(1)
      current_result = {}
      results[sample_name] = current_result
      continue

    match = perf_regex.match(line)
    if not match:
      continue
    percentage_str, func_name = match.groups()
    try:
      percentage = float(percentage_str)
    except ValueError:
      # Couldn't parse it; try to be "resilient".
      continue
    current_result[func_name] = percentage
  return results

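
# A tiny, self-contained illustration (added; not in the original module) of
# the format ParseStandardPerfReport accepts. The sample lines include the
# samples count right after the percentage, as perf_regex above requires.
def _ExampleParseStandardPerfReport():
  report = [
      "# Samples: 10K of event 'cycles'",
      '12.34%  567  chrome  libfoo.so  [.] foo::bar',
      "# Samples: 2K of event 'instructions'",
      '3.00%  42  chrome  libbar.so  [.] baz',
  ]
  print(ParseStandardPerfReport(report))
  # {'cycles': {'foo::bar': 12.34}, 'instructions': {'baz': 3.0}}
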

def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
                              benchmark_iteration):
  """Reads a perf report for the given benchmark. Returns {} on failure.

  The result should be a map of maps; it should look like:
  {perf_event_name: {function_name: pct_time_spent}}, e.g.
  {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
  """
  raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
  dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
  file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
  try:
    with open(file_name) as in_file:
      return ParseStandardPerfReport(in_file)
  except IOError:
    # Yes, we swallow any IO-related errors.
    return {}

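
# A short, hypothetical example (added for illustration) of the directory name
# _ReadExperimentPerfReport derives: label + benchmark + 1-based iteration,
# with non-alphanumeric characters stripped.
def _ExamplePerfReportPath():
  # 'my-label' + 'octane' + '1' -> 'my-labeloctane1' -> 'mylabeloctane1'
  print(_ReadExperimentPerfReport('/tmp/results', 'my-label', 'octane', 0))
  # Reads /tmp/results/mylabeloctane1/perf.data.report.0 and prints {} if the
  # file does not exist.
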

# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
  """Converts an experiment to keyvals."""
  return OrganizeResults(
      experiment.benchmark_runs, experiment.labels, json_report=for_json_report)


class BenchmarkResults(object):
  """The minimum set of fields that any ResultsReport will take."""

  def __init__(self,
               label_names,
               benchmark_names_and_iterations,
               run_keyvals,
               ignore_min_max=False,
               read_perf_report=None,
               cwp_dso=None,
               weights=None):
    if read_perf_report is None:

      def _NoPerfReport(*_args, **_kwargs):
        return {}

      read_perf_report = _NoPerfReport

    self.label_names = label_names
    self.benchmark_names_and_iterations = benchmark_names_and_iterations
    self.iter_counts = dict(benchmark_names_and_iterations)
    self.run_keyvals = run_keyvals
    self.ignore_min_max = ignore_min_max
    self.read_perf_report = read_perf_report
    self.cwp_dso = cwp_dso
    self.weights = dict(weights) if weights else None

  @staticmethod
  def FromExperiment(experiment, for_json_report=False):
    label_names = [label.name for label in experiment.labels]
    benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
                                      for benchmark in experiment.benchmarks]
    run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
    ignore_min_max = experiment.ignore_min_max
    read_perf_report = functools.partial(_ReadExperimentPerfReport,
                                         experiment.results_directory)
    cwp_dso = experiment.cwp_dso
    weights = [(benchmark.name, benchmark.weight)
               for benchmark in experiment.benchmarks]
    return BenchmarkResults(label_names, benchmark_names_and_iterations,
                            run_keyvals, ignore_min_max, read_perf_report,
                            cwp_dso, weights)

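
# A minimal sketch (added, not original) of constructing BenchmarkResults
# directly, without an Experiment. The keyvals are made up; each benchmark
# maps to one list per label, holding one keyval dict per iteration. An
# instance built like this can be handed to the *ResultsReport classes in
# this module.
def _ExampleBenchmarkResults():
  return BenchmarkResults(
      label_names=['vanilla', 'patched'],
      benchmark_names_and_iterations=[('octane', 2)],
      run_keyvals={
          'octane': [[{'retval': 0, 'score': 1200.0},
                      {'retval': 0, 'score': 1210.0}],
                     [{'retval': 0, 'score': 1250.0},
                      {'retval': 0, 'score': 1245.0}]]
      })
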

def _GetElemByName(name, from_list):
  """Gets an element from the given list by its name field.

  Raises an error if it doesn't find exactly one match.
  """
  elems = [e for e in from_list if e.name == name]
  if len(elems) != 1:
    raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
  return elems[0]


def _Unlist(l):
  """If l is a list, extracts the first element of l. Otherwise, returns l."""
  return l[0] if isinstance(l, list) else l

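
# A quick illustration (added; not in the original module) of why _Unlist is
# sometimes applied twice below: newer telemetry results can wrap a value in a
# list of lists.
def _ExampleUnlist():
  print(_Unlist(3.0))               # 3.0
  print(_Unlist([3.0]))             # 3.0
  print(_Unlist(_Unlist([[3.0]])))  # 3.0
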

class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports for experiments."""

  def __init__(self,
               benchmark_results,
               benchmark_date=None,
               benchmark_time=None,
               experiment=None,
               json_args=None):
    """Construct a JSONResultsReport.

    json_args is the dict of arguments we pass to json.dumps in GetReport().
    """
    super(JSONResultsReport, self).__init__(benchmark_results)

    defaults = TelemetryDefaults()
    defaults.ReadDefaultsFile()
    summary_field_defaults = defaults.GetDefault()
    if summary_field_defaults is None:
      summary_field_defaults = {}
    self.summary_field_defaults = summary_field_defaults

    if json_args is None:
      json_args = {}
    self.json_args = json_args

    self.experiment = experiment
    if not benchmark_date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      benchmark_date, benchmark_time = timestamp.split(' ')
    self.date = benchmark_date
    self.time = benchmark_time

  @staticmethod
  def FromExperiment(experiment,
                     benchmark_date=None,
                     benchmark_time=None,
                     json_args=None):
    benchmark_results = BenchmarkResults.FromExperiment(
        experiment, for_json_report=True)
    return JSONResultsReport(benchmark_results, benchmark_date, benchmark_time,
                             experiment, json_args)

  def GetReportObjectIgnoringExperiment(self):
    """Gets the JSON report object specifically for the output data.

    Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
    """
    benchmark_results = self.benchmark_results
    label_names = benchmark_results.label_names
    summary_field_defaults = self.summary_field_defaults
    final_results = []
    for test, test_results in benchmark_results.run_keyvals.items():
      for label_name, label_results in zip(label_names, test_results):
        for iter_results in label_results:
          passed = iter_results.get('retval') == 0
          json_results = {
              'date': self.date,
              'time': self.time,
              'label': label_name,
              'test_name': test,
              'pass': passed,
          }
          final_results.append(json_results)

          if not passed:
            continue

          # Get overall results.
          summary_fields = summary_field_defaults.get(test)
          if summary_fields is not None:
            value = []
            json_results['overall_result'] = value
            for f in summary_fields:
              v = iter_results.get(f)
              if v is None:
                continue
              # New telemetry results format: sometimes we get a list of lists
              # now.
              v = _Unlist(_Unlist(v))
              value.append((f, float(v)))

          # Get detailed results.
          detail_results = {}
          json_results['detailed_results'] = detail_results
          for k, v in iter_results.items():
            if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
              continue

            v = _Unlist(v)
            if 'machine' in k:
              json_results[k] = v
            elif v is not None:
              if isinstance(v, list):
                detail_results[k] = [float(d) for d in v]
              else:
                detail_results[k] = float(v)
    return final_results

  def GetReportObject(self):
    """Generate the JSON report, returning it as a python object."""
    report_list = self.GetReportObjectIgnoringExperiment()
    if self.experiment is not None:
      self._AddExperimentSpecificFields(report_list)
    return report_list

  def _AddExperimentSpecificFields(self, report_list):
    """Add experiment-specific data to the JSON report."""
    board = self.experiment.labels[0].board
    manager = self.experiment.machine_manager
    for report in report_list:
      label_name = report['label']
      label = _GetElemByName(label_name, self.experiment.labels)

      img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
      ver, img = ParseChromeosImage(img_path)

      report.update({
          'board': board,
          'chromeos_image': img,
          'chromeos_version': ver,
          'chrome_version': label.chrome_version,
          'compiler': label.compiler
      })

      if not report['pass']:
        continue
      if 'machine_checksum' not in report:
        report['machine_checksum'] = manager.machine_checksum[label_name]
      if 'machine_string' not in report:
        report['machine_string'] = manager.machine_checksum_string[label_name]

  def GetReport(self):
    """Dump the results of self.GetReportObject() to a string as JSON."""
    # This exists for consistency with the other GetReport methods.
    # Specifically, they all return strings, so it's a bit awkward if the JSON
    # results reporter returns an object.
    return json.dumps(self.GetReportObject(), **self.json_args)

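
# An illustrative sketch (added; not part of the original module) of the shape
# of one record produced by GetReportObjectIgnoringExperiment above. All
# values are made up; 'overall_result' only appears when the telemetry
# defaults file lists summary fields for the test, and experiment fields such
# as 'board' are added later by GetReportObject.
def _ExampleJsonRecord():
  return {
      'date': '2013-01-01',
      'time': '12:00:00',
      'label': 'vanilla',
      'test_name': 'octane',
      'pass': True,
      'detailed_results': {'score': 1200.0},
  }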