# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
from __future__ import print_function

import datetime
import functools
import itertools
import json
import os
import re

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import OrganizeResults

import results_report_templates as templates


def ParseChromeosImage(chromeos_image):
41  """Parse the chromeos_image string for the image and version.
42
43  The chromeos_image string will probably be in one of two formats:
44  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
45     chromiumos_test_image.bin
46  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
47      chromiumos_test_image.bin
48
49  We parse these strings to find the 'chromeos_version' to store in the
50  json archive (without the .datatime bit in the first case); and also
51  the 'chromeos_image', which would be all of the first case, but only the
52  part after '/chroot/tmp' in the second case.
53
54  Args:
55      chromeos_image: string containing the path to the chromeos_image that
56      crosperf used for the test.
57
58  Returns:
59      version, image: The results of parsing the input string, as explained
60      above.
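
  Example (hypothetical local-build path):
      ParseChromeosImage('/foo/src/build/images/lumpy/'
                         'R45-2345.0.0.2015_06_01_1234/chromiumos_test_image.bin')
      returns ('R45-2345.0.0', '/foo/src/build/images/lumpy/'
               'R45-2345.0.0.2015_06_01_1234/chromiumos_test_image.bin')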
61  """
62  # Find the Chromeos Version, e.g. R45-2345.0.0.....
63  # chromeos_image should have been something like:
64  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
65  if chromeos_image.endswith('/chromiumos_test_image.bin'):
66    full_version = chromeos_image.split('/')[-2]
67    # Strip the date and time off of local builds (which have the format
68    # "R43-2345.0.0.date-and-time").
69    version, _ = os.path.splitext(full_version)
70  else:
71    version = ''
72
73  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
74  # it's an official image that got downloaded, so chop off the download path
75  # to make the official image name more clear.
76  official_image_path = '/chroot/tmp'
77  if official_image_path in chromeos_image:
78    image = chromeos_image.split(official_image_path, 1)[1]
79  else:
80    image = chromeos_image
81  return version, image


def _AppendUntilLengthIs(gen, the_list, target_len):
  """Appends to `the_list` until it is `target_len` elements long.

  Uses `gen` to generate elements.
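
  For example (a sketch): _AppendUntilLengthIs(dict, [], 2) returns [{}, {}].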
88  """
89  the_list.extend(gen() for _ in xrange(target_len - len(the_list)))
90  return the_list
91
92
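# For example (a sketch), with event_threshold=1.0 the report
# {'cycles': {'main': 5.0, 'memset': 0.5}} filters down to
# {'cycles': {'main': 5.0}}.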
def _FilterPerfReport(event_threshold, report):
  """Filters out entries with `< event_threshold` percent in a perf report."""

  def filter_dict(m):
    return {
        fn_name: pct
        for fn_name, pct in m.iteritems() if pct >= event_threshold
    }

  return {event: filter_dict(m) for event, m in report.iteritems()}


class _PerfTable(object):
  """Generates dicts from a perf table.

  Dicts look like:
  {'benchmark_name': {'perf_event_name': [LabelData]}}
  where LabelData is a list of perf dicts, each perf dict coming from the same
  label.
  Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
  percentage of time spent in function_name).
  """

  def __init__(self,
               benchmark_names_and_iterations,
               label_names,
               read_perf_report,
               event_threshold=None):
    """Constructor.

    read_perf_report is a function that takes a label name, benchmark name,
    and benchmark iteration, and returns a dictionary describing the perf
    output for that given run.
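
    For example (hypothetical shape):
        read_perf_report('label1', 'octane', 0)
        might return {'cycles': {'main': 10.0, 'memset': 0.5}}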
    """
    self.event_threshold = event_threshold
    self._label_indices = {name: i for i, name in enumerate(label_names)}
    self.perf_data = {}
    for label in label_names:
      for bench_name, bench_iterations in benchmark_names_and_iterations:
        for i in xrange(bench_iterations):
          report = read_perf_report(label, bench_name, i)
          self._ProcessPerfReport(report, label, bench_name, i)

  def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
    """Add the data from one run to the dict."""
    perf_of_run = perf_report
    if self.event_threshold is not None:
      perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
    if benchmark_name not in self.perf_data:
      self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
    ben_data = self.perf_data[benchmark_name]
    label_index = self._label_indices[label]
    for event in ben_data:
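      # Pad ben_data[event] with a fresh list per label, and that label's
      # list with a fresh dict per iteration, so the indexing below is always
      # in range.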
      _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
      data_for_label = ben_data[event][label_index]
      _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
      data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}


def _GetResultsTableHeader(ben_name, iterations):
  benchmark_info = ('Benchmark:  {0};  Iterations: {1}'.format(
      ben_name, iterations))
  cell = Cell()
  cell.string_value = benchmark_info
  cell.header = True
  return [[cell]]


def _ParseColumn(columns, iteration):
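  """Expands RawResult columns into per-iteration columns.

  A sketch: with iteration=2, each RawResult column is replaced by two
  LiteralResult columns headed '1' and '2'; all other columns pass through
  unchanged.
  """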
  new_column = []
  for column in columns:
    if column.result.__class__.__name__ != 'RawResult':
      new_column.append(column)
    else:
      new_column.extend(
          Column(LiteralResult(i), Format(), str(i + 1))
          for i in xrange(iteration))
  return new_column


def _GetTables(benchmark_results, columns, table_type):
  iter_counts = benchmark_results.iter_counts
  result = benchmark_results.run_keyvals
  tables = []
  for bench_name, runs in result.iteritems():
    iterations = iter_counts[bench_name]
    ben_table = _GetResultsTableHeader(bench_name, iterations)

    all_runs_empty = all(not d for label in runs for d in label)
    if all_runs_empty:
      cell = Cell()
      cell.string_value = ('This benchmark contains no result.'
                           ' Is the benchmark name valid?')
      cell_table = [[cell]]
    else:
      table = TableGenerator(runs, benchmark_results.label_names).GetTable()
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      cell_table = tf.GetCellTable(table_type)
    tables.append(ben_table)
    tables.append(cell_table)
  return tables


def _GetPerfTables(benchmark_results, columns, table_type):
  p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
                       benchmark_results.label_names,
                       benchmark_results.read_perf_report)

  tables = []
  for benchmark in p_table.perf_data:
    iterations = benchmark_results.iter_counts[benchmark]
    ben_table = _GetResultsTableHeader(benchmark, iterations)
    tables.append(ben_table)
    benchmark_data = p_table.perf_data[benchmark]
    for event in benchmark_data:
      tg = TableGenerator(
          benchmark_data[event],
          benchmark_results.label_names,
          sort=TableGenerator.SORT_BY_VALUES_DESC)
      table = tg.GetTable(ResultsReport.PERF_ROWS)
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      tf.GenerateCellTable(table_type)
      tf.AddColumnName()
      tf.AddLabelName()
      tf.AddHeader(str(event))
      table = tf.GetCellTable(table_type, headers=False)
      tables.append(table)
  return tables


class ResultsReport(object):
  """Class to handle the report format."""
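  # A minimal usage sketch (hypothetical data, shaped as
  # results_organizer.OrganizeResults would produce):
  #   results = BenchmarkResults(['label1'], [('bench', 1)],
  #                              {'bench': [[{'retval': 0, 'score': 1.0}]]})
  #   print(TextResultsReport(results).GetReport())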
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, results):
    self.benchmark_results = results

  def _GetTablesWithColumns(self, columns, table_type, perf):
    get_tables = _GetPerfTables if perf else _GetTables
    return get_tables(self.benchmark_results, columns, table_type)

  def GetFullTables(self, perf=False):
    columns = [
        Column(RawResult(), Format()),
        Column(MinResult(), Format()),
        Column(MaxResult(), Format()),
        Column(AmeanResult(), Format()),
        Column(StdResult(), Format(), 'StdDev'),
        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(), PValueFormat(), 'p-value')
    ]
    return self._GetTablesWithColumns(columns, 'full', perf)

  def GetSummaryTables(self, perf=False):
    columns = [
        Column(AmeanResult(), Format()),
        Column(StdResult(), Format(), 'StdDev'),
        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(), PValueFormat(), 'p-value')
    ]
    return self._GetTablesWithColumns(columns, 'summary', perf)


def _PrintTable(tables, out_to):
  # tables may be None.
  if not tables:
    return ''

  if out_to == 'HTML':
    out_type = TablePrinter.HTML
  elif out_to == 'PLAIN':
    out_type = TablePrinter.PLAIN
  elif out_to == 'CONSOLE':
    out_type = TablePrinter.CONSOLE
  elif out_to == 'TSV':
    out_type = TablePrinter.TSV
  elif out_to == 'EMAIL':
    out_type = TablePrinter.EMAIL
  else:
    raise ValueError('Invalid out_to value: %s' % (out_to,))

  printers = (TablePrinter(table, out_type) for table in tables)
  return ''.join(printer.Print() for printer in printers)


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""

  H1_STR = '==========================================='
  H2_STR = '-------------------------------------------'

  def __init__(self, results, email=False, experiment=None):
    super(TextResultsReport, self).__init__(results)
    self.email = email
    self.experiment = experiment

  @staticmethod
  def _MakeTitle(title):
    header_line = TextResultsReport.H1_STR
    # '' at the end gives one newline.
    return '\n'.join([header_line, title, header_line, ''])

  @staticmethod
  def _MakeSection(title, body):
    header_line = TextResultsReport.H2_STR
    # '\n' at the end gives us two newlines.
    return '\n'.join([header_line, title, header_line, body, '\n'])

  @staticmethod
  def FromExperiment(experiment, email=False):
    results = BenchmarkResults.FromExperiment(experiment)
    return TextResultsReport(results, email, experiment)

  def GetStatusTable(self):
    """Generate the status table using the tabulator."""
    table = [['', '']]
    columns = [
        Column(LiteralResult(iteration=0), Format(), 'Status'),
        Column(LiteralResult(iteration=1), Format(), 'Failing Reason')
    ]

    for benchmark_run in self.experiment.benchmark_runs:
      status = [
          benchmark_run.name,
          [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
      ]
      table.append(status)
    cell_table = TableFormatter(table, columns).GetCellTable('status')
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    experiment = self.experiment

    sections = []
    if experiment is not None:
      title_contents = "Results report for '%s'" % (experiment.name,)
    else:
      title_contents = 'Results report'
    sections.append(self._MakeTitle(title_contents))

    summary_table = _PrintTable(self.GetSummaryTables(perf=False), output_type)
    sections.append(self._MakeSection('Summary', summary_table))

    if experiment is not None:
      table = _PrintTable(self.GetStatusTable(), output_type)
      sections.append(self._MakeSection('Benchmark Run Status', table))

    perf_table = _PrintTable(self.GetSummaryTables(perf=True), output_type)
    if perf_table:
      sections.append(self._MakeSection('Perf Data', perf_table))

    if experiment is not None:
      experiment_file = experiment.experiment_file
      sections.append(self._MakeSection('Experiment File', experiment_file))

      cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
      sections.append(self._MakeSection('CPUInfo', cpu_info))

    return '\n'.join(sections)


def _GetHTMLCharts(label_names, test_results):
  charts = []
  for item, runs in test_results.iteritems():
    # Fun fact: label_names is actually *entirely* useless as a param, since we
    # never add headers. We still need to pass it anyway.
    table = TableGenerator(runs, label_names).GetTable()
    columns = [
        Column(AmeanResult(), Format()),
        Column(MinResult(), Format()),
        Column(MaxResult(), Format())
    ]
    tf = TableFormatter(table, columns)
    data_table = tf.GetCellTable('full', headers=False)

    for cur_row_data in data_table:
      test_key = cur_row_data[0].string_value
      title = '{0}: {1}'.format(item, test_key.replace('/', ''))
      chart = ColumnChart(title, 300, 200)
      chart.AddColumn('Label', 'string')
      chart.AddColumn('Average', 'number')
      chart.AddColumn('Min', 'number')
      chart.AddColumn('Max', 'number')
      chart.AddSeries('Min', 'line', 'black')
      chart.AddSeries('Max', 'line', 'black')
      cur_index = 1
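      # Each label contributes three consecutive cells (Average, Min, Max) to
      # the row, hence the stride of 3 below.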
      for label in label_names:
        chart.AddRow([
            label, cur_row_data[cur_index].value,
            cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
        ])
        if isinstance(cur_row_data[cur_index].value, str):
          chart = None
          break
        cur_index += 3
      if chart:
        charts.append(chart)
  return charts


class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  def __init__(self, benchmark_results, experiment=None):
    super(HTMLResultsReport, self).__init__(benchmark_results)
    self.experiment = experiment

  @staticmethod
  def FromExperiment(experiment):
    return HTMLResultsReport(
        BenchmarkResults.FromExperiment(experiment), experiment=experiment)

  def GetReport(self):
    label_names = self.benchmark_results.label_names
    test_results = self.benchmark_results.run_keyvals
    charts = _GetHTMLCharts(label_names, test_results)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    experiment_file = ''
    if self.experiment is not None:
      experiment_file = self.experiment.experiment_file
    # Use kwargs for sanity, and so that testing is a bit easier.
    return templates.GenerateHTMLPage(
        perf_table=perf_table,
        chart_js=chart_javascript,
        summary_table=summary_table,
        print_table=_PrintTable,
        chart_divs=chart_divs,
        full_table=full_table,
        experiment_file=experiment_file)


def ParseStandardPerfReport(report_data):
  """Parses the output of `perf report`.

  It'll parse the following:
  {{garbage}}
  # Samples: 1234M of event 'foo'

  1.23% command shared_object location function::name

  1.22% command shared_object location function2::name

  # Samples: 999K of event 'bar'

  0.23% command shared_object location function3::name
  {{etc.}}

  Into:
    {'foo': {'function::name': 1.23, 'function2::name': 1.22},
     'bar': {'function3::name': 0.23, etc.}}
  """
  # This function fails silently if it's handed a string (as opposed to a
  # list of lines), so auto-split if we do happen to get a string.
  if isinstance(report_data, basestring):
    report_data = report_data.splitlines()

  # Samples: N{K,M,G} of event 'event-name'
  samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")

  # We expect lines like:
  # N.NN%  samples  command  shared_object  [location] symbol
  #
  # Note that we're looking at stripped lines, so there is no space at the
  # start.
  perf_regex = re.compile(r'^(\d+(?:\.\d*)?)%'  # N.NN%
                          r'\s*\d+'  # samples count (ignored)
                          r'\s*\S+'  # command (ignored)
                          r'\s*\S+'  # shared_object (ignored)
                          r'\s*\[.\]'  # location (ignored)
                          r'\s*(\S.+)'  # function
                         )

  stripped_lines = (l.strip() for l in report_data)
  nonempty_lines = (l for l in stripped_lines if l)
  # Ignore all lines before we see samples_regex.
  interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
                                          nonempty_lines)

  first_sample_line = next(interesting_lines, None)
  # Went through the entire file without finding a 'samples' header. Quit.
  if first_sample_line is None:
    return {}

  sample_name = samples_regex.match(first_sample_line).group(1)
  current_result = {}
  results = {sample_name: current_result}
  for line in interesting_lines:
    samples_match = samples_regex.match(line)
    if samples_match:
      sample_name = samples_match.group(1)
      current_result = {}
      results[sample_name] = current_result
      continue

    match = perf_regex.match(line)
    if not match:
      continue
    percentage_str, func_name = match.groups()
    try:
      percentage = float(percentage_str)
    except ValueError:
      # Couldn't parse it; try to be "resilient".
      continue
    current_result[func_name] = percentage
  return results


def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
                              benchmark_iteration):
  """Reads a perf report for the given benchmark. Returns {} on failure.

  The result should be a map of maps; it should look like:
  {perf_event_name: {function_name: pct_time_spent}}, e.g.
  {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
  """
  raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
  dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
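  # For example (hypothetical names): label 'my-label', benchmark 'octane',
  # iteration 0 yields raw_dir_name 'my-labeloctane1' and dir_name
  # 'mylabeloctane1'.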
  file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
  try:
    with open(file_name) as in_file:
      return ParseStandardPerfReport(in_file)
  except IOError:
    # Yes, we swallow any IO-related errors.
    return {}


# Split out so that testing (specifically: mocking) is easier.
def _ExperimentToKeyvals(experiment, for_json_report):
  """Converts an experiment to keyvals."""
  return OrganizeResults(
      experiment.benchmark_runs, experiment.labels, json_report=for_json_report)


class BenchmarkResults(object):
  """The minimum set of fields that any ResultsReport will take."""

  def __init__(self,
               label_names,
               benchmark_names_and_iterations,
               run_keyvals,
               read_perf_report=None):
    if read_perf_report is None:

      def _NoPerfReport(*_args, **_kwargs):
        return {}

      read_perf_report = _NoPerfReport

    self.label_names = label_names
    self.benchmark_names_and_iterations = benchmark_names_and_iterations
    self.iter_counts = dict(benchmark_names_and_iterations)
    self.run_keyvals = run_keyvals
    self.read_perf_report = read_perf_report

  @staticmethod
  def FromExperiment(experiment, for_json_report=False):
    label_names = [label.name for label in experiment.labels]
    benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
                                      for benchmark in experiment.benchmarks]
    run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
    read_perf_report = functools.partial(_ReadExperimentPerfReport,
                                         experiment.results_directory)
    return BenchmarkResults(label_names, benchmark_names_and_iterations,
                            run_keyvals, read_perf_report)


def _GetElemByName(name, from_list):
  """Gets an element from the given list by its name field.

  Raises an error if it doesn't find exactly one match.
  """
  elems = [e for e in from_list if e.name == name]
  if len(elems) != 1:
    raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
  return elems[0]


def _Unlist(l):
  """If l is a list, extracts the first element of l. Otherwise, returns l."""
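  # For example: _Unlist([1.0]) -> 1.0, _Unlist(1.0) -> 1.0; doubly-nested
  # values need two applications: _Unlist(_Unlist([[1.0]])) -> 1.0.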
  return l[0] if isinstance(l, list) else l


class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports for experiments."""

  def __init__(self,
               benchmark_results,
               date=None,
               time=None,
               experiment=None,
               json_args=None):
    """Construct a JSONResultsReport.

    json_args is the dict of arguments we pass to json.dumps in GetReport().
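
    For example (hypothetical): passing json_args={'indent': 2} makes
    json.dumps pretty-print the report.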
    """
    super(JSONResultsReport, self).__init__(benchmark_results)

    defaults = TelemetryDefaults()
    defaults.ReadDefaultsFile()
    summary_field_defaults = defaults.GetDefault()
    if summary_field_defaults is None:
      summary_field_defaults = {}
    self.summary_field_defaults = summary_field_defaults

    if json_args is None:
      json_args = {}
    self.json_args = json_args

    self.experiment = experiment
    if not date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
    self.date = date
    self.time = time

  @staticmethod
  def FromExperiment(experiment, date=None, time=None, json_args=None):
    benchmark_results = BenchmarkResults.FromExperiment(
        experiment, for_json_report=True)
    return JSONResultsReport(benchmark_results, date, time, experiment,
                             json_args)

  def GetReportObjectIgnoringExperiment(self):
    """Gets the JSON report object specifically for the output data.

    Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
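
    Each list entry looks roughly like (hypothetical values):
        {'date': '2013-06-01', 'time': '12:00:00', 'label': 'label1',
         'test_name': 'octane', 'pass': True, ...}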
    """
    benchmark_results = self.benchmark_results
    label_names = benchmark_results.label_names
    summary_field_defaults = self.summary_field_defaults
    final_results = []
    for test, test_results in benchmark_results.run_keyvals.iteritems():
      for label_name, label_results in zip(label_names, test_results):
        for iter_results in label_results:
          passed = iter_results.get('retval') == 0
          json_results = {
              'date': self.date,
              'time': self.time,
              'label': label_name,
              'test_name': test,
              'pass': passed,
          }
          final_results.append(json_results)

          if not passed:
            continue

          # Get overall results.
          summary_fields = summary_field_defaults.get(test)
          if summary_fields is not None:
            value = []
            json_results['overall_result'] = value
            for f in summary_fields:
              v = iter_results.get(f)
              if v is None:
                continue
              # New telemetry results format: sometimes we get a list of lists
              # now.
              v = _Unlist(_Unlist(v))
              value.append((f, float(v)))

          # Get detailed results.
          detail_results = {}
          json_results['detailed_results'] = detail_results
          for k, v in iter_results.iteritems():
            if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
              continue

            v = _Unlist(v)
            if 'machine' in k:
              json_results[k] = v
            elif v is not None:
              if isinstance(v, list):
                detail_results[k] = [float(d) for d in v]
              else:
                detail_results[k] = float(v)
    return final_results

  def GetReportObject(self):
    """Generate the JSON report, returning it as a python object."""
    report_list = self.GetReportObjectIgnoringExperiment()
    if self.experiment is not None:
      self._AddExperimentSpecificFields(report_list)
    return report_list

  def _AddExperimentSpecificFields(self, report_list):
    """Add experiment-specific data to the JSON report."""
    board = self.experiment.labels[0].board
    manager = self.experiment.machine_manager
    for report in report_list:
      label_name = report['label']
      label = _GetElemByName(label_name, self.experiment.labels)

      img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
      ver, img = ParseChromeosImage(img_path)

      report.update({
          'board': board,
          'chromeos_image': img,
          'chromeos_version': ver,
          'chrome_version': label.chrome_version,
          'compiler': label.compiler
      })

      if not report['pass']:
        continue
      if 'machine_checksum' not in report:
        report['machine_checksum'] = manager.machine_checksum[label_name]
      if 'machine_string' not in report:
        report['machine_string'] = manager.machine_checksum_string[label_name]

  def GetReport(self):
    """Dump the results of self.GetReportObject() to a string as JSON."""
    # This exists for consistency with the other GetReport methods.
    # Specifically, they all return strings, so it's a bit awkward if the JSON
    # results reporter returns an object.
    return json.dumps(self.GetReportObject(), **self.json_args)