# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
from __future__ import print_function

import datetime
import functools
import itertools
import json
import os
import re

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import OrganizeResults

import results_report_templates as templates


def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case), and also
  the 'chromeos_image', which is the full path in the first case, but only
  the part after '/chroot/tmp' in the second case.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
      above.
  """
  # Find the ChromeOS version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    full_version = chromeos_image.split('/')[-2]
    # Strip the date and time off of local builds (which have the format
    # "R43-2345.0.0.date-and-time").
    version, _ = os.path.splitext(full_version)
  else:
    version = ''

  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name clearer.
  official_image_path = '/chroot/tmp'
  if official_image_path in chromeos_image:
    image = chromeos_image.split(official_image_path, 1)[1]
  else:
    image = chromeos_image
  return version, image
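# A worked example of the parsing above, with a hypothetical local-build path:
#   image_path = ('<chroot>/src/build/images/lumpy/'
#                 'R43-2345.0.0.2015_02_20_1232/chromiumos_test_image.bin')
#   ParseChromeosImage(image_path) == ('R43-2345.0.0', image_path)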


def _AppendUntilLengthIs(gen, the_list, target_len):
  """Appends to `the_list` until `the_list` is `target_len` elements long.

  Uses `gen` to generate elements.
  """
  the_list.extend(gen() for _ in xrange(target_len - len(the_list)))
  return the_list
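# For example, _AppendUntilLengthIs(dict, [], 3) returns [{}, {}, {}], while
# _AppendUntilLengthIs(list, [[1]], 3) pads the existing list to [[1], [], []].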


def _FilterPerfReport(event_threshold, report):
  """Filters out entries with `< event_threshold` percent in a perf report."""
  def filter_dict(m):
    return {fn_name: pct for fn_name, pct in m.iteritems()
            if pct >= event_threshold}
  return {event: filter_dict(m) for event, m in report.iteritems()}
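# A small illustration (made-up numbers), with event_threshold=1.0:
#   _FilterPerfReport(1.0, {'cycles': {'malloc': 10.0, 'memset': 0.4}})
#   == {'cycles': {'malloc': 10.0}}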


class _PerfTable(object):
  """Generates dicts from a perf table.

  Dicts look like:
  {'benchmark_name': {'perf_event_name': [LabelData]}}
  where LabelData is a list of perf dicts, each perf dict coming from the same
  label.
  Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
  percentage of time spent in function_name).
  """
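  # For instance, with two labels and one iteration each, perf_data might end
  # up as (hypothetical benchmark/event/function names):
  #   {'octane': {'cycles': [[{'malloc': 10.0}], [{'malloc': 9.5}]]}}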

  def __init__(self, benchmark_names_and_iterations, label_names,
               read_perf_report, event_threshold=None):
    """Constructor.

    read_perf_report is a function that takes a label name, benchmark name, and
    benchmark iteration, and returns a dictionary describing the perf output for
    that given run.
    """
    self.event_threshold = event_threshold
    self._label_indices = {name: i for i, name in enumerate(label_names)}
    self.perf_data = {}
    for label in label_names:
      for bench_name, bench_iterations in benchmark_names_and_iterations:
        for i in xrange(bench_iterations):
          report = read_perf_report(label, bench_name, i)
          self._ProcessPerfReport(report, label, bench_name, i)

  def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
    """Add the data from one run to the dict."""
    perf_of_run = perf_report
    if self.event_threshold is not None:
      perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
    if benchmark_name not in self.perf_data:
      self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
    ben_data = self.perf_data[benchmark_name]
    label_index = self._label_indices[label]
    for event in ben_data:
      _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
      data_for_label = ben_data[event][label_index]
      _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
      data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}


def _GetResultsTableHeader(ben_name, iterations):
  benchmark_info = ('Benchmark:  {0};  Iterations: {1}'
                    .format(ben_name, iterations))
  cell = Cell()
  cell.string_value = benchmark_info
  cell.header = True
  return [[cell]]


def _ParseColumn(columns, iteration):
  new_column = []
  for column in columns:
    if column.result.__class__.__name__ != 'RawResult':
      new_column.append(column)
    else:
      new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
                        for i in xrange(iteration))
  return new_column
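# As a sketch, a lone raw-result column expands into one literal column per
# iteration:
#   _ParseColumn([Column(RawResult(), Format())], 3)
#   -> [Column(LiteralResult(0), Format(), '1'),
#       Column(LiteralResult(1), Format(), '2'),
#       Column(LiteralResult(2), Format(), '3')]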


def _GetTables(benchmark_results, columns, table_type):
  iter_counts = benchmark_results.iter_counts
  result = benchmark_results.run_keyvals
  tables = []
  for bench_name, runs in result.iteritems():
    iterations = iter_counts[bench_name]
    ben_table = _GetResultsTableHeader(bench_name, iterations)

    all_runs_empty = all(not d for label in runs for d in label)
    if all_runs_empty:
      cell = Cell()
      cell.string_value = ('This benchmark contains no result.'
                           ' Is the benchmark name valid?')
      cell_table = [[cell]]
    else:
      table = TableGenerator(runs, benchmark_results.label_names).GetTable()
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      cell_table = tf.GetCellTable(table_type)
    tables.append(ben_table)
    tables.append(cell_table)
  return tables


def _GetPerfTables(benchmark_results, columns, table_type):
  p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
                       benchmark_results.label_names,
                       benchmark_results.read_perf_report)

  tables = []
  for benchmark in p_table.perf_data:
    iterations = benchmark_results.iter_counts[benchmark]
    ben_table = _GetResultsTableHeader(benchmark, iterations)
    tables.append(ben_table)
    benchmark_data = p_table.perf_data[benchmark]
    table = []
    for event in benchmark_data:
      tg = TableGenerator(benchmark_data[event],
                          benchmark_results.label_names,
                          sort=TableGenerator.SORT_BY_VALUES_DESC)
      table = tg.GetTable(ResultsReport.PERF_ROWS)
      parsed_columns = _ParseColumn(columns, iterations)
      tf = TableFormatter(table, parsed_columns)
      tf.GenerateCellTable(table_type)
      tf.AddColumnName()
      tf.AddLabelName()
      tf.AddHeader(str(event))
      table = tf.GetCellTable(table_type, headers=False)
      tables.append(table)
  return tables


class ResultsReport(object):
  """Class to handle the report format."""
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, results):
    self.benchmark_results = results

  def _GetTablesWithColumns(self, columns, table_type, perf):
    get_tables = _GetPerfTables if perf else _GetTables
    return get_tables(self.benchmark_results, columns, table_type)

  def GetFullTables(self, perf=False):
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    return self._GetTablesWithColumns(columns, 'full', perf)

  def GetSummaryTables(self, perf=False):
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    return self._GetTablesWithColumns(columns, 'summary', perf)


def _PrintTable(tables, out_to):
  # tables may be None.
  if not tables:
    return ''

  if out_to == 'HTML':
    out_type = TablePrinter.HTML
  elif out_to == 'PLAIN':
    out_type = TablePrinter.PLAIN
  elif out_to == 'CONSOLE':
    out_type = TablePrinter.CONSOLE
  elif out_to == 'TSV':
    out_type = TablePrinter.TSV
  elif out_to == 'EMAIL':
    out_type = TablePrinter.EMAIL
  else:
    raise ValueError('Invalid out_to value: %s' % (out_to,))

  printers = (TablePrinter(table, out_type) for table in tables)
  return ''.join(printer.Print() for printer in printers)


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""

  H1_STR = '==========================================='
  H2_STR = '-------------------------------------------'

  def __init__(self, results, email=False, experiment=None):
    super(TextResultsReport, self).__init__(results)
    self.email = email
    self.experiment = experiment

  @staticmethod
  def _MakeTitle(title):
    header_line = TextResultsReport.H1_STR
    # '' at the end gives one newline.
    return '\n'.join([header_line, title, header_line, ''])

  @staticmethod
  def _MakeSection(title, body):
    header_line = TextResultsReport.H2_STR
    # '\n' at the end gives us two newlines.
    return '\n'.join([header_line, title, header_line, body, '\n'])

  @staticmethod
  def FromExperiment(experiment, email=False):
    results = BenchmarkResults.FromExperiment(experiment)
    return TextResultsReport(results, email, experiment)

  def GetStatusTable(self):
    """Generate the status table using the tabulator."""
    table = [['', '']]
    columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
               Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]

    for benchmark_run in self.experiment.benchmark_runs:
      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
                                     benchmark_run.failure_reason]]
      table.append(status)
    cell_table = TableFormatter(table, columns).GetCellTable('status')
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    experiment = self.experiment

    sections = []
    if experiment is not None:
      title_contents = "Results report for '%s'" % (experiment.name,)
    else:
      title_contents = 'Results report'
    sections.append(self._MakeTitle(title_contents))

    summary_table = _PrintTable(self.GetSummaryTables(perf=False), output_type)
    sections.append(self._MakeSection('Summary', summary_table))

    if experiment is not None:
      table = _PrintTable(self.GetStatusTable(), output_type)
      sections.append(self._MakeSection('Benchmark Run Status', table))

    perf_table = _PrintTable(self.GetSummaryTables(perf=True), output_type)
    if perf_table:
      sections.append(self._MakeSection('Perf Data', perf_table))

    if experiment is not None:
      experiment_file = experiment.experiment_file
      sections.append(self._MakeSection('Experiment File', experiment_file))

      cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
      sections.append(self._MakeSection('CPUInfo', cpu_info))

    return '\n'.join(sections)


def _GetHTMLCharts(label_names, test_results):
  charts = []
  for item, runs in test_results.iteritems():
    # Fun fact: label_names is actually *entirely* useless as a param, since we
    # never add headers. We still need to pass it anyway.
    table = TableGenerator(runs, label_names).GetTable()
    columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
               Column(MaxResult(), Format())]
    tf = TableFormatter(table, columns)
    data_table = tf.GetCellTable('full', headers=False)

    for cur_row_data in data_table:
      test_key = cur_row_data[0].string_value
      title = '{0}: {1}'.format(item, test_key.replace('/', ''))
      chart = ColumnChart(title, 300, 200)
      chart.AddColumn('Label', 'string')
      chart.AddColumn('Average', 'number')
      chart.AddColumn('Min', 'number')
      chart.AddColumn('Max', 'number')
      chart.AddSeries('Min', 'line', 'black')
      chart.AddSeries('Max', 'line', 'black')
      cur_index = 1
      for label in label_names:
        chart.AddRow([label,
                      cur_row_data[cur_index].value,
                      cur_row_data[cur_index + 1].value,
                      cur_row_data[cur_index + 2].value])
        if isinstance(cur_row_data[cur_index].value, str):
          chart = None
          break
        cur_index += 3
      if chart:
        charts.append(chart)
  return charts


class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  def __init__(self, benchmark_results, experiment=None):
    super(HTMLResultsReport, self).__init__(benchmark_results)
    self.experiment = experiment

  @staticmethod
  def FromExperiment(experiment):
    return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
                             experiment=experiment)

  def GetReport(self):
    label_names = self.benchmark_results.label_names
    test_results = self.benchmark_results.run_keyvals
    charts = _GetHTMLCharts(label_names, test_results)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    experiment_file = ''
    if self.experiment is not None:
      experiment_file = self.experiment.experiment_file
    # Use kwargs for sanity, and so that testing is a bit easier.
    return templates.GenerateHTMLPage(perf_table=perf_table,
                                      chart_js=chart_javascript,
                                      summary_table=summary_table,
                                      print_table=_PrintTable,
                                      chart_divs=chart_divs,
                                      full_table=full_table,
                                      experiment_file=experiment_file)


def ParseStandardPerfReport(report_data):
  """Parses the output of `perf report`.

  It'll parse the following:
  {{garbage}}
  # Samples: 1234M of event 'foo'

  1.23% command shared_object location function::name

  1.22% command shared_object location function2::name

  # Samples: 999K of event 'bar'

  0.23% command shared_object location function3::name
  {{etc.}}

  Into:
    {'foo': {'function::name': 1.23, 'function2::name': 1.22},
     'bar': {'function3::name': 0.23, etc.}}
  """
  # This function fails silently if it's handed a string (as opposed to a
  # list of lines). So, auto-split if we do happen to get a string.
  if isinstance(report_data, basestring):
    report_data = report_data.splitlines()

  # Samples: N{K,M,G} of event 'event-name'
  samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")

  # We expect lines like:
  # N.NN%  samples  command  shared_object  [location] symbol
  #
  # Note that we're looking at stripped lines, so there is no space at the
  # start.
  perf_regex = re.compile(r'^(\d+(?:\.\d*)?)%' # N.NN%
                          r'\s*\d+' # samples count (ignored)
                          r'\s*\S+' # command (ignored)
                          r'\s*\S+' # shared_object (ignored)
                          r'\s*\[.\]' # location (ignored)
                          r'\s*(\S.+)' # function
                         )

  stripped_lines = (l.strip() for l in report_data)
  nonempty_lines = (l for l in stripped_lines if l)
  # Ignore all lines before we see samples_regex
  interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
                                          nonempty_lines)

  first_sample_line = next(interesting_lines, None)
  # Went through the entire file without finding a 'samples' header. Quit.
  if first_sample_line is None:
    return {}

  sample_name = samples_regex.match(first_sample_line).group(1)
  current_result = {}
  results = {sample_name: current_result}
  for line in interesting_lines:
    samples_match = samples_regex.match(line)
    if samples_match:
      sample_name = samples_match.group(1)
      current_result = {}
      results[sample_name] = current_result
      continue

    match = perf_regex.match(line)
    if not match:
      continue
    percentage_str, func_name = match.groups()
    try:
      percentage = float(percentage_str)
    except ValueError:
      # Couldn't parse it; try to be "resilient".
      continue
    current_result[func_name] = percentage
  return results
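# A minimal sketch of the expected behavior (made-up report lines):
#   lines = ["# Samples: 5K of event 'cycles'",
#            '10.50%  1234  chrome  libc.so  [.] malloc']
#   ParseStandardPerfReport(lines) == {'cycles': {'malloc': 10.5}}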


def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
                              benchmark_iteration):
  """Reads a perf report for the given benchmark. Returns {} on failure.

  The result should be a map of maps; it should look like:
  {perf_event_name: {function_name: pct_time_spent}}, e.g.
  {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
  """
  raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
  dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
  file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
  try:
    with open(file_name) as in_file:
      return ParseStandardPerfReport(in_file)
  except IOError:
    # Yes, we swallow any IO-related errors.
    return {}
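# For example (hypothetical names), label 'my_label', benchmark 'octane' and
# iteration 0 yield the directory name 'mylabeloctane1', so the report is read
# from <results_directory>/mylabeloctane1/perf.data.report.0.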


# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
  """Converts an experiment to keyvals."""
  return OrganizeResults(experiment.benchmark_runs, experiment.labels,
                         json_report=for_json_report)


class BenchmarkResults(object):
  """The minimum set of fields that any ResultsReport will take."""
  def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
               read_perf_report=None):
    if read_perf_report is None:
      def _NoPerfReport(*_args, **_kwargs):
        return {}
      read_perf_report = _NoPerfReport

    self.label_names = label_names
    self.benchmark_names_and_iterations = benchmark_names_and_iterations
    self.iter_counts = dict(benchmark_names_and_iterations)
    self.run_keyvals = run_keyvals
    self.read_perf_report = read_perf_report
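  # A minimal construction sketch (made-up names): two labels and one
  # benchmark with two iterations gives one list of per-iteration dicts per
  # label in run_keyvals:
  #   BenchmarkResults(['vanilla', 'patched'], [('octane', 2)],
  #                    {'octane': [[{'retval': 0}, {'retval': 0}],
  #                                [{'retval': 0}, {'retval': 0}]]})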

  @staticmethod
  def FromExperiment(experiment, for_json_report=False):
    label_names = [label.name for label in experiment.labels]
    benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
                                      for benchmark in experiment.benchmarks]
    run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
    read_perf_report = functools.partial(_ReadExperimentPerfReport,
                                         experiment.results_directory)
    return BenchmarkResults(label_names, benchmark_names_and_iterations,
                            run_keyvals, read_perf_report)


def _GetElemByName(name, from_list):
  """Gets an element from the given list by its name field.

  Raises an error if it doesn't find exactly one match.
  """
  elems = [e for e in from_list if e.name == name]
  if len(elems) != 1:
    raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
  return elems[0]


def _Unlist(l):
  """If l is a list, extracts the first element of l. Otherwise, returns l."""
  return l[0] if isinstance(l, list) else l


class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports for experiments."""

  def __init__(self, benchmark_results, date=None, time=None, experiment=None,
               json_args=None):
    """Construct a JSONResultsReport.

    json_args is the dict of arguments we pass to json.dumps in GetReport().
    """
    super(JSONResultsReport, self).__init__(benchmark_results)

    defaults = TelemetryDefaults()
    defaults.ReadDefaultsFile()
    summary_field_defaults = defaults.GetDefault()
    if summary_field_defaults is None:
      summary_field_defaults = {}
    self.summary_field_defaults = summary_field_defaults

    if json_args is None:
      json_args = {}
    self.json_args = json_args

    self.experiment = experiment
    if not date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
    self.date = date
    self.time = time

  @staticmethod
  def FromExperiment(experiment, date=None, time=None, json_args=None):
    benchmark_results = BenchmarkResults.FromExperiment(experiment,
                                                        for_json_report=True)
    return JSONResultsReport(benchmark_results, date, time, experiment,
                             json_args)
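  # Usage sketch: json_args is forwarded to json.dumps in GetReport(), so e.g.
  #   JSONResultsReport.FromExperiment(experiment,
  #                                    json_args={'indent': 2}).GetReport()
  # produces indented JSON output.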

  def GetReportObjectIgnoringExperiment(self):
    """Gets the JSON report object specifically for the output data.

    Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
    """
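    # A representative entry in the returned list (illustrative values; the
    # telemetry key names are hypothetical):
    #   {'date': '2013-06-20', 'time': '12:00:00', 'label': 'vanilla',
    #    'test_name': 'octane', 'pass': True,
    #    'detailed_results': {'Total_Score': 12345.0}}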
    benchmark_results = self.benchmark_results
    label_names = benchmark_results.label_names
    summary_field_defaults = self.summary_field_defaults
    final_results = []
    for test, test_results in benchmark_results.run_keyvals.iteritems():
      for label_name, label_results in zip(label_names, test_results):
        for iter_results in label_results:
          passed = iter_results.get('retval') == 0
          json_results = {
              'date': self.date,
              'time': self.time,
              'label': label_name,
              'test_name': test,
              'pass': passed,
          }
          final_results.append(json_results)

          if not passed:
            continue

          # Get overall results.
          summary_fields = summary_field_defaults.get(test)
          if summary_fields is not None:
            value = []
            json_results['overall_result'] = value
            for f in summary_fields:
              v = iter_results.get(f)
              if v is None:
                continue
              # New telemetry results format: sometimes we get a list of lists
              # now.
              v = _Unlist(_Unlist(v))
              value.append((f, float(v)))

          # Get detailed results.
          detail_results = {}
          json_results['detailed_results'] = detail_results
          for k, v in iter_results.iteritems():
            if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
              continue

            v = _Unlist(v)
            if 'machine' in k:
              json_results[k] = v
            elif v is not None:
              if isinstance(v, list):
                detail_results[k] = [float(d) for d in v]
              else:
                detail_results[k] = float(v)
    return final_results

  def GetReportObject(self):
    """Generate the JSON report, returning it as a python object."""
    report_list = self.GetReportObjectIgnoringExperiment()
    if self.experiment is not None:
      self._AddExperimentSpecificFields(report_list)
    return report_list

  def _AddExperimentSpecificFields(self, report_list):
    """Add experiment-specific data to the JSON report."""
    board = self.experiment.labels[0].board
    manager = self.experiment.machine_manager
    for report in report_list:
      label_name = report['label']
      label = _GetElemByName(label_name, self.experiment.labels)

      img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
      ver, img = ParseChromeosImage(img_path)

      report.update({
          'board': board,
          'chromeos_image': img,
          'chromeos_version': ver,
          'chrome_version': label.chrome_version,
          'compiler': label.compiler
      })

      if not report['pass']:
        continue
      if 'machine_checksum' not in report:
        report['machine_checksum'] = manager.machine_checksum[label_name]
      if 'machine_string' not in report:
        report['machine_string'] = manager.machine_checksum_string[label_name]

  def GetReport(self):
    """Dump the results of self.GetReportObject() to a string as JSON."""
    # This exists for consistency with the other GetReport methods.
    # Specifically, they all return strings, so it's a bit awkward if the JSON
    # results reporter returns an object.
    return json.dumps(self.GetReportObject(), **self.json_args)
