#!/usr/bin/env python2
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a specially-formatted JSON object, generates results report(s).

The JSON object should look like:
{"data": BenchmarkData, "platforms": BenchmarkPlatforms}

BenchmarkPlatforms is a [str], each of which names a platform the benchmark
  was run on (e.g. peppy, shamu, ...). Note that the order of this list
  corresponds to the order of the items in BenchmarkData.

BenchmarkData is a {str: [PlatformData]}. The str is the name of the benchmark,
and a PlatformData is a set of data for a given platform. There must be one
PlatformData for each benchmark, for each element in BenchmarkPlatforms.

A PlatformData is a [{str: float}], where each str names a metric we recorded,
and the float is the value for that metric. Each element is considered to be
the metrics collected from an independent run of this benchmark. NOTE: Each
PlatformData is expected to have a "retval" key, with the return value of
the benchmark. If the benchmark is successful, said return value should be 0.
Otherwise, this will break some of our JSON functionality.

Putting it all together, a JSON object will end up looking like:
  { "platforms": ["peppy", "peppy-new-crosstool"],
    "data": {
      "bench_draw_line": [
        [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
         {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
        [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
         {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
      ]
    }
  }

This says that we ran benchmarks on two platforms, peppy and
  peppy-new-crosstool.
We ran one benchmark, bench_draw_line, twice on each platform.
Peppy's runs took 1.321ms and 1.920ms, while peppy-new-crosstool's took 1.221ms
  and 1.423ms. None of the runs failed to complete (every retval is 0).
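
An example invocation (the input file name here is only illustrative; the
flags are defined in _ParseArgs below):
  <this script> --input results.json --json --html --output my_report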
"""

from __future__ import division
from __future__ import print_function

import argparse
import functools
import json
import os
import sys
import traceback

from results_report import BenchmarkResults
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import TextResultsReport


def CountBenchmarks(benchmark_runs):
  """Counts the number of iterations for each benchmark in benchmark_runs."""

  # Example input for benchmark_runs:
  # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
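  # As a sketch of the corresponding output: the iteration count reported for
  # each benchmark is the length of its longest platform run list, so the
  # example above yields [("bench", 4)].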
  def _MaxLen(results):
    return 0 if not results else max(len(r) for r in results)

  return [(name, _MaxLen(results))
          for name, results in benchmark_runs.iteritems()]


def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
  """Limits the given benchmark results to max_keys keys in-place.

  This takes the `data` field from the benchmark input, and mutates each
  benchmark run to contain `max_keys` elements (ignoring special elements, like
  "retval"). At the moment, it just selects the first `max_keys` keyvals,
  alphabetically.

  If complain_on_update is true, this will print a message noting that a
  truncation occurred.

  This returns the `results` object that was passed in, for convenience.

  e.g.
  >>> benchmark_data = {
  ...   "bench_draw_line": [
  ...     [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
  ...      {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
  ...     [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
  ...      {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
  ...   ]
  ... }
  >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
  {
    'bench_draw_line': [
      [{'memory (mb)': 128.1, 'retval': 0},
       {'memory (mb)': 128.4, 'retval': 0}],
      [{'memory (mb)': 124.3, 'retval': 0},
       {'memory (mb)': 123.9, 'retval': 0}]
    ]
  }
  """
  actually_updated = False
  for bench_results in results.itervalues():
    for platform_results in bench_results:
      for i, result in enumerate(platform_results):
        # Keep the keys that come earliest when sorted alphabetically.
        # Forcing alphabetical order is arbitrary, but necessary; otherwise,
        # the keyvals we'd emit would depend on our iteration order through a
        # map.
        removable_keys = sorted(k for k in result if k != 'retval')
        retained_keys = removable_keys[:max_keys]
        platform_results[i] = {k: result[k] for k in retained_keys}
        # retval needs to be passed through all of the time.
        retval = result.get('retval')
        if retval is not None:
          platform_results[i]['retval'] = retval
        actually_updated = actually_updated or \
          len(retained_keys) != len(removable_keys)

  if actually_updated and complain_on_update:
    print(
        'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
  return results


def _ConvertToASCII(obj):
  """Convert an object loaded from JSON to ASCII; JSON gives us unicode."""

  # Using something like `object_hook` is insufficient, since it only fires on
  # actual JSON objects. `encoding` fails, too, since the default decoder always
  # uses unicode() to decode strings.
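  # For illustration only (the shape mirrors the module docstring): an input
  # like {u'bench_draw_line': [[{u'time (ms)': 1.321, u'retval': 0}]]} comes
  # back with every unicode string replaced by a plain str; numbers and other
  # values pass through untouched.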
  if isinstance(obj, unicode):
    return str(obj)
  if isinstance(obj, dict):
    return {_ConvertToASCII(k): _ConvertToASCII(v) for k, v in obj.iteritems()}
  if isinstance(obj, list):
    return [_ConvertToASCII(v) for v in obj]
  return obj


def _PositiveInt(s):
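  """Parses a non-negative integer; 0 is accepted on purpose, since
  --statistic-limit treats 0 as "unlimited" (see _ParseArgs below)."""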
  i = int(s)
  if i < 0:
    raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
  return i


def _AccumulateActions(args):
  """Given program arguments, determines what actions we want to run.

  Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
  ResultsReport, and the str is the file extension for the given report.
  """
  results = []
  # The order of these is arbitrary.
  if args.json:
    results.append((JSONResultsReport, 'json'))
  if args.text:
    results.append((TextResultsReport, 'txt'))
  if args.email:
    email_ctor = functools.partial(TextResultsReport, email=True)
    results.append((email_ctor, 'email'))
  # We emit HTML if nothing else was specified.
  if args.html or not results:
    results.append((HTMLResultsReport, 'html'))
  return results


# Note: get_contents is a function, because it may be expensive (generating some
# HTML reports takes O(seconds) on my machine, depending on the size of the
# input data).
def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
  """Writes the report to a file named "${output_prefix}.${extension}".

  get_contents should be a zero-argument function that returns a string (the
  contents to write).
  If output_prefix == '-', this writes to stdout.
  If overwrite is False, this will not overwrite existing files.
  """
  if output_prefix == '-':
    if verbose:
      print('Writing %s report to stdout' % (extension,), file=sys.stderr)
    sys.stdout.write(get_contents())
    return

  file_name = '%s.%s' % (output_prefix, extension)
  if not overwrite and os.path.exists(file_name):
    raise IOError('Refusing to write %s -- it already exists' % (file_name,))

  with open(file_name, 'w') as out_file:
    if verbose:
      print('Writing %s report to %s' % (extension, file_name), file=sys.stderr)
    out_file.write(get_contents())


def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
  """Runs `actions`, returning True if all succeeded."""
  failed = False

  report_ctor = None  # Make the linter happy
  for report_ctor, extension in actions:
    try:
      get_contents = lambda: report_ctor(benchmark_results).GetReport()
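      # The lambda closes over the loop variable report_ctor, but WriteFile
      # calls it before the next iteration starts, so the usual late-binding
      # pitfall doesn't bite here.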
      WriteFile(output_prefix, extension, get_contents, overwrite, verbose)
    except Exception:
      # Complain and move along; we may have more actions that might complete
      # successfully.
      failed = True
      traceback.print_exc()
  return not failed


def PickInputFile(input_name):
  """Returns the file to read benchmark input from; '-' means stdin."""
  return sys.stdin if input_name == '-' else open(input_name)


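# A stub for BenchmarkResults' read_perf_report hook: this script never has
# perf data, so every lookup simply returns an empty dict.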
def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
  return {}


def _ParseArgs(argv):
  parser = argparse.ArgumentParser(description='Turns JSON into results '
                                   'report(s).')
  parser.add_argument(
      '-v',
      '--verbose',
      action='store_true',
      help='Be a tiny bit more verbose.')
  parser.add_argument(
      '-f',
      '--force',
      action='store_true',
      help='Overwrite existing results files.')
  parser.add_argument(
      '-o',
      '--output',
      default='report',
      type=str,
      help='Prefix of the output filename (default: report). '
      '- means stdout.')
  parser.add_argument(
      '-i',
      '--input',
      required=True,
      type=str,
      help='Where to read the JSON from. - means stdin.')
  parser.add_argument(
      '-l',
      '--statistic-limit',
      default=0,
      type=_PositiveInt,
      help='The maximum number of benchmark statistics to '
      'display from a single run. 0 implies unlimited.')
  parser.add_argument(
      '--json', action='store_true', help='Output a JSON report.')
  parser.add_argument(
      '--text', action='store_true', help='Output a text report.')
  parser.add_argument(
      '--email',
      action='store_true',
      help='Output a text report suitable for email.')
  parser.add_argument(
      '--html',
      action='store_true',
      help='Output an HTML report (this is the default if no '
      'other output format is specified).')
  return parser.parse_args(argv)
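
# For reference (not exercised anywhere in this file): with the defaults above,
# _ParseArgs(['-i', '-', '--json', '--text']) yields a namespace with
# input='-', json=True, text=True, email=False, html=False, output='report',
# statistic_limit=0, force=False, and verbose=False.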


def Main(argv):
  args = _ParseArgs(argv)
  # json.load gives us unicode strings, and our results reporter *really*
  # doesn't like unicode, so convert everything to plain ASCII strings first.
  with PickInputFile(args.input) as in_file:
    raw_results = _ConvertToASCII(json.load(in_file))

  platform_names = raw_results['platforms']
  results = raw_results['data']
  if args.statistic_limit:
    results = CutResultsInPlace(results, max_keys=args.statistic_limit)
  benches = CountBenchmarks(results)
  # In crosperf, a label is essentially a platform+configuration. So, a name of
  # a label and a name of a platform are equivalent for our purposes.
  bench_results = BenchmarkResults(
      label_names=platform_names,
      benchmark_names_and_iterations=benches,
      run_keyvals=results,
      read_perf_report=_NoPerfReport)
  actions = _AccumulateActions(args)
  ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
  return 0 if ok else 1


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))