#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

"""Transforms skia benchmark results to ones that crosperf can understand."""

from __future__ import print_function

import itertools
import logging
import json
import sys

# Turn the logging level to INFO before importing other autotest
# code, to avoid having failed import logging messages confuse the
# test_droid user.
logging.basicConfig(level=logging.INFO)

# All of the results we care about, by name.
# Each of these *must* end in _ns, _us, _ms, or _s, since all the metrics we
# collect (so far) are related to time, and we alter the results based on the
# suffix of these strings (so we don't have 0.000421ms per sample, for example)
_RESULT_RENAMES = {
    'memset32_100000_640_480_nonrendering': 'memset_time_ms',
    'path_equality_50%_640_480_nonrendering': 'path_equality_ns',
    'sort_qsort_backward_640_480_nonrendering': 'qsort_us'
}


def _GetFamiliarName(name):
  # Fall back to the raw name if we have no rename for it.
  r = _RESULT_RENAMES.get(name)
  return r if r else name


def _IsResultInteresting(name):
  return name in _RESULT_RENAMES


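# For example (name taken from _RESULT_RENAMES above):
#   _IsResultInteresting('sort_qsort_backward_640_480_nonrendering')  # True
#   _GetFamiliarName('sort_qsort_backward_640_480_nonrendering')  # 'qsort_us'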
def _GetTimeMultiplier(label_name):
  """Returns the multiplier that converts milliseconds to label_name's unit.

  "label_name's unit" is determined by pattern matching against the last few
  non-space chars of label_name ('_ns', '_us', '_ms', or '_s').

  This expects the time unit to be separated from anything else by '_'.
  """
  ms_mul = 1000 * 1000.
  endings = [('_ns', 1), ('_us', 1000), ('_ms', ms_mul), ('_s', ms_mul * 1000)]
  for end, mul in endings:
    if label_name.endswith(end):
      return ms_mul / mul
  raise ValueError('Unknown ending in "%s"; expecting one of %s' %
                   (label_name, [end for end, _ in endings]))
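# An illustration of the multipliers above, using the friendly names defined
# in _RESULT_RENAMES (the 0.5ms sample is a made-up value):
#   _GetTimeMultiplier('memset_time_ms')    # 1.0
#   _GetTimeMultiplier('qsort_us')          # 1000.0 -- so 0.5ms becomes 500us
#   _GetTimeMultiplier('path_equality_ns')  # 1000000.0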


def _GetTimeDenom(ms):
  """Express times in a common time unit.

  Given a list of times (in milliseconds), find a time unit in which
  they can all be expressed.

  Returns the unit name, and `ms` normalized to that time unit.

  >>> _GetTimeDenom([1, 2, 3])
  ('ms', [1.0, 2.0, 3.0])
  >>> _GetTimeDenom([.1, .2, .3])
  ('us', [100.0, 200.0, 300.0])
  """

  ms_mul = 1000 * 1000
  units = [('us', 1000), ('ms', ms_mul), ('s', ms_mul * 1000)]
  for name, mul in reversed(units):
    normalized = [float(t) * ms_mul / mul for t in ms]
    average = sum(normalized) / len(normalized)
    if all(n > 0.1 for n in normalized) and average >= 1:
      return name, normalized

  normalized = [float(t) * ms_mul for t in ms]
  return 'ns', normalized


def _TransformBenchmarks(raw_benchmarks):
  # We get {"results": {"bench_name": Results}}
  # where
  #   Results = {"config_name": {"samples": [float], etc.}}
  #
  # We want {"data": {"skia": [[BenchmarkData]]},
  #          "platforms": ["platform1", ...]}
  # where
  #   BenchmarkData = {"bench_name": bench_samples[N], ..., "retval": 0}
  #
  # Note that retval is awkward -- crosperf's JSON reporter reports the result
  # as a failure if it's not there. Everything else treats it like a
  # statistic...
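  # Concretely (with made-up sample numbers), an input like
  #   {"results": {"sort_qsort_backward": {"640_480_nonrendering":
  #                    {"samples": [0.002, 0.003]}}}}
  # turns into the [BenchmarkData] list
  #   [{"qsort_us": 2.0, "retval": 0}, {"qsort_us": 3.0, "retval": 0}]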
  benchmarks = raw_benchmarks['results']
  results = []
  for bench_name, bench_result in benchmarks.items():
    try:
      for cfg_name, keyvals in bench_result.items():
        # Some benchmarks won't have timing data (either it won't exist at all,
        # or it'll be empty); skip them.
        samples = keyvals.get('samples')
        if not samples:
          continue

        full_name = '%s_%s' % (bench_name, cfg_name)
        if not _IsResultInteresting(full_name):
          continue

        friendly_name = _GetFamiliarName(full_name)
        if len(results) < len(samples):
          results.extend(
              {'retval': 0} for _ in range(len(samples) - len(results)))

        time_mul = _GetTimeMultiplier(friendly_name)
        for sample, app in itertools.izip(samples, results):
          assert friendly_name not in app
          app[friendly_name] = sample * time_mul
    except (KeyError, ValueError) as e:
      logging.error('While converting "%s" (key: %s): %s', bench_result,
                    bench_name, e)
      raise

  # Realistically, [results] should be multiple results, where each entry in
  # the list is the result for a different label. Because we only deal with one
  # label at the moment, we need to wrap it in its own list.
  return results


if __name__ == '__main__':

  def _GetUserFile(argv):
    # Read from stdin if no file name (or '-') was given; otherwise open the
    # named file.
    if not argv or argv[0] == '-':
      return sys.stdin
    return open(argv[0])

  def _Main():
    with _GetUserFile(sys.argv[1:]) as in_file:
      obj = json.load(in_file)
    output = _TransformBenchmarks(obj)
    json.dump(output, sys.stdout)

  _Main()
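# Example invocation (illustrative only; './this_script.py' and the file names
# below are placeholders). Any skia benchmark JSON with a top-level "results"
# key works as input, and passing '-' or no argument reads from stdin:
#   ./this_script.py skia_bench_output.json > crosperf_input.json
#   cat skia_bench_output.json | ./this_script.py -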