# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse data from benchmark_runs for tabulator."""

from __future__ import print_function

import errno
import json
import os
import re
import sys

from cros_utils import misc

_TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
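# Matches duplicated result keys of the form name{N} (e.g. "foo{2}");
# group(1) is the base key and group(2) is the duplicate index.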
_DUP_KEY_REGEX = re.compile(r'(\w+)\{(\d+)\}')


def _AdjustIteration(benchmarks, max_dup, bench):
21  """Adjust the interation numbers if they have keys like ABCD{i}."""
  for benchmark in benchmarks:
    if benchmark.name != bench or benchmark.iteration_adjusted:
      continue
    benchmark.iteration_adjusted = True
    benchmark.iterations *= (max_dup + 1)


def _GetMaxDup(data):
  """Find the maximum i inside ABCD{i}.

  data should be a [[[Key]]], where Key is a string that may look like
  ABCD{i}.
  """
  max_dup = 0
  for label in data:
    for run in label:
      for key in run:
        match = _DUP_KEY_REGEX.match(key)
        if match:
          max_dup = max(max_dup, int(match.group(2)))
  return max_dup


def _Repeat(func, times):
46  """Returns the result of running func() n times."""
47  return [func() for _ in xrange(times)]
48
49
50def _DictWithReturnValues(retval, pass_fail):
51  """Create a new dictionary pre-populated with success/fail values."""
52  new_dict = {}
53  # Note: 0 is a valid retval; test to make sure it's not None.
54  if retval is not None:
55    new_dict['retval'] = retval
56  if pass_fail:
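    # The pass/fail string is stored under the empty-string key; it is read
    # back with run.get('', None) in _GetNonDupLabel.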
    new_dict[''] = pass_fail
  return new_dict


def _GetNonDupLabel(max_dup, runs):
62  """Create new list for the runs of the same label.
63
64  Specifically, this will split out keys like foo{0}, foo{1} from one run into
65  their own runs. For example, given a run like:
66    {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5}
67
68  You'll get:
69    [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}]
70
71  Hands back the lists of transformed runs, all concatenated together.
72  """
  new_runs = []
  for run in runs:
    run_retval = run.get('retval', None)
    run_pass_fail = run.get('', None)
    new_run = {}
    # pylint: disable=cell-var-from-loop
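    # Pre-create one dict per duplicate index, each seeded with this run's
    # retval and pass/fail values.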
    added_runs = _Repeat(
        lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup)
    for key, value in run.items():
      match = _DUP_KEY_REGEX.match(key)
      if not match:
        new_run[key] = value
      else:
        new_key, index_str = match.groups()
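        # Duplicate key indices are 1-based, so e.g. foo{1} lands in
        # added_runs[0].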
        added_runs[int(index_str) - 1][new_key] = str(value)
    new_runs.append(new_run)
    new_runs += added_runs
  return new_runs


def _DuplicatePass(result, benchmarks):
  """Properly expands keys like `foo{1}` in `result`."""
  for bench, data in result.items():
    max_dup = _GetMaxDup(data)
    # If there's nothing to expand, there's nothing to do.
    if not max_dup:
      continue
    for i, runs in enumerate(data):
      data[i] = _GetNonDupLabel(max_dup, runs)
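    # Bump the benchmark's recorded iteration count to cover the runs that
    # were just split out.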
    _AdjustIteration(benchmarks, max_dup, bench)


def _ReadSummaryFile(filename):
106  """Reads the summary file at filename."""
  dirname, _ = misc.GetRoot(filename)
  fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE)
  try:
    # Slurp the summary file into a dictionary. The keys in the dictionary are
    # the benchmark names. The value for a key is a list containing the names
    # of all the result fields that should be returned in a 'default' report.
    with open(fullname) as in_file:
      return json.load(in_file)
  except IOError as e:
    # ENOENT means "no such file or directory"
    if e.errno == errno.ENOENT:
      return {}
    raise


def _MakeOrganizeResultOutline(benchmark_runs, labels):
  """Creates the "outline" of the OrganizeResults result for a set of runs.

  Report generation returns lists of different sizes, depending on the input
  data. Depending on the order in which we iterate through said input data, we
  may populate the Nth index of a list, then the N-1st, then the N+Mth, ...

  It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't
  have to worry about resizing while computing results.
  """
  # Count how many iterations exist for each benchmark run.
  # We can't simply count up, since we may be given an incomplete set of
  # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3])
  iteration_count = {}
  for run in benchmark_runs:
    name = run.benchmark.name
    old_iterations = iteration_count.get(name, -1)
    # N.B. run.iteration starts at 1, not 0.
    iteration_count[name] = max(old_iterations, run.iteration)

  # Result structure: {benchmark_name: [[{key: val}]]}
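  # The outer list is indexed by label, the inner list by (iteration - 1).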
  result = {}
  for run in benchmark_runs:
    name = run.benchmark.name
    num_iterations = iteration_count[name]
    # default param makes cros lint be quiet about defining num_iterations in a
    # loop.
    make_dicts = lambda n=num_iterations: _Repeat(dict, n)
    result[name] = _Repeat(make_dicts, len(labels))
  return result


def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
  """Create a dict from benchmark_runs.

  The structure of the output dict is as follows:
158  {"benchmark_1":[
159    [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}]
160    #one label
161    []
162    #the other label
163    ]
164   "benchmark_2":
165    [
166    ]}.
167  """
168  result = _MakeOrganizeResultOutline(benchmark_runs, labels)
169  label_names = [label.name for label in labels]
170  label_indices = {name: i for i, name in enumerate(label_names)}
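  # The default-results file is looked up next to the running script
  # (sys.argv[0]).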
  summary_file = _ReadSummaryFile(sys.argv[0])
  if benchmarks is None:
    benchmarks = []

  for benchmark_run in benchmark_runs:
    if not benchmark_run.result:
      continue
    benchmark = benchmark_run.benchmark
    label_index = label_indices[benchmark_run.label.name]
    cur_label_list = result[benchmark.name][label_index]
    cur_dict = cur_label_list[benchmark_run.iteration - 1]

    show_all_results = json_report or benchmark.show_all_results
    if not show_all_results:
      summary_list = summary_file.get(benchmark.test_name)
      if summary_list:
        summary_list.append('retval')
      else:
        # Did not find test_name in json file; show everything.
        show_all_results = True
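    # Copy over either every keyval or only those listed in the defaults file
    # (plus 'retval', appended above).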
    for test_key in benchmark_run.result.keyvals:
      if show_all_results or test_key in summary_list:
        cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
    # Occasionally Telemetry tests will not fail but they will not return a
    # result, either.  Look for those cases, and force them to be a fail.
    # (This can happen if, for example, the test has been disabled.)
    if len(cur_dict) == 1 and cur_dict['retval'] == 0:
      cur_dict['retval'] = 1
      benchmark_run.result.keyvals['retval'] = 1
      # TODO: This output should be sent via logger.
      print(
          "WARNING: Test '%s' appears to have succeeded but returned"
          ' no results.' % benchmark.name,
          file=sys.stderr)
    if json_report and benchmark_run.machine:
      cur_dict['machine'] = benchmark_run.machine.name
      cur_dict['machine_checksum'] = benchmark_run.machine.checksum
      cur_dict['machine_string'] = benchmark_run.machine.checksum_string
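  # Expand any duplicated keys (e.g. foo{1}) into their own iterations.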
  _DuplicatePass(result, benchmarks)
  return result