• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2013 The Chromium Authors
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5
6import re
7import sys
8
9import json
10import logging
11import math
12
13import perf_result_data_type
14
15
# Maps each perf_result_data_type to the line prefix emitted before a result
# (INFORMATIONAL results get no prefix); consumed by PrintPerfResult below.
RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
                perf_result_data_type.DEFAULT: '*RESULT ',
                perf_result_data_type.INFORMATIONAL: '',
                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}
22
23
24def _EscapePerfResult(s):
25  """Escapes |s| for use in a perf result."""
26  return re.sub('[\:|=/#&,]', '_', s)
27
28
def FlattenList(values):
  """Returns a simple list without sub-lists.

  Nested lists are flattened recursively; non-list entries are kept in order.
  """
  flattened = []
  for item in values:
    # Recurse into sub-lists; wrap scalars so extend() handles both cases.
    flattened.extend(FlattenList(item) if isinstance(item, list) else (item,))
  return flattened
38
39
def GeomMeanAndStdDevFromHistogram(histogram_json):
  """Computes the geometric mean and std deviation of a histogram.

  Args:
    histogram_json: JSON string of the form
        '{"buckets": [{"low": l, "high": h, "count": n}, ...]}'.
        Each bucket's representative value is the midpoint of [low, high],
        or just 'low' when 'high' is absent. Buckets whose representative
        value is not positive are ignored (log is undefined for them).

  Returns:
    A (geometric mean, standard deviation) tuple of floats; (0.0, 0.0) for
    an empty histogram or one with no positive-valued buckets.
  """
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if 'buckets' not in histogram:
    return 0.0, 0.0
  count = 0
  sum_of_logs = 0
  for bucket in histogram['buckets']:
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
    # Only positive means contribute; log() of <= 0 is undefined.
    if bucket['mean'] > 0:
      sum_of_logs += math.log(bucket['mean']) * bucket['count']
      count += bucket['count']

  if count == 0:
    return 0.0, 0.0

  sum_of_squares = 0
  geom_mean = math.exp(sum_of_logs / count)
  for bucket in histogram['buckets']:
    if bucket['mean'] > 0:
      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
  return geom_mean, math.sqrt(sum_of_squares / count)
65
66
67def _ValueToString(v):
68  # Special case for floats so we don't print using scientific notation.
69  if isinstance(v, float):
70    return '%f' % v
71  else:
72    return str(v)
73
74
75def _MeanAndStdDevFromList(values):
76  avg = None
77  sd = None
78  if len(values) > 1:
79    try:
80      value = '[%s]' % ','.join([_ValueToString(v) for v in values])
81      avg = sum([float(v) for v in values]) / len(values)
82      sqdiffs = [(float(v) - avg) ** 2 for v in values]
83      variance = sum(sqdiffs) / (len(values) - 1)
84      sd = math.sqrt(variance)
85    except ValueError:
86      value = ', '.join(values)
87  else:
88    value = values[0]
89  return value, avg, sd
90
91
def PrintPages(page_list):
  """Prints list of pages to stdout in the format required by perf tests."""
  escaped_pages = ','.join(_EscapePerfResult(page) for page in page_list)
  print('Pages: [%s]' % escaped_pages)
95
96
def PrintPerfResult(measurement, trace, values, units,
                    result_type=perf_result_data_type.DEFAULT,
                    print_to_stdout=True):
  """Prints numerical data to stdout in the format required by perf tests.

  The string args may be empty but they must not contain any colons (:) or
  equals signs (=).
  This is parsed by the buildbot using:
  http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py

  Args:
    measurement: A description of the quantity being measured, e.g. "vm_peak".
        On the dashboard, this maps to a particular graph. Mandatory.
    trace: A description of the particular data point, e.g. "reference".
        On the dashboard, this maps to a particular "line" in the graph.
        Mandatory.
    values: A list of numeric measured values. An N-dimensional list will be
        flattened and treated as a simple list.
    units: A description of the units of measure, e.g. "bytes".
    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
    print_to_stdout: If True, prints the output in stdout instead of returning
        the output to caller.

    Returns:
      String of the formated perf result.
  """
  assert perf_result_data_type.IsValidType(result_type), \
         'result type: %s is invalid' % result_type

  trace_name = _EscapePerfResult(trace)

  if result_type in (perf_result_data_type.UNIMPORTANT,
                     perf_result_data_type.DEFAULT,
                     perf_result_data_type.INFORMATIONAL):
    assert isinstance(values, list)
    assert '/' not in measurement
    samples = FlattenList(values)
    assert len(samples)
    value, avg, sd = _MeanAndStdDevFromList(samples)
    # Do not show equal sign if the trace is empty. Usually it happens when
    # measurement is enough clear to describe the result.
    separator = '= ' if trace_name else ''
    output = '%s%s: %s%s%s %s' % (RESULT_TYPES[result_type],
                                  _EscapePerfResult(measurement),
                                  trace_name,
                                  separator,
                                  value,
                                  units)
  else:
    assert perf_result_data_type.IsHistogram(result_type)
    assert isinstance(values, list)
    # The histograms can only be printed individually, there's no computation
    # across different histograms.
    assert len(values) == 1
    value = values[0]
    output = '%s%s: %s= %s %s' % (RESULT_TYPES[result_type],
                                  _EscapePerfResult(measurement),
                                  trace_name,
                                  value,
                                  units)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)

  # Falsy avg/sd (None or 0) are omitted from the output.
  if avg:
    output += '\nAvg %s: %f%s' % (measurement, avg, units)
  if sd:
    output += '\nSd  %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    print(output)
    sys.stdout.flush()
  return output
168
169
def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
                     improvement_direction='down', important=True):
  """Outputs test results in correct format.

  If chart_data is None, it outputs data in old format. If chart_data is a
  dictionary, formats in chartjson format. If any other format defaults to
  old format.

  Args:
    chart_data: A dictionary corresponding to perf results in the chartjson
        format.
    graph_title: A string containing the name of the chart to add the result
        to.
    trace_title: A string containing the name of the trace within the chart
        to add the result to.
    value: The value of the result being reported.
    units: The units of the value being reported.
    improvement_direction: A string denoting whether higher or lower is
        better for the result. Either 'up' or 'down'.
    important: A boolean denoting whether the result is important or not.
  """
  if not (chart_data and isinstance(chart_data, dict)):
    # No chartjson dict supplied: fall back to the old perf-result format.
    PrintPerfResult(graph_title, trace_title, [value], units)
    return
  chart = chart_data['charts'].setdefault(graph_title, {})
  chart[trace_title] = {
      'type': 'scalar',
      'value': value,
      'units': units,
      'improvement_direction': improvement_direction,
      'important': important
  }
202