# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import re
import sys

import json
import logging
import math

from lib.common import perf_result_data_type


# Mapping from result type to test output
RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
                perf_result_data_type.DEFAULT: '*RESULT ',
                perf_result_data_type.INFORMATIONAL: '',
                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}

def _EscapePerfResult(s):
  """Escapes |s| for use in a perf result."""
  return re.sub(r'[:|=/#&,]', '_', s)
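# Illustrative usage (a sketch, not part of the original module): characters
# such as ':' and '/' are replaced with underscores.
#   >>> _EscapePerfResult('mem:peak/total')
#   'mem_peak_total'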


def FlattenList(values):
  """Returns a simple list without sub-lists."""
  ret = []
  for entry in values:
    if isinstance(entry, list):
      ret.extend(FlattenList(entry))
    else:
      ret.append(entry)
  return ret
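# Illustrative usage (a sketch, not part of the original module): nested lists
# of arbitrary depth are flattened into a single flat list.
#   >>> FlattenList([1, [2, 3], [[4], 5]])
#   [1, 2, 3, 4, 5]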


def GeomMeanAndStdDevFromHistogram(histogram_json):
  """Computes the geometric mean and standard deviation of a JSON histogram.

  Buckets are represented by their midpoints (or their low value when no high
  value is given) and weighted by their counts; only buckets with a positive
  mean contribute. Returns (0.0, 0.0) for an empty histogram.
  """
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if 'buckets' not in histogram:
    return 0.0, 0.0
  count = 0
  sum_of_logs = 0
  for bucket in histogram['buckets']:
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
    if bucket['mean'] > 0:
      sum_of_logs += math.log(bucket['mean']) * bucket['count']
      count += bucket['count']

  if count == 0:
    return 0.0, 0.0

  sum_of_squares = 0
  geom_mean = math.exp(sum_of_logs / count)
  for bucket in histogram['buckets']:
    if bucket['mean'] > 0:
      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
  return geom_mean, math.sqrt(sum_of_squares / count)
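# Illustrative usage (a sketch, not part of the original module): for
# '{"buckets": [{"low": 2, "high": 6, "count": 10}]}' every sample sits in a
# bucket with midpoint 4.0, so the result is roughly (4.0, 0.0), up to
# floating-point rounding.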


def _ValueToString(v):
  # Special case for floats so we don't print using scientific notation.
  if isinstance(v, float):
    return '%f' % v
  else:
    return str(v)
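# Illustrative note (not part of the original module): '%f' keeps small floats
# out of scientific notation, e.g. _ValueToString(1e-06) gives '0.000001'
# where str(1e-06) would give '1e-06'.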


def _MeanAndStdDevFromList(values):
  """Returns the printable value, mean and sample std dev of |values|."""
  avg = None
  sd = None
  if len(values) > 1:
    try:
      value = '[%s]' % ','.join([_ValueToString(v) for v in values])
      avg = sum([float(v) for v in values]) / len(values)
      sqdiffs = [(float(v) - avg) ** 2 for v in values]
      variance = sum(sqdiffs) / (len(values) - 1)
      sd = math.sqrt(variance)
    except ValueError:
      value = ', '.join(values)
  else:
    value = values[0]
  return value, avg, sd
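# Illustrative usage (a sketch, not part of the original module): the standard
# deviation uses the sample (n - 1) formula.
#   >>> _MeanAndStdDevFromList([1, 2, 3])
#   ('[1,2,3]', 2.0, 1.0)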


def PrintPages(page_list):
  """Prints list of pages to stdout in the format required by perf tests."""
  print('Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list]))
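# Illustrative usage (a sketch, not part of the original module):
#   >>> PrintPages(['http://example.com/a', 'about:blank'])
#   Pages: [http___example.com_a,about_blank]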


def PrintPerfResult(measurement, trace, values, units,
                    result_type=perf_result_data_type.DEFAULT,
                    print_to_stdout=True):
  """Prints numerical data to stdout in the format required by perf tests.

  The string args may be empty but they must not contain any colons (:) or
  equals signs (=).

  Args:
    measurement: A description of the quantity being measured, e.g. "vm_peak".
        On the dashboard, this maps to a particular graph. Mandatory.
    trace: A description of the particular data point, e.g. "reference".
        On the dashboard, this maps to a particular "line" in the graph.
        Mandatory.
    values: A list of numeric measured values. An N-dimensional list will be
        flattened and treated as a simple list.
    units: A description of the units of measure, e.g. "bytes".
    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
    print_to_stdout: If True, prints the output to stdout in addition to
        returning it to the caller.

  Returns:
    A string containing the formatted perf result.
  """
  assert perf_result_data_type.IsValidType(result_type), \
         'result type: %s is invalid' % result_type

  trace_name = _EscapePerfResult(trace)

  if (result_type == perf_result_data_type.UNIMPORTANT or
      result_type == perf_result_data_type.DEFAULT or
      result_type == perf_result_data_type.INFORMATIONAL):
    assert isinstance(values, list)
    assert '/' not in measurement
    flattened_values = FlattenList(values)
    assert len(flattened_values)
    value, avg, sd = _MeanAndStdDevFromList(flattened_values)
    output = '%s%s: %s%s%s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        # Do not show the equals sign if the trace is empty. This usually
        # happens when the measurement alone is clear enough to describe the
        # result.
        '= ' if trace_name else '',
        value,
        units)
  else:
    assert perf_result_data_type.IsHistogram(result_type)
    assert isinstance(values, list)
    # Histograms can only be printed individually; there is no computation
    # across different histograms.
    assert len(values) == 1
    value = values[0]
    output = '%s%s: %s= %s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        value,
        units)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)

  if avg:
    output += '\nAvg %s: %f%s' % (measurement, avg, units)
  if sd:
    output += '\nSd  %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    print(output)
    sys.stdout.flush()
  return output
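# Illustrative usage (a sketch, not part of the original module): with the
# default result type, PrintPerfResult('page_load_time', 'reference',
# [1, 2, 3], 'ms') prints (and returns) the following:
#   *RESULT page_load_time: reference= [1,2,3] ms
#   Avg page_load_time: 2.000000ms
#   Sd  page_load_time: 1.000000ms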


def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
                     improvement_direction='down', important=True):
  """Outputs a test result in the correct format.

  If chart_data is a dictionary, the result is added to it in chartjson
  format; otherwise (including None), the result is printed in the old
  log-based format.

  Args:
    chart_data: A dictionary corresponding to perf results in the chartjson
        format.
    graph_title: A string containing the name of the chart to add the result
        to.
    trace_title: A string containing the name of the trace within the chart
        to add the result to.
    value: The value of the result being reported.
    units: The units of the value being reported.
    improvement_direction: A string denoting whether higher or lower is
        better for the result. Either 'up' or 'down'.
    important: A boolean denoting whether the result is important or not.
  """
  if chart_data and isinstance(chart_data, dict):
    chart_data['charts'].setdefault(graph_title, {})
    chart_data['charts'][graph_title][trace_title] = {
        'type': 'scalar',
        'value': value,
        'units': units,
        'improvement_direction': improvement_direction,
        'important': important
    }
  else:
    PrintPerfResult(graph_title, trace_title, [value], units)
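# Illustrative usage (a sketch, not part of the original module): with a
# chartjson dictionary, the result is recorded instead of printed.
#   chart_data = {'charts': {}}
#   ReportPerfResult(chart_data, 'warm_times', 'page_load_time', 42, 'ms')
#   # chart_data['charts']['warm_times']['page_load_time'] now holds
#   # {'type': 'scalar', 'value': 42, 'units': 'ms',
#   #  'improvement_direction': 'down', 'important': True}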