# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Define a type that wraps a Benchmark instance."""
from __future__ import print_function

import math
from scipy import stats

# See crbug.com/673558 for how these are estimated.
_estimated_stddev = {
    'octane': 0.015,
    'kraken': 0.019,
    'speedometer': 0.007,
    'dromaeo.domcoreattr': 0.023,
    'dromaeo.domcoremodify': 0.011,
    'smoothness.tough_webgl_cases': 0.025,
    'graphics_WebGLAquarium': 0.008,
    'page_cycler_v2.typical_25': 0.021,
}


# Get the number of samples needed to guarantee a given confidence interval,
# assuming the samples follow a normal distribution.
def _samples(b):
  # TODO: Make this an option
  # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
  p = 0.9
  e = 0.02
  if b not in _estimated_stddev:
    return 1
  d = _estimated_stddev[b]
  # Get at least 2 samples so that a standard deviation can be calculated,
  # which is needed by the t-test for the p-value.
  n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
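  # Illustrative check of the formula above (numbers assumed from
  # _estimated_stddev): with p = 0.9, stats.norm.isf(0.05) is roughly 1.645,
  # so 'kraken' (d = 0.019) needs ceil((1.645 * 0.019 / 0.02)**2) = 3 samples
  # and 'octane' (d = 0.015) needs ceil(1.52) = 2.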
  return n if n > 1 else 2


class Benchmark(object):
  """Class representing a benchmark to be run.

  Contains details of the benchmark suite, arguments to pass to the suite,
  iterations to run the benchmark suite, and so on. Note that the benchmark
  name can be different from the test suite name. For example, you may want
  to have two different benchmarks which run the same test_name with
  different arguments.
  """

  def __init__(self,
               name,
               test_name,
               test_args,
               iterations,
               rm_chroot_tmp,
               perf_args,
               suite='',
               show_all_results=False,
               retries=0,
               run_local=False):
    self.name = name
    # For telemetry, this is the benchmark name.
    self.test_name = test_name
    # For telemetry, this is the data.
    self.test_args = test_args
    self.iterations = iterations if iterations > 0 else _samples(name)
    self.perf_args = perf_args
    self.rm_chroot_tmp = rm_chroot_tmp
    self.iteration_adjusted = False
    self.suite = suite
    self.show_all_results = show_all_results
    self.retries = retries
    if self.suite == 'telemetry':
      self.show_all_results = True
    if run_local and self.suite != 'telemetry_Crosperf':
      raise RuntimeError('run_local is only supported by telemetry_Crosperf.')
    self.run_local = run_local
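

# A minimal usage sketch (hypothetical, not part of the original crosperf
# code): the argument values below are assumed for illustration only.
if __name__ == '__main__':
  demo = Benchmark(
      'octane',  # benchmark name; also the key into _estimated_stddev
      'octane',  # telemetry test name
      '',  # test_args
      0,  # no iteration count requested, so it falls back to _samples('octane')
      False,  # rm_chroot_tmp
      '',  # perf_args
      suite='telemetry_Crosperf',
      run_local=True)
  print('%s will run %d iteration(s)' % (demo.name, demo.iterations))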