# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [
    'kraken',
    'octane',
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.typical_25',
]

telemetry_toolchain_old_perf_tests = [
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25',
    'spaceport',
    'tab_switching.top_10',
]
telemetry_toolchain_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'speedometer2',
]
graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]
telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'speedometer2',
    'jetstream',
    'cros_ui_smoothness',
]
crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'video_PlaybackPerf.h264',
    'video_PlaybackPerf.vp9',
    'video_WebRtcPerf',
    'BootPerfServerCrosPerf',
    'power_Resume',
    'build_RootFilesystemSize',
]

#    'cheets_AntutuTest',
#    'cheets_PerfBootServer',
#    'cheets_CandyCrushTest',
#    'cheets_LinpackTest',
#]
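# As an illustration of how these sets are consumed (the snippet below is only
# a sketch; the exact experiment-file syntax is defined by the ExperimentFile
# parser and may differ), a user can request a whole set by using the set's
# name as the test_name of a telemetry_Crosperf benchmark:
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }
#
# ExperimentFactory.GetExperiment() below expands such names into individual
# Benchmark objects via AppendBenchmarkSet().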


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
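  # A minimal usage sketch (assumptions: the real driver lives elsewhere in
  # crosperf, the ExperimentFile import path and constructor arguments may
  # differ, and working_dir/log_dir are caller-chosen directories):
  #
  #   from experiment_file import ExperimentFile
  #
  #   factory = ExperimentFactory()
  #   with open('toolchain.exp') as f:
  #     experiment = factory.GetExperiment(ExperimentFile(f), working_dir,
  #                                        log_dir)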

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(
          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
          suite, show_all_results, retries, run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    remote = global_settings.GetField('remote')
    # This is used to remove the " and ' characters from the remote entries
    # if the user added them to the remote string.
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField('chromeos_root')
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    use_file_locks = global_settings.GetField('use_file_locks')
    locks_dir = global_settings.GetField('locks_dir')
    # If we pass a blank locks_dir to the Experiment, it will use the AFE server
    # lock mechanism.  So if the user specified use_file_locks, but did not
    # specify a locks_dir, set the locks_dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
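    # The settings below tighten, or effectively disable, cache hits:
    # 'rerun_if_failed' additionally requires that the cached run succeeded,
    # 'rerun' appends CacheConditions.FALSE so cached results are never reused,
    # and 'same_machine' / 'same_specs' require the machine or its specs to
    # match as well.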
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')
      iterations = benchmark_settings.GetField('iterations')
      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(
              Benchmark(
                  'graphics_WebGLAquarium',
                  'graphics_WebGLAquarium',
                  '',
                  iterations,
                  rm_chroot_tmp,
                  perf_args,
                  '',
                  show_all_results,
                  retries,
                  run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_toolchain_old_perf_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args, iterations,
                                rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(
              benchmarks,
              graphics_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, 'telemetry_Crosperf',
                                  show_all_results, retries, run_local)
          self.AppendBenchmarkSet(
              benchmarks,
              crosbolt_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(
              benchmark_name,
              test_name,
              test_args,
              iterations,
              rm_chroot_tmp,
              perf_args,
              suite,
              show_all_results,
              retries,
              run_local=False)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      autotest_path = label_settings.GetField('autotest_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == '':
        build = label_settings.GetField('build')
        if len(build) == 0:
          raise RuntimeError("Cannot have an empty 'build' field!")
        image, autotest_path = label_settings.GetXbuddyPath(
            build, autotest_path, board, chromeos_root, log_level)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and
          (not my_remote or
           my_remote == remote and global_settings.GetField('board') != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, autotest_path, chromeos_root,
                          board, my_remote, image_args, cache_dir, cache_only,
                          log_level, compiler, chrome_src)
      else:
        label = Label(label_name, image, autotest_path, chromeos_root, board,
                      my_remote, image_args, cache_dir, cache_only, log_level,
                      compiler, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
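    # The default_remotes file is expected to map a board name to a
    # whitespace-separated list of machines, one board per line, e.g.
    # (hypothetical hostnames):
    #
    #   lumpy: chromeos-lumpy1.example.com chromeos-lumpy2.example.com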
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError('IOError while reading file {0}'
                         .format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))