# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket
import sys

from benchmark import Benchmark
import config
from cros_utils import logger
from cros_utils import command_executer
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
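# For example (illustrative note): GetExperiment() below maps a benchmark
# whose test_name is 'all_perfv2' onto AppendBenchmarkSet(benchmarks,
# telemetry_perfv2_tests, ...), so each test in the set becomes its own
# Benchmark.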

telemetry_perfv2_tests = [
    'kraken',
    'octane',
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.typical_25',
]

telemetry_toolchain_old_perf_tests = [
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25',
    'spaceport',
    'tab_switching.top_10',
]
telemetry_toolchain_perf_tests = [
    'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2'
]
graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]
# TODO: disable rendering.desktop by default as the benchmark is
# currently in a bad state.
# page_cycler_v2.typical_25 is deprecated and the recommended replacement is
# loading.desktop@@typical (crbug.com/916340).
telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer2',
    'jetstream',
    'loading.desktop',
    # 'rendering.desktop',
]

crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'tast.video.PlaybackPerfVP91080P30FPS',
]

#    'cheets_AntutuTest',
#    'cheets_PerfBootServer',
#    'cheets_CandyCrushTest',
#    'cheets_LinpackTest',
# ]

dso_list = [
    'all',
    'chrome',
    'kallsyms',
]


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
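
  # Typical usage (hypothetical sketch; the ExperimentFile is normally
  # parsed elsewhere and passed in):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file, working_dir,
  #                                      log_dir)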

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local, cwp_dso, weight):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(
          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
          suite, show_all_results, retries, run_local, cwp_dso, weight)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    chromeos_root = global_settings.GetField('chromeos_root')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'

    skylab = global_settings.GetField('skylab')
    # Check whether skylab tool is installed correctly for skylab mode.
    if skylab and not self.CheckSkylabTool(chromeos_root, log_level):
      sys.exit(0)

    remote = global_settings.GetField('remote')
    # Strip any double or single quotes that the user may have added around
    # the remote entries.
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    download_debug = global_settings.GetField('download_debug')
    # Do not download debug symbols when perf_args is not specified.
    if not perf_args and download_debug:
      download_debug = False
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    compress_results = global_settings.GetField('compress_results')
    # Warn user that option use_file_locks is deprecated.
    use_file_locks = global_settings.GetField('use_file_locks')
    if use_file_locks:
      l = logger.GetLogger()
      l.LogWarning('Option use_file_locks is deprecated, please remove it '
                   'from your experiment settings.')
    locks_dir = global_settings.GetField('locks_dir')
    # If not specified, set the locks dir to the default locks dir in
    # file_lock_machine.
    if not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    if not os.path.exists(locks_dir):
      raise RuntimeError('Cannot access default lock directory. '
                         'Please run prodaccess or specify a local directory')
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    cwp_dso = global_settings.GetField('cwp_dso')
    if cwp_dso and cwp_dso not in dso_list:
      raise RuntimeError('The DSO specified is not supported')
    ignore_min_max = global_settings.GetField('ignore_min_max')
    dut_config = {
        'enable_aslr': global_settings.GetField('enable_aslr'),
        'intel_pstate': global_settings.GetField('intel_pstate'),
        'cooldown_time': global_settings.GetField('cooldown_time'),
        'cooldown_temp': global_settings.GetField('cooldown_temp'),
        'governor': global_settings.GetField('governor'),
        'cpu_usage': global_settings.GetField('cpu_usage'),
        'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
        'turbostat': global_settings.GetField('turbostat'),
        'top_interval': global_settings.GetField('top_interval'),
    }

    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')

    # Check for duplicate benchmark names.
    benchmark_names = {}
    # In cwp_dso mode, all benchmarks must run the same number of iterations.
    cwp_dso_iterations = 0

    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')

      # Rename the benchmark if 'story-filter' or 'story-tag-filter' is
      # specified in test_args. At most one such filter is allowed per run.
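      # For example (illustrative values only): a test_args string containing
      # '--story-filter=foo' renames benchmark 'speedometer2' to
      # 'speedometer2@@foo' below.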
      story_count = 0
      for arg in test_args.split():
        if '--story-filter=' in arg or '--story-tag-filter=' in arg:
          story_count += 1
          if story_count > 1:
            raise RuntimeError('Only one story or story-tag filter allowed in '
                               'a single benchmark run')
          # Append the filter value to the benchmark name.
          benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])

      # Check for duplicated benchmark names after renaming.
      if benchmark_name not in benchmark_names:
        benchmark_names[benchmark_name] = True
      else:
        raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)

      iterations = benchmark_settings.GetField('iterations')
      if cwp_dso:
        if cwp_dso_iterations not in (0, iterations):
          raise RuntimeError('Iterations of each benchmark run are not the '
                             'same')
        cwp_dso_iterations = iterations

      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')
      weight = benchmark_settings.GetField('weight')
      if weight:
        if not cwp_dso:
          raise RuntimeError('Weight can only be set when DSO specified')
        if suite != 'telemetry_Crosperf':
          raise RuntimeError('CWP approximation weight only works with '
                             'telemetry_Crosperf suite')
        if run_local:
          raise RuntimeError('run_local must be set to False to use CWP '
                             'approximation')
        if weight < 0:
          raise RuntimeError('Weight should be a float >=0')
      elif cwp_dso:
        raise RuntimeError('With DSO specified, each benchmark should have a '
                           'weight')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local, cwp_dso,
                                  weight)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(
              benchmarks, telemetry_crosbolt_perf_tests, test_args, iterations,
              rm_chroot_tmp, perf_args, 'telemetry_Crosperf', show_all_results,
              retries, run_local, cwp_dso, weight)
          self.AppendBenchmarkSet(
              benchmarks,
              crosbolt_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
          # Add non-telemetry toolchain-perf benchmarks:

          # Tast test platform.ReportDiskUsage for image size.
          benchmarks.append(
              Benchmark(
                  'platform.ReportDiskUsage',
                  'platform.ReportDiskUsage',
                  '',
                  1,  # This is not a performance benchmark, only run once.
                  rm_chroot_tmp,
                  '',
                  'tast',  # Specify the suite to be 'tast'
                  show_all_results,
                  retries))

          # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
          # it gets fixed.
          #
          # benchmarks.append(
          #     Benchmark(
          #         'graphics_WebGLAquarium',
          #         'graphics_WebGLAquarium',
          #         '',
          #         iterations,
          #         rm_chroot_tmp,
          #         perf_args,
          #         'crosperf_Wrapper',  # Use client wrapper in Autotest
          #         show_all_results,
          #         retries,
          #         run_local=False,
          #         cwp_dso=cwp_dso,
          #         weight=weight))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(
              benchmarks, telemetry_toolchain_old_perf_tests, test_args,
              iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
              retries, run_local, cwp_dso, weight)
        else:
          benchmark = Benchmark(benchmark_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local, cwp_dso,
                                weight)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(
              benchmarks,
              graphics_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(
              benchmark_name,
              test_name,
              test_args,
              iterations,
              rm_chroot_tmp,
              perf_args,
              suite,
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      build = label_settings.GetField('build')
      autotest_path = label_settings.GetField('autotest_path')
      debug_path = label_settings.GetField('debug_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote

      if image:
        if skylab:
          raise RuntimeError('In skylab mode, local image should not be used.')
        if build:
          raise RuntimeError('Image path and build are provided at the same '
                             'time, please use only one of them.')
      else:
        if not build:
          raise RuntimeError("Can not have empty 'build' field!")
        image, autotest_path, debug_path = label_settings.GetXbuddyPath(
            build, autotest_path, debug_path, board, chromeos_root, log_level,
            download_debug)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and not my_remote and
          not skylab):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, build, image, autotest_path, debug_path,
                          chromeos_root, board, my_remote, image_args,
                          cache_dir, cache_only, log_level, compiler, skylab,
                          chrome_src)
      else:
        label = Label(label_name, build, image, autotest_path, debug_path,
                      chromeos_root, board, my_remote, image_args, cache_dir,
                      cache_only, log_level, compiler, skylab, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    if skylab:
      for remote in all_remote:
        self.CheckRemotesInSkylab(remote)
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, compress_results, locks_dir, cwp_dso,
                            ignore_min_max, skylab, dut_config)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
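    # Each line of the default_remotes file is expected to look like
    # '<board>: <remote1> <remote2> ...' (format inferred from the parsing
    # below rather than from any documentation).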
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: re-raise instead of raising a different exception.
      raise RuntimeError(
          'IOError while reading file {0}'.format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))

  def CheckRemotesInSkylab(self, remote):
    # TODO: (AI:zhizhouy) Need to check whether a remote is a local or a lab
    # machine. If it is not a lab machine, raise an error.
    pass

  def CheckSkylabTool(self, chromeos_root, log_level):
    SKYLAB_PATH = '/usr/local/bin/skylab'
    if os.path.exists(SKYLAB_PATH):
      return True
    l = logger.GetLogger()
    l.LogOutput('Skylab tool not installed, trying to install it.')
    ce = command_executer.GetCommandExecuter(l, log_level=log_level)
    setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
                                   'setup_lab_tools')
    cmd = '%s' % setup_lab_tools
    status = ce.RunCommand(cmd)
    if status != 0:
      raise RuntimeError('Skylab tool not installed correctly, please try to '
                         'manually install it from %s' % setup_lab_tools)
    l.LogOutput('Skylab is installed at %s, please login before first use. '
                'Login by running "skylab login" and follow instructions.' %
                SKYLAB_PATH)
    return False
490