# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket
import sys

from benchmark import Benchmark
import config
from cros_utils import logger
from cros_utils import command_executer
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [
    'kraken',
    'octane',
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.typical_25',
]

telemetry_toolchain_old_perf_tests = [
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25',
    'spaceport',
    'tab_switching.top_10',
]
telemetry_toolchain_perf_tests = [
    'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2'
]
graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]
# TODO: rendering.desktop is disabled by default as the benchmark is
# currently in a bad state.
# page_cycler_v2.typical_25 is deprecated and the recommended replacement is
# loading.desktop@@typical (crbug.com/916340).
telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer2',
    'jetstream',
    'loading.desktop',
    # 'rendering.desktop',
]

crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'tast.video.PlaybackPerfVP91080P30FPS',
]

# 'cheets_AntutuTest',
# 'cheets_PerfBootServer',
# 'cheets_CandyCrushTest',
# 'cheets_LinpackTest',
# ]

dso_list = [
    'all',
    'chrome',
    'kallsyms',
]


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
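
  Illustrative usage sketch (variable names here are examples only; see the
  crosperf driver for actual usage):

    factory = ExperimentFactory()
    experiment = factory.GetExperiment(experiment_file, working_dir, log_dir)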
  """

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local, cwp_dso, weight):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(
          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
          suite, show_all_results, retries, run_local, cwp_dso, weight)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    chromeos_root = global_settings.GetField('chromeos_root')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'

    skylab = global_settings.GetField('skylab')
    # Check whether the skylab tool is installed correctly for skylab mode.
    if skylab and not self.CheckSkylabTool(chromeos_root, log_level):
      sys.exit(0)

    remote = global_settings.GetField('remote')
    # Strip any stray " or ' characters that the user may have added around
    # entries in the remote string.
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    download_debug = global_settings.GetField('download_debug')
    # Do not download debug symbols when perf_args is not specified.
    if not perf_args and download_debug:
      download_debug = False
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    # Warn the user that the use_file_locks option is deprecated.
    use_file_locks = global_settings.GetField('use_file_locks')
    if use_file_locks:
      l = logger.GetLogger()
      l.LogWarning('Option use_file_locks is deprecated, please remove it '
                   'from your experiment settings.')
    locks_dir = global_settings.GetField('locks_dir')
    # If not specified, set the locks dir to the default locks dir in
    # file_lock_machine.
    if not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    if not os.path.exists(locks_dir):
      raise RuntimeError('Cannot access default lock directory. '
                         'Please run prodaccess or specify a local directory')
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    cwp_dso = global_settings.GetField('cwp_dso')
    if cwp_dso and cwp_dso not in dso_list:
      raise RuntimeError('The DSO specified is not supported')
    ignore_min_max = global_settings.GetField('ignore_min_max')
    dut_config = {
        'enable_aslr': global_settings.GetField('enable_aslr'),
        'intel_pstate': global_settings.GetField('intel_pstate'),
        'cooldown_time': global_settings.GetField('cooldown_time'),
        'cooldown_temp': global_settings.GetField('cooldown_temp'),
        'governor': global_settings.GetField('governor'),
        'cpu_usage': global_settings.GetField('cpu_usage'),
        'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
        'turbostat': global_settings.GetField('turbostat'),
        'top_interval': global_settings.GetField('top_interval'),
    }

    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')

    # Used to check for duplicated benchmark names.
    benchmark_names = {}
    # In cwp_dso mode, all benchmarks must use the same number of iterations.
    cwp_dso_iterations = 0

    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')

      # Rename the benchmark if 'story-filter' or 'story-tag-filter' is
      # specified in test_args. Make sure these two tags only appear once.
      story_count = 0
      for arg in test_args.split():
        if '--story-filter=' in arg or '--story-tag-filter=' in arg:
          story_count += 1
          if story_count > 1:
            raise RuntimeError('Only one story or story-tag filter allowed in '
                               'a single benchmark run')
          # Rename the benchmark with the 'story'-option value as a suffix.
          benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])

      # Check for a duplicated benchmark name after renaming.
      if benchmark_name not in benchmark_names:
        benchmark_names[benchmark_name] = True
      else:
        raise SyntaxError("Duplicate benchmark name: '%s'."
                          % benchmark_name)

      iterations = benchmark_settings.GetField('iterations')
      if cwp_dso:
        if cwp_dso_iterations != 0 and iterations != cwp_dso_iterations:
          raise RuntimeError('Iterations of each benchmark run are not the '
                             'same')
        cwp_dso_iterations = iterations

      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')
      weight = benchmark_settings.GetField('weight')
      if weight:
        if not cwp_dso:
          raise RuntimeError('Weight can only be set when DSO specified')
        if suite != 'telemetry_Crosperf':
          raise RuntimeError('CWP approximation weight only works with '
                             'telemetry_Crosperf suite')
        if run_local:
          raise RuntimeError('run_local must be set to False to use CWP '
                             'approximation')
        if weight < 0:
          raise RuntimeError('Weight should be a float >= 0')
      elif cwp_dso:
        raise RuntimeError('With DSO specified, each benchmark should have a '
                           'weight')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local, cwp_dso,
                                  weight)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(
              benchmarks, telemetry_crosbolt_perf_tests, test_args, iterations,
              rm_chroot_tmp, perf_args, 'telemetry_Crosperf', show_all_results,
              retries, run_local, cwp_dso, weight)
          self.AppendBenchmarkSet(
              benchmarks,
              crosbolt_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(
              Benchmark(
                  'graphics_WebGLAquarium',
                  'graphics_WebGLAquarium',
                  '',
                  iterations,
                  rm_chroot_tmp,
                  perf_args,
                  'crosperf_Wrapper',  # Use client wrapper in Autotest
                  show_all_results,
                  retries,
                  run_local=False,
                  cwp_dso=cwp_dso,
                  weight=weight))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(
              benchmarks, telemetry_toolchain_old_perf_tests, test_args,
              iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
              retries, run_local, cwp_dso, weight)
        else:
          benchmark = Benchmark(benchmark_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local, cwp_dso,
                                weight)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(
              benchmarks,
              graphics_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(
              benchmark_name,
              test_name,
              test_args,
              iterations,
              rm_chroot_tmp,
              perf_args,
              suite,
              show_all_results,
              retries,
              run_local=False,
              cwp_dso=cwp_dso,
              weight=weight)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      build = label_settings.GetField('build')
      autotest_path = label_settings.GetField('autotest_path')
      debug_path = label_settings.GetField('debug_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote

      if image:
        if skylab:
          raise RuntimeError('In skylab mode, local image should not be used.')
        if build:
          raise RuntimeError('Image path and build are provided at the same '
                             'time, please use only one of them.')
      else:
        if not build:
          raise RuntimeError("Can not have empty 'build' field!")
        image, autotest_path, debug_path = label_settings.GetXbuddyPath(
            build, autotest_path, debug_path, board, chromeos_root, log_level,
            download_debug)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and not my_remote and
          not skylab):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, build, image, autotest_path, debug_path,
                          chromeos_root, board, my_remote, image_args,
                          cache_dir, cache_only, log_level, compiler, skylab,
                          chrome_src)
      else:
        label = Label(label_name, build, image, autotest_path, debug_path,
                      chromeos_root, board, my_remote, image_args, cache_dir,
                      cache_only, log_level, compiler, skylab, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    if skylab:
      for remote in all_remote:
        self.CheckRemotesInSkylab(remote)
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir, cwp_dso, ignore_min_max,
                            skylab, dut_config)

    return experiment

  def GetDefaultRemotes(self, board):
    """Look up the default remotes for |board| in the default_remotes file."""
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError(
          'IOError while reading file {0}'.format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))

  def CheckRemotesInSkylab(self, remote):
    # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
    # machine. If not a lab machine, raise an error.
    pass

  def CheckSkylabTool(self, chromeos_root, log_level):
    """Check that the skylab tool is installed; try to install it if not."""
    SKYLAB_PATH = '/usr/local/bin/skylab'
    if os.path.exists(SKYLAB_PATH):
      return True
    l = logger.GetLogger()
    l.LogOutput('Skylab tool not installed, trying to install it.')
    ce = command_executer.GetCommandExecuter(l, log_level=log_level)
    setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
                                   'setup_lab_tools')
    cmd = '%s' % setup_lab_tools
    status = ce.RunCommand(cmd)
    if status != 0:
      raise RuntimeError('Skylab tool not installed correctly, please try to '
                         'manually install it from %s' % setup_lab_tools)
    l.LogOutput('Skylab is installed at %s, please login before first use. '
                'Login by running "skylab login" and follow instructions.' %
                SKYLAB_PATH)
    return False