# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket
import sys

from benchmark import Benchmark
import config
from cros_utils import logger
from cros_utils import command_executer
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [
    'kraken',
    'octane',
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.typical_25',
]

telemetry_toolchain_old_perf_tests = [
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25',
    'spaceport',
    'tab_switching.top_10',
]
telemetry_toolchain_perf_tests = [
    'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2'
]
graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]
# TODO: Disable rendering.desktop by default as the benchmark is
# currently in a bad state.
# page_cycler_v2.typical_25 is deprecated and the recommended replacement is
# loading.desktop@@typical (crbug.com/916340).
telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer2',
    'jetstream',
    'loading.desktop',
    # 'rendering.desktop',
]

crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'tast.video.PlaybackPerfVP91080P30FPS',
]

# 'cheets_AntutuTest',
# 'cheets_PerfBootServer',
# 'cheets_CandyCrushTest',
# 'cheets_LinpackTest',
# ]

dso_list = [
    'all',
    'chrome',
    'kallsyms',
]


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
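
  Typical usage (an illustrative sketch, not part of this module: it assumes
  an ExperimentFile-like object that provides GetGlobalSettings() and
  GetSettings(), and the directory paths are made-up examples):

    factory = ExperimentFactory()
    experiment = factory.GetExperiment(experiment_file,
                                       working_directory='/tmp/crosperf',
                                       log_dir='/tmp/crosperf/logs')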
100 """ 101 102 def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args, 103 iterations, rm_chroot_tmp, perf_args, suite, 104 show_all_results, retries, run_local, cwp_dso, 105 weight): 106 """Add all the tests in a set to the benchmarks list.""" 107 for test_name in benchmark_list: 108 telemetry_benchmark = Benchmark(test_name, test_name, test_args, 109 iterations, rm_chroot_tmp, perf_args, 110 suite, show_all_results, retries, 111 run_local, cwp_dso, weight) 112 benchmarks.append(telemetry_benchmark) 113 114 def GetExperiment(self, experiment_file, working_directory, log_dir): 115 """Construct an experiment from an experiment file.""" 116 global_settings = experiment_file.GetGlobalSettings() 117 experiment_name = global_settings.GetField('name') 118 board = global_settings.GetField('board') 119 chromeos_root = global_settings.GetField('chromeos_root') 120 log_level = global_settings.GetField('logging_level') 121 if log_level not in ('quiet', 'average', 'verbose'): 122 log_level = 'verbose' 123 124 crosfleet = global_settings.GetField('crosfleet') 125 no_lock = bool(global_settings.GetField('no_lock')) 126 # Check whether crosfleet tool is installed correctly for crosfleet mode. 127 if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level): 128 sys.exit(0) 129 130 remote = global_settings.GetField('remote') 131 # This is used to remove the ",' from the remote if user 132 # add them to the remote string. 133 new_remote = [] 134 if remote: 135 for i in remote: 136 c = re.sub('["\']', '', i) 137 new_remote.append(c) 138 remote = new_remote 139 rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp') 140 perf_args = global_settings.GetField('perf_args') 141 download_debug = global_settings.GetField('download_debug') 142 # Do not download debug symbols when perf_args is not specified. 143 if not perf_args and download_debug: 144 download_debug = False 145 acquire_timeout = global_settings.GetField('acquire_timeout') 146 cache_dir = global_settings.GetField('cache_dir') 147 cache_only = global_settings.GetField('cache_only') 148 config.AddConfig('no_email', global_settings.GetField('no_email')) 149 share_cache = global_settings.GetField('share_cache') 150 results_dir = global_settings.GetField('results_dir') 151 compress_results = global_settings.GetField('compress_results') 152 # Warn user that option use_file_locks is deprecated. 153 use_file_locks = global_settings.GetField('use_file_locks') 154 if use_file_locks: 155 l = logger.GetLogger() 156 l.LogWarning('Option use_file_locks is deprecated, please remove it ' 157 'from your experiment settings.') 158 locks_dir = global_settings.GetField('locks_dir') 159 # If not specified, set the locks dir to the default locks dir in 160 # file_lock_machine. 161 if not locks_dir: 162 locks_dir = file_lock_machine.Machine.LOCKS_DIR 163 if not os.path.exists(locks_dir): 164 raise RuntimeError('Cannot access default lock directory. 
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    cwp_dso = global_settings.GetField('cwp_dso')
    if cwp_dso and cwp_dso not in dso_list:
      raise RuntimeError('The DSO specified is not supported')
    ignore_min_max = global_settings.GetField('ignore_min_max')
    dut_config = {
        'enable_aslr': global_settings.GetField('enable_aslr'),
        'intel_pstate': global_settings.GetField('intel_pstate'),
        'cooldown_time': global_settings.GetField('cooldown_time'),
        'cooldown_temp': global_settings.GetField('cooldown_temp'),
        'governor': global_settings.GetField('governor'),
        'cpu_usage': global_settings.GetField('cpu_usage'),
        'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
        'turbostat': global_settings.GetField('turbostat'),
        'top_interval': global_settings.GetField('top_interval'),
    }

    # Default cache hit conditions: a cache file must exist, and the image
    # checksum stored in the cache must match the computed checksum of the
    # image.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')

    # Track benchmark names in order to detect duplicates.
    benchmark_names = {}
    # In cwp_dso mode, all benchmarks must use the same number of iterations.
    cwp_dso_iterations = 0

    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')

      # Rename the benchmark if 'story-filter' or 'story-tag-filter' is
      # specified in test_args, and make sure these two flags appear at most
      # once.
      story_count = 0
      for arg in test_args.split():
        if '--story-filter=' in arg or '--story-tag-filter=' in arg:
          story_count += 1
          if story_count > 1:
            raise RuntimeError('Only one story or story-tag filter allowed in '
                               'a single benchmark run')
          # Rename the benchmark with an extension of the 'story' option.
          benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])

      # Check for duplicated benchmark names after renaming.
      if benchmark_name not in benchmark_names:
        benchmark_names[benchmark_name] = True
      else:
        raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)

      iterations = benchmark_settings.GetField('iterations')
      if cwp_dso:
        if cwp_dso_iterations not in (0, iterations):
          raise RuntimeError('Iterations of each benchmark run are not the '
                             'same')
        cwp_dso_iterations = iterations

      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')
      weight = benchmark_settings.GetField('weight')
      if weight:
        if not cwp_dso:
          raise RuntimeError('Weight can only be set when DSO specified')
        if suite != 'telemetry_Crosperf':
          raise RuntimeError('CWP approximation weight only works with '
                             'telemetry_Crosperf suite')
        if run_local:
          raise RuntimeError('run_local must be set to False to use CWP '
                             'approximation')
        if weight < 0:
          raise RuntimeError('Weight should be a float >=0')
      elif cwp_dso:
        raise RuntimeError('With DSO specified, each benchmark should have a '
                           'weight')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, 'telemetry_Crosperf',
                                  show_all_results, retries, run_local,
                                  cwp_dso, weight)
          self.AppendBenchmarkSet(benchmarks,
                                  crosbolt_perf_tests,
                                  '',
                                  iterations,
                                  rm_chroot_tmp,
                                  perf_args,
                                  '',
                                  show_all_results,
                                  retries,
                                  run_local=False,
                                  cwp_dso=cwp_dso,
                                  weight=weight)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
          # Add non-telemetry toolchain-perf benchmarks:

          # Tast test platform.ReportDiskUsage for image size.
          benchmarks.append(
              Benchmark(
                  'platform.ReportDiskUsage',
                  'platform.ReportDiskUsage',
                  '',
                  1,  # This is not a performance benchmark, only run once.
                  rm_chroot_tmp,
                  '',
                  'tast',  # Specify the suite to be 'tast'.
                  show_all_results,
                  retries))

          # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
          # it gets fixed.
          #
          # benchmarks.append(
          #     Benchmark(
          #         'graphics_WebGLAquarium',
          #         'graphics_WebGLAquarium',
          #         '',
          #         iterations,
          #         rm_chroot_tmp,
          #         perf_args,
          #         'crosperf_Wrapper',  # Use client wrapper in Autotest
          #         show_all_results,
          #         retries,
          #         run_local=False,
          #         cwp_dso=cwp_dso,
          #         weight=weight))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_toolchain_old_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local, cwp_dso, weight)
        else:
          benchmark = Benchmark(benchmark_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local, cwp_dso,
                                weight)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(benchmarks,
                                  graphics_perf_tests,
                                  '',
                                  iterations,
                                  rm_chroot_tmp,
                                  perf_args,
                                  '',
                                  show_all_results,
                                  retries,
                                  run_local=False,
                                  cwp_dso=cwp_dso,
                                  weight=weight)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(benchmark_name,
                                test_name,
                                test_args,
                                iterations,
                                rm_chroot_tmp,
                                perf_args,
                                suite,
                                show_all_results,
                                retries,
                                run_local=False,
                                cwp_dso=cwp_dso,
                                weight=weight)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      build = label_settings.GetField('build')
      autotest_path = label_settings.GetField('autotest_path')
      debug_path = label_settings.GetField('debug_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote

      if image:
        if crosfleet:
          raise RuntimeError(
              'In crosfleet mode, local image should not be used.')
        if build:
          raise RuntimeError('Image path and build are provided at the same '
                             'time, please use only one of them.')
      else:
        if not build:
          raise RuntimeError("Can not have empty 'build' field!")
        image, autotest_path, debug_path = label_settings.GetXbuddyPath(
            build, autotest_path, debug_path, board, chromeos_root, log_level,
            download_debug)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and not my_remote
          and not crosfleet):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
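        # In test mode, a MockLabel (from label.py) stands in for a real
        # Label.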
        label = MockLabel(label_name, build, image, autotest_path, debug_path,
                          chromeos_root, board, my_remote, image_args,
                          cache_dir, cache_only, log_level, compiler,
                          crosfleet, chrome_src)
      else:
        label = Label(label_name, build, image, autotest_path, debug_path,
                      chromeos_root, board, my_remote, image_args, cache_dir,
                      cache_only, log_level, compiler, crosfleet, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    if crosfleet:
      for remote in all_remote:
        self.CheckRemotesInCrosfleet(remote)
    experiment = Experiment(experiment_name,
                            all_remote,
                            working_directory,
                            chromeos_root,
                            cache_conditions,
                            labels,
                            benchmarks,
                            experiment_file.Canonicalize(),
                            email,
                            acquire_timeout,
                            log_dir,
                            log_level,
                            share_cache,
                            results_dir,
                            compress_results,
                            locks_dir,
                            cwp_dso,
                            ignore_min_max,
                            crosfleet,
                            dut_config,
                            no_lock=no_lock)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        'default_remotes')
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError(
          'IOError while reading file {0}'.format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))

  def CheckRemotesInCrosfleet(self, remote):
    # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
    # machine. If not a lab machine, raise an error.
    pass

  def CheckCrosfleetTool(self, chromeos_root, log_level):
    CROSFLEET_PATH = 'crosfleet'
    if os.path.exists(CROSFLEET_PATH):
      return True
    l = logger.GetLogger()
    l.LogOutput('Crosfleet tool not installed, trying to install it.')
    ce = command_executer.GetCommandExecuter(l, log_level=log_level)
    setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
                                   'lab-tools', 'setup_lab_tools')
    cmd = '%s' % setup_lab_tools
    status = ce.RunCommand(cmd)
    if status != 0:
      raise RuntimeError(
          'Crosfleet tool not installed correctly, please try to '
          'manually install it from %s' % setup_lab_tools)
    l.LogOutput('Crosfleet is installed at %s, please log in before first '
                'use. Log in by running "crosfleet login" and follow the '
                'instructions.' % CROSFLEET_PATH)
    return False