#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...],
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>

The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
96""" 97 98from collections import OrderedDict 99import json 100import logging 101import math 102import optparse 103import os 104import re 105import subprocess 106import sys 107 108from testrunner.local import commands 109from testrunner.local import utils 110 111ARCH_GUESS = utils.DefaultArch() 112SUPPORTED_ARCHS = ["arm", 113 "ia32", 114 "mips", 115 "mipsel", 116 "x64", 117 "arm64"] 118 119GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$") 120RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$") 121RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$") 122TOOLS_BASE = os.path.abspath(os.path.dirname(__file__)) 123 124 125def LoadAndroidBuildTools(path): # pragma: no cover 126 assert os.path.exists(path) 127 sys.path.insert(0, path) 128 129 import devil_chromium 130 from devil.android import device_errors # pylint: disable=import-error 131 from devil.android import device_utils # pylint: disable=import-error 132 from devil.android.sdk import adb_wrapper # pylint: disable=import-error 133 from devil.android.perf import cache_control # pylint: disable=import-error 134 from devil.android.perf import perf_control # pylint: disable=import-error 135 global adb_wrapper 136 global cache_control 137 global device_errors 138 global device_utils 139 global perf_control 140 141 devil_chromium.Initialize() 142 143 144def GeometricMean(values): 145 """Returns the geometric mean of a list of values. 146 147 The mean is calculated using log to avoid overflow. 
148 """ 149 values = map(float, values) 150 return str(math.exp(sum(map(math.log, values)) / len(values))) 151 152 153class Results(object): 154 """Place holder for result traces.""" 155 def __init__(self, traces=None, errors=None): 156 self.traces = traces or [] 157 self.errors = errors or [] 158 159 def ToDict(self): 160 return {"traces": self.traces, "errors": self.errors} 161 162 def WriteToFile(self, file_name): 163 with open(file_name, "w") as f: 164 f.write(json.dumps(self.ToDict())) 165 166 def __add__(self, other): 167 self.traces += other.traces 168 self.errors += other.errors 169 return self 170 171 def __str__(self): # pragma: no cover 172 return str(self.ToDict()) 173 174 175class Measurement(object): 176 """Represents a series of results of one trace. 177 178 The results are from repetitive runs of the same executable. They are 179 gathered by repeated calls to ConsumeOutput. 180 """ 181 def __init__(self, graphs, units, results_regexp, stddev_regexp): 182 self.name = graphs[-1] 183 self.graphs = graphs 184 self.units = units 185 self.results_regexp = results_regexp 186 self.stddev_regexp = stddev_regexp 187 self.results = [] 188 self.errors = [] 189 self.stddev = "" 190 191 def ConsumeOutput(self, stdout): 192 try: 193 result = re.search(self.results_regexp, stdout, re.M).group(1) 194 self.results.append(str(float(result))) 195 except ValueError: 196 self.errors.append("Regexp \"%s\" returned a non-numeric for test %s." 197 % (self.results_regexp, self.name)) 198 except: 199 self.errors.append("Regexp \"%s\" didn't match for test %s." 200 % (self.results_regexp, self.name)) 201 202 try: 203 if self.stddev_regexp and self.stddev: 204 self.errors.append("Test %s should only run once since a stddev " 205 "is provided by the test." % self.name) 206 if self.stddev_regexp: 207 self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1) 208 except: 209 self.errors.append("Regexp \"%s\" didn't match for test %s." 
210 % (self.stddev_regexp, self.name)) 211 212 def GetResults(self): 213 return Results([{ 214 "graphs": self.graphs, 215 "units": self.units, 216 "results": self.results, 217 "stddev": self.stddev, 218 }], self.errors) 219 220 221class NullMeasurement(object): 222 """Null object to avoid having extra logic for configurations that didn't 223 run like running without patch on trybots. 224 """ 225 def ConsumeOutput(self, stdout): 226 pass 227 228 def GetResults(self): 229 return Results() 230 231 232def Unzip(iterable): 233 left = [] 234 right = [] 235 for l, r in iterable: 236 left.append(l) 237 right.append(r) 238 return lambda: iter(left), lambda: iter(right) 239 240 241def AccumulateResults( 242 graph_names, trace_configs, iter_output, trybot, no_patch, calc_total): 243 """Iterates over the output of multiple benchmark reruns and accumulates 244 results for a configured list of traces. 245 246 Args: 247 graph_names: List of names that configure the base path of the traces. E.g. 248 ['v8', 'Octane']. 249 trace_configs: List of "TraceConfig" instances. Each trace config defines 250 how to perform a measurement. 251 iter_output: Iterator over the standard output of each test run. 252 trybot: Indicates that this is run in trybot mode, i.e. run twice, once 253 with once without patch. 254 no_patch: Indicates weather this is a trybot run without patch. 255 calc_total: Boolean flag to speficy the calculation of a summary trace. 256 Returns: A "Results" object. 257 """ 258 measurements = [ 259 trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs] 260 for stdout in iter_output(): 261 for measurement in measurements: 262 measurement.ConsumeOutput(stdout) 263 264 res = reduce(lambda r, m: r + m.GetResults(), measurements, Results()) 265 266 if not res.traces or not calc_total: 267 return res 268 269 # Assume all traces have the same structure. 
270 if len(set(map(lambda t: len(t["results"]), res.traces))) != 1: 271 res.errors.append("Not all traces have the same number of results.") 272 return res 273 274 # Calculate the geometric means for all traces. Above we made sure that 275 # there is at least one trace and that the number of results is the same 276 # for each trace. 277 n_results = len(res.traces[0]["results"]) 278 total_results = [GeometricMean(t["results"][i] for t in res.traces) 279 for i in range(0, n_results)] 280 res.traces.append({ 281 "graphs": graph_names + ["Total"], 282 "units": res.traces[0]["units"], 283 "results": total_results, 284 "stddev": "", 285 }) 286 return res 287 288 289def AccumulateGenericResults(graph_names, suite_units, iter_output): 290 """Iterates over the output of multiple benchmark reruns and accumulates 291 generic results. 292 293 Args: 294 graph_names: List of names that configure the base path of the traces. E.g. 295 ['v8', 'Octane']. 296 suite_units: Measurement default units as defined by the benchmark suite. 297 iter_output: Iterator over the standard output of each test run. 298 Returns: A "Results" object. 299 """ 300 traces = OrderedDict() 301 for stdout in iter_output(): 302 if stdout is None: 303 # The None value is used as a null object to simplify logic. 
304 continue 305 for line in stdout.strip().splitlines(): 306 match = GENERIC_RESULTS_RE.match(line) 307 if match: 308 stddev = "" 309 graph = match.group(1) 310 trace = match.group(2) 311 body = match.group(3) 312 units = match.group(4) 313 match_stddev = RESULT_STDDEV_RE.match(body) 314 match_list = RESULT_LIST_RE.match(body) 315 errors = [] 316 if match_stddev: 317 result, stddev = map(str.strip, match_stddev.group(1).split(",")) 318 results = [result] 319 elif match_list: 320 results = map(str.strip, match_list.group(1).split(",")) 321 else: 322 results = [body.strip()] 323 324 try: 325 results = map(lambda r: str(float(r)), results) 326 except ValueError: 327 results = [] 328 errors = ["Found non-numeric in %s" % 329 "/".join(graph_names + [graph, trace])] 330 331 trace_result = traces.setdefault(trace, Results([{ 332 "graphs": graph_names + [graph, trace], 333 "units": (units or suite_units).strip(), 334 "results": [], 335 "stddev": "", 336 }], errors)) 337 trace_result.traces[0]["results"].extend(results) 338 trace_result.traces[0]["stddev"] = stddev 339 340 return reduce(lambda r, t: r + t, traces.itervalues(), Results()) 341 342 343class Node(object): 344 """Represents a node in the suite tree structure.""" 345 def __init__(self, *args): 346 self._children = [] 347 348 def AppendChild(self, child): 349 self._children.append(child) 350 351 352class DefaultSentinel(Node): 353 """Fake parent node with all default values.""" 354 def __init__(self, binary = "d8"): 355 super(DefaultSentinel, self).__init__() 356 self.binary = binary 357 self.run_count = 10 358 self.timeout = 60 359 self.path = [] 360 self.graphs = [] 361 self.flags = [] 362 self.test_flags = [] 363 self.resources = [] 364 self.results_regexp = None 365 self.stddev_regexp = None 366 self.units = "score" 367 self.total = False 368 369 370class GraphConfig(Node): 371 """Represents a suite definition. 372 373 Can either be a leaf or an inner node that provides default values. 
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    # Fail fast on malformed suite configurations.
    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values (concatenated with the parent's values).
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults). Arch-specific keys such as
    # "run_count_x64" override the generic key for that arch only.
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    """Returns a Measurement for this trace, or a NullMeasurement for a
    no-patch configuration outside of trybot mode."""
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    # Test flags are separated from the d8 flags by '--'.
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    """Builds the full command line (binary plus flags) for one run."""
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
      AccumulateResults(
          self.graphs,
          self._children,
          iter_output=stdout_with_patch,
          trybot=trybot,
          no_patch=False,
          calc_total=self.total,
      ),
      AccumulateResults(
          self.graphs,
          self._children,
          iter_output=stdout_no_patch,
          trybot=trybot,
          no_patch=True,
          calc_total=self.total,
      ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
      measurement_with_patch.GetResults(),
      measurement_no_patch.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
      AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
      AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  """Base class abstracting where the benchmark binaries are executed."""
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    # Android is selected by the presence of the build-tools path.
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    # Both prioritization and affinitization are implemented via schedtool.
    if options.prioritize or options.affinitize != None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core where the process may run on.
        # First bit corresponds to CPU 0.
        # Since the 'affinitize' parameter is a core number, we need to map
        # to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    # Nothing to prepare on desktop.
    pass

  def PostExecution(self):
    # Nothing to clean up on desktop.
    pass

  def PreTests(self, node, path):
    # Runnables define the working directory for their benchmark files.
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    """Executes one benchmark run locally and returns its stdout."""
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    command = self.command_prefix + runnable.GetCommand(shell_dir,
                                                        self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      # Summarize profiler ticks with the platform's tick processor script.
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover
  # Staging directory for binaries and benchmark files on the device.
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    # Pin the device to high performance to reduce measurement noise.
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    # Restore default performance mode and remove all staged files.
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    """Pushes one file to the device, at most once per execution."""
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob_ignition.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    """Stages the binaries and benchmark files of one node on the device."""
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    """Executes one benchmark run on the device and returns its stdout."""
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    # Drop OS caches so repeated runs start from a comparable state.
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout


class CustomMachineConfiguration:
  """Context manager that optionally disables ASLR and pins the CPU
  governor for the duration of a benchmark run, restoring the previous
  settings on exit. Writing the settings requires root/sudo privileges.
  """
  def __init__(self, disable_aslr = False, governor = None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    # Restore only what was actually changed in __enter__.
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception as e:
      print "Failed to get current ASLR settings."
      raise e

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception as e:
      print "Failed to update ASLR to %s." % value
      print "Are we running under sudo?"
      raise e

    # Read back to verify the write actually took effect.
    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    """Returns the range of present CPU indices, e.g. [0..7]."""
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          # Single-core machines report just one index, not a range.
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception as e:
      print "Failed to retrieve number of CPUs."
      raise e

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception as e:
      print "Failed to get the current CPU governor."
      print "Is the CPU governor disabled? Check BIOS."
      raise e

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception as e:
      print "Failed to change CPU governor to %s." % value
      print "Are we running under sudo?"
      raise e

    # Read back to verify the write actually took effect.
    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value )

# TODO: Implement results_processor.
def Main(args):
  """Command-line entry point of the perf runner.

  Args:
    args: Command-line arguments (excluding the program name). Flags are
        parsed with optparse; the remaining positional arguments are
        paths to suite json configuration files.

  Returns:
    0 if every suite ran without recorded errors, 1 otherwise (including
    usage errors such as missing files or unknown architectures).
  """
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                         "architecture-specific build dir. "
                         "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                         "process. Requires Linux, schedtool, and sudo "
                         "privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                         "For example: "
                         "--affinitize=0 will run the benchmark process on "
                         "core 0. "
                         "--affinitize=3 will run the benchmark process on "
                         "core 3. "
                         "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                         "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                         "duration of the benchmarked process. Typical "
                         "options: 'powersave' for more stable results, or "
                         "'performance' for shorter completion time of suite, "
                         "with potentially more noise in results.")

  (options, args) = parser.parse_args(args)

  # At least one suite configuration file is required.
  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  # Resolve "auto"/"native" to the detected host architecture (ARCH_GUESS
  # is defined elsewhere in this file).
  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print("Unknown architecture %s" % options.arch)
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print("Specifying a device requires Android build tools.")
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  # Checkout root: one level above the directory containing this script.
  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  # Buildbots use a flat "Release" output dir; local builds use
  # "<arch>.release".
  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    # Default: d8 inside the architecture-specific build directory.
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print("binary-override-path must be a file name")
      return 1
    if options.outdir_no_patch:
      print("specify either binary-override-path or outdir-no-patch")
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  # The context manager applies --cpu-governor/--noaslr for the duration
  # of the benchmark and restores the previous machine state on exit.
  with CustomMachineConfiguration(governor=options.cpu_governor,
                                  disable_aslr=options.noaslr):
    for path in args:
      path = os.path.abspath(path)

      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        print(">>> Running suite: %s" % "/".join(runnable.graphs))

        def Runner():
          """Output generator that reruns several times."""
          for i in xrange(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

  if options.json_test_results:
    results.WriteToFile(options.json_test_results)
  else:  # pragma: no cover
    print(results)

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print(results_no_patch)

  # Exit status: collapse any number of errors into a single failure code.
  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))