• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright 2014 the V8 project authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""
7Performance runner for d8.
8
9Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
10
11The suite json format is expected to be:
12{
13  "path": <relative path chunks to perf resources and main file>,
14  "name": <optional suite name, file name is default>,
15  "archs": [<architecture name for which this suite is run>, ...],
16  "binary": <name of binary to run, default "d8">,
17  "flags": [<flag to d8>, ...],
18  "test_flags": [<flag to the test file>, ...],
19  "run_count": <how often will this suite run (optional)>,
20  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
21  "resources": [<js file to be moved to android device>, ...]
22  "main": <main js perf runner file>,
23  "results_regexp": <optional regexp>,
24  "results_processor": <optional python results processor script>,
25  "units": <the unit specification for the performance dashboard>,
26  "tests": [
27    {
28      "name": <name of the trace>,
29      "results_regexp": <optional more specific regexp>,
30      "results_processor": <optional python results processor script>,
31      "units": <the unit specification for the performance dashboard>,
32    }, ...
33  ]
34}
35
36The tests field can also nest other suites in arbitrary depth. A suite
37with a "main" file is a leaf suite that can contain one more level of
38tests.
39
40A suite's results_regexp is expected to have one string place holder
41"%s" for the trace name. A trace's results_regexp overwrites suite
42defaults.
43
44A suite's results_processor may point to an optional python script. If
45specified, it is called after running the tests (with a path relative to the
46suite level's path). It is expected to read the measurement's output text
47on stdin and print the processed output to stdout.
48
49The results_regexp will be applied to the processed output.
50
51A suite without "tests" is considered a performance test itself.
52
53Full example (suite with one runner):
54{
55  "path": ["."],
56  "flags": ["--expose-gc"],
57  "test_flags": ["5"],
58  "archs": ["ia32", "x64"],
59  "run_count": 5,
60  "run_count_ia32": 3,
61  "main": "run.js",
62  "results_regexp": "^%s: (.+)$",
63  "units": "score",
64  "tests": [
65    {"name": "Richards"},
66    {"name": "DeltaBlue"},
67    {"name": "NavierStokes",
68     "results_regexp": "^NavierStokes: (.+)$"}
69  ]
70}
71
72Full example (suite with several runners):
73{
74  "path": ["."],
75  "flags": ["--expose-gc"],
76  "archs": ["ia32", "x64"],
77  "run_count": 5,
78  "units": "score",
79  "tests": [
80    {"name": "Richards",
81     "path": ["richards"],
82     "main": "run.js",
83     "run_count": 3,
84     "results_regexp": "^Richards: (.+)$"},
85    {"name": "NavierStokes",
86     "path": ["navier_stokes"],
87     "main": "run.js",
88     "results_regexp": "^NavierStokes: (.+)$"}
89  ]
90}
91
92Path pieces are concatenated. D8 is always run with the suite's path as cwd.
93
94The test flags are passed to the js test file after '--'.
95"""
96
97from collections import OrderedDict
98import json
99import logging
100import math
101import optparse
102import os
103import re
104import subprocess
105import sys
106
107from testrunner.local import commands
108from testrunner.local import utils
109
# Host architecture, guessed by the test runner utilities — presumably used
# as the default for an --arch option (option parsing not visible here).
ARCH_GUESS = utils.DefaultArch()
# Architectures a suite may list in its "archs" field.
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "x64",
                   "arm64"]

# Matches generic output lines: "RESULT <graph>: <trace>= <body> <units>".
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
# Matches a "{value, stddev}" result body.
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
# Matches a "[v1,v2,...]" result body (several results from one run).
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
# Directory containing this script; used to locate the tick processors.
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
122
123
def LoadAndroidBuildTools(path):  # pragma: no cover
  """Loads the devil Android tooling from a Chromium checkout.

  Args:
    path: Existing path to Chromium's android build tools; prepended to
          sys.path so the devil modules below become importable.

  Side effects: publishes the imported devil modules as module-level globals
  (adb_wrapper, cache_control, device_errors, device_utils, perf_control)
  for use by AndroidPlatform, and initializes devil_chromium.
  """
  assert os.path.exists(path)
  sys.path.insert(0, path)

  import devil_chromium
  from devil.android import device_errors  # pylint: disable=import-error
  from devil.android import device_utils  # pylint: disable=import-error
  from devil.android.sdk import adb_wrapper  # pylint: disable=import-error
  from devil.android.perf import cache_control  # pylint: disable=import-error
  from devil.android.perf import perf_control  # pylint: disable=import-error
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control

  devil_chromium.Initialize()
141
142
def GeometricMean(values):
  """Returns the geometric mean of a list of values, as a string.

  The mean is calculated in log space to avoid overflow of the
  intermediate product: exp(mean(log(x))) == nth root of prod(x).

  Args:
    values: Non-empty iterable of numbers or numeric strings.
  Returns: The geometric mean formatted with str().
  """
  # Materialize into a list first: under Python 3 map() returns a one-shot
  # iterator without a length, which would break len() below.
  values = [float(v) for v in values]
  return str(math.exp(sum(map(math.log, values)) / len(values)))
150
151
class Results(object):
  """Container accumulating result traces and error messages."""

  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    """Returns a plain-dict view suitable for JSON serialization."""
    return {
        "traces": self.traces,
        "errors": self.errors,
    }

  def WriteToFile(self, file_name):
    """Serializes the accumulated results as JSON into the given file."""
    with open(file_name, "w") as f:
      json.dump(self.ToDict(), f)

  def __add__(self, other):
    # Accumulates in place and returns self, so results can be folded
    # together with reduce().
    self.traces.extend(other.traces)
    self.errors.extend(other.errors)
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())
172
173
class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = '/'.join(graphs)
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    """Extracts one result (and optionally a stddev) from one run's output.

    Appends to self.results on success. Appends a description to
    self.errors when the regexp doesn't match or matches a non-numeric
    value. Previously used bare excepts; failure modes are now explicit.
    """
    match = re.search(self.results_regexp, stdout, re.M)
    if match is None:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))
    else:
      try:
        # Normalize the matched value through float() to weed out garbage.
        self.results.append(str(float(match.group(1))))
      except ValueError:
        self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                           % (self.results_regexp, self.name))

    if self.stddev_regexp and self.stddev:
      # A test-provided stddev only makes sense for a single run.
      self.errors.append("Test %s should only run once since a stddev "
                         "is provided by the test." % self.name)
    if self.stddev_regexp:
      stddev_match = re.search(self.stddev_regexp, stdout, re.M)
      if stddev_match is None:
        self.errors.append("Regexp \"%s\" didn't match for test %s."
                           % (self.stddev_regexp, self.name))
      else:
        self.stddev = stddev_match.group(1)

  def GetResults(self):
    """Wraps the collected results and errors into a Results object."""
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)
218
219
class NullMeasurement(object):
  """No-op stand-in for a Measurement.

  Avoids extra logic for configurations that didn't run, e.g. running
  without patch on non-trybots.
  """

  def ConsumeOutput(self, stdout):
    """Silently discards the output."""

  def GetResults(self):
    """Returns an empty result set."""
    return Results()
229
230
def Unzip(iterable):
  """Splits an iterable of pairs into two replayable sequences.

  Returns a tuple of two zero-argument callables; each call produces a
  fresh iterator over the first respectively second elements of the pairs.
  """
  firsts = []
  seconds = []
  for first, second in iterable:
    firsts.append(first)
    seconds.append(second)
  return (lambda: iter(firsts)), (lambda: iter(seconds))
238
239
def RunResultsProcessor(results_processor, stdout, count):
  """Pipes a test run's output through an external results processor.

  Args:
    results_processor: Path to a python script, relative to the suite (the
                       cwd is expected to have been changed accordingly).
    stdout: Raw output of one test run, or None for null-runs.
    count: 1-based number of this (repeated) run; used for logging only.
  Returns: The processor script's stdout, or None if stdout was None.
  """
  # Dummy pass through for null-runs.
  if stdout is None:
    return None

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  result, _ = p.communicate(input=stdout)
  print ">>> Processed stdout (#%d):" % count
  print result
  return result
257
258
def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  # Feed every run's output to every measurement; each measurement extracts
  # its own trace via its regexp.
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res
305
306
def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Each line of the form "RESULT <graph>: <trace>= <body> <units>" is parsed,
  where <body> is a plain value, a "{value, stddev}" pair or a "[v1,v2,...]"
  list (see GENERIC_RESULTS_RE and friends at the top of this file).

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          # "{value, stddev}" syntax: one result with a deviation.
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          # "[v1,v2,...]" syntax: several results from one run.
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          # Validate that every result is numeric.
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        # One accumulated trace per trace name; repeated runs extend the
        # existing results list. Line units take precedence over suite units.
        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())
359
360
class Node(object):
  """Represents a node in the suite tree structure."""

  def __init__(self, *args):
    # Child nodes, populated via AppendChild while building the tree.
    self._children = []

  def AppendChild(self, child):
    """Adds one child node."""
    self._children.append(child)
368
369
class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary = "d8"):
    super(DefaultSentinel, self).__init__()
    # Name of the binary to run.
    self.binary = binary
    # How often a suite runs by default.
    self.run_count = 10
    # Per-run timeout in seconds.
    self.timeout = 60
    # Relative path chunks to perf resources and the main file.
    self.path = []
    # Accumulated graph names forming the base path of traces.
    self.graphs = []
    # Flags passed to the binary.
    self.flags = []
    # Flags passed to the test file after '--'.
    self.test_flags = []
    # Extra js files to be pushed to an android device.
    self.resources = []
    # Optional external script post-processing the output.
    self.results_processor = None
    # Regexps extracting results / standard deviation from the output.
    self.results_regexp = None
    self.stddev_regexp = None
    # Unit specification for the performance dashboard.
    self.units = "score"
    # Whether a geometric-mean "Total" trace is computed.
    self.total = False
387
388
class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.

  Args:
    suite: The suite configuration dict (see module docstring).
    parent: Parent node providing defaults and accumulated values.
    arch: Architecture name, used to select arch-specific overrides
          (run_count_<arch>, timeout_<arch>).
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults). Arch-specific keys override
    # the generic ones.
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)
    self.results_processor = suite.get(
        "results_processor", parent.results_processor)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
441
442
class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    # A leaf must know how to extract its result from the output.
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    """Creates the measurement object that collects this trace's results.

    Returns a NullMeasurement for the no-patch configuration when this is
    not a trybot run (there is no second build to measure in that case).
    """
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )
460
461
class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    # The main js file; empty strings are accepted (see MakeGraphConfig).
    return self._suite.get("main", "")

  def PostProcess(self, stdouts_iter):
    """Returns an output-iterator factory, piping each run's output through
    the suite's results processor if one is configured."""
    if self.results_processor:
      def it():
        for i, stdout in enumerate(stdouts_iter()):
          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
      return it
    else:
      return stdouts_iter

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    """Assembles the argument list: suite flags, extra flags, the main file,
    then the test flags after a '--' separator."""
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    """Builds the full command line for running this suite's binary."""
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary.endswith(".py"):
      # Python script benchmarks are run through the interpreter.
      cmd = [sys.executable] + cmd
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_with_patch),
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_no_patch),
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )
522
523
class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""

  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    with_patch = self.CreateMeasurement(trybot, False)
    no_patch = self.CreateMeasurement(trybot, True)
    for out_with_patch, out_no_patch in runner():
      with_patch.ConsumeOutput(out_with_patch)
      no_patch.ConsumeOutput(out_no_patch)
    return (with_patch.GetResults(), no_patch.GetResults())
540
541
class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs, parsing generic "RESULT ..." lines."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )
553
554
def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects.

  Args:
    suite: A suite configuration dict (see module docstring).
    arch: The architecture name this run targets.
    parent: The parent node in the configuration tree.
  Returns: An instance of the GraphConfig subclass matching the suite.
  Raises: Exception if the suite is neither runnable, generic, nor has
          subtests.
  """
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")
577
578
def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.

  Args:
    suite: A suite configuration dict (see module docstring).
    arch: The architecture name this run targets.
    parent: The parent Node; the new graph is appended to its children.
  Returns: The new graph node, or None if the suite doesn't support arch.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph
593
594
def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.

  Args:
    node: Root of the (sub)tree to traverse.
    node_cb: Callback invoked on every visited node.
  Yields: All RunnableConfig nodes, depth-first. Runnables themselves are
          not descended into (below them are only traces).
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")
608
609
class Platform(object):
  """Abstract base for the environment executing benchmarks."""
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    # Optional second build directory; set for trybot runs comparing
    # with and without a patch.
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    """Factory choosing the platform implementation from the options."""
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    # Subclass responsibility: perform one benchmark run, return its stdout.
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None
643
644
class DesktopPlatform(Platform):
  """Runs benchmarks directly on the host machine."""
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    if options.prioritize or options.affinitize != None:
      # Wrap the benchmark invocation in schedtool to control scheduling.
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        # Highest priority (nice -20) — presumably requires privileges.
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core where the process may run on.
        # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
        # a core number, we need to map to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    # No machine setup needed on desktop.
    pass

  def PostExecution(self):
    # No teardown needed on desktop.
    pass

  def PreTests(self, node, path):
    """Changes into the benchmark directory before running a runnable."""
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    """Performs one benchmark run on the host and returns its stdout."""
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    command = self.command_prefix + runnable.GetCommand(shell_dir,
                                                        self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      # Post-process profiler ticks with the platform's tick processor.
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout
706
707
class AndroidPlatform(Platform):  # pragma: no cover
  """Runs benchmarks on an attached Android device via the devil tools."""

  # Working directory on the device; removed again in PostExecution.
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    """Switches the device into high-performance mode before benchmarking."""
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    """Restores default performance mode and wipes the device directory."""
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    """Pushes one file to the device, deduplicating repeated pushes.

    Args:
      host_dir: Directory on the host containing the file.
      file_name: File name relative to host_dir.
      target_rel: Target directory relative to DEVICE_DIR.
      skip_if_missing: If True, silently skip files missing on the host;
                       otherwise log a critical error.
    """
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    """Pushes the binary and its optional startup blobs to the device."""
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob_ignition.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    """Pushes binaries, the main file and resources before running a node."""
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    """Performs one benchmark run on the device and returns its stdout."""
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    # Drop RAM caches for more stable timings.
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout
843
class CustomMachineConfiguration:
  """Context manager that pins machine settings for stable benchmarking.

  On entry it optionally disables ASLR and/or switches the CPU frequency
  scaling governor of all present cores; on exit it restores the previous
  values. Both operations write to /proc and /sys and therefore typically
  require root privileges.
  """

  def __init__(self, disable_aslr=False, governor=None):
    # Previous machine settings, captured in __enter__ so that __exit__ can
    # restore them. None means "nothing to restore".
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor is not None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    # Restore whatever __enter__ changed, even if the benchmark run failed.
    if self.aslr_backup is not None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup is not None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    """Returns the current ASLR setting as an int (kernel value, e.g. 0-2)."""
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception:
      print("Failed to get current ASLR settings.")
      # Bare raise preserves the original traceback ("raise e" would not
      # under Python 2).
      raise

  @staticmethod
  def SetASLR(value):
    """Writes |value| to the ASLR knob and verifies the kernel accepted it.

    Raises:
      Exception: if the value read back differs from the requested one.
    """
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception:
      print("Failed to update ASLR to %s." % value)
      print("Are we running under sudo?")
      raise

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
    """Returns the range of present CPU core indices, e.g. [0, 1, 2, 3]."""
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        # The file contains either a single index ("0") or an inclusive
        # range ("0-7").
        bounds = [int(i) for i in f.readline().split("-")]
        if len(bounds) == 1:
          return range(bounds[0], bounds[0] + 1)
        return range(bounds[0], bounds[1] + 1)
    except Exception:
      print("Failed to retrieve number of CPUs.")
      raise

  @staticmethod
  def GetCPUPathForId(cpu_index):
    """Returns the sysfs scaling_governor path for the given core index."""
    return "/sys/devices/system/cpu/cpu%s/cpufreq/scaling_governor" % cpu_index

  @staticmethod
  def GetCPUGovernor():
    """Returns the common governor of all cores.

    Raises:
      Exception: if the cores do not all share the same governor setting.
    """
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value.
          val = f.readline().strip()
          if ret is None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception:
      print("Failed to get the current CPU governor.")
      print("Is the CPU governor disabled? Check BIOS.")
      raise

  @staticmethod
  def SetCPUGovernor(value):
    """Sets the governor of every present core to |value| and verifies it.

    Raises:
      Exception: if the governor read back differs from the requested one.
    """
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception:
      print("Failed to change CPU governor to %s." % value)
      print("Are we running under sudo?")
      raise

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value)
947
def Main(args):
  """Parses command-line options and runs the given perf suite configs.

  Args:
    args: Command-line argument list (excluding the program name). Trailing
        positional arguments are paths to suite json configuration files.

  Returns:
    1 if option validation fails or any suite reported errors, 0 otherwise.
  """
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process.Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")

  (options, args) = parser.parse_args(args)

  # Option validation: every failure path prints a hint and returns 1.
  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  # This script lives in tools/, so the workspace is its parent directory.
  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  # Buildbots use a flat "Release" dir; local builds use "<arch>.release".
  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  # Derive shell_dir and the binary name either from the outdir layout or
  # from an explicit binary override (the two are mutually exclusive with
  # outdir-no-patch).
  if options.binary_override_path == None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print "binary-override-path must be a file name"
      return 1
    if options.outdir_no_patch:
      print "specify either binary-override-path or outdir-no-patch"
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  # NOTE(review): prev_aslr/prev_cpu_gov appear unused here; restoration is
  # handled by CustomMachineConfiguration below.
  prev_aslr = None
  prev_cpu_gov = None
  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  # Pin ASLR/CPU-governor settings for the whole benchmarking session; they
  # are restored automatically when the context exits.
  with CustomMachineConfiguration(governor = options.cpu_governor,
                                  disable_aslr = options.noaslr) as conf:
    for path in args:
      path = os.path.abspath(path)

      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and interate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        print ">>> Running suite: %s" % "/".join(runnable.graphs)

        def Runner():
          """Output generator that reruns several times."""
          for i in xrange(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

    # NOTE(review): this write happens inside the machine-config context,
    # while the no-patch write below happens outside it — confirm the
    # asymmetry is intentional.
    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

  # Exit status 1 if any errors were collected, 0 otherwise.
  return min(1, len(results.errors))
1112
# Script entry point: exit status is 1 if any suite reported errors.
if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))
1115