#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "owners": [<list of email addresses of benchmark owners (required)>],
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...],
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "process_size": <flag - collect maximum memory used by the process>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
      "process_size": <flag - collect maximum memory used by the process>,
    }, ...
  ]
}

The "tests" field can also nest other suites to arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.
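
For example, a suite results_regexp of "^%s: (.+)$" becomes
"^Richards: (.+)$" for a trace named "Richards".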

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to the
suite level's path). It is expected to read the measurement's output text
on stdin and print the processed output to stdout.

The results_regexp will be applied to the processed output.
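
For example, a trivial results_processor that passes the measurement
output through unchanged could be:

  import sys
  sys.stdout.write(sys.stdin.read())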

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "owners": ["username@chromium.org"],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "owners": ["username@chromium.org", "otherowner@google.com"],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
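
For example, the first full example above results in a d8 invocation like:

  d8 --expose-gc run.js -- 5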
100"""
101
102from collections import OrderedDict
103import json
104import logging
105import math
106import optparse
107import os
108import re
109import subprocess
110import sys
111
112from testrunner.local import android
113from testrunner.local import command
114from testrunner.local import utils
115
116ARCH_GUESS = utils.DefaultArch()
117SUPPORTED_ARCHS = ["arm",
118                   "ia32",
119                   "mips",
120                   "mipsel",
121                   "x64",
122                   "arm64"]
123
124GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
125RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
126RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
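# Examples of lines matched by the regexps above - a plain result:
#   RESULT Octane: Richards= 13481 score
# a result with standard deviation (RESULT_STDDEV_RE on the value part):
#   RESULT Octane: Richards= {13481,112} score
# and a list of repeated results (RESULT_LIST_RE on the value part):
#   RESULT Octane: Richards= [13481,13493] score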
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
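
  For example, GeometricMean([2, 8]) returns roughly "4.0", since
  exp((log(2) + log(8)) / 2) == sqrt(2 * 8) == 4.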
  """
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Placeholder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = '/'.join(graphs)
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""
    self.process_size = False

  def ConsumeOutput(self, stdout):
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid having extra logic for configurations that don't
  require a secondary run, e.g. CI bots.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
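  """Splits an iterable of (left, right) pairs into two lazy iterators.

  For example, Unzip([(1, 'a'), (2, 'b')]) returns two zero-argument
  callables yielding iter([1, 2]) and iter(['a', 'b']), respectively.
  """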
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)


def RunResultsProcessor(results_processor, stdout, count):
  # Dummy pass through for null-runs.
  if stdout is None:
    return None

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  result, _ = p.communicate(input=stdout)
  logging.info(">>> Processed stdout (#%d):\n%s", count, result)
  return result


def AccumulateResults(
    graph_names, trace_configs, iter_output, perform_measurement, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    perform_measurement: Whether to actually run tests and perform
                         measurements. This is needed so that we can reuse this
                         script for both CI and trybot, but ignore the second
                         run on CI without having to spread this logic
                         throughout the script.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary="d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.process_size = False
    self.resources = []
    self.results_processor = None
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False
    self.owners = []


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite.get("owners", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
    self.owners = parent.owners[:] + suite.get("owners", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)
    self.results_processor = suite.get(
        "results_processor", parent.results_processor)
    self.process_size = suite.get("process_size", parent.process_size)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently that only makes sense for the leaf level.
    # Multiple placeholders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp
    assert self.owners

  def CreateMeasurement(self, perform_measurement):
    if not perform_measurement:
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def PostProcess(self, stdouts_iter):
    if self.results_processor:
      def it():
        for i, stdout in enumerate(stdouts_iter()):
          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
      return it
    else:
      return stdouts_iter

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
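    """Assembles the d8 argument list: suite flags, extra flags, the main
    file and, after a '--' separator, the test flags.

    For example, flags=["--expose-gc"], main="run.js" and test_flags=["5"]
    yield ["--expose-gc", "run.js", "--", "5"].
    """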
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    if self.binary != 'd8' and '--prof' in extra_flags:
      logging.info("Profiler supported only on a benchmark run with d8")

    if self.process_size:
      cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
    if self.binary.endswith('.py'):
      # Copy cmd_prefix instead of updating it in place (+=).
      cmd_prefix = cmd_prefix + [sys.executable]

    return command.Command(
        cmd_prefix=cmd_prefix,
        shell=os.path.join(shell_dir, self.binary),
        args=self.GetCommandFlags(extra_flags=extra_flags),
        timeout=self.timeout or 60)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout, stdout_secondary = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout),
            perform_measurement=True,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=self.PostProcess(stdout_secondary),
            perform_measurement=trybot,  # only run second time on trybots
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement = self.CreateMeasurement(perform_measurement=True)
    measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
    for stdout, stdout_secondary in runner():
      measurement.ConsumeOutput(stdout)
      measurement_secondary.ConsumeOutput(stdout_secondary)
    return (
        measurement.GetResults(),
        measurement_secondary.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout, stdout_secondary = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout),
        AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable, there can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_secondary = options.shell_dir_secondary
    self.extra_flags = options.extra_flags.split()
    self.options = options

  @staticmethod
  def ReadBuildConfig(options):
    config_path = os.path.join(options.shell_dir, 'v8_build_config.json')
    if not os.path.isfile(config_path):
      return {}
    with open(config_path) as f:
      return json.load(f)

  @staticmethod
  def GetPlatform(options):
    if Platform.ReadBuildConfig(options).get('is_android', False):
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, secondary=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_secondary is specified, the benchmark is run twice,
    e.g. with and without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the two benchmark outputs. The latter will be None if
             options.shell_dir_secondary was not specified.
    """
    stdout = self._Run(runnable, count, secondary=False)
    if self.shell_dir_secondary:
      return stdout, self._Run(runnable, count, secondary=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    # Set up the command class for the OS-specific version.
    command.setup(utils.GuessOS())

    if options.prioritize or options.affinitize is not None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize is not None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' marks a core the process may run on. The first bit
        # corresponds to CPU 0. Since the 'affinitize' parameter is a core
        # number, we need to map it to such a bit pattern.
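        # For example, --affinitize=3 yields the bit pattern 1 << 3 = 0x8.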
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
      self.command_prefix += ["-e"]

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, secondary=False):
    suffix = ' - secondary' if secondary else ''
    shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
    try:
      output = cmd.execute()
    except OSError:  # pragma: no cover
      logging.exception(title % "OSError")
      return ""

    logging.info(title % "Stdout" + "\n%s", output.stdout)
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      logging.info(title % "Stderr" + "\n%s", output.stderr)
    if output.timed_out:
      logging.warning(">>> Test timed out after %ss.", runnable.timeout)
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        logging.warning(
            "Profiler option currently supported on Linux and Mac OS.")

    # /usr/bin/time writes its output to stderr.
    if runnable.process_size:
      return output.stdout + output.stderr
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    self.driver = android.android_driver(options.device)

  def PreExecution(self):
    self.driver.set_high_perf_mode()

  def PostExecution(self):
    self.driver.set_default_perf_mode()
    self.driver.tear_down()

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self.driver.push_executable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_secondary:
      self.driver.push_executable(
          self.shell_dir_secondary, "bin_secondary", node.binary)

    if isinstance(node, RunnableConfig):
      self.driver.push_file(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self.driver.push_file(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, secondary=False):
    suffix = ' - secondary' if secondary else ''
    target_dir = "bin_secondary" if secondary else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    self.driver.drop_ram_caches()

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    logcat_file = None
    if self.options.dump_logcats_to:
      runnable_name = '-'.join(runnable.graphs)
      logcat_file = os.path.join(
          self.options.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
            runnable_name, count + 1, '-secondary' if secondary else ''))
      logging.debug('Dumping logcat into %s', logcat_file)

    try:
      stdout = self.driver.run(
          target_dir=target_dir,
          binary=runnable.binary,
          args=runnable.GetCommandFlags(self.extra_flags),
          rel_path=bench_rel,
          timeout=runnable.timeout,
          logcat_file=logcat_file,
      )
      logging.info(title % "Stdout" + "\n%s", stdout)
    except android.CommandFailedException as e:
      logging.info(title % "Stdout" + "\n%s", e.output)
      raise
    except android.TimeoutException:
      logging.warning(">>> Test timed out after %ss.", runnable.timeout)
      stdout = ""
    if runnable.process_size:
      return stdout + "MaxMemory: Unsupported"
    return stdout


class CustomMachineConfiguration:
  def __init__(self, disable_aslr=False, governor=None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor is not None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup is not None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup is not None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

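  # Linux exposes ASLR via /proc/sys/kernel/randomize_va_space:
  # 0 = disabled, 1 = conservative randomization, 2 = full randomization.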
  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception:
      logging.exception("Failed to get current ASLR settings.")
      raise

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception:
      logging.exception(
          "Failed to update ASLR to %s. Are we running under sudo?", value)
      raise

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
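    # /sys/devices/system/cpu/present holds an index range such as "0-7"
    # (or a single index such as "0"); e.g. "0-7" maps to range(0, 8).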
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception:
      logging.exception("Failed to retrieve number of CPUs.")
      raise

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value.
          val = f.readline().strip()
          if ret is None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception:
      logging.exception("Failed to get the current CPU governor. Is the CPU "
                        "governor disabled? Check BIOS.")
      raise

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception:
      logging.exception("Failed to change CPU governor to %s. Are we "
                        "running under sudo?", value)
      raise

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value)


def Main(args):
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools", help="Deprecated.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to the path structure used on buildbots and "
                         "add timestamps/levels to all logged status messages",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-secondary",
                    "--json-test-results-no-patch",  # TODO(sergiyb): Deprecate.
                    help="Path to a file for storing json results from run "
                         "without patch or for reference build run.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-secondary",
                    "--outdir-no-patch",  # TODO(sergiyb): Deprecate.
                    help="Base directory with compile output without patch or "
                         "for reference build")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-secondary.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process. Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")
  parser.add_option("--filter",
                    help="Only run the benchmarks beginning with this string. "
                    "For example: "
                    "--filter=JSTests/TypedArrays/ will run only TypedArray "
                    "benchmarks from the JSTests suite.",
                    default="")
  parser.add_option("--run-count-multiplier", default=1, type="int",
                    help="Multiplier used to increase the number of times "
                    "each test is run.")
  parser.add_option("--dump-logcats-to",
                    help="Writes logcat output from each test into specified "
                    "directory. Only supported for android targets.")

  (options, args) = parser.parse_args(args)

  if options.buildbot:
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
  else:
    logging.basicConfig(level=logging.INFO, format="%(message)s")

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    logging.error("Unknown architecture %s", options.arch)
    return 1

  if (options.json_test_results_secondary and
      not options.outdir_secondary):  # pragma: no cover
    logging.error("For writing secondary json test results, a secondary "
                  "outdir path must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      logging.error("binary-override-path must be a file name")
      return 1
    if options.outdir_secondary:
      logging.error("specify either binary-override-path or outdir-secondary")
      return 1
    options.shell_dir = os.path.abspath(
        os.path.dirname(options.binary_override_path))
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_secondary:
    options.shell_dir_secondary = os.path.join(
        workspace, options.outdir_secondary, build_config)
  else:
    options.shell_dir_secondary = None

  if options.json_test_results:
    options.json_test_results = os.path.abspath(options.json_test_results)

  if options.json_test_results_secondary:
    options.json_test_results_secondary = os.path.abspath(
        options.json_test_results_secondary)

  # Ensure all arguments have absolute path before we start changing current
  # directory.
  args = map(os.path.abspath, args)

  prev_aslr = None
  prev_cpu_gov = None
  platform = Platform.GetPlatform(options)

  results = Results()
  results_secondary = Results()
  with CustomMachineConfiguration(governor=options.cpu_governor,
                                  disable_aslr=options.noaslr) as conf:
    for path in args:
      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        runnable_name = "/".join(runnable.graphs)
        if (not runnable_name.startswith(options.filter) and
            runnable_name + "/" != options.filter):
          continue
        logging.info(">>> Running suite: %s", runnable_name)

        def Runner():
          """Output generator that reruns several times."""
          total_runs = runnable.run_count * options.run_count_multiplier
          for i in xrange(0, max(1, total_runs)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let runnable iterate over all runs and handle output.
        result, result_secondary = runnable.Run(
          Runner, trybot=options.shell_dir_secondary)
        results += result
        results_secondary += result_secondary
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results

  if options.json_test_results_secondary:
    results_secondary.WriteToFile(options.json_test_results_secondary)
  else:  # pragma: no cover
    print results_secondary

  return min(1, len(results.errors))


if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))