• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright 2014 the V8 project authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""
7Performance runner for d8.
8
9Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
10
11The suite json format is expected to be:
12{
13  "path": <relative path chunks to perf resources and main file>,
14  "name": <optional suite name, file name is default>,
15  "archs": [<architecture name for which this suite is run>, ...],
16  "binary": <name of binary to run, default "d8">,
17  "flags": [<flag to d8>, ...],
18  "test_flags": [<flag to the test file>, ...],
19  "run_count": <how often will this suite run (optional)>,
20  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
21  "resources": [<js file to be moved to android device>, ...]
22  "main": <main js perf runner file>,
23  "results_regexp": <optional regexp>,
24  "results_processor": <optional python results processor script>,
25  "units": <the unit specification for the performance dashboard>,
26  "tests": [
27    {
28      "name": <name of the trace>,
29      "results_regexp": <optional more specific regexp>,
30      "results_processor": <optional python results processor script>,
31      "units": <the unit specification for the performance dashboard>,
32    }, ...
33  ]
34}
35
36The tests field can also nest other suites in arbitrary depth. A suite
37with a "main" file is a leaf suite that can contain one more level of
38tests.
39
40A suite's results_regexp is expected to have one string place holder
41"%s" for the trace name. A trace's results_regexp overwrites suite
42defaults.
43
44A suite's results_processor may point to an optional python script. If
45specified, it is called after running the tests like this (with a path
relative to the suite level's path):
47<results_processor file> <same flags as for d8> <suite level name> <output>
48
49The <output> is a temporary file containing d8 output. The results_regexp will
50be applied to the output of this script.
51
52A suite without "tests" is considered a performance test itself.
53
54Full example (suite with one runner):
55{
56  "path": ["."],
57  "flags": ["--expose-gc"],
58  "test_flags": ["5"],
59  "archs": ["ia32", "x64"],
60  "run_count": 5,
61  "run_count_ia32": 3,
62  "main": "run.js",
63  "results_regexp": "^%s: (.+)$",
64  "units": "score",
65  "tests": [
66    {"name": "Richards"},
67    {"name": "DeltaBlue"},
68    {"name": "NavierStokes",
69     "results_regexp": "^NavierStokes: (.+)$"}
70  ]
71}
72
73Full example (suite with several runners):
74{
75  "path": ["."],
76  "flags": ["--expose-gc"],
77  "archs": ["ia32", "x64"],
78  "run_count": 5,
79  "units": "score",
80  "tests": [
81    {"name": "Richards",
82     "path": ["richards"],
83     "main": "run.js",
84     "run_count": 3,
85     "results_regexp": "^Richards: (.+)$"},
86    {"name": "NavierStokes",
87     "path": ["navier_stokes"],
88     "main": "run.js",
89     "results_regexp": "^NavierStokes: (.+)$"}
90  ]
91}
92
93Path pieces are concatenated. D8 is always run with the suite's path as cwd.
94
95The test flags are passed to the js test file after '--'.
96"""
97
98from collections import OrderedDict
99import json
100import logging
101import math
102import optparse
103import os
104import re
105import subprocess
106import sys
107
108from testrunner.local import commands
109from testrunner.local import utils
110
# Architecture of the host machine, used when --arch is 'auto' or 'native'.
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]

# Matches chromium-style generic perf result lines:
# RESULT <graph>: <trace>= <value(s)> <units>
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
# A result body of the form {mean,stddev}.
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
# A result body of the form [value1,value2,...].
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
# Directory containing this script (and the *-tick-processor tools).
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
125
126
def LoadAndroidBuildTools(path):  # pragma: no cover
  """Imports the android pylib modules from chromium's build/android dir.

  The modules are published as module-level globals so the rest of this
  file can use them as if they had been imported at the top.

  Args:
    path: Path to chromium's build/android directory.
  """
  assert os.path.exists(path)
  sys.path.insert(0, path)

  from pylib.device import adb_wrapper  # pylint: disable=F0401
  from pylib.device import device_errors  # pylint: disable=F0401
  from pylib.device import device_utils  # pylint: disable=F0401
  from pylib.perf import cache_control  # pylint: disable=F0401
  from pylib.perf import perf_control  # pylint: disable=F0401
  # Promote the function-local imports to module scope.
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control
141
142
def GeometricMean(values):
  """Returns the geometric mean of an iterable of values as a string.

  The mean is calculated using log to avoid overflow.

  Args:
    values: Non-empty iterable of number-convertible values. Materialized
            into a list first, so generator input (as passed by
            AccumulateResults) and Python 3's lazy map no longer break the
            double iteration / len() below.
  """
  values = [float(v) for v in values]
  return str(math.exp(sum(math.log(v) for v in values) / len(values)))
150
151
class Results(object):
  """Accumulates result traces and the errors encountered along the way."""

  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    """Returns a json-serializable dict representation."""
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    """Serializes this object as json into the given file."""
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    # NOTE: extends self in place and returns it, so "a + b" mutates a.
    self.traces.extend(other.traces)
    self.errors.extend(other.errors)
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())
172
173
class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    # The trace name is the last component of the graph path.
    self.name = graphs[-1]
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    """Extracts one result (and optional stddev) from a run's stdout.

    Failures are recorded as error messages instead of raising, so a bad
    run doesn't abort the remaining repetitions.
    """
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except Exception:
      # A failed match surfaces as AttributeError on None.group(1). Catch
      # Exception rather than a bare except so KeyboardInterrupt and
      # SystemExit still propagate.
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        # A test providing its own stddev must only run once; repeated runs
        # would silently overwrite the previous value below.
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except Exception:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    """Wraps the collected data in a Results object."""
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)
218
219
class NullMeasurement(object):
  """No-op stand-in for a Measurement.

  Avoids extra logic at call sites for configurations that didn't run,
  like running without patch on non-trybot runs.
  """
  def ConsumeOutput(self, stdout):
    """Ignores the output."""
    pass

  def GetResults(self):
    """Returns an empty Results object."""
    return Results()
229
230
def Unzip(iterable):
  """Splits an iterable of pairs into two replayable sequences.

  Returns two zero-argument callables; each call produces a fresh iterator
  over the first respectively second elements of the consumed pairs.
  """
  pairs = list(iterable)
  firsts = [first for first, _ in pairs]
  seconds = [second for _, second in pairs]
  return lambda: iter(firsts), lambda: iter(seconds)
238
239
def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces.
                 E.g. ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
      trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = Results()
  for measurement in measurements:
    res += measurement.GetResults()

  if not res.traces or not calc_total:
    return res

  # A total only makes sense when every trace has the same number of results.
  if len(set(len(trace["results"]) for trace in res.traces)) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(trace["results"][i] for trace in res.traces)
                   for i in range(n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res
286
287
def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces.
                 E.g. ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if not match:
        continue
      graph = match.group(1)
      trace = match.group(2)
      body = match.group(3)
      units = match.group(4)
      stddev = ""
      errors = []
      match_stddev = RESULT_STDDEV_RE.match(body)
      match_list = RESULT_LIST_RE.match(body)
      if match_stddev:
        # Body has the form {average,stddev}.
        result, stddev = [p.strip() for p in match_stddev.group(1).split(",")]
        results = [result]
      elif match_list:
        # Body has the form [result1,result2,...].
        results = [p.strip() for p in match_list.group(1).split(",")]
      else:
        # Body is a single result.
        results = [body.strip()]

      try:
        results = [str(float(r)) for r in results]
      except ValueError:
        results = []
        errors = ["Found non-numeric in %s" %
                  "/".join(graph_names + [graph, trace])]

      trace_result = traces.setdefault(trace, Results([{
        "graphs": graph_names + [graph, trace],
        "units": (units or suite_units).strip(),
        "results": [],
        "stddev": "",
      }], errors))
      trace_result.traces[0]["results"].extend(results)
      trace_result.traces[0]["stddev"] = stddev

  summary = Results()
  for trace_result in traces.values():
    summary += trace_result
  return summary
340
341
class Node(object):
  """Base class for nodes in the suite tree structure."""

  def __init__(self, *args):
    # Child nodes, appended during tree construction.
    self._children = []

  def AppendChild(self, child):
    """Adds a child node."""
    self._children.append(child)
349
350
class DefaultSentinel(Node):
  """Fake parent node with all default values.

  Used as the root's parent so GraphConfig can unconditionally fall back
  to parent values.
  """
  def __init__(self):
    super(DefaultSentinel, self).__init__()
    # Scalar defaults.
    self.binary = "d8"
    self.run_count = 10
    self.timeout = 60
    # Accumulated list-valued settings start out empty.
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    # No result regexps by default; leaf suites must define their own.
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False
367
368
class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Settings accumulated along the parent chain.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Settings independent of the parent node.
    self.resources = suite.get("resources", [])

    # Discrete settings falling back to the parent's value. Run count and
    # timeout may additionally be overridden per architecture.
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get(
        "run_count_%s" % arch, suite.get("run_count", parent.run_count))
    self.timeout = suite.get(
        "timeout_%s" % arch, suite.get("timeout", parent.timeout))
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    escaped_name = re.escape(suite["name"])
    regexp_default = (
        parent.results_regexp % escaped_name
        if parent.results_regexp else None)
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    stddev_default = (
        parent.stddev_regexp % escaped_name
        if parent.stddev_regexp else None)
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
419
420
class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    # A leaf must know how to extract its result from the output.
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    """Returns a Measurement, or a NullMeasurement for skipped runs."""
    if no_patch and not trybot:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()
    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )
438
439
440class RunnableConfig(GraphConfig):
441  """Represents a runnable suite definition (i.e. has a main file).
442  """
443  @property
444  def main(self):
445    return self._suite.get("main", "")
446
447  def ChangeCWD(self, suite_path):
448    """Changes the cwd to to path defined in the current graph.
449
450    The tests are supposed to be relative to the suite configuration.
451    """
452    suite_dir = os.path.abspath(os.path.dirname(suite_path))
453    bench_dir = os.path.normpath(os.path.join(*self.path))
454    os.chdir(os.path.join(suite_dir, bench_dir))
455
456  def GetCommandFlags(self, extra_flags=None):
457    suffix = ["--"] + self.test_flags if self.test_flags else []
458    return self.flags + (extra_flags or []) + [self.main] + suffix
459
460  def GetCommand(self, shell_dir, extra_flags=None):
461    # TODO(machenbach): This requires +.exe if run on windows.
462    extra_flags = extra_flags or []
463    cmd = [os.path.join(shell_dir, self.binary)]
464    if self.binary != 'd8' and '--prof' in extra_flags:
465      print "Profiler supported only on a benchmark run with d8"
466    return cmd + self.GetCommandFlags(extra_flags=extra_flags)
467
468  def Run(self, runner, trybot):
469    """Iterates over several runs and handles the output for all traces."""
470    stdout_with_patch, stdout_no_patch = Unzip(runner())
471    return (
472        AccumulateResults(
473            self.graphs,
474            self._children,
475            iter_output=stdout_with_patch,
476            trybot=trybot,
477            no_patch=False,
478            calc_total=self.total,
479        ),
480        AccumulateResults(
481            self.graphs,
482            self._children,
483            iter_output=stdout_no_patch,
484            trybot=trybot,
485            no_patch=True,
486            calc_total=self.total,
487        ),
488    )
489
490
class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    with_patch = self.CreateMeasurement(trybot, False)
    without_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      with_patch.ConsumeOutput(stdout_with_patch)
      without_patch.ConsumeOutput(stdout_no_patch)
    return (with_patch.GetResults(), without_patch.GetResults())
507
508
class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Runs the suite and accumulates generic results with/without patch."""
    with_patch, no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, with_patch),
        AccumulateGenericResults(self.graphs, self.units, no_patch),
    )
520
521
def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects.

  The order of the checks below matters and mirrors the suite format:
  children of a runnable are always traces.
  """
  if isinstance(parent, RunnableConfig):
    # Below a runnable can only be traces.
    return TraceConfig(suite, parent, arch)

  if suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    # This graph has no subgraphs, it's a leaf.
    return RunnableTraceConfig(suite, parent, arch)

  if suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)

  if suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)

  raise Exception("Invalid suite configuration.")  # pragma: no cover
544
545
def BuildGraphConfigs(suite, arch, parent=None):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.

  Args:
    suite: Parsed suite configuration dict.
    arch: Architecture name; suites not listing it in "archs" are skipped.
    parent: Parent graph node; defaults to a sentinel with default values.
  Returns: The created graph node, or None if the suite doesn't apply to
           the given architecture.
  """
  parent = parent or DefaultSentinel()

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    # The recursive call appends each subsuite to graph's children.
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph
561
562
def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.

  Args:
    node: Root of the (sub)tree to traverse.
    node_cb: Callback invoked once for every visited node.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    # A runnable is yielded as-is; its trace children are handled by the
    # runnable itself, so they are deliberately not traversed here.
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")
576
577
class Platform(object):
  """Abstract base for the desktop and android execution platforms."""

  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    """Factory returning the platform implementation for the options."""
    if options.android_build_tools:
      return AndroidPlatform(options)
    return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    stdout_no_patch = (
        self._Run(runnable, count, no_patch=True)
        if self.shell_dir_no_patch else None)
    return stdout, stdout_no_patch
611
612
class DesktopPlatform(Platform):
  """Runs benchmarks directly on the host machine."""

  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)

  def PreExecution(self):
    # Nothing to set up on desktop.
    pass

  def PostExecution(self):
    # Nothing to tear down on desktop.
    pass

  def PreTests(self, node, path):
    """Changes into the suite's benchmark directory before running it."""
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    """Executes one benchmark run and returns its stdout.

    Args:
      runnable: The RunnableConfig to execute.
      count: Zero-based index of this repetition (used only for logging).
      no_patch: If True, run the shell built without the patch.
    """
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    # The doubled %% leaves a "%s" place holder for the headline below.
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    try:
      output = commands.Execute(
          runnable.GetCommand(shell_dir, self.extra_flags),
          timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""
    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      # Post-process the profile with the platform's tick processor.
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout
656
657
class AndroidPlatform(Platform):  # pragma: no cover
  """Runs benchmarks on an attached Android device via adb."""

  # Working directory on the device; wiped in PostExecution.
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    """Switches the device to high-performance mode for stable results."""
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    """Restores default performance mode and cleans up the device dir."""
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    """Pushes one file to the device, at most once per execution.

    Args:
      host_dir: Directory on the host containing the file.
      file_name: Name of the file to push.
      target_rel: Target directory relative to DEVICE_DIR.
      skip_if_missing: If True, a missing host file is silently ignored
                       instead of logged as critical.
    """
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    """Pushes the shell binary and its startup blobs to the device."""
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    """Pushes binaries and suite resources to the device before a run."""
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    """Executes one benchmark run on the device and returns its stdout."""
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    # Drop RAM caches for reproducible timings.
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout
785
786
# TODO: Implement results_processor.
def Main(args):
  """Parses options, runs all given suites and writes/prints the results.

  Args:
    args: Command-line arguments (without the program name). Positional
          arguments are suite configuration files.
  Returns: 0 on success, 1 on bad usage or if any suite produced errors.
  """
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  options.shell_dir = os.path.join(workspace, options.outdir, build_config)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  for path in args:
    path = os.path.abspath(path)

    if not os.path.exists(path):  # pragma: no cover
      results.errors.append("Configuration file %s does not exist." % path)
      continue

    with open(path) as f:
      suite = json.loads(f.read())

    # If no name is given, default to the file name without .json.
    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

    # Setup things common to one test suite.
    platform.PreExecution()

    # Build the graph/trace tree structure.
    root = BuildGraphConfigs(suite, options.arch)

    # Callback to be called on each node on traversal.
    def NodeCB(node):
      platform.PreTests(node, path)

    # Traverse graph/trace tree and iterate over all runnables.
    for runnable in FlattenRunnables(root, NodeCB):
      print ">>> Running suite: %s" % "/".join(runnable.graphs)

      def Runner():
        """Output generator that reruns several times."""
        for i in xrange(0, max(1, runnable.run_count)):
          # TODO(machenbach): Allow timeout per arch like with run_count per
          # arch.
          yield platform.Run(runnable, i)

      # Let runnable iterate over all runs and handle output. Note that a
      # truthy shell_dir_no_patch doubles as the trybot flag.
      result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
      results += result
      results_no_patch += result_no_patch
    platform.PostExecution()

  if options.json_test_results:
    results.WriteToFile(options.json_test_results)
  else:  # pragma: no cover
    print results

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

  return min(1, len(results.errors))
910
# Script entry point; exit code is 1 if any suite produced errors.
if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))
913