# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import collections
import os
import re
import shutil
import sys
import time

from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
from . import testsuite
from . import utils
from ..objects import output


# Base dir of the v8 checkout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)))))
TEST_DIR = os.path.join(BASE_DIR, "test")


class Instructions(object):
  def __init__(self, command, test_id, timeout, verbose):
    self.command = command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
    "process_context", ["suites", "context"])


def MakeProcessContext(context):
  """Generate a process-local context.

  This reloads all suites per process and stores the global context.

  Args:
    context: The global context from the test runner.
  """
  suite_paths = utils.GetSuitePaths(TEST_DIR)
  suites = {}
  for root in suite_paths:
    # Don't reinitialize global state as this is concurrently called from
    # different processes.
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(TEST_DIR, root), global_init=False)
    if suite:
      suites[suite.name] = suite
  return ProcessContext(suites, context)
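
# Illustrative sketch only: the pool (see Runner._RunInternal below) passes
# MakeProcessContext as its process_context_fn, so each worker process ends up
# doing roughly the equivalent of
#
#   process_context = MakeProcessContext(global_context)
#   test_id, output, duration = RunTest(TestJob(test), process_context)
#
# The exact call sites live in the Pool implementation, not in this module.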


def GetCommand(test, context):
  d8testflag = []
  shell = test.shell()
  if shell == "d8":
    d8testflag = ["--test"]
  if utils.IsWindows():
    shell += ".exe"
  if context.random_seed:
    d8testflag += ["--random-seed=%s" % context.random_seed]
  cmd = (context.command_prefix +
         [os.path.abspath(os.path.join(context.shell_dir, shell))] +
         d8testflag +
         test.suite.GetFlagsForTestCase(test, context) +
         context.extra_flags)
  return cmd
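
# For illustration only (the paths, seed and flags below are hypothetical), a
# command assembled above has roughly this shape:
#
#   ["/abs/path/to/shell_dir/d8", "--test", "--random-seed=42",
#    <flags from the test suite>, <context.extra_flags>]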


def _GetInstructions(test, context):
  command = GetCommand(test, context)
  timeout = context.timeout
  if ("--stress-opt" in test.flags or
      "--stress-opt" in context.mode_flags or
      "--stress-opt" in context.extra_flags):
    timeout *= 4
  if "--noenable-vfp3" in context.extra_flags:
    timeout *= 2
  # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
  # the like.
  if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
    timeout *= 2
  return Instructions(command, test.id, timeout, context.verbose)
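
# Example of the timeout scaling above (the base value is hypothetical): with
# a 60 second base timeout, a test marked SLOW in the status file that also
# runs with --stress-opt is given 60 * 4 * 2 = 480 seconds.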


class Job(object):
  """Stores data to be sent over the multi-process boundary.

  All contained fields will be pickled/unpickled.
  """

  def Run(self, process_context):
    """Executes the job.

    Args:
      process_context: Process-local information that is initialized by the
                       executing worker.
    """
    raise NotImplementedError()


def SetupProblem(exception, test):
  stderr = ">>> EXCEPTION: %s\n" % exception
  match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
  if match:
    # Extra debugging information when files are claimed missing.
    f = match.group(1)
    stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
  return test.id, output.Output(1, False, "", stderr, None), 0


class TestJob(Job):
  def __init__(self, test):
    self.test = test

  def _rename_coverage_data(self, output, context):
    """Rename coverage data.

    Rename files with PIDs to files with unique test IDs, because the number
    of tests might be higher than pid_max. E.g.:
    d8.1234.sancov -> d8.test.42.1.sancov, where 1234 was the process' PID,
    42 is the test ID and 1 is the attempt (the same test might be rerun on
    failures).
    """
    if context.sancov_dir and output.pid is not None:
      sancov_file = os.path.join(
          context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))

      # Some tests are expected to fail and don't produce coverage data.
      if os.path.exists(sancov_file):
        parts = sancov_file.split(".")
        new_sancov_file = ".".join(
            parts[:-2] +
            ["test", str(self.test.id), str(self.test.run)] +
            parts[-1:]
        )
        assert not os.path.exists(new_sancov_file)
        os.rename(sancov_file, new_sancov_file)

  def Run(self, process_context):
    try:
      # Retrieve a new suite object on the worker-process side. The original
      # suite object isn't pickled.
      self.test.SetSuiteObject(process_context.suites)
      instr = _GetInstructions(self.test, process_context.context)
    except Exception, e:
      return SetupProblem(e, self.test)

    start_time = time.time()
    output = commands.Execute(instr.command, instr.verbose, instr.timeout)
    self._rename_coverage_data(output, process_context.context)
    return (instr.id, output, time.time() - start_time)


def RunTest(job, process_context):
  return job.Run(process_context)


class Runner(object):

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.GetPerfDataManager(
        context, self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [ t for s in suites for t in s.tests ]

    # Always pre-sort by status file, slowest tests first.
    slow_key = lambda t: statusfile.IsSlow(t.outcomes)
    self.tests.sort(key=slow_key, reverse=True)

    # Sort by stored duration if not opted out.
    if not context.no_sorting:
      for t in self.tests:
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      self.tests.sort(key=lambda t: t.duration, reverse=True)

    self._CommonInit(suites, progress_indicator, context)

  def _CommonInit(self, suites, progress_indicator, context):
    self.total = 0
    for s in suites:
      for t in s.tests:
        t.id = self.total
        self.total += 1
    self.indicator = progress_indicator
    progress_indicator.SetRunner(self)
    self.context = context
    self.succeeded = 0
    self.remaining = self.total
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
    try:
      fun()
    except Exception, e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _MaybeRerun(self, pool, test):
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the maximum per
      # test. <= because the flag counts reruns, not including the first run.
      if test.run == 1:
        # Count the overall number of rerun tests on the first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this if the overall limit of rerun tests has been
          # reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([TestJob(test)])
      self.remaining += 1
      self.total += 1

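  # Illustration of the rerun limits above (the numbers are hypothetical):
  # with context.rerun_failures_count == 2 and context.rerun_failures_max
  # == 100, a failing test is rerun at most twice, at most 100 distinct tests
  # are ever rerun, and a rerun that already took longer than 1/20 of the
  # timeout is not retried again.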
  def _ProcessTestNormal(self, test, result, pool):
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, everything that happens after the first run is
    # treated as unexpected, even if it passes flakily, so that it is
    # included in the output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._VerbosePrint("Attempting to rerun test after failure.")
      self._MaybeRerun(pool, test)
    # Update the perf database if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
    if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both tests.
      # All runs of each test are counted as one for the statistic.
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and less than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([TestJob(test)])
    # Always update the perf database.
    return True
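
  # Note on the comparison above (the sample values are made up): predictable
  # mode diffs the last "### Allocations = <n>" line of consecutive runs, so
  # "### Allocations = 9072" vs. "### Allocations = 9073" is reported as a
  # failure, while identical lines after three runs count as a success.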

  def Run(self, jobs):
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed:
      return 1
    elif self.remaining:
      return 2
    return 0
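
  # Reading of the return value above (how callers react to it is up to
  # them): 0 means all tests passed, 1 means at least one test had unexpected
  # output, and 2 means some tests were still remaining, e.g. after an
  # aborted run.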

  def _RunInternal(self, jobs):
    pool = Pool(jobs)
    test_map = {}
    queued_exception = [None]
    def gen_tests():
      for test in self.tests:
        assert test.id >= 0
        test_map[test.id] = test
        try:
          yield [TestJob(test)]
        except Exception, e:
          # If this failed, save the exception and re-raise it later (after
          # all other tests have had a chance to run).
          queued_exception[0] = e
          continue
    try:
      it = pool.imap_unordered(
          fn=RunTest,
          gen=gen_tests(),
          process_context_fn=MakeProcessContext,
          process_context_args=[self.context],
      )
      for result in it:
        if result.heartbeat:
          self.indicator.Heartbeat()
          continue
        test = test_map[result.value[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result.value, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result.value, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      self._VerbosePrint("Closing process pool.")
      pool.terminate()
      self._VerbosePrint("Closing database connection.")
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke perf data in case of failures. This might not work on Windows
        # as some files might still be open.
        print "Deleting perf test data due to db corruption."
        shutil.rmtree(self.datapath)
    if queued_exception[0]:
      raise queued_exception[0]

    # Make sure that any allocations were printed in predictable mode (if we
    # ran any tests).
    assert (
        not self.total or
        not self.context.predictable or
        self.printed_allocations
    )

  def _VerbosePrint(self, text):
    if self.context.verbose:
      print text
      sys.stdout.flush()


class BreakNowException(Exception):
  def __init__(self, value):
    self.value = value
  def __str__(self):
    return repr(self.value)