#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Global system tests for V8 test runners and fuzzers.

This hooks up the framework under tools/testrunner, testing high-level scenarios
with different test suite extensions and build configurations.
"""

# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.

import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

from cStringIO import StringIO

TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')

Result = collections.namedtuple(
    'Result', ['stdout', 'stderr', 'returncode'])

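# Giving Result a readable string form means that, when a Result is passed as
# the msg argument of the assert* calls below, a failing assertion prints the
# full runner output (return code, stdout, stderr) instead of a bare tuple.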
Result.__str__ = lambda self: (
    '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
    (self.returncode, self.stdout, self.stderr))


@contextlib.contextmanager
def temp_dir():
  """Wrapper making a temporary directory available."""
  path = None
  try:
    path = tempfile.mkdtemp('v8_test_')
    yield path
  finally:
    if path:
      shutil.rmtree(path)


@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Wrapper that sets up a temporary V8 test root.

  Args:
    baseroot: The folder with the test root blueprint. Relevant files will be
        copied to the temporary test root, to guarantee a fresh setup with no
        dirty state.
  """
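  # For illustration, the blueprint under TEST_DATA_ROOT/<baseroot> is assumed
  # to look roughly like this (only the files copied below are required):
  #
  #   <baseroot>/
  #     v8_build_config.json   -> copied to <tempbase>/out/Release/
  #     d8_mocked.py           -> copied to <tempbase>/out/Release/
  #     test/<suite>/*         -> copied to <tempbase>/test/<suite>/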
  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    builddir = os.path.join(tempbase, 'out', 'Release')
    testroot = os.path.join(tempbase, 'test')
    os.makedirs(builddir)
    shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
    shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)

    for suite in os.listdir(os.path.join(basedir, 'test')):
      os.makedirs(os.path.join(testroot, suite))
      for entry in os.listdir(os.path.join(basedir, 'test', suite)):
        shutil.copy(
            os.path.join(basedir, 'test', suite, entry),
            os.path.join(testroot, suite))
    yield tempbase


@contextlib.contextmanager
def capture():
  """Wrapper that replaces system stdout/stderr and provides the streams."""
  oldout = sys.stdout
  olderr = sys.stderr
  try:
    stdout = StringIO()
    stderr = StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    yield stdout, stderr
  finally:
    sys.stdout = oldout
    sys.stderr = olderr


def run_tests(basedir, *args, **kwargs):
  """Executes the test runner with captured output."""
  with capture() as (stdout, stderr):
    sys_args = ['--command-prefix', sys.executable] + list(args)
    if kwargs.get('infra_staging', False):
      sys_args.append('--infra-staging')
    else:
      sys_args.append('--no-infra-staging')
    code = standard_runner.StandardTestRunner(
        basedir=basedir).execute(sys_args)
    return Result(stdout.getvalue(), stderr.getvalue(), code)


def override_build_config(basedir, **kwargs):
  """Override the build config with new values provided as kwargs."""
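  # For example, the tests below use this to fake build settings, e.g.:
  #   override_build_config(basedir, is_debug=True)
  #   override_build_config(basedir, v8_target_cpu='arm64')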
  path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
  with open(path) as f:
    config = json.load(f)
    config.update(kwargs)
  with open(path, 'w') as f:
    json.dump(config, f)


class SystemTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Try to set up python coverage and run without it if not available.
    cls._cov = None
    try:
      import coverage
      if int(coverage.__version__.split('.')[0]) < 4:
        cls._cov = None
        print 'Python coverage version >= 4 required.'
        raise ImportError()
      cls._cov = coverage.Coverage(
          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
          omit=['*unittest*', '*__init__.py'],
      )
      cls._cov.exclude('raise NotImplementedError')
      cls._cov.exclude('if __name__ == .__main__.:')
      cls._cov.exclude('except TestRunnerError:')
      cls._cov.exclude('except KeyboardInterrupt:')
      cls._cov.exclude('if options.verbose:')
      cls._cov.exclude('if verbose:')
      cls._cov.exclude('pass')
      cls._cov.exclude('assert False')
      cls._cov.start()
    except ImportError:
      print 'Running without python coverage.'
    sys.path.append(TOOLS_ROOT)
    global standard_runner
    from testrunner import standard_runner
    from testrunner.local import command
    from testrunner.local import pool
    command.setup_testing()
    pool.setup_testing()

  @classmethod
  def tearDownClass(cls):
    if cls._cov:
      cls._cov.stop()
      print ''
      print cls._cov.report(show_missing=True)

  def testPass(self):
    """Test running only passing tests in two variants.

    Also test printing durations.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          '--time',
          'sweet/bananas',
          'sweet/raspberries',
      )
      self.assertIn('Running 2 base tests', result.stdout, result)
      self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
      # TODO(majeski): Implement for test processors
      # self.assertIn('Total time:', result.stderr, result)
      # self.assertIn('sweet/bananas', result.stderr, result)
      self.assertEqual(0, result.returncode, result)

  def testShardedProc(self):
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--mode=Release',
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
            infra_staging=True,
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
        if shard == 1:
          self.assertIn('Done running sweet/bananas', result.stdout, result)
        else:
          self.assertIn('Done running sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  @unittest.skip("incompatible with test processors")
  def testSharded(self):
    """Test running a particular shard."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--mode=Release',
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 2 tests', result.stdout, result)
        self.assertIn('Done running sweet/bananas', result.stdout, result)
        self.assertIn('Done running sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  def testFailProc(self):
    self.testFail(infra_staging=True)

  def testFail(self, infra_staging=True):
    """Test running only failing tests in two variants."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 2 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def check_cleaned_json_output(self, expected_results_name, actual_json):
    # Check relevant properties of the json output.
    with open(actual_json) as f:
      json_output = json.load(f)[0]
      pretty_json = json.dumps(json_output, indent=2, sort_keys=True)

    # Replace duration in actual output as it's non-deterministic. Also
    # replace the python executable prefix as it has a different absolute
    # path depending on where this runs.
    def replace_variable_data(data):
      data['duration'] = 1
      data['command'] = ' '.join(
          ['/usr/bin/python'] + data['command'].split()[1:])
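    # E.g. a recorded command along the lines of
    # '<absolute-python-path> out/Release/d8_mocked.py ...' (the exact prefix
    # is environment-dependent) is normalized to
    # '/usr/bin/python out/Release/d8_mocked.py ...' before comparison.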
    for data in json_output['slowest_tests']:
      replace_variable_data(data)
    for data in json_output['results']:
      replace_variable_data(data)
    json_output['duration_mean'] = 1

    with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
      expected_test_results = json.load(f)

    msg = None  # Set to pretty_json for bootstrapping.
    self.assertDictEqual(json_output, expected_test_results, msg)

  def testFailWithRerunAndJSONProc(self):
    self.testFailWithRerunAndJSON(infra_staging=True)

  def testFailWithRerunAndJSON(self, infra_staging=True):
    """Test re-running a failing test and output to json."""
    with temp_base() as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('1 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      if not infra_staging:
        # We run one test, which fails and gets re-run twice.
        self.assertIn('3 tests failed', result.stdout, result)
      else:
        # With test processors we don't count reruns as separate failures.
        # TODO(majeski): fix it?
        self.assertIn('1 tests failed', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

      # TODO(majeski): Previously we only reported the variant flags in the
      # flags field of the test result.
      # After recent changes we report all flags, including the file names.
      # This is redundant with the command. Needs investigation.
      self.maxDiff = None
      self.check_cleaned_json_output('expected_test_results1.json', json_path)

  def testFlakeWithRerunAndJSONProc(self):
    self.testFlakeWithRerunAndJSON(infra_staging=True)

  def testFlakeWithRerunAndJSON(self, infra_staging=True):
    """Test re-running a flaky test and output to json."""
    with temp_base(baseroot='testroot2') as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
        self.assertIn(
            'Done running sweet/bananaflakes: FAIL', result.stdout, result)
        self.assertIn('1 tests failed', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn(
            'Done running sweet/bananaflakes: pass', result.stdout, result)
        self.assertIn('All tests succeeded', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      self.maxDiff = None
      self.check_cleaned_json_output('expected_test_results2.json', json_path)

  def testAutoDetect(self):
    """Fake a build with several auto-detected options.

    Using all those options at once doesn't really make much sense. This is
    merely for getting coverage.
    """
    with temp_base() as basedir:
      override_build_config(
          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
          v8_enable_i18n_support=False, v8_target_cpu='x86',
          v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      expect_text = (
          '>>> Autodetected:\n'
          'asan\n'
          'cfi_vptr\n'
          'dcheck_always_on\n'
          'msan\n'
          'no_i18n\n'
          'no_snap\n'
          'tsan\n'
          'ubsan_vptr\n'
          '>>> Running tests for ia32.release')
      self.assertIn(expect_text, result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(machenbach): Test some more implications of the auto-detected
      # options, e.g. that the right env variables are set.

  def testSkipsProc(self):
    self.testSkips(infra_staging=True)

  def testSkips(self, infra_staging=True):
    """Test skipping tests in status file for a specific variant."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=nooptimization',
          'sweet/strawberries',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 0 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(2, result.returncode, result)

  def testDefaultProc(self):
    self.testDefault(infra_staging=True)

  def testDefault(self, infra_staging=True):
    """Test using the default test suites; no tests are run since they don't
    exist in this test setting.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Warning: no tests were run!', result.stdout, result)
      else:
        self.assertIn('Running 0 base tests', result.stdout, result)
        self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(2, result.returncode, result)

  def testNoBuildConfig(self):
    """Test failing run when build config is not found."""
    with temp_base() as basedir:
      result = run_tests(basedir)
      self.assertIn('Failed to load build config', result.stdout, result)
      self.assertEqual(5, result.returncode, result)

  def testGNOption(self):
    """Test using gn option, but no gn build folder is found."""
    with temp_base() as basedir:
      # TODO(machenbach): This should fail gracefully.
      with self.assertRaises(OSError):
        run_tests(basedir, '--gn')

  def testInconsistentMode(self):
    """Test failing run when attempting to wrongly override the mode."""
    with temp_base() as basedir:
      override_build_config(basedir, is_debug=True)
      result = run_tests(basedir, '--mode=Release')
      self.assertIn('execution mode (release) for release is inconsistent '
                    'with build config (debug)', result.stdout, result)
      self.assertEqual(5, result.returncode, result)

  def testInconsistentArch(self):
    """Test failing run when attempting to wrongly override the arch."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--arch=ia32')
      self.assertIn(
          '--arch value (ia32) inconsistent with build config (x64).',
          result.stdout, result)
      self.assertEqual(5, result.returncode, result)

  def testWrongVariant(self):
    """Test using a bogus variant."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--variants=meh')
      self.assertEqual(5, result.returncode, result)

  def testModeFromBuildConfig(self):
    """Test auto-detection of mode from build config."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
      self.assertIn('Running tests for x64.release', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testReport(self):
    """Test the report feature.

    This also exercises various paths in statusfile logic.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet',
          '--report',
      )
      self.assertIn(
          '3 tests are expected to fail that we should fix',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testWarnUnusedRules(self):
    """Test the unused-rules feature."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default,nooptimization',
          'sweet',
          '--warn-unused',
      )
      self.assertIn('Unused rule: carrots', result.stdout, result)
      self.assertIn('Unused rule: regress/', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testCatNoSources(self):
    """Test printing sources, but the suite's tests have none available."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet/bananas',
          '--cat',
      )
      self.assertIn('begin source: sweet/bananas', result.stdout, result)
      self.assertIn('(no source available)', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testPredictableProc(self):
    self.testPredictable(infra_staging=True)

  def testPredictable(self, infra_staging=True):
    """Test running a test in verify-predictable mode.

    The test will fail because of missing allocation output. We verify this,
    and that the predictable flags are passed and printed after the failure.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_enable_verify_predictable=True)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 1 tests', result.stdout, result)
      else:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('1 tests ran', result.stdout, result)
      self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
      self.assertIn('Test had no allocation output', result.stdout, result)
      self.assertIn('--predictable --verify_predictable', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSlowArch(self):
    """Test timeout factor manipulation on slow architecture."""
    with temp_base() as basedir:
      override_build_config(basedir, v8_target_cpu='arm64')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      # TODO(machenbach): We don't have a way to test whether the correct
      # timeout was used.
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithDefaultProc(self):
    self.testRandomSeedStressWithDefault(infra_staging=True)

  def testRandomSeedStressWithDefault(self, infra_staging=True):
    """Test that the random-seed-stress feature runs the right number of tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          'sweet/bananas',
          infra_staging=infra_staging,
      )
      if infra_staging:
        self.assertIn('Running 1 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
      else:
        self.assertIn('Running 2 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithSeed(self):
    """Test using random-seed-stress feature passing a random seed."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          '--random-seed=123',
          'sweet/strawberries',
      )
      self.assertIn('Running 1 base tests', result.stdout, result)
      self.assertIn('2 tests ran', result.stdout, result)
      # We use a failing test so that the command is printed and we can verify
      # that the right random seed was passed.
      self.assertIn('--random-seed=123', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSpecificVariants(self):
    """Test that NO_VARIANTS modifiers in status files skip the desired tests.

    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
    But the status file applies a modifier to each test, skipping one of the
    variants.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/bananas',
          'sweet/raspberries',
      )
      # Both tests are marked as running in only the default or only the
      # slow variant.
      self.assertIn('Running 2 base tests', result.stdout, result)
      self.assertIn('2 tests ran', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testStatusFilePresubmit(self):
    """Test that the fake status file is well-formed."""
    with temp_base() as basedir:
      from testrunner.local import statusfile
      self.assertTrue(statusfile.PresubmitCheck(
          os.path.join(basedir, 'test', 'sweet', 'sweet.status')))

  def testDotsProgressProc(self):
    self.testDotsProgress(infra_staging=True)

  def testDotsProgress(self, infra_staging=True):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=dots',
          'sweet/cherries',
          'sweet/bananas',
          '--no-sorting', '-j1', # make results order deterministic
          infra_staging=infra_staging,
      )
      if not infra_staging:
        self.assertIn('Running 2 tests', result.stdout, result)
      else:
        self.assertIn('Running 2 base tests', result.stdout, result)
        self.assertIn('2 tests ran', result.stdout, result)
      self.assertIn('F.', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testMonoProgressProc(self):
    self._testCompactProgress('mono', True)

  def testMonoProgress(self):
    self._testCompactProgress('mono', False)

  def testColorProgressProc(self):
    self._testCompactProgress('color', True)

  def testColorProgress(self):
    self._testCompactProgress('color', False)

  def _testCompactProgress(self, name, infra_staging):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=%s' % name,
          'sweet/cherries',
          'sweet/bananas',
          infra_staging=infra_staging,
      )
      if name == 'color':
        expected = ('\033[34m% 100\033[0m|'
                    '\033[32m+   1\033[0m|'
                    '\033[31m-   1\033[0m]: Done')
      else:
        expected = '% 100|+   1|-   1]: Done'
      self.assertIn(expected, result.stdout)
      self.assertIn('sweet/cherries', result.stdout)
      self.assertIn('sweet/bananas', result.stdout)
      self.assertEqual(1, result.returncode, result)

if __name__ == '__main__':
  unittest.main()