import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment


# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5   # error in a child process
TEST_DID_NOT_RUN = -6   # the test did not run any tests

_FORMAT_TEST_RESULT = {
    PASSED: '%s passed',
    FAILED: '%s failed',
    ENV_CHANGED: '%s failed (env changed)',
    SKIPPED: '%s skipped',
    RESOURCE_DENIED: '%s skipped (resource denied)',
    INTERRUPTED: '%s interrupted',
    CHILD_ERROR: '%s crashed',
    TEST_DID_NOT_RUN: '%s ran no tests',
}

# Minimum duration of a test to display its duration or to mention that
# the test is running in the background
PROGRESS_MIN_TIME = 30.0   # seconds

# Small set of tests to determine if we have a basically functioning
# interpreter (i.e. if any of these fail, further failures are likely
# to follow).
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support',
]

# Set of tests that we don't want executed when using regrtest.
NOTTESTS = set()


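# For example: format_test_result('test_os', FAILED) returns 'test_os failed';
# an unrecognized result code falls back to just the test name.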
def format_test_result(test_name, result):
    fmt = _FORMAT_TEST_RESULT.get(result, "%s")
    return fmt % test_name


def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    names = os.listdir(testdir)
    tests = []
    others = set(stdtests) | nottests
    for name in names:
        mod, ext = os.path.splitext(name)
        if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
            tests.append(mod)
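    # STDTESTS keep their listed order; the remaining discovered test_*
    # modules are appended in sorted order.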
    return stdtests + sorted(tests)


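# For example: get_abs_module(ns, 'test_os') -> 'test.test_os'; names that
# already start with 'test.' (or any name when ns.testdir is set) are
# returned as-is.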
def get_abs_module(ns, test):
    if test.startswith('test.') or ns.testdir:
        return test
    else:
        # Always import it from the test package
        return 'test.' + test


def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time, xml_data), where result is one
    of the constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
        TEST_DID_NOT_RUN test ran no tests

    If ns.xmlpath is not None, xml_data is a list containing each
    generated testsuite element.
    """
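    # Illustrative return values (timings are hypothetical): a clean run
    # without --junit-xml yields (PASSED, 1.23, None); with ns.xmlpath set,
    # the third element is a list of serialized <testsuite> XML strings.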

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.set_match_tests(ns.match_tests)
        # reset the environment_altered flag to detect if a test altered
        # the environment
        support.environment_altered = False
        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            stream = io.StringIO()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)

        if xml_list:
            import xml.etree.ElementTree as ET
            xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
        else:
            xml_data = None
        return result + (xml_data,)
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
        support.junit_xml_list = None


def post_test_cleanup():
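    # Reap any child processes the test may have left behind so they don't
    # leak into the next test.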
    support.reap_children()


def runtest_inner(ns, test, display_failure=True):
    support.unload(test)

    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        abstest = get_abs_module(ns, test)
        clear_caches()
        with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
            start_time = time.perf_counter()
            the_module = importlib.import_module(abstest)
            # If the test has a test_main, that will run the appropriate
            # tests.  If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                def test_runner():
                    loader = unittest.TestLoader()
                    tests = loader.loadTestsFromModule(the_module)
                    for error in loader.errors:
                        print(error, file=sys.stderr)
                    if loader.errors:
                        raise Exception("errors while loading tests")
                    support.run_unittest(tests)
            if ns.huntrleaks:
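                # -R/--huntrleaks: dash_R() runs the test repeatedly and
                # reports a leak if reference or memory counts keep growing.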
                refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
            else:
                test_runner()
            test_time = time.perf_counter() - start_time
        post_test_cleanup()
    except support.ResourceDenied as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return SKIPPED, test_time
    except KeyboardInterrupt:
        raise
    except support.TestFailed as msg:
        if not ns.pgo:
            if display_failure:
                print("test", test, "failed --", msg, file=sys.stderr,
                      flush=True)
            else:
                print("test", test, "failed", file=sys.stderr, flush=True)
        return FAILED, test_time
    except support.TestDidNotRun:
        return TEST_DID_NOT_RUN, test_time
    except:
        msg = traceback.format_exc()
        if not ns.pgo:
            print("test", test, "crashed --", msg, file=sys.stderr,
                  flush=True)
        return FAILED, test_time
    else:
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time


def cleanup_test_droppings(testname, verbose):
    import shutil
    import stat
    import gc

    # First kill any dangling references to open files etc.
    # This can also issue some ResourceWarnings which would otherwise get
    # triggered during the following test run, and possibly produce failures.
    gc.collect()

    # Try to clean up junk commonly left behind.  While tests shouldn't leave
    # any files or directories behind, when a test fails that can be tedious
    # for it to arrange.  The consequences can be especially nasty on Windows,
    # since if a test leaves a file open, it cannot be deleted by name (while
    # there's nothing we can do about that here either, we can display the
    # name of the offending test, which is a real help).
    for name in (support.TESTFN,
                 "db_home",
                ):
        if not os.path.exists(name):
            continue

        if os.path.isdir(name):
            kind, nuker = "directory", shutil.rmtree
        elif os.path.isfile(name):
            kind, nuker = "file", os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % name)

        if verbose:
            print("%r left behind %s %r" % (testname, kind, name))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if hasattr(os, 'chmod'):
                os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            nuker(name)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                "removed: %s" % (testname, kind, name, msg)), file=sys.stderr)


def findtestdir(path=None):
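    # With no path argument this resolves to the directory of the test
    # package (the parent of this libregrtest package); os.curdir is the
    # last-resort fallback if that expression is empty.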
    return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir