"""
Tests of regrtest.py.

Note: test_regrtest cannot be run twice in parallel.
"""

import contextlib
import glob
import io
import os.path
import platform
import re
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import time
import unittest
from test import libregrtest
from test import support
from test.support import os_helper
from test.libregrtest import utils, setup


Py_DEBUG = hasattr(sys, 'gettotalrefcount')
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
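# Timestamp/load-average prefix that regrtest puts in front of its log
# lines, e.g. "0:00:01 load avg: 0.12 " (the load average part is optional).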
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'

# Code of a test which sends SIGINT to itself, to simulate a test run
# interrupted by Ctrl+C. signal.raise_signal() only exists on Python 3.8
# and newer, so the import is done inside the try block: fall back to
# os.kill() if it fails (the original code imported raise_signal outside
# the try block, which made the except clause unreachable).
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT
    try:
        from signal import raise_signal
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)


class ParseArgsTestCase(unittest.TestCase):
    """
    Test regrtest's argument parsing, function _parse_args().
    """

    def checkError(self, args, msg):
        with support.captured_stderr() as err, self.assertRaises(SystemExit):
            libregrtest._parse_args(args)
        self.assertIn(msg, err.getvalue())

    def test_help(self):
        for opt in '-h', '--help':
            with self.subTest(opt=opt):
                with support.captured_stdout() as out, \
                     self.assertRaises(SystemExit):
                    libregrtest._parse_args([opt])
                self.assertIn('Run Python regression tests.', out.getvalue())

    def test_timeout(self):
        ns = libregrtest._parse_args(['--timeout', '4.2'])
        self.assertEqual(ns.timeout, 4.2)
        self.checkError(['--timeout'], 'expected one argument')
        self.checkError(['--timeout', 'foo'], 'invalid float value')

    def test_wait(self):
        ns = libregrtest._parse_args(['--wait'])
        self.assertTrue(ns.wait)

    def test_worker_args(self):
        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
        self.assertEqual(ns.worker_args, '[[], {}]')
        self.checkError(['--worker-args'], 'expected one argument')

    def test_start(self):
        for opt in '-S', '--start':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.start, 'foo')
                self.checkError([opt], 'expected one argument')

    def test_verbose(self):
        ns = libregrtest._parse_args(['-v'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['-vvv'])
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args(['--verbose'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['--verbose'] * 3)
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args([])
        self.assertEqual(ns.verbose, 0)

    def test_verbose2(self):
        for opt in '-w', '--verbose2':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose2)

    def test_verbose3(self):
        for opt in '-W', '--verbose3':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose3)

    def test_quiet(self):
        for opt in '-q', '--quiet':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.quiet)
                self.assertEqual(ns.verbose, 0)

    def test_slowest(self):
        for opt in '-o', '--slowest':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.print_slow)

    def test_header(self):
        ns = libregrtest._parse_args(['--header'])
        self.assertTrue(ns.header)

        ns = libregrtest._parse_args(['--verbose'])
        self.assertTrue(ns.header)

    def test_randomize(self):
        for opt in '-r', '--randomize':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.randomize)

    def test_randseed(self):
        ns = libregrtest._parse_args(['--randseed', '12345'])
        self.assertEqual(ns.random_seed, 12345)
        self.assertTrue(ns.randomize)
        self.checkError(['--randseed'], 'expected one argument')
        self.checkError(['--randseed', 'foo'], 'invalid int value')

    def test_fromfile(self):
        for opt in '-f', '--fromfile':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.fromfile, 'foo')
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo', '-s'], "don't go together")

    def test_exclude(self):
        for opt in '-x', '--exclude':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.exclude)

    def test_single(self):
        for opt in '-s', '--single':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.single)
                self.checkError([opt, '-f', 'foo'], "don't go together")

    def test_ignore(self):
        for opt in '-i', '--ignore':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.ignore_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--ignorefile', filename])
        self.assertEqual(ns.ignore_tests,
                         ['matchfile1', 'matchfile2'])

    def test_match(self):
        for opt in '-m', '--match':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.match_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        ns = libregrtest._parse_args(['-m', 'pattern1',
                                      '-m', 'pattern2'])
        self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])

        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--matchfile', filename])
        self.assertEqual(ns.match_tests,
                         ['match', 'matchfile1', 'matchfile2'])

    def test_failfast(self):
        for opt in '-G', '--failfast':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '-v'])
                self.assertTrue(ns.failfast)
                ns = libregrtest._parse_args([opt, '-W'])
                self.assertTrue(ns.failfast)
                self.checkError([opt], '-G/--failfast needs either -v or -W')

    def test_use(self):
        for opt in '-u', '--use':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'gui,network'])
                self.assertEqual(ns.use_resources, ['gui', 'network'])

                ns = libregrtest._parse_args([opt, 'gui,none,network'])
                self.assertEqual(ns.use_resources, ['network'])

                expected = list(libregrtest.ALL_RESOURCES)
                expected.remove('gui')
                ns = libregrtest._parse_args([opt, 'all,-gui'])
                self.assertEqual(ns.use_resources, expected)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid resource')

                # all + a resource not part of "all"
                ns = libregrtest._parse_args([opt, 'all,tzdata'])
                self.assertEqual(ns.use_resources,
                                 list(libregrtest.ALL_RESOURCES) + ['tzdata'])

                # test another resource which is not part of "all"
                ns = libregrtest._parse_args([opt, 'extralargefile'])
                self.assertEqual(ns.use_resources, ['extralargefile'])

    def test_memlimit(self):
        for opt in '-M', '--memlimit':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '4G'])
                self.assertEqual(ns.memlimit, '4G')
                self.checkError([opt], 'expected one argument')

    def test_testdir(self):
        ns = libregrtest._parse_args(['--testdir', 'foo'])
        self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
        self.checkError(['--testdir'], 'expected one argument')

    def test_runleaks(self):
        for opt in '-L', '--runleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.runleaks)

    def test_huntrleaks(self):
        for opt in '-R', '--huntrleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, ':'])
                self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:'])
                self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, ':3'])
                self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
                self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, '6'],
                                'needs 2 or 3 colon-separated arguments')
                self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
                self.checkError([opt, '6:foo'], 'invalid huntrleaks value')

    def test_multiprocess(self):
        for opt in '-j', '--multiprocess':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '2'])
                self.assertEqual(ns.use_mp, 2)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')
                self.checkError([opt, '2', '-T'], "don't go together")
                self.checkError([opt, '0', '-T'], "don't go together")

    def test_coverage(self):
        for opt in '-T', '--coverage':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.trace)

    def test_coverdir(self):
        for opt in '-D', '--coverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.coverdir,
                                 os.path.join(os_helper.SAVEDCWD, 'foo'))
                self.checkError([opt], 'expected one argument')

    def test_nocoverdir(self):
        for opt in '-N', '--nocoverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertIsNone(ns.coverdir)

    def test_threshold(self):
        for opt in '-t', '--threshold':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '1000'])
                self.assertEqual(ns.threshold, 1000)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')

    def test_nowindows(self):
        for opt in '-n', '--nowindows':
            with self.subTest(opt=opt):
                with contextlib.redirect_stderr(io.StringIO()) as stderr:
                    ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.nowindows)
                err = stderr.getvalue()
                self.assertIn('the --nowindows (-n) option is deprecated', err)

    def test_forever(self):
        for opt in '-F', '--forever':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.forever)

    def test_unrecognized_argument(self):
        self.checkError(['--xxx'], 'usage:')

    def test_long_option__partial(self):
        ns = libregrtest._parse_args(['--qui'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)

    def test_two_options(self):
        ns = libregrtest._parse_args(['--quiet', '--exclude'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertTrue(ns.exclude)

    def test_option_with_empty_string_value(self):
        ns = libregrtest._parse_args(['--start', ''])
        self.assertEqual(ns.start, '')

    def test_arg(self):
        ns = libregrtest._parse_args(['foo'])
        self.assertEqual(ns.args, ['foo'])

    def test_option_and_arg(self):
        ns = libregrtest._parse_args(['--quiet', 'foo'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertEqual(ns.args, ['foo'])

    def test_arg_option_arg(self):
        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
        self.assertEqual(ns.verbose, 1)
        self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])

    def test_unknown_option(self):
        self.checkError(['--unknown-option'],
                        'unrecognized arguments: --unknown-option')


class BaseTestCase(unittest.TestCase):
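    # Class-level counter used by create_test() to give generated test
    # modules unique names when no explicit name is passed.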
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(os_helper.rmtree, self.tmptestdir)

    def create_test(self, name=None, code=None):
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        if code is None:
            code = textwrap.dedent("""
                    import unittest

                    class Tests(unittest.TestCase):
                        def test_empty_test(self):
                            pass
                """)

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(os_helper.unlink, path)
        # Use 'x' mode to ensure that we do not overwrite existing tests
        try:
            with open(path, 'x', encoding='utf-8') as fp:
                fp.write(code)
        except PermissionError as exc:
            if not sysconfig.is_python_build():
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        return name

    def regex_search(self, regex, output):
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex):
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegex(output, regex)

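    # Extract the names of executed tests from output lines such as
    # "0:00:01 [ 1/10] test_regrtest_noop1".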
    def parse_executed_tests(self, output):
        regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % (LOG_PREFIX, self.TESTNAME_REGEX))
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

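    # Match the regrtest output against the expected lists of executed,
    # skipped, failed, etc. tests, the summary counts, and the final
    # "Tests result: ..." line.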
    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             env_changed=(), omitted=(),
                             rerun={}, no_test_ran=(),
                             randomize=False, interrupted=False,
                             fail_env_changed=False):
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(env_changed, str):
            env_changed = [env_changed]
        if isinstance(omitted, str):
            omitted = [omitted]
        if isinstance(no_test_ran, str):
            no_test_ran = [no_test_ran]

        executed = self.parse_executed_tests(output)
        if randomize:
            self.assertEqual(set(executed), set(tests), output)
        else:
            self.assertEqual(executed, tests, output)

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if env_changed:
            regex = list_regex('%s test%s altered the execution environment',
                               env_changed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        if rerun:
            regex = list_regex('%s re-run test%s', rerun.keys())
            self.check_line(output, regex)
            regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
            self.check_line(output, regex)
            for name, match in rerun.items():
                regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
                self.check_line(output, regex)

        if no_test_ran:
            regex = list_regex('%s test%s run no tests', no_test_ran)
            self.check_line(output, regex)

        good = (len(tests) - len(skipped) - len(failed)
                - len(omitted) - len(env_changed) - len(no_test_ran))
        if good:
            regex = r'%s test%s OK\.$' % (good, plural(good))
            if not skipped and not failed and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

        result = []
        if failed:
            result.append('FAILURE')
        elif fail_env_changed and env_changed:
            result.append('ENV CHANGED')
        if interrupted:
            result.append('INTERRUPTED')
        if not any((good, result, failed, interrupted, skipped,
                    env_changed, fail_env_changed)):
            result.append("NO TEST RUN")
        elif not result:
            result.append('SUCCESS')
        result = ', '.join(result)
        if rerun:
            self.check_line(output, 'Tests result: FAILURE')
            result = 'FAILURE then %s' % result

        self.check_line(output, 'Tests result: %s' % result)

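    # regrtest logs a "Using random seed ..." line when tests are
    # randomized; extract and sanity-check that seed.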
    def parse_random_seed(self, output):
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

    def run_command(self, args, input=None, exitcode=0, **kw):
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.STDOUT
        proc = subprocess.run(args,
                              universal_newlines=True,
                              input=input,
                              stdout=subprocess.PIPE,
                              **kw)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, proc.stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % proc.stderr)
            self.fail(msg)
        return proc

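    # Run a child Python in isolated mode (-I) so the environment and user
    # site-packages cannot interfere; -X faulthandler dumps tracebacks if
    # the child crashes or hangs.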
    def run_python(self, args, **kw):
        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
        proc = self.run_command(args, **kw)
        return proc.stdout


class CheckActualTests(BaseTestCase):
    def test_finds_expected_number_of_tests(self):
        """
        Check that regrtest appears to find the expected set of tests.
        """
        args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
        output = self.run_python(args)
        rough_number_of_tests_found = len(output.splitlines())
        actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
                                             'test*.py')
        rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
        # We're not trying to duplicate test finding logic in here,
        # just give a rough estimate of how many there should be and
        # be near that.  This is a regression test to prevent mishaps
        # such as https://bugs.python.org/issue37667 in the future.
        # If you need to change the values in here during some
        # mythical future test suite reorganization, don't go
        # overboard with logic and keep that goal in mind.
        self.assertGreater(rough_number_of_tests_found,
                           rough_counted_test_py_files*9//10,
                           msg='Unexpectedly low number of tests found in:\n'
                           f'{", ".join(output.splitlines())}')


class ProgramsTestCase(BaseTestCase):
    """
    Test various ways to run the Python test suite, using options close
    to those used on the buildbots.
    """

    NTEST = 4

    def setUp(self):
        super().setUp()

        # Create NTEST tests doing nothing
        self.tests = [self.create_test() for index in range(self.NTEST)]

        self.python_args = ['-Wd', '-E', '-bb']
        self.regrtest_args = ['-uall', '-rwW',
                              '--testdir=%s' % self.tmptestdir]
        self.regrtest_args.extend(('--timeout', '3600', '-j4'))
        if sys.platform == 'win32':
            self.regrtest_args.append('-n')

    def check_output(self, output):
        self.parse_random_seed(output)
        self.check_executed_tests(output, self.tests, randomize=True)

    def run_tests(self, args):
        output = self.run_python(args)
        self.check_output(output)

    def test_script_regrtest(self):
        # Lib/test/regrtest.py
        script = os.path.join(self.testdir, 'regrtest.py')

        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_test(self):
        # -m test
        args = [*self.python_args, '-m', 'test',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_regrtest(self):
        # -m test.regrtest
        args = [*self.python_args, '-m', 'test.regrtest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_autotest(self):
        # -m test.autotest
        args = [*self.python_args, '-m', 'test.autotest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_from_test_autotest(self):
        # from test import autotest
        code = 'from test import autotest'
        args = [*self.python_args, '-c', code,
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_script_autotest(self):
        # Lib/test/autotest.py
        script = os.path.join(self.testdir, 'autotest.py')
        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'run_tests.py script is not installed')
    def test_tools_script_run_tests(self):
        # Tools/scripts/run_tests.py
        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
        args = [script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def run_batch(self, *args):
        proc = self.run_command(args)
        self.check_output(proc.stdout)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'test.bat script is not installed')
    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_tools_buildbot_test(self):
        # Tools\buildbot\test.bat
        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
        test_args = ['--testdir=%s' % self.tmptestdir]
        if platform.machine() == 'ARM64':
            test_args.append('-arm64')   # 64-bit ARM build
        elif platform.machine() == 'ARM':
            test_args.append('-arm32')   # 32-bit ARM build
        elif platform.architecture()[0] == '64bit':
            test_args.append('-x64')     # 64-bit build
        if not Py_DEBUG:
            test_args.append('+d')       # Release build, use python.exe
        self.run_batch(script, *test_args, *self.tests)

    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_pcbuild_rt(self):
        # PCbuild\rt.bat
        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
        if not os.path.isfile(script):
            self.skipTest(f'File "{script}" does not exist')
        rt_args = ["-q"]             # Quick, don't run tests twice
        if platform.machine() == 'ARM64':
            rt_args.append('-arm64')   # 64-bit ARM build
        elif platform.machine() == 'ARM':
            rt_args.append('-arm32')   # 32-bit ARM build
        elif platform.architecture()[0] == '64bit':
            rt_args.append('-x64')     # 64-bit build
        if Py_DEBUG:
            rt_args.append('-d')       # Debug build, use python_d.exe
        self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)


class ArgsTestCase(BaseTestCase):
    """
    Test arguments of the Python test suite.
    """

    def run_tests(self, *testargs, **kw):
        cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
        return self.run_python(cmdargs, **kw)

    def test_failing_test(self):
        # test a failing test
        code = textwrap.dedent("""
            import unittest

            class FailingTest(unittest.TestCase):
                def test_failing(self):
                    self.fail("bug")
        """)
        test_ok = self.create_test('ok')
        test_failing = self.create_test('failing', code=code)
        tests = [test_ok, test_failing]

        output = self.run_tests(*tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=test_failing)

    def test_resources(self):
        # test -u command line option
        tests = {}
        for resource in ('audio', 'network'):
            code = textwrap.dedent("""
                        from test import support; support.requires(%r)
                        import unittest
                        class PassingTest(unittest.TestCase):
                            def test_pass(self):
                                pass
                    """ % resource)

            tests[resource] = self.create_test(resource, code)
        test_names = sorted(tests.values())

        # -u all: 2 resources enabled
        output = self.run_tests('-u', 'all', *test_names)
        self.check_executed_tests(output, test_names)

        # -u audio: 1 resource enabled
        output = self.run_tests('-uaudio', *test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=tests['network'])

        # no option: 0 resources enabled
        output = self.run_tests(*test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=test_names)

    def test_random(self):
        # test the -r and --randseed command line options
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', test)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the random seed
        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

    def test_fromfile(self):
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
        with open(filename, "w") as fp:
            previous = None
            for index, name in enumerate(tests, 1):
                line = ("00:00:%02i [%s/%s] %s"
                        % (index, index, len(tests), name))
                if previous:
                    line += " -- %s took 0 sec" % previous
                print(line, file=fp)
                previous = name

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format '[2/7] test_opcodes'
        with open(filename, "w") as fp:
            for index, name in enumerate(tests, 1):
                print("[%s/%s] %s" % (index, len(tests), name), file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'test_opcodes'
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'Lib/test/test_opcodes.py'
        with open(filename, "w") as fp:
            for name in tests:
                print('Lib/test/%s.py' % name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

    def test_interrupted(self):
        code = TEST_INTERRUPTED
        test = self.create_test('sigint', code=code)
        output = self.run_tests(test, exitcode=130)
        self.check_executed_tests(output, test, omitted=test,
                                  interrupted=True)

    def test_slowest(self):
        # test --slowest
        tests = [self.create_test() for index in range(3)]
        output = self.run_tests("--slowest", *tests)
        self.check_executed_tests(output, tests)
        regex = ('10 slowest tests:\n'
                 '(?:- %s: .*\n){%s}'
                 % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, regex)

    def test_slowest_interrupted(self):
        # Issue #25373: test --slowest with an interrupted test
        code = TEST_INTERRUPTED
        test = self.create_test("sigint", code=code)

        for multiprocessing in (False, True):
            with self.subTest(multiprocessing=multiprocessing):
                if multiprocessing:
                    args = ("--slowest", "-j2", test)
                else:
                    args = ("--slowest", test)
                output = self.run_tests(*args, exitcode=130)
                self.check_executed_tests(output, test,
                                          omitted=test, interrupted=True)

                regex = ('10 slowest tests:\n')
                self.check_line(output, regex)

    def test_coverage(self):
        # test --coverage
        test = self.create_test('coverage')
        output = self.run_tests("--coverage", test)
        self.check_executed_tests(output, [test])
        regex = (r'lines +cov% +module +\(path\)\n'
                 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
        self.check_line(output, regex)

    def test_wait(self):
        # test --wait
        test = self.create_test('wait')
        output = self.run_tests("--wait", test, input='key')
        self.check_line(output, 'Press any key to continue')

    def test_forever(self):
        # test --forever
        code = textwrap.dedent("""
            import builtins
            import unittest

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the builtins module, because the test
                    # module is reloaded at each run
                    if 'RUN' in builtins.__dict__:
                        builtins.__dict__['RUN'] += 1
                        if builtins.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd run")
                    else:
                        builtins.__dict__['RUN'] = 1
        """)
        test = self.create_test('forever', code=code)
        output = self.run_tests('--forever', test, exitcode=2)
        self.check_executed_tests(output, [test]*3, failed=test)

    def check_leak(self, code, what):
        test = self.create_test('huntrleaks', code=code)

        filename = 'reflog.txt'
        self.addCleanup(os_helper.unlink, filename)
        output = self.run_tests('--huntrleaks', '3:3:', test,
                                exitcode=2,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [test], failed=test)

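        # '--huntrleaks 3:3:' means 3 warm-up runs followed by 3 counted
        # runs, hence the "beginning 6 repetitions" line below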
        line = 'beginning 6 repetitions\n123456\n......\n'
        self.check_line(output, re.escape(line))

        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
        self.assertIn(line2, output)

        with open(filename) as fp:
            reflog = fp.read()
            self.assertIn(line2, reflog)

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks(self):
        # test --huntrleaks
        code = textwrap.dedent("""
            import unittest

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())
        """)
        self.check_leak(code, 'references')

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks_fd_leak(self):
        # test --huntrleaks for file descriptor leak
        code = textwrap.dedent("""
            import os
            import unittest

            class FDLeakTest(unittest.TestCase):
                def test_leak(self):
                    fd = os.open(__file__, os.O_RDONLY)
                    # bug: never close the file descriptor
        """)
        self.check_leak(code, 'file descriptors')

    def test_list_tests(self):
        # test --list-tests
        tests = [self.create_test() for i in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(),
                         tests)

    def test_list_cases(self):
        # test --list-cases
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Test --list-cases
        all_methods = ['%s.Tests.test_method1' % testname,
                       '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), all_methods)

        # Test --list-cases with --match
        all_methods = ['%s.Tests.test_method1' % testname]
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(), all_methods)

    @support.cpython_only
    def test_crashed(self):
        # Any code which causes a crash
        code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)

        tests = [crash_test]
        output = self.run_tests("-j2", *tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=crash_test,
                                  randomize=True)

    def parse_methods(self, output):
        regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
        return [match.group(1) for match in regex.finditer(output)]

    def test_ignorefile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        subset = [
            # only ignore the method name
            'test_method1',
            # ignore the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--ignorefile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method2', 'test_method4']
        self.assertEqual(methods, subset)

    def test_matchfile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        methods = self.parse_methods(output)
        self.assertEqual(methods, all_methods)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        subset = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--matchfile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method1', 'test_method3']
        self.assertEqual(methods, subset)

    def test_env_changed(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_env_changed(self):
                    open("env_changed", "w").close()
        """)
        testname = self.create_test(code=code)

        # don't fail by default
        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname], env_changed=testname)

        # fail with --fail-env-changed
        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname], env_changed=testname,
                                  fail_env_changed=True)

    def test_rerun_fail(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_always(self):
                    # test that always fails
                    self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname, rerun={testname: "test_fail_always"})

    def test_rerun_success(self):
        # FAILURE then SUCCESS
        code = textwrap.dedent("""
            import builtins
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_once(self):
                    if not hasattr(builtins, '_test_failed'):
                        builtins._test_failed = True
                        self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=0)
        self.check_executed_tests(output, [testname],
                                  rerun={testname: "test_fail_once"})

    def test_no_tests_ran(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname], no_test_ran=testname)

    def test_no_tests_ran_skip(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_skipped(self):
                    self.skipTest("because")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, exitcode=0)
        self.check_executed_tests(output, [testname])

    def test_no_tests_ran_multiple_tests_nonexistent(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        testname2 = self.create_test(code=code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname, testname2])

    def test_no_test_ran_some_test_exist_some_not(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        other_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_other_bug(self):
                    pass
        """)
        testname2 = self.create_test(code=other_code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                "-m", "test_other_bug", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname])

    @support.cpython_only
    def test_findleaks(self):
        code = textwrap.dedent(r"""
            import _testcapi
            import gc
            import unittest

            @_testcapi.with_tp_del
            class Garbage:
                def __tp_del__(self):
                    pass

            class Tests(unittest.TestCase):
                def test_garbage(self):
                    # create an uncollectable object
                    obj = Garbage()
                    obj.ref_cycle = obj
                    obj = None
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)

        # --findleaks is now basically an alias to --fail-env-changed
        output = self.run_tests("--findleaks", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)

    def test_multiprocessing_timeout(self):
        code = textwrap.dedent(r"""
            import time
            import unittest
            try:
                import faulthandler
            except ImportError:
                faulthandler = None

            class Tests(unittest.TestCase):
                # test hangs and so should be stopped by the timeout
                def test_sleep(self):
                    # we want to test regrtest multiprocessing timeout,
                    # not faulthandler timeout
                    if faulthandler is not None:
                        faulthandler.cancel_dump_traceback_later()

                    time.sleep(60 * 5)
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname)
        self.assertRegex(output,
                         re.compile('%s timed out' % testname, re.MULTILINE))

    def test_unraisable_exc(self):
        # --fail-env-changed must catch an unraisable exception.
        # The exception must be displayed even if sys.stderr is redirected.
        code = textwrap.dedent(r"""
            import unittest
            import weakref
            from test.support import captured_stderr

            class MyObject:
                pass

            def weakref_callback(obj):
                raise Exception("weakref callback bug")

            class Tests(unittest.TestCase):
                def test_unraisable_exc(self):
                    obj = MyObject()
                    ref = weakref.ref(obj, weakref_callback)
                    with captured_stderr() as stderr:
                        # call weakref_callback() which logs
                        # an unraisable exception
                        obj = None
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)
        self.assertIn("Warning -- Unraisable exception", output)
        self.assertIn("Exception: weakref callback bug", output)

    def test_threading_excepthook(self):
        # --fail-env-changed must catch an uncaught thread exception.
        # The exception must be displayed even if sys.stderr is redirected.
        code = textwrap.dedent(r"""
            import threading
            import unittest
            from test.support import captured_stderr

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_threading_excepthook(self):
                    with captured_stderr() as stderr:
                        thread = threading.Thread(target=func_bug)
                        thread.start()
                        thread.join()
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)
        self.assertIn("Warning -- Uncaught thread exception", output)
        self.assertIn("Exception: bug in thread", output)

    def test_unicode_guard_env(self):
        guard = os.environ.get(setup.UNICODE_GUARD_ENV)
        self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
        if guard != "\N{SMILING FACE WITH SUNGLASSES}":
            # Skip to signify that the env var value was changed by the user;
            # possibly to something ASCII to work around Unicode issues.
            self.skipTest("Modified guard")

    def test_cleanup(self):
        dirname = os.path.join(self.tmptestdir, "test_python_123")
        os.mkdir(dirname)
        filename = os.path.join(self.tmptestdir, "test_python_456")
        open(filename, "wb").close()
        names = [dirname, filename]

        cmdargs = ['-m', 'test',
                   '--tempdir=%s' % self.tmptestdir,
                   '--cleanup']
        self.run_python(cmdargs)

        for name in names:
            self.assertFalse(os.path.exists(name), name)


class TestUtils(unittest.TestCase):
    def test_format_duration(self):
        self.assertEqual(utils.format_duration(0),
                         '0 ms')
        self.assertEqual(utils.format_duration(1e-9),
                         '1 ms')
        self.assertEqual(utils.format_duration(10e-3),
                         '10 ms')
        self.assertEqual(utils.format_duration(1.5),
                         '1.5 sec')
        self.assertEqual(utils.format_duration(1),
                         '1.0 sec')
        self.assertEqual(utils.format_duration(2 * 60),
                         '2 min')
        self.assertEqual(utils.format_duration(2 * 60 + 1),
                         '2 min 1 sec')
        self.assertEqual(utils.format_duration(3 * 3600),
                         '3 hour')
        self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
                         '3 hour 2 min')
        self.assertEqual(utils.format_duration(3 * 3600 + 1),
                         '3 hour 1 sec')


if __name__ == '__main__':
    unittest.main()