1"""
2Tests of regrtest.py.
3
4Note: test_regrtest cannot be run twice in parallel.
5"""
6
7import contextlib
8import faulthandler
9import glob
10import io
11import os.path
12import platform
13import re
14import subprocess
15import sys
16import sysconfig
17import tempfile
18import textwrap
19import unittest
20from test import libregrtest
21from test import support
22from test.libregrtest import utils
23
24
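# sys.gettotalrefcount() only exists in CPython debug builds, so its presence
# is a reliable marker for a Py_DEBUG build.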
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
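# Timestamp prefix of regrtest log lines, e.g. "0:00:02 load avg: 1.95 "
# (the load average part is optional).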
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'

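# Code for a test that interrupts itself by raising SIGINT in-process.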
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT, raise_signal
    try:
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)


class ParseArgsTestCase(unittest.TestCase):
    """
    Test regrtest's argument parsing, function _parse_args().
    """

    def checkError(self, args, msg):
        with support.captured_stderr() as err, self.assertRaises(SystemExit):
            libregrtest._parse_args(args)
        self.assertIn(msg, err.getvalue())

    def test_help(self):
        for opt in '-h', '--help':
            with self.subTest(opt=opt):
                with support.captured_stdout() as out, \
                     self.assertRaises(SystemExit):
                    libregrtest._parse_args([opt])
                self.assertIn('Run Python regression tests.', out.getvalue())

    @unittest.skipUnless(hasattr(faulthandler, 'dump_traceback_later'),
                         "faulthandler.dump_traceback_later() required")
    def test_timeout(self):
        ns = libregrtest._parse_args(['--timeout', '4.2'])
        self.assertEqual(ns.timeout, 4.2)
        self.checkError(['--timeout'], 'expected one argument')
        self.checkError(['--timeout', 'foo'], 'invalid float value')

    def test_wait(self):
        ns = libregrtest._parse_args(['--wait'])
        self.assertTrue(ns.wait)

    def test_worker_args(self):
        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
        self.assertEqual(ns.worker_args, '[[], {}]')
        self.checkError(['--worker-args'], 'expected one argument')

    def test_start(self):
        for opt in '-S', '--start':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.start, 'foo')
                self.checkError([opt], 'expected one argument')

    def test_verbose(self):
        ns = libregrtest._parse_args(['-v'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['-vvv'])
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args(['--verbose'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['--verbose'] * 3)
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args([])
        self.assertEqual(ns.verbose, 0)

    def test_verbose2(self):
        for opt in '-w', '--verbose2':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose2)

    def test_verbose3(self):
        for opt in '-W', '--verbose3':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose3)

    def test_quiet(self):
        for opt in '-q', '--quiet':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.quiet)
                self.assertEqual(ns.verbose, 0)

    def test_slowest(self):
        for opt in '-o', '--slowest':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.print_slow)

    def test_header(self):
        ns = libregrtest._parse_args(['--header'])
        self.assertTrue(ns.header)

        ns = libregrtest._parse_args(['--verbose'])
        self.assertTrue(ns.header)

    def test_randomize(self):
        for opt in '-r', '--randomize':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.randomize)

    def test_randseed(self):
        ns = libregrtest._parse_args(['--randseed', '12345'])
        self.assertEqual(ns.random_seed, 12345)
        self.assertTrue(ns.randomize)
        self.checkError(['--randseed'], 'expected one argument')
        self.checkError(['--randseed', 'foo'], 'invalid int value')

    def test_fromfile(self):
        for opt in '-f', '--fromfile':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.fromfile, 'foo')
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo', '-s'], "don't go together")

    def test_exclude(self):
        for opt in '-x', '--exclude':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.exclude)

    def test_single(self):
        for opt in '-s', '--single':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.single)
                self.checkError([opt, '-f', 'foo'], "don't go together")

    def test_ignore(self):
        for opt in '-i', '--ignore':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.ignore_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(support.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--ignorefile', filename])
        self.assertEqual(ns.ignore_tests,
                         ['matchfile1', 'matchfile2'])

    def test_match(self):
        for opt in '-m', '--match':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.match_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        ns = libregrtest._parse_args(['-m', 'pattern1',
                                      '-m', 'pattern2'])
        self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])

        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(support.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--matchfile', filename])
        self.assertEqual(ns.match_tests,
                         ['match', 'matchfile1', 'matchfile2'])

    def test_failfast(self):
        for opt in '-G', '--failfast':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '-v'])
                self.assertTrue(ns.failfast)
                ns = libregrtest._parse_args([opt, '-W'])
                self.assertTrue(ns.failfast)
                self.checkError([opt], '-G/--failfast needs either -v or -W')

    def test_use(self):
        for opt in '-u', '--use':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'gui,network'])
                self.assertEqual(ns.use_resources, ['gui', 'network'])

                ns = libregrtest._parse_args([opt, 'gui,none,network'])
                self.assertEqual(ns.use_resources, ['network'])

                expected = list(libregrtest.ALL_RESOURCES)
                expected.remove('gui')
                ns = libregrtest._parse_args([opt, 'all,-gui'])
                self.assertEqual(ns.use_resources, expected)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid resource')

                # all + a resource not part of "all"
                ns = libregrtest._parse_args([opt, 'all,tzdata'])
                self.assertEqual(ns.use_resources,
                                 list(libregrtest.ALL_RESOURCES) + ['tzdata'])

                # test another resource which is not part of "all"
                ns = libregrtest._parse_args([opt, 'extralargefile'])
                self.assertEqual(ns.use_resources, ['extralargefile'])

    def test_memlimit(self):
        for opt in '-M', '--memlimit':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '4G'])
                self.assertEqual(ns.memlimit, '4G')
                self.checkError([opt], 'expected one argument')

    def test_testdir(self):
        ns = libregrtest._parse_args(['--testdir', 'foo'])
        self.assertEqual(ns.testdir, os.path.join(support.SAVEDCWD, 'foo'))
        self.checkError(['--testdir'], 'expected one argument')

    def test_runleaks(self):
        for opt in '-L', '--runleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.runleaks)

    def test_huntrleaks(self):
        for opt in '-R', '--huntrleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, ':'])
                self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:'])
                self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, ':3'])
                self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
                self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, '6'],
                                'needs 2 or 3 colon-separated arguments')
                self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
                self.checkError([opt, '6:foo'], 'invalid huntrleaks value')

    def test_multiprocess(self):
        for opt in '-j', '--multiprocess':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '2'])
                self.assertEqual(ns.use_mp, 2)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')
                self.checkError([opt, '2', '-T'], "don't go together")
                self.checkError([opt, '0', '-T'], "don't go together")

    def test_coverage(self):
        for opt in '-T', '--coverage':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.trace)

    def test_coverdir(self):
        for opt in '-D', '--coverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.coverdir,
                                 os.path.join(support.SAVEDCWD, 'foo'))
                self.checkError([opt], 'expected one argument')

    def test_nocoverdir(self):
        for opt in '-N', '--nocoverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertIsNone(ns.coverdir)

    def test_threshold(self):
        for opt in '-t', '--threshold':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '1000'])
                self.assertEqual(ns.threshold, 1000)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')

    def test_nowindows(self):
        for opt in '-n', '--nowindows':
            with self.subTest(opt=opt):
                with contextlib.redirect_stderr(io.StringIO()) as stderr:
                    ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.nowindows)
                err = stderr.getvalue()
                self.assertIn('the --nowindows (-n) option is deprecated', err)

    def test_forever(self):
        for opt in '-F', '--forever':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.forever)

    def test_unrecognized_argument(self):
        self.checkError(['--xxx'], 'usage:')

    def test_long_option__partial(self):
        ns = libregrtest._parse_args(['--qui'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)

    def test_two_options(self):
        ns = libregrtest._parse_args(['--quiet', '--exclude'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertTrue(ns.exclude)

    def test_option_with_empty_string_value(self):
        ns = libregrtest._parse_args(['--start', ''])
        self.assertEqual(ns.start, '')

    def test_arg(self):
        ns = libregrtest._parse_args(['foo'])
        self.assertEqual(ns.args, ['foo'])

    def test_option_and_arg(self):
        ns = libregrtest._parse_args(['--quiet', 'foo'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertEqual(ns.args, ['foo'])

    def test_arg_option_arg(self):
        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
        self.assertEqual(ns.verbose, 1)
        self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])

    def test_unknown_option(self):
        self.checkError(['--unknown-option'],
                        'unrecognized arguments: --unknown-option')


class BaseTestCase(unittest.TestCase):
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(support.rmtree, self.tmptestdir)

    def create_test(self, name=None, code=None):
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        if code is None:
            code = textwrap.dedent("""
                    import unittest

                    class Tests(unittest.TestCase):
                        def test_empty_test(self):
                            pass
                """)

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(support.unlink, path)
        # Use 'x' mode to ensure that we do not overwrite existing tests
        try:
            with open(path, 'x', encoding='utf-8') as fp:
                fp.write(code)
        except PermissionError as exc:
            if not sysconfig.is_python_build():
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        return name

    def regex_search(self, regex, output):
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex):
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegex(output, regex)

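    # Extract test names from regrtest progress lines such as
    # "0:00:02 [ 1/ 5] test_regrtest_noop1" (an illustrative example; the
    # exact spacing of the counter varies).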
    def parse_executed_tests(self, output):
        regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % (LOG_PREFIX, self.TESTNAME_REGEX))
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             env_changed=(), omitted=(),
                             rerun=(), no_test_ran=(),
                             randomize=False, interrupted=False,
                             fail_env_changed=False):
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(env_changed, str):
            env_changed = [env_changed]
        if isinstance(omitted, str):
            omitted = [omitted]
        if isinstance(rerun, str):
            rerun = [rerun]
        if isinstance(no_test_ran, str):
            no_test_ran = [no_test_ran]

        executed = self.parse_executed_tests(output)
        if randomize:
            self.assertEqual(set(executed), set(tests), output)
        else:
            self.assertEqual(executed, tests, output)

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if env_changed:
            regex = list_regex('%s test%s altered the execution environment',
                               env_changed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        if rerun:
            regex = list_regex('%s re-run test%s', rerun)
            self.check_line(output, regex)
            regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
            self.check_line(output, regex)
            for test_name in rerun:
                regex = LOG_PREFIX + f"Re-running {test_name} in verbose mode"
                self.check_line(output, regex)

        if no_test_ran:
            regex = list_regex('%s test%s run no tests', no_test_ran)
            self.check_line(output, regex)

        good = (len(tests) - len(skipped) - len(failed)
                - len(omitted) - len(env_changed) - len(no_test_ran))
        if good:
            regex = r'%s test%s OK\.$' % (good, plural(good))
            if not skipped and not failed and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

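        # Rebuild the expected "Tests result: ..." summary line from the
        # categories checked above.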
        result = []
        if failed:
            result.append('FAILURE')
        elif fail_env_changed and env_changed:
            result.append('ENV CHANGED')
        if interrupted:
            result.append('INTERRUPTED')
        if not any((good, result, failed, interrupted, skipped,
                    env_changed, fail_env_changed)):
            result.append("NO TEST RUN")
        elif not result:
            result.append('SUCCESS')
        result = ', '.join(result)
        if rerun:
            self.check_line(output, 'Tests result: FAILURE')
            result = 'FAILURE then %s' % result

        self.check_line(output, 'Tests result: %s' % result)

    def parse_random_seed(self, output):
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

    def run_command(self, args, input=None, exitcode=0, **kw):
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.PIPE
        proc = subprocess.run(args,
                              universal_newlines=True,
                              input=input,
                              stdout=subprocess.PIPE,
                              **kw)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, proc.stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % proc.stderr)
            self.fail(msg)
        return proc

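    # Run the Python under test with faulthandler enabled (-X faulthandler)
    # and in isolated mode (-I), so that PYTHON* environment variables and
    # the user site directory cannot interfere with the child process.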
    def run_python(self, args, **kw):
        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
        proc = self.run_command(args, **kw)
        return proc.stdout


class CheckActualTests(BaseTestCase):
    """
    Check that regrtest appears to find the expected set of tests.
    """

    def test_finds_expected_number_of_tests(self):
        args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
        output = self.run_python(args)
        rough_number_of_tests_found = len(output.splitlines())
        actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
                                             'test*.py')
        rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
        # We're not trying to duplicate test finding logic in here,
        # just give a rough estimate of how many there should be and
        # be near that.  This is a regression test to prevent mishaps
        # such as https://bugs.python.org/issue37667 in the future.
        # If you need to change the values in here during some
        # mythical future test suite reorganization, don't go
        # overboard with logic and keep that goal in mind.
        self.assertGreater(rough_number_of_tests_found,
                           rough_counted_test_py_files*9//10,
                           msg='Unexpectedly low number of tests found in:\n'
                           f'{", ".join(output.splitlines())}')


class ProgramsTestCase(BaseTestCase):
    """
    Test various ways to run the Python test suite, using options close
    to those used on the buildbots.
    """

    NTEST = 4

    def setUp(self):
        super().setUp()

        # Create NTEST tests doing nothing
        self.tests = [self.create_test() for index in range(self.NTEST)]

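        # Interpreter flags: -Wd (default warning filters), -E (ignore
        # PYTHON* environment variables), -bb (bytes/str comparison warnings
        # become errors).  The regrtest options below mirror the buildbots:
        # -u all (enable all resources), -r (randomize test order),
        # -w (re-run failed tests in verbose mode), -W (show test output on
        # failure).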
        self.python_args = ['-Wd', '-E', '-bb']
        self.regrtest_args = ['-uall', '-rwW',
                              '--testdir=%s' % self.tmptestdir]
        if hasattr(faulthandler, 'dump_traceback_later'):
            self.regrtest_args.extend(('--timeout', '3600', '-j4'))
        if sys.platform == 'win32':
            self.regrtest_args.append('-n')

    def check_output(self, output):
        self.parse_random_seed(output)
        self.check_executed_tests(output, self.tests, randomize=True)

    def run_tests(self, args):
        output = self.run_python(args)
        self.check_output(output)

    def test_script_regrtest(self):
        # Lib/test/regrtest.py
        script = os.path.join(self.testdir, 'regrtest.py')

        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_test(self):
        # -m test
        args = [*self.python_args, '-m', 'test',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_regrtest(self):
        # -m test.regrtest
        args = [*self.python_args, '-m', 'test.regrtest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_autotest(self):
        # -m test.autotest
        args = [*self.python_args, '-m', 'test.autotest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_from_test_autotest(self):
        # from test import autotest
        code = 'from test import autotest'
        args = [*self.python_args, '-c', code,
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_script_autotest(self):
        # Lib/test/autotest.py
        script = os.path.join(self.testdir, 'autotest.py')
        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'run_tests.py script is not installed')
    def test_tools_script_run_tests(self):
        # Tools/scripts/run_tests.py
        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
        args = [script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def run_batch(self, *args):
        proc = self.run_command(args)
        self.check_output(proc.stdout)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'test.bat script is not installed')
    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_tools_buildbot_test(self):
        # Tools\buildbot\test.bat
        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
        test_args = ['--testdir=%s' % self.tmptestdir]
        if platform.machine() == 'ARM64':
            test_args.append('-arm64') # ARM 64-bit build
        elif platform.architecture()[0] == '64bit':
            test_args.append('-x64')   # 64-bit build
        if not Py_DEBUG:
            test_args.append('+d')     # Release build, use python.exe
        self.run_batch(script, *test_args, *self.tests)

    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_pcbuild_rt(self):
        # PCbuild\rt.bat
        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
        if not os.path.isfile(script):
            self.skipTest(f'File "{script}" does not exist')
        rt_args = ["-q"]             # Quick, don't run tests twice
        if platform.machine() == 'ARM64':
            rt_args.append('-arm64') # ARM 64-bit build
        elif platform.architecture()[0] == '64bit':
            rt_args.append('-x64')   # 64-bit build
        if Py_DEBUG:
            rt_args.append('-d')     # Debug build, use python_d.exe
        self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)


class ArgsTestCase(BaseTestCase):
    """
    Test arguments of the Python test suite.
    """

    def run_tests(self, *testargs, **kw):
        cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
        return self.run_python(cmdargs, **kw)

    def test_failing_test(self):
        # test a failing test
        code = textwrap.dedent("""
            import unittest

            class FailingTest(unittest.TestCase):
                def test_failing(self):
                    self.fail("bug")
        """)
        test_ok = self.create_test('ok')
        test_failing = self.create_test('failing', code=code)
        tests = [test_ok, test_failing]

        output = self.run_tests(*tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=test_failing)

    def test_resources(self):
        # test -u command line option
        tests = {}
        for resource in ('audio', 'network'):
            code = textwrap.dedent("""
                        from test import support; support.requires(%r)
                        import unittest
                        class PassingTest(unittest.TestCase):
                            def test_pass(self):
                                pass
                    """ % resource)

            tests[resource] = self.create_test(resource, code)
        test_names = sorted(tests.values())

        # -u all: 2 resources enabled
        output = self.run_tests('-u', 'all', *test_names)
        self.check_executed_tests(output, test_names)

        # -u audio: 1 resource enabled
        output = self.run_tests('-uaudio', *test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=tests['network'])

        # no option: 0 resources enabled
        output = self.run_tests(*test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=test_names)

    def test_random(self):
        # test -r and --randseed command line option
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', test)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the random seed
        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

    def test_fromfile(self):
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
        with open(filename, "w") as fp:
            previous = None
            for index, name in enumerate(tests, 1):
                line = ("00:00:%02i [%s/%s] %s"
                        % (index, index, len(tests), name))
                if previous:
                    line += " -- %s took 0 sec" % previous
                print(line, file=fp)
                previous = name

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format '[2/7] test_opcodes'
        with open(filename, "w") as fp:
            for index, name in enumerate(tests, 1):
                print("[%s/%s] %s" % (index, len(tests), name), file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'test_opcodes'
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'Lib/test/test_opcodes.py'
        with open(filename, "w") as fp:
            for name in tests:
                print('Lib/test/%s.py' % name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

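    # Exit code 130 is the conventional 128 + SIGINT (signal number 2).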
    def test_interrupted(self):
        code = TEST_INTERRUPTED
        test = self.create_test('sigint', code=code)
        output = self.run_tests(test, exitcode=130)
        self.check_executed_tests(output, test, omitted=test,
                                  interrupted=True)

    def test_slowest(self):
        # test --slowest
        tests = [self.create_test() for index in range(3)]
        output = self.run_tests("--slowest", *tests)
        self.check_executed_tests(output, tests)
        regex = ('10 slowest tests:\n'
                 '(?:- %s: .*\n){%s}'
                 % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, regex)

    def test_slowest_interrupted(self):
        # Issue #25373: test --slowest with an interrupted test
        code = TEST_INTERRUPTED
        test = self.create_test("sigint", code=code)

        for multiprocessing in (False, True):
            with self.subTest(multiprocessing=multiprocessing):
                if multiprocessing:
                    args = ("--slowest", "-j2", test)
                else:
                    args = ("--slowest", test)
                output = self.run_tests(*args, exitcode=130)
                self.check_executed_tests(output, test,
                                          omitted=test, interrupted=True)

                regex = ('10 slowest tests:\n')
                self.check_line(output, regex)

    def test_coverage(self):
        # test --coverage
        test = self.create_test('coverage')
        output = self.run_tests("--coverage", test)
        self.check_executed_tests(output, [test])
        regex = (r'lines +cov% +module +\(path\)\n'
                 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
        self.check_line(output, regex)

    def test_wait(self):
        # test --wait
        test = self.create_test('wait')
        output = self.run_tests("--wait", test, input='key')
        self.check_line(output, 'Press any key to continue')

    def test_forever(self):
        # test --forever
        code = textwrap.dedent("""
            import builtins
            import unittest

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the builtins module, because the test
                    # module is reloaded at each run
                    if 'RUN' in builtins.__dict__:
                        builtins.__dict__['RUN'] += 1
                        if builtins.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd run")
                    else:
                        builtins.__dict__['RUN'] = 1
        """)
        test = self.create_test('forever', code=code)
        output = self.run_tests('--forever', test, exitcode=2)
        self.check_executed_tests(output, [test]*3, failed=test)

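    # With --huntrleaks '3:3:', regrtest runs each test 3 warmup runs plus
    # 3 measured runs ("beginning 6 repetitions"); a test leaking one
    # reference or file descriptor per run is reported as
    # "leaked [1, 1, 1] ..., sum=3".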
    def check_leak(self, code, what):
        test = self.create_test('huntrleaks', code=code)

        filename = 'reflog.txt'
        self.addCleanup(support.unlink, filename)
        output = self.run_tests('--huntrleaks', '3:3:', test,
                                exitcode=2,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [test], failed=test)

        line = 'beginning 6 repetitions\n123456\n......\n'
        self.check_line(output, re.escape(line))

        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
        self.assertIn(line2, output)

        with open(filename) as fp:
            reflog = fp.read()
            self.assertIn(line2, reflog)

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks(self):
        # test --huntrleaks
        code = textwrap.dedent("""
            import unittest

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())
        """)
        self.check_leak(code, 'references')

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks_fd_leak(self):
        # test --huntrleaks for file descriptor leak
        code = textwrap.dedent("""
            import os
            import unittest

            class FDLeakTest(unittest.TestCase):
                def test_leak(self):
                    fd = os.open(__file__, os.O_RDONLY)
                    # bug: never close the file descriptor
        """)
        self.check_leak(code, 'file descriptors')

    def test_list_tests(self):
        # test --list-tests
        tests = [self.create_test() for i in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(),
                         tests)

    def test_list_cases(self):
        # test --list-cases
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Test --list-cases
        all_methods = ['%s.Tests.test_method1' % testname,
                       '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), all_methods)

        # Test --list-cases with --match
        all_methods = ['%s.Tests.test_method1' % testname]
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(), all_methods)

    @support.cpython_only
    def test_crashed(self):
        # Any code which causes a crash
        code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)

        tests = [crash_test]
        output = self.run_tests("-j2", *tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=crash_test,
                                  randomize=True)

    def parse_methods(self, output):
        regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
        return [match.group(1) for match in regex.finditer(output)]

    def test_ignorefile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # only run a subset
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        subset = [
            # only ignore the method name
            'test_method1',
            # ignore the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--ignorefile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method2', 'test_method4']
        self.assertEqual(methods, subset)

    def test_matchfile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        methods = self.parse_methods(output)
        self.assertEqual(methods, all_methods)

        # only run a subset
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        subset = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--matchfile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method1', 'test_method3']
        self.assertEqual(methods, subset)

    def test_env_changed(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_env_changed(self):
                    open("env_changed", "w").close()
        """)
        testname = self.create_test(code=code)

        # don't fail by default
        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname], env_changed=testname)

        # fail with --fail-env-changed
        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname], env_changed=testname,
                                  fail_env_changed=True)

    def test_rerun_fail(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    # test always fails
                    self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname, rerun=testname)

    def test_rerun_success(self):
        # FAILURE then SUCCESS
        code = textwrap.dedent("""
            import builtins
            import unittest

            class Tests(unittest.TestCase):
                failed = False

                def test_fail_once(self):
                    if not hasattr(builtins, '_test_failed'):
                        builtins._test_failed = True
                        self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=0)
        self.check_executed_tests(output, [testname],
                                  rerun=testname)

    def test_no_tests_ran(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname], no_test_ran=testname)

    def test_no_tests_ran_skip(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_skipped(self):
                    self.skipTest("because")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, exitcode=0)
        self.check_executed_tests(output, [testname])

    def test_no_tests_ran_multiple_tests_nonexistent(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        testname2 = self.create_test(code=code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname, testname2])

    def test_no_test_ran_some_test_exist_some_not(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        other_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_other_bug(self):
                    pass
        """)
        testname2 = self.create_test(code=other_code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                "-m", "test_other_bug", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname])

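    # _testcapi.with_tp_del gives the class a legacy tp_del slot, so an
    # instance caught in a reference cycle becomes uncollectable garbage,
    # which regrtest reports as a changed execution environment.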
    @support.cpython_only
    def test_findleaks(self):
        code = textwrap.dedent(r"""
            import _testcapi
            import gc
            import unittest

            @_testcapi.with_tp_del
            class Garbage:
                def __tp_del__(self):
                    pass

            class Tests(unittest.TestCase):
                def test_garbage(self):
                    # create an uncollectable object
                    obj = Garbage()
                    obj.ref_cycle = obj
                    obj = None
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)

        # --findleaks is now basically an alias to --fail-env-changed
        output = self.run_tests("--findleaks", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)

    def test_multiprocessing_timeout(self):
        code = textwrap.dedent(r"""
            import time
            import unittest
            try:
                import faulthandler
            except ImportError:
                faulthandler = None

            class Tests(unittest.TestCase):
                # test hangs and so should be stopped by the timeout
                def test_sleep(self):
                    # we want to test the regrtest multiprocessing timeout,
                    # not the faulthandler timeout
                    if faulthandler is not None:
                        faulthandler.cancel_dump_traceback_later()

                    time.sleep(60 * 5)
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname)
        self.assertRegex(output,
                         re.compile('%s timed out' % testname, re.MULTILINE))

    def test_cleanup(self):
        dirname = os.path.join(self.tmptestdir, "test_python_123")
        os.mkdir(dirname)
        filename = os.path.join(self.tmptestdir, "test_python_456")
        open(filename, "wb").close()
        names = [dirname, filename]

        cmdargs = ['-m', 'test',
                   '--tempdir=%s' % self.tmptestdir,
                   '--cleanup']
        self.run_python(cmdargs)

        for name in names:
            self.assertFalse(os.path.exists(name), name)


class TestUtils(unittest.TestCase):
    def test_format_duration(self):
        self.assertEqual(utils.format_duration(0),
                         '0 ms')
        self.assertEqual(utils.format_duration(1e-9),
                         '1 ms')
        self.assertEqual(utils.format_duration(10e-3),
                         '10 ms')
        self.assertEqual(utils.format_duration(1.5),
                         '1.5 sec')
        self.assertEqual(utils.format_duration(1),
                         '1.0 sec')
        self.assertEqual(utils.format_duration(2 * 60),
                         '2 min')
        self.assertEqual(utils.format_duration(2 * 60 + 1),
                         '2 min 1 sec')
        self.assertEqual(utils.format_duration(3 * 3600),
                         '3 hour')
        self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
                         '3 hour 2 min')
        self.assertEqual(utils.format_duration(3 * 3600 + 1),
                         '3 hour 1 sec')


if __name__ == '__main__':
    unittest.main()