1""" 2Tests of regrtest.py. 3 4Note: test_regrtest cannot be run twice in parallel. 5""" 6 7import contextlib 8import dataclasses 9import glob 10import io 11import locale 12import os.path 13import platform 14import random 15import re 16import shlex 17import signal 18import subprocess 19import sys 20import sysconfig 21import tempfile 22import textwrap 23import unittest 24from xml.etree import ElementTree 25 26from test import support 27from test.support import os_helper, without_optimizer 28from test.libregrtest import cmdline 29from test.libregrtest import main 30from test.libregrtest import setup 31from test.libregrtest import utils 32from test.libregrtest.filter import get_match_tests, set_match_tests, match_test 33from test.libregrtest.result import TestStats 34from test.libregrtest.utils import normalize_test_name 35 36if not support.has_subprocess_support: 37 raise unittest.SkipTest("test module requires subprocess") 38 39ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..') 40ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR)) 41LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?' 42 43EXITCODE_BAD_TEST = 2 44EXITCODE_ENV_CHANGED = 3 45EXITCODE_NO_TESTS_RAN = 4 46EXITCODE_RERUN_FAIL = 5 47EXITCODE_INTERRUPTED = 130 48 49TEST_INTERRUPTED = textwrap.dedent(""" 50 from signal import SIGINT, raise_signal 51 try: 52 raise_signal(SIGINT) 53 except ImportError: 54 import os 55 os.kill(os.getpid(), SIGINT) 56 """) 57 58 59class ParseArgsTestCase(unittest.TestCase): 60 """ 61 Test regrtest's argument parsing, function _parse_args(). 62 """ 63 64 @staticmethod 65 def parse_args(args): 66 return cmdline._parse_args(args) 67 68 def checkError(self, args, msg): 69 with support.captured_stderr() as err, self.assertRaises(SystemExit): 70 self.parse_args(args) 71 self.assertIn(msg, err.getvalue()) 72 73 def test_help(self): 74 for opt in '-h', '--help': 75 with self.subTest(opt=opt): 76 with support.captured_stdout() as out, \ 77 self.assertRaises(SystemExit): 78 self.parse_args([opt]) 79 self.assertIn('Run Python regression tests.', out.getvalue()) 80 81 def test_timeout(self): 82 ns = self.parse_args(['--timeout', '4.2']) 83 self.assertEqual(ns.timeout, 4.2) 84 85 # negative, zero and empty string are treated as "no timeout" 86 for value in ('-1', '0', ''): 87 with self.subTest(value=value): 88 ns = self.parse_args([f'--timeout={value}']) 89 self.assertEqual(ns.timeout, None) 90 91 self.checkError(['--timeout'], 'expected one argument') 92 self.checkError(['--timeout', 'foo'], 'invalid timeout value:') 93 94 def test_wait(self): 95 ns = self.parse_args(['--wait']) 96 self.assertTrue(ns.wait) 97 98 def test_start(self): 99 for opt in '-S', '--start': 100 with self.subTest(opt=opt): 101 ns = self.parse_args([opt, 'foo']) 102 self.assertEqual(ns.start, 'foo') 103 self.checkError([opt], 'expected one argument') 104 105 def test_verbose(self): 106 ns = self.parse_args(['-v']) 107 self.assertEqual(ns.verbose, 1) 108 ns = self.parse_args(['-vvv']) 109 self.assertEqual(ns.verbose, 3) 110 ns = self.parse_args(['--verbose']) 111 self.assertEqual(ns.verbose, 1) 112 ns = self.parse_args(['--verbose'] * 3) 113 self.assertEqual(ns.verbose, 3) 114 ns = self.parse_args([]) 115 self.assertEqual(ns.verbose, 0) 116 117 def test_rerun(self): 118 for opt in '-w', '--rerun', '--verbose2': 119 with self.subTest(opt=opt): 120 ns = self.parse_args([opt]) 121 self.assertTrue(ns.rerun) 122 123 def test_verbose3(self): 124 for opt in '-W', '--verbose3': 125 with self.subTest(opt=opt): 126 ns = self.parse_args([opt]) 127 self.assertTrue(ns.verbose3) 128 129 def test_quiet(self): 130 for opt in '-q', '--quiet': 131 with self.subTest(opt=opt): 132 ns = self.parse_args([opt]) 133 self.assertTrue(ns.quiet) 134 self.assertEqual(ns.verbose, 0) 135 136 def test_slowest(self): 137 for opt in '-o', '--slowest': 138 with self.subTest(opt=opt): 139 ns = self.parse_args([opt]) 140 self.assertTrue(ns.print_slow) 141 142 def test_header(self): 143 ns = self.parse_args(['--header']) 144 self.assertTrue(ns.header) 145 146 ns = self.parse_args(['--verbose']) 147 self.assertTrue(ns.header) 148 149 def test_randomize(self): 150 for opt in ('-r', '--randomize'): 151 with self.subTest(opt=opt): 152 ns = self.parse_args([opt]) 153 self.assertTrue(ns.randomize) 154 155 with os_helper.EnvironmentVarGuard() as env: 156 # with SOURCE_DATE_EPOCH 157 env['SOURCE_DATE_EPOCH'] = '1697839080' 158 ns = self.parse_args(['--randomize']) 159 regrtest = main.Regrtest(ns) 160 self.assertFalse(regrtest.randomize) 161 self.assertIsInstance(regrtest.random_seed, str) 162 self.assertEqual(regrtest.random_seed, '1697839080') 163 164 # without SOURCE_DATE_EPOCH 165 del env['SOURCE_DATE_EPOCH'] 166 ns = self.parse_args(['--randomize']) 167 regrtest = main.Regrtest(ns) 168 self.assertTrue(regrtest.randomize) 169 self.assertIsInstance(regrtest.random_seed, int) 170 171 def test_randseed(self): 172 ns = self.parse_args(['--randseed', '12345']) 173 self.assertEqual(ns.random_seed, 12345) 174 self.assertTrue(ns.randomize) 175 self.checkError(['--randseed'], 'expected one argument') 176 self.checkError(['--randseed', 'foo'], 'invalid int value') 177 178 def test_fromfile(self): 179 for opt in '-f', '--fromfile': 180 with self.subTest(opt=opt): 181 ns = self.parse_args([opt, 'foo']) 182 self.assertEqual(ns.fromfile, 'foo') 183 self.checkError([opt], 'expected one argument') 184 self.checkError([opt, 'foo', '-s'], "don't go together") 185 186 def test_exclude(self): 187 for opt in '-x', '--exclude': 188 with self.subTest(opt=opt): 189 ns = self.parse_args([opt]) 190 self.assertTrue(ns.exclude) 191 192 def test_single(self): 193 for opt in '-s', '--single': 194 with self.subTest(opt=opt): 195 ns = self.parse_args([opt]) 196 self.assertTrue(ns.single) 197 self.checkError([opt, '-f', 'foo'], "don't go together") 198 199 def test_match(self): 200 for opt in '-m', '--match': 201 with self.subTest(opt=opt): 202 ns = self.parse_args([opt, 'pattern']) 203 self.assertEqual(ns.match_tests, [('pattern', True)]) 204 self.checkError([opt], 'expected one argument') 205 206 for opt in '-i', '--ignore': 207 with self.subTest(opt=opt): 208 ns = self.parse_args([opt, 'pattern']) 209 self.assertEqual(ns.match_tests, [('pattern', False)]) 210 self.checkError([opt], 'expected one argument') 211 212 ns = self.parse_args(['-m', 'pattern1', '-m', 'pattern2']) 213 self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', True)]) 214 215 ns = self.parse_args(['-m', 'pattern1', '-i', 'pattern2']) 216 self.assertEqual(ns.match_tests, [('pattern1', True), ('pattern2', False)]) 217 218 ns = self.parse_args(['-i', 'pattern1', '-m', 'pattern2']) 219 self.assertEqual(ns.match_tests, [('pattern1', False), ('pattern2', True)]) 220 221 self.addCleanup(os_helper.unlink, os_helper.TESTFN) 222 with open(os_helper.TESTFN, "w") as fp: 223 print('matchfile1', file=fp) 224 print('matchfile2', file=fp) 225 226 filename = os.path.abspath(os_helper.TESTFN) 227 ns = self.parse_args(['-m', 'match', '--matchfile', filename]) 228 self.assertEqual(ns.match_tests, 229 [('match', True), ('matchfile1', True), ('matchfile2', True)]) 230 231 ns = self.parse_args(['-i', 'match', '--ignorefile', filename]) 232 self.assertEqual(ns.match_tests, 233 [('match', False), ('matchfile1', False), ('matchfile2', False)]) 234 235 def test_failfast(self): 236 for opt in '-G', '--failfast': 237 with self.subTest(opt=opt): 238 ns = self.parse_args([opt, '-v']) 239 self.assertTrue(ns.failfast) 240 ns = self.parse_args([opt, '-W']) 241 self.assertTrue(ns.failfast) 242 self.checkError([opt], '-G/--failfast needs either -v or -W') 243 244 def test_use(self): 245 for opt in '-u', '--use': 246 with self.subTest(opt=opt): 247 ns = self.parse_args([opt, 'gui,network']) 248 self.assertEqual(ns.use_resources, ['gui', 'network']) 249 250 ns = self.parse_args([opt, 'gui,none,network']) 251 self.assertEqual(ns.use_resources, ['network']) 252 253 expected = list(cmdline.ALL_RESOURCES) 254 expected.remove('gui') 255 ns = self.parse_args([opt, 'all,-gui']) 256 self.assertEqual(ns.use_resources, expected) 257 self.checkError([opt], 'expected one argument') 258 self.checkError([opt, 'foo'], 'invalid resource') 259 260 # all + a resource not part of "all" 261 ns = self.parse_args([opt, 'all,tzdata']) 262 self.assertEqual(ns.use_resources, 263 list(cmdline.ALL_RESOURCES) + ['tzdata']) 264 265 # test another resource which is not part of "all" 266 ns = self.parse_args([opt, 'extralargefile']) 267 self.assertEqual(ns.use_resources, ['extralargefile']) 268 269 def test_memlimit(self): 270 for opt in '-M', '--memlimit': 271 with self.subTest(opt=opt): 272 ns = self.parse_args([opt, '4G']) 273 self.assertEqual(ns.memlimit, '4G') 274 self.checkError([opt], 'expected one argument') 275 276 def test_testdir(self): 277 ns = self.parse_args(['--testdir', 'foo']) 278 self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo')) 279 self.checkError(['--testdir'], 'expected one argument') 280 281 def test_runleaks(self): 282 for opt in '-L', '--runleaks': 283 with self.subTest(opt=opt): 284 ns = self.parse_args([opt]) 285 self.assertTrue(ns.runleaks) 286 287 def test_huntrleaks(self): 288 for opt in '-R', '--huntrleaks': 289 with self.subTest(opt=opt): 290 ns = self.parse_args([opt, ':']) 291 self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt')) 292 ns = self.parse_args([opt, '6:']) 293 self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt')) 294 ns = self.parse_args([opt, ':3']) 295 self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt')) 296 ns = self.parse_args([opt, '6:3:leaks.log']) 297 self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log')) 298 self.checkError([opt], 'expected one argument') 299 self.checkError([opt, '6'], 300 'needs 2 or 3 colon-separated arguments') 301 self.checkError([opt, 'foo:'], 'invalid huntrleaks value') 302 self.checkError([opt, '6:foo'], 'invalid huntrleaks value') 303 304 def test_multiprocess(self): 305 for opt in '-j', '--multiprocess': 306 with self.subTest(opt=opt): 307 ns = self.parse_args([opt, '2']) 308 self.assertEqual(ns.use_mp, 2) 309 self.checkError([opt], 'expected one argument') 310 self.checkError([opt, 'foo'], 'invalid int value') 311 312 def test_coverage_sequential(self): 313 for opt in '-T', '--coverage': 314 with self.subTest(opt=opt): 315 with support.captured_stderr() as stderr: 316 ns = self.parse_args([opt]) 317 self.assertTrue(ns.trace) 318 self.assertIn( 319 "collecting coverage without -j is imprecise", 320 stderr.getvalue(), 321 ) 322 323 @unittest.skipUnless(support.Py_DEBUG, 'need a debug build') 324 def test_coverage_mp(self): 325 for opt in '-T', '--coverage': 326 with self.subTest(opt=opt): 327 ns = self.parse_args([opt, '-j1']) 328 self.assertTrue(ns.trace) 329 330 def test_coverdir(self): 331 for opt in '-D', '--coverdir': 332 with self.subTest(opt=opt): 333 ns = self.parse_args([opt, 'foo']) 334 self.assertEqual(ns.coverdir, 335 os.path.join(os_helper.SAVEDCWD, 'foo')) 336 self.checkError([opt], 'expected one argument') 337 338 def test_nocoverdir(self): 339 for opt in '-N', '--nocoverdir': 340 with self.subTest(opt=opt): 341 ns = self.parse_args([opt]) 342 self.assertIsNone(ns.coverdir) 343 344 def test_threshold(self): 345 for opt in '-t', '--threshold': 346 with self.subTest(opt=opt): 347 ns = self.parse_args([opt, '1000']) 348 self.assertEqual(ns.threshold, 1000) 349 self.checkError([opt], 'expected one argument') 350 self.checkError([opt, 'foo'], 'invalid int value') 351 352 def test_nowindows(self): 353 for opt in '-n', '--nowindows': 354 with self.subTest(opt=opt): 355 with contextlib.redirect_stderr(io.StringIO()) as stderr: 356 ns = self.parse_args([opt]) 357 self.assertTrue(ns.nowindows) 358 err = stderr.getvalue() 359 self.assertIn('the --nowindows (-n) option is deprecated', err) 360 361 def test_forever(self): 362 for opt in '-F', '--forever': 363 with self.subTest(opt=opt): 364 ns = self.parse_args([opt]) 365 self.assertTrue(ns.forever) 366 367 def test_unrecognized_argument(self): 368 self.checkError(['--xxx'], 'usage:') 369 370 def test_long_option__partial(self): 371 ns = self.parse_args(['--qui']) 372 self.assertTrue(ns.quiet) 373 self.assertEqual(ns.verbose, 0) 374 375 def test_two_options(self): 376 ns = self.parse_args(['--quiet', '--exclude']) 377 self.assertTrue(ns.quiet) 378 self.assertEqual(ns.verbose, 0) 379 self.assertTrue(ns.exclude) 380 381 def test_option_with_empty_string_value(self): 382 ns = self.parse_args(['--start', '']) 383 self.assertEqual(ns.start, '') 384 385 def test_arg(self): 386 ns = self.parse_args(['foo']) 387 self.assertEqual(ns.args, ['foo']) 388 389 def test_option_and_arg(self): 390 ns = self.parse_args(['--quiet', 'foo']) 391 self.assertTrue(ns.quiet) 392 self.assertEqual(ns.verbose, 0) 393 self.assertEqual(ns.args, ['foo']) 394 395 def test_arg_option_arg(self): 396 ns = self.parse_args(['test_unaryop', '-v', 'test_binop']) 397 self.assertEqual(ns.verbose, 1) 398 self.assertEqual(ns.args, ['test_unaryop', 'test_binop']) 399 400 def test_unknown_option(self): 401 self.checkError(['--unknown-option'], 402 'unrecognized arguments: --unknown-option') 403 404 def create_regrtest(self, args): 405 ns = cmdline._parse_args(args) 406 407 # Check Regrtest attributes which are more reliable than Namespace 408 # which has an unclear API 409 with os_helper.EnvironmentVarGuard() as env: 410 # Ignore SOURCE_DATE_EPOCH env var if it's set 411 if 'SOURCE_DATE_EPOCH' in env: 412 del env['SOURCE_DATE_EPOCH'] 413 414 regrtest = main.Regrtest(ns) 415 416 return regrtest 417 418 def check_ci_mode(self, args, use_resources, rerun=True): 419 regrtest = self.create_regrtest(args) 420 self.assertEqual(regrtest.num_workers, -1) 421 self.assertEqual(regrtest.want_rerun, rerun) 422 self.assertTrue(regrtest.randomize) 423 self.assertIsInstance(regrtest.random_seed, int) 424 self.assertTrue(regrtest.fail_env_changed) 425 self.assertTrue(regrtest.print_slowest) 426 self.assertTrue(regrtest.output_on_failure) 427 self.assertEqual(sorted(regrtest.use_resources), sorted(use_resources)) 428 return regrtest 429 430 def test_fast_ci(self): 431 args = ['--fast-ci'] 432 use_resources = sorted(cmdline.ALL_RESOURCES) 433 use_resources.remove('cpu') 434 regrtest = self.check_ci_mode(args, use_resources) 435 self.assertEqual(regrtest.timeout, 10 * 60) 436 437 def test_fast_ci_python_cmd(self): 438 args = ['--fast-ci', '--python', 'python -X dev'] 439 use_resources = sorted(cmdline.ALL_RESOURCES) 440 use_resources.remove('cpu') 441 regrtest = self.check_ci_mode(args, use_resources, rerun=False) 442 self.assertEqual(regrtest.timeout, 10 * 60) 443 self.assertEqual(regrtest.python_cmd, ('python', '-X', 'dev')) 444 445 def test_fast_ci_resource(self): 446 # it should be possible to override resources individually 447 args = ['--fast-ci', '-u-network'] 448 use_resources = sorted(cmdline.ALL_RESOURCES) 449 use_resources.remove('cpu') 450 use_resources.remove('network') 451 self.check_ci_mode(args, use_resources) 452 453 def test_slow_ci(self): 454 args = ['--slow-ci'] 455 use_resources = sorted(cmdline.ALL_RESOURCES) 456 regrtest = self.check_ci_mode(args, use_resources) 457 self.assertEqual(regrtest.timeout, 20 * 60) 458 459 def test_dont_add_python_opts(self): 460 args = ['--dont-add-python-opts'] 461 ns = cmdline._parse_args(args) 462 self.assertFalse(ns._add_python_opts) 463 464 def test_bisect(self): 465 args = ['--bisect'] 466 regrtest = self.create_regrtest(args) 467 self.assertTrue(regrtest.want_bisect) 468 469 def test_verbose3_huntrleaks(self): 470 args = ['-R', '3:10', '--verbose3'] 471 with support.captured_stderr(): 472 regrtest = self.create_regrtest(args) 473 self.assertIsNotNone(regrtest.hunt_refleak) 474 self.assertEqual(regrtest.hunt_refleak.warmups, 3) 475 self.assertEqual(regrtest.hunt_refleak.runs, 10) 476 self.assertFalse(regrtest.output_on_failure) 477 478 def test_single_process(self): 479 args = ['-j2', '--single-process'] 480 with support.captured_stderr(): 481 regrtest = self.create_regrtest(args) 482 self.assertEqual(regrtest.num_workers, 0) 483 self.assertTrue(regrtest.single_process) 484 485 args = ['--fast-ci', '--single-process'] 486 with support.captured_stderr(): 487 regrtest = self.create_regrtest(args) 488 self.assertEqual(regrtest.num_workers, 0) 489 self.assertTrue(regrtest.single_process) 490 491 492@dataclasses.dataclass(slots=True) 493class Rerun: 494 name: str 495 match: str | None 496 success: bool 497 498 499class BaseTestCase(unittest.TestCase): 500 TEST_UNIQUE_ID = 1 501 TESTNAME_PREFIX = 'test_regrtest_' 502 TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+' 503 504 def setUp(self): 505 self.testdir = os.path.realpath(os.path.dirname(__file__)) 506 507 self.tmptestdir = tempfile.mkdtemp() 508 self.addCleanup(os_helper.rmtree, self.tmptestdir) 509 510 def create_test(self, name=None, code=None): 511 if not name: 512 name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID 513 BaseTestCase.TEST_UNIQUE_ID += 1 514 515 if code is None: 516 code = textwrap.dedent(""" 517 import unittest 518 519 class Tests(unittest.TestCase): 520 def test_empty_test(self): 521 pass 522 """) 523 524 # test_regrtest cannot be run twice in parallel because 525 # of setUp() and create_test() 526 name = self.TESTNAME_PREFIX + name 527 path = os.path.join(self.tmptestdir, name + '.py') 528 529 self.addCleanup(os_helper.unlink, path) 530 # Use 'x' mode to ensure that we do not override existing tests 531 try: 532 with open(path, 'x', encoding='utf-8') as fp: 533 fp.write(code) 534 except PermissionError as exc: 535 if not sysconfig.is_python_build(): 536 self.skipTest("cannot write %s: %s" % (path, exc)) 537 raise 538 return name 539 540 def regex_search(self, regex, output): 541 match = re.search(regex, output, re.MULTILINE) 542 if not match: 543 self.fail("%r not found in %r" % (regex, output)) 544 return match 545 546 def check_line(self, output, pattern, full=False, regex=True): 547 if not regex: 548 pattern = re.escape(pattern) 549 if full: 550 pattern += '\n' 551 regex = re.compile(r'^' + pattern, re.MULTILINE) 552 self.assertRegex(output, regex) 553 554 def parse_executed_tests(self, output): 555 regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)' 556 % (LOG_PREFIX, self.TESTNAME_REGEX)) 557 parser = re.finditer(regex, output, re.MULTILINE) 558 return list(match.group(1) for match in parser) 559 560 def check_executed_tests(self, output, tests, *, stats, 561 skipped=(), failed=(), 562 env_changed=(), omitted=(), 563 rerun=None, run_no_tests=(), 564 resource_denied=(), 565 randomize=False, parallel=False, interrupted=False, 566 fail_env_changed=False, 567 forever=False, filtered=False): 568 if isinstance(tests, str): 569 tests = [tests] 570 if isinstance(skipped, str): 571 skipped = [skipped] 572 if isinstance(resource_denied, str): 573 resource_denied = [resource_denied] 574 if isinstance(failed, str): 575 failed = [failed] 576 if isinstance(env_changed, str): 577 env_changed = [env_changed] 578 if isinstance(omitted, str): 579 omitted = [omitted] 580 if isinstance(run_no_tests, str): 581 run_no_tests = [run_no_tests] 582 if isinstance(stats, int): 583 stats = TestStats(stats) 584 if parallel: 585 randomize = True 586 587 rerun_failed = [] 588 if rerun is not None and not env_changed: 589 failed = [rerun.name] 590 if not rerun.success: 591 rerun_failed.append(rerun.name) 592 593 executed = self.parse_executed_tests(output) 594 total_tests = list(tests) 595 if rerun is not None: 596 total_tests.append(rerun.name) 597 if randomize: 598 self.assertEqual(set(executed), set(total_tests), output) 599 else: 600 self.assertEqual(executed, total_tests, output) 601 602 def plural(count): 603 return 's' if count != 1 else '' 604 605 def list_regex(line_format, tests): 606 count = len(tests) 607 names = ' '.join(sorted(tests)) 608 regex = line_format % (count, plural(count)) 609 regex = r'%s:\n %s$' % (regex, names) 610 return regex 611 612 if skipped: 613 regex = list_regex('%s test%s skipped', skipped) 614 self.check_line(output, regex) 615 616 if resource_denied: 617 regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied) 618 self.check_line(output, regex) 619 620 if failed: 621 regex = list_regex('%s test%s failed', failed) 622 self.check_line(output, regex) 623 624 if env_changed: 625 regex = list_regex(r'%s test%s altered the execution environment ' 626 r'\(env changed\)', 627 env_changed) 628 self.check_line(output, regex) 629 630 if omitted: 631 regex = list_regex('%s test%s omitted', omitted) 632 self.check_line(output, regex) 633 634 if rerun is not None: 635 regex = list_regex('%s re-run test%s', [rerun.name]) 636 self.check_line(output, regex) 637 regex = LOG_PREFIX + r"Re-running 1 failed tests in verbose mode" 638 self.check_line(output, regex) 639 regex = fr"Re-running {rerun.name} in verbose mode" 640 if rerun.match: 641 regex = fr"{regex} \(matching: {rerun.match}\)" 642 self.check_line(output, regex) 643 644 if run_no_tests: 645 regex = list_regex('%s test%s run no tests', run_no_tests) 646 self.check_line(output, regex) 647 648 good = (len(tests) - len(skipped) - len(resource_denied) - len(failed) 649 - len(omitted) - len(env_changed) - len(run_no_tests)) 650 if good: 651 regex = r'%s test%s OK\.' % (good, plural(good)) 652 if not skipped and not failed and (rerun is None or rerun.success) and good > 1: 653 regex = 'All %s' % regex 654 self.check_line(output, regex, full=True) 655 656 if interrupted: 657 self.check_line(output, 'Test suite interrupted by signal SIGINT.') 658 659 # Total tests 660 text = f'run={stats.tests_run:,}' 661 if filtered: 662 text = fr'{text} \(filtered\)' 663 parts = [text] 664 if stats.failures: 665 parts.append(f'failures={stats.failures:,}') 666 if stats.skipped: 667 parts.append(f'skipped={stats.skipped:,}') 668 line = fr'Total tests: {" ".join(parts)}' 669 self.check_line(output, line, full=True) 670 671 # Total test files 672 run = len(total_tests) - len(resource_denied) 673 if rerun is not None: 674 total_failed = len(rerun_failed) 675 total_rerun = 1 676 else: 677 total_failed = len(failed) 678 total_rerun = 0 679 if interrupted: 680 run = 0 681 text = f'run={run}' 682 if not forever: 683 text = f'{text}/{len(tests)}' 684 if filtered: 685 text = fr'{text} \(filtered\)' 686 report = [text] 687 for name, ntest in ( 688 ('failed', total_failed), 689 ('env_changed', len(env_changed)), 690 ('skipped', len(skipped)), 691 ('resource_denied', len(resource_denied)), 692 ('rerun', total_rerun), 693 ('run_no_tests', len(run_no_tests)), 694 ): 695 if ntest: 696 report.append(f'{name}={ntest}') 697 line = fr'Total test files: {" ".join(report)}' 698 self.check_line(output, line, full=True) 699 700 # Result 701 state = [] 702 if failed: 703 state.append('FAILURE') 704 elif fail_env_changed and env_changed: 705 state.append('ENV CHANGED') 706 if interrupted: 707 state.append('INTERRUPTED') 708 if not any((good, failed, interrupted, skipped, 709 env_changed, fail_env_changed)): 710 state.append("NO TESTS RAN") 711 elif not state: 712 state.append('SUCCESS') 713 state = ', '.join(state) 714 if rerun is not None: 715 new_state = 'SUCCESS' if rerun.success else 'FAILURE' 716 state = f'{state} then {new_state}' 717 self.check_line(output, f'Result: {state}', full=True) 718 719 def parse_random_seed(self, output: str) -> str: 720 match = self.regex_search(r'Using random seed: (.*)', output) 721 return match.group(1) 722 723 def run_command(self, args, input=None, exitcode=0, **kw): 724 if not input: 725 input = '' 726 if 'stderr' not in kw: 727 kw['stderr'] = subprocess.STDOUT 728 729 env = kw.pop('env', None) 730 if env is None: 731 env = dict(os.environ) 732 env.pop('SOURCE_DATE_EPOCH', None) 733 734 proc = subprocess.run(args, 735 text=True, 736 input=input, 737 stdout=subprocess.PIPE, 738 env=env, 739 **kw) 740 if proc.returncode != exitcode: 741 msg = ("Command %s failed with exit code %s, but exit code %s expected!\n" 742 "\n" 743 "stdout:\n" 744 "---\n" 745 "%s\n" 746 "---\n" 747 % (str(args), proc.returncode, exitcode, proc.stdout)) 748 if proc.stderr: 749 msg += ("\n" 750 "stderr:\n" 751 "---\n" 752 "%s" 753 "---\n" 754 % proc.stderr) 755 self.fail(msg) 756 return proc 757 758 def run_python(self, args, **kw): 759 extraargs = [] 760 if 'uops' in sys._xoptions: 761 # Pass -X uops along 762 extraargs.extend(['-X', 'uops']) 763 args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args] 764 proc = self.run_command(args, **kw) 765 return proc.stdout 766 767 768class CheckActualTests(BaseTestCase): 769 def test_finds_expected_number_of_tests(self): 770 """ 771 Check that regrtest appears to find the expected set of tests. 772 """ 773 args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests'] 774 output = self.run_python(args) 775 rough_number_of_tests_found = len(output.splitlines()) 776 actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)), 777 'test*.py') 778 rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob)) 779 # We're not trying to duplicate test finding logic in here, 780 # just give a rough estimate of how many there should be and 781 # be near that. This is a regression test to prevent mishaps 782 # such as https://bugs.python.org/issue37667 in the future. 783 # If you need to change the values in here during some 784 # mythical future test suite reorganization, don't go 785 # overboard with logic and keep that goal in mind. 786 self.assertGreater(rough_number_of_tests_found, 787 rough_counted_test_py_files*9//10, 788 msg='Unexpectedly low number of tests found in:\n' 789 f'{", ".join(output.splitlines())}') 790 791 792class ProgramsTestCase(BaseTestCase): 793 """ 794 Test various ways to run the Python test suite. Use options close 795 to options used on the buildbot. 796 """ 797 798 NTEST = 4 799 800 def setUp(self): 801 super().setUp() 802 803 # Create NTEST tests doing nothing 804 self.tests = [self.create_test() for index in range(self.NTEST)] 805 806 self.python_args = ['-Wd', '-E', '-bb'] 807 self.regrtest_args = ['-uall', '-rwW', 808 '--testdir=%s' % self.tmptestdir] 809 self.regrtest_args.extend(('--timeout', '3600', '-j4')) 810 if sys.platform == 'win32': 811 self.regrtest_args.append('-n') 812 813 def check_output(self, output): 814 randseed = self.parse_random_seed(output) 815 self.assertTrue(randseed.isdigit(), randseed) 816 817 self.check_executed_tests(output, self.tests, 818 randomize=True, stats=len(self.tests)) 819 820 def run_tests(self, args, env=None): 821 output = self.run_python(args, env=env) 822 self.check_output(output) 823 824 def test_script_regrtest(self): 825 # Lib/test/regrtest.py 826 script = os.path.join(self.testdir, 'regrtest.py') 827 828 args = [*self.python_args, script, *self.regrtest_args, *self.tests] 829 self.run_tests(args) 830 831 def test_module_test(self): 832 # -m test 833 args = [*self.python_args, '-m', 'test', 834 *self.regrtest_args, *self.tests] 835 self.run_tests(args) 836 837 def test_module_regrtest(self): 838 # -m test.regrtest 839 args = [*self.python_args, '-m', 'test.regrtest', 840 *self.regrtest_args, *self.tests] 841 self.run_tests(args) 842 843 def test_module_autotest(self): 844 # -m test.autotest 845 args = [*self.python_args, '-m', 'test.autotest', 846 *self.regrtest_args, *self.tests] 847 self.run_tests(args) 848 849 def test_module_from_test_autotest(self): 850 # from test import autotest 851 code = 'from test import autotest' 852 args = [*self.python_args, '-c', code, 853 *self.regrtest_args, *self.tests] 854 self.run_tests(args) 855 856 def test_script_autotest(self): 857 # Lib/test/autotest.py 858 script = os.path.join(self.testdir, 'autotest.py') 859 args = [*self.python_args, script, *self.regrtest_args, *self.tests] 860 self.run_tests(args) 861 862 def run_batch(self, *args): 863 proc = self.run_command(args) 864 self.check_output(proc.stdout) 865 866 @unittest.skipUnless(sysconfig.is_python_build(), 867 'test.bat script is not installed') 868 @unittest.skipUnless(sys.platform == 'win32', 'Windows only') 869 def test_tools_buildbot_test(self): 870 # Tools\buildbot\test.bat 871 script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat') 872 test_args = ['--testdir=%s' % self.tmptestdir] 873 if platform.machine() == 'ARM64': 874 test_args.append('-arm64') # ARM 64-bit build 875 elif platform.machine() == 'ARM': 876 test_args.append('-arm32') # 32-bit ARM build 877 elif platform.architecture()[0] == '64bit': 878 test_args.append('-x64') # 64-bit build 879 if not support.Py_DEBUG: 880 test_args.append('+d') # Release build, use python.exe 881 if sysconfig.get_config_var("Py_GIL_DISABLED"): 882 test_args.append('--disable-gil') 883 self.run_batch(script, *test_args, *self.tests) 884 885 @unittest.skipUnless(sys.platform == 'win32', 'Windows only') 886 def test_pcbuild_rt(self): 887 # PCbuild\rt.bat 888 script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat') 889 if not os.path.isfile(script): 890 self.skipTest(f'File "{script}" does not exist') 891 rt_args = ["-q"] # Quick, don't run tests twice 892 if platform.machine() == 'ARM64': 893 rt_args.append('-arm64') # ARM 64-bit build 894 elif platform.machine() == 'ARM': 895 rt_args.append('-arm32') # 32-bit ARM build 896 elif platform.architecture()[0] == '64bit': 897 rt_args.append('-x64') # 64-bit build 898 if support.Py_DEBUG: 899 rt_args.append('-d') # Debug build, use python_d.exe 900 if sysconfig.get_config_var("Py_GIL_DISABLED"): 901 rt_args.append('--disable-gil') 902 self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests) 903 904 905class ArgsTestCase(BaseTestCase): 906 """ 907 Test arguments of the Python test suite. 908 """ 909 910 def run_tests(self, *testargs, **kw): 911 cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs] 912 return self.run_python(cmdargs, **kw) 913 914 def test_success(self): 915 code = textwrap.dedent(""" 916 import unittest 917 918 class PassingTests(unittest.TestCase): 919 def test_test1(self): 920 pass 921 922 def test_test2(self): 923 pass 924 925 def test_test3(self): 926 pass 927 """) 928 tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)] 929 930 output = self.run_tests(*tests) 931 self.check_executed_tests(output, tests, 932 stats=3 * len(tests)) 933 934 def test_skip(self): 935 code = textwrap.dedent(""" 936 import unittest 937 raise unittest.SkipTest("nope") 938 """) 939 test_ok = self.create_test('ok') 940 test_skip = self.create_test('skip', code=code) 941 tests = [test_ok, test_skip] 942 943 output = self.run_tests(*tests) 944 self.check_executed_tests(output, tests, 945 skipped=[test_skip], 946 stats=1) 947 948 def test_failing_test(self): 949 # test a failing test 950 code = textwrap.dedent(""" 951 import unittest 952 953 class FailingTest(unittest.TestCase): 954 def test_failing(self): 955 self.fail("bug") 956 """) 957 test_ok = self.create_test('ok') 958 test_failing = self.create_test('failing', code=code) 959 tests = [test_ok, test_failing] 960 961 output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST) 962 self.check_executed_tests(output, tests, failed=test_failing, 963 stats=TestStats(2, 1)) 964 965 def test_resources(self): 966 # test -u command line option 967 tests = {} 968 for resource in ('audio', 'network'): 969 code = textwrap.dedent(""" 970 from test import support; support.requires(%r) 971 import unittest 972 class PassingTest(unittest.TestCase): 973 def test_pass(self): 974 pass 975 """ % resource) 976 977 tests[resource] = self.create_test(resource, code) 978 test_names = sorted(tests.values()) 979 980 # -u all: 2 resources enabled 981 output = self.run_tests('-u', 'all', *test_names) 982 self.check_executed_tests(output, test_names, stats=2) 983 984 # -u audio: 1 resource enabled 985 output = self.run_tests('-uaudio', *test_names) 986 self.check_executed_tests(output, test_names, 987 resource_denied=tests['network'], 988 stats=1) 989 990 # no option: 0 resources enabled 991 output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN) 992 self.check_executed_tests(output, test_names, 993 resource_denied=test_names, 994 stats=0) 995 996 def test_random(self): 997 # test -r and --randseed command line option 998 code = textwrap.dedent(""" 999 import random 1000 print("TESTRANDOM: %s" % random.randint(1, 1000)) 1001 """) 1002 test = self.create_test('random', code) 1003 1004 # first run to get the output with the random seed 1005 output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN) 1006 randseed = self.parse_random_seed(output) 1007 match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output) 1008 test_random = int(match.group(1)) 1009 1010 # try to reproduce with the random seed 1011 output = self.run_tests('-r', f'--randseed={randseed}', test, 1012 exitcode=EXITCODE_NO_TESTS_RAN) 1013 randseed2 = self.parse_random_seed(output) 1014 self.assertEqual(randseed2, randseed) 1015 1016 match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output) 1017 test_random2 = int(match.group(1)) 1018 self.assertEqual(test_random2, test_random) 1019 1020 # check that random.seed is used by default 1021 output = self.run_tests(test, exitcode=EXITCODE_NO_TESTS_RAN) 1022 randseed = self.parse_random_seed(output) 1023 self.assertTrue(randseed.isdigit(), randseed) 1024 1025 # check SOURCE_DATE_EPOCH (integer) 1026 timestamp = '1697839080' 1027 env = dict(os.environ, SOURCE_DATE_EPOCH=timestamp) 1028 output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN, 1029 env=env) 1030 randseed = self.parse_random_seed(output) 1031 self.assertEqual(randseed, timestamp) 1032 self.check_line(output, 'TESTRANDOM: 520') 1033 1034 # check SOURCE_DATE_EPOCH (string) 1035 env = dict(os.environ, SOURCE_DATE_EPOCH='XYZ') 1036 output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN, 1037 env=env) 1038 randseed = self.parse_random_seed(output) 1039 self.assertEqual(randseed, 'XYZ') 1040 self.check_line(output, 'TESTRANDOM: 22') 1041 1042 # check SOURCE_DATE_EPOCH (empty string): ignore the env var 1043 env = dict(os.environ, SOURCE_DATE_EPOCH='') 1044 output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN, 1045 env=env) 1046 randseed = self.parse_random_seed(output) 1047 self.assertTrue(randseed.isdigit(), randseed) 1048 1049 def test_fromfile(self): 1050 # test --fromfile 1051 tests = [self.create_test() for index in range(5)] 1052 1053 # Write the list of files using a format similar to regrtest output: 1054 # [1/2] test_1 1055 # [2/2] test_2 1056 filename = os_helper.TESTFN 1057 self.addCleanup(os_helper.unlink, filename) 1058 1059 # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec' 1060 with open(filename, "w") as fp: 1061 previous = None 1062 for index, name in enumerate(tests, 1): 1063 line = ("00:00:%02i [%s/%s] %s" 1064 % (index, index, len(tests), name)) 1065 if previous: 1066 line += " -- %s took 0 sec" % previous 1067 print(line, file=fp) 1068 previous = name 1069 1070 output = self.run_tests('--fromfile', filename) 1071 stats = len(tests) 1072 self.check_executed_tests(output, tests, stats=stats) 1073 1074 # test format '[2/7] test_opcodes' 1075 with open(filename, "w") as fp: 1076 for index, name in enumerate(tests, 1): 1077 print("[%s/%s] %s" % (index, len(tests), name), file=fp) 1078 1079 output = self.run_tests('--fromfile', filename) 1080 self.check_executed_tests(output, tests, stats=stats) 1081 1082 # test format 'test_opcodes' 1083 with open(filename, "w") as fp: 1084 for name in tests: 1085 print(name, file=fp) 1086 1087 output = self.run_tests('--fromfile', filename) 1088 self.check_executed_tests(output, tests, stats=stats) 1089 1090 # test format 'Lib/test/test_opcodes.py' 1091 with open(filename, "w") as fp: 1092 for name in tests: 1093 print('Lib/test/%s.py' % name, file=fp) 1094 1095 output = self.run_tests('--fromfile', filename) 1096 self.check_executed_tests(output, tests, stats=stats) 1097 1098 def test_interrupted(self): 1099 code = TEST_INTERRUPTED 1100 test = self.create_test('sigint', code=code) 1101 output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED) 1102 self.check_executed_tests(output, test, omitted=test, 1103 interrupted=True, stats=0) 1104 1105 def test_slowest(self): 1106 # test --slowest 1107 tests = [self.create_test() for index in range(3)] 1108 output = self.run_tests("--slowest", *tests) 1109 self.check_executed_tests(output, tests, stats=len(tests)) 1110 regex = ('10 slowest tests:\n' 1111 '(?:- %s: .*\n){%s}' 1112 % (self.TESTNAME_REGEX, len(tests))) 1113 self.check_line(output, regex) 1114 1115 def test_slowest_interrupted(self): 1116 # Issue #25373: test --slowest with an interrupted test 1117 code = TEST_INTERRUPTED 1118 test = self.create_test("sigint", code=code) 1119 1120 for multiprocessing in (False, True): 1121 with self.subTest(multiprocessing=multiprocessing): 1122 if multiprocessing: 1123 args = ("--slowest", "-j2", test) 1124 else: 1125 args = ("--slowest", test) 1126 output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED) 1127 self.check_executed_tests(output, test, 1128 omitted=test, interrupted=True, 1129 stats=0) 1130 1131 regex = ('10 slowest tests:\n') 1132 self.check_line(output, regex) 1133 1134 def test_coverage(self): 1135 # test --coverage 1136 test = self.create_test('coverage') 1137 output = self.run_tests("--coverage", test) 1138 self.check_executed_tests(output, [test], stats=1) 1139 regex = (r'lines +cov% +module +\(path\)\n' 1140 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+') 1141 self.check_line(output, regex) 1142 1143 def test_wait(self): 1144 # test --wait 1145 test = self.create_test('wait') 1146 output = self.run_tests("--wait", test, input='key') 1147 self.check_line(output, 'Press any key to continue') 1148 1149 def test_forever(self): 1150 # test --forever 1151 code = textwrap.dedent(""" 1152 import builtins 1153 import unittest 1154 1155 class ForeverTester(unittest.TestCase): 1156 def test_run(self): 1157 # Store the state in the builtins module, because the test 1158 # module is reload at each run 1159 if 'RUN' in builtins.__dict__: 1160 builtins.__dict__['RUN'] += 1 1161 if builtins.__dict__['RUN'] >= 3: 1162 self.fail("fail at the 3rd runs") 1163 else: 1164 builtins.__dict__['RUN'] = 1 1165 """) 1166 test = self.create_test('forever', code=code) 1167 1168 # --forever 1169 output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST) 1170 self.check_executed_tests(output, [test]*3, failed=test, 1171 stats=TestStats(3, 1), 1172 forever=True) 1173 1174 # --forever --rerun 1175 output = self.run_tests('--forever', '--rerun', test, exitcode=0) 1176 self.check_executed_tests(output, [test]*3, 1177 rerun=Rerun(test, 1178 match='test_run', 1179 success=True), 1180 stats=TestStats(4, 1), 1181 forever=True) 1182 1183 @without_optimizer 1184 def check_leak(self, code, what, *, run_workers=False): 1185 test = self.create_test('huntrleaks', code=code) 1186 1187 filename = 'reflog.txt' 1188 self.addCleanup(os_helper.unlink, filename) 1189 cmd = ['--huntrleaks', '3:3:'] 1190 if run_workers: 1191 cmd.append('-j1') 1192 cmd.append(test) 1193 output = self.run_tests(*cmd, 1194 exitcode=EXITCODE_BAD_TEST, 1195 stderr=subprocess.STDOUT) 1196 self.check_executed_tests(output, [test], failed=test, stats=1) 1197 1198 line = r'beginning 6 repetitions. .*\n123:456\n[.0-9X]{3} 111\n' 1199 self.check_line(output, line) 1200 1201 line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what) 1202 self.assertIn(line2, output) 1203 1204 with open(filename) as fp: 1205 reflog = fp.read() 1206 self.assertIn(line2, reflog) 1207 1208 @unittest.skipUnless(support.Py_DEBUG, 'need a debug build') 1209 def check_huntrleaks(self, *, run_workers: bool): 1210 # test --huntrleaks 1211 code = textwrap.dedent(""" 1212 import unittest 1213 1214 GLOBAL_LIST = [] 1215 1216 class RefLeakTest(unittest.TestCase): 1217 def test_leak(self): 1218 GLOBAL_LIST.append(object()) 1219 """) 1220 self.check_leak(code, 'references', run_workers=run_workers) 1221 1222 def test_huntrleaks(self): 1223 self.check_huntrleaks(run_workers=False) 1224 1225 def test_huntrleaks_mp(self): 1226 self.check_huntrleaks(run_workers=True) 1227 1228 @unittest.skipUnless(support.Py_DEBUG, 'need a debug build') 1229 def test_huntrleaks_bisect(self): 1230 # test --huntrleaks --bisect 1231 code = textwrap.dedent(""" 1232 import unittest 1233 1234 GLOBAL_LIST = [] 1235 1236 class RefLeakTest(unittest.TestCase): 1237 def test1(self): 1238 pass 1239 1240 def test2(self): 1241 pass 1242 1243 def test3(self): 1244 GLOBAL_LIST.append(object()) 1245 1246 def test4(self): 1247 pass 1248 """) 1249 1250 test = self.create_test('huntrleaks', code=code) 1251 1252 filename = 'reflog.txt' 1253 self.addCleanup(os_helper.unlink, filename) 1254 cmd = ['--huntrleaks', '3:3:', '--bisect', test] 1255 output = self.run_tests(*cmd, 1256 exitcode=EXITCODE_BAD_TEST, 1257 stderr=subprocess.STDOUT) 1258 1259 self.assertIn(f"Bisect {test}", output) 1260 self.assertIn(f"Bisect {test}: exit code 0", output) 1261 1262 # test3 is the one which leaks 1263 self.assertIn("Bisection completed in", output) 1264 self.assertIn( 1265 "Tests (1):\n" 1266 f"* {test}.RefLeakTest.test3\n", 1267 output) 1268 1269 @unittest.skipUnless(support.Py_DEBUG, 'need a debug build') 1270 def test_huntrleaks_fd_leak(self): 1271 # test --huntrleaks for file descriptor leak 1272 code = textwrap.dedent(""" 1273 import os 1274 import unittest 1275 1276 class FDLeakTest(unittest.TestCase): 1277 def test_leak(self): 1278 fd = os.open(__file__, os.O_RDONLY) 1279 # bug: never close the file descriptor 1280 """) 1281 self.check_leak(code, 'file descriptors') 1282 1283 def test_list_tests(self): 1284 # test --list-tests 1285 tests = [self.create_test() for i in range(5)] 1286 output = self.run_tests('--list-tests', *tests) 1287 self.assertEqual(output.rstrip().splitlines(), 1288 tests) 1289 1290 def test_list_cases(self): 1291 # test --list-cases 1292 code = textwrap.dedent(""" 1293 import unittest 1294 1295 class Tests(unittest.TestCase): 1296 def test_method1(self): 1297 pass 1298 def test_method2(self): 1299 pass 1300 """) 1301 testname = self.create_test(code=code) 1302 1303 # Test --list-cases 1304 all_methods = ['%s.Tests.test_method1' % testname, 1305 '%s.Tests.test_method2' % testname] 1306 output = self.run_tests('--list-cases', testname) 1307 self.assertEqual(output.splitlines(), all_methods) 1308 1309 # Test --list-cases with --match 1310 all_methods = ['%s.Tests.test_method1' % testname] 1311 output = self.run_tests('--list-cases', 1312 '-m', 'test_method1', 1313 testname) 1314 self.assertEqual(output.splitlines(), all_methods) 1315 1316 @support.cpython_only 1317 def test_crashed(self): 1318 # Any code which causes a crash 1319 code = 'import faulthandler; faulthandler._sigsegv()' 1320 crash_test = self.create_test(name="crash", code=code) 1321 1322 tests = [crash_test] 1323 output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST) 1324 self.check_executed_tests(output, tests, failed=crash_test, 1325 parallel=True, stats=0) 1326 1327 def parse_methods(self, output): 1328 regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE) 1329 return [match.group(1) for match in regex.finditer(output)] 1330 1331 def test_ignorefile(self): 1332 code = textwrap.dedent(""" 1333 import unittest 1334 1335 class Tests(unittest.TestCase): 1336 def test_method1(self): 1337 pass 1338 def test_method2(self): 1339 pass 1340 def test_method3(self): 1341 pass 1342 def test_method4(self): 1343 pass 1344 """) 1345 testname = self.create_test(code=code) 1346 1347 # only run a subset 1348 filename = os_helper.TESTFN 1349 self.addCleanup(os_helper.unlink, filename) 1350 1351 subset = [ 1352 # only ignore the method name 1353 'test_method1', 1354 # ignore the full identifier 1355 '%s.Tests.test_method3' % testname] 1356 with open(filename, "w") as fp: 1357 for name in subset: 1358 print(name, file=fp) 1359 1360 output = self.run_tests("-v", "--ignorefile", filename, testname) 1361 methods = self.parse_methods(output) 1362 subset = ['test_method2', 'test_method4'] 1363 self.assertEqual(methods, subset) 1364 1365 def test_matchfile(self): 1366 code = textwrap.dedent(""" 1367 import unittest 1368 1369 class Tests(unittest.TestCase): 1370 def test_method1(self): 1371 pass 1372 def test_method2(self): 1373 pass 1374 def test_method3(self): 1375 pass 1376 def test_method4(self): 1377 pass 1378 """) 1379 all_methods = ['test_method1', 'test_method2', 1380 'test_method3', 'test_method4'] 1381 testname = self.create_test(code=code) 1382 1383 # by default, all methods should be run 1384 output = self.run_tests("-v", testname) 1385 methods = self.parse_methods(output) 1386 self.assertEqual(methods, all_methods) 1387 1388 # only run a subset 1389 filename = os_helper.TESTFN 1390 self.addCleanup(os_helper.unlink, filename) 1391 1392 subset = [ 1393 # only match the method name 1394 'test_method1', 1395 # match the full identifier 1396 '%s.Tests.test_method3' % testname] 1397 with open(filename, "w") as fp: 1398 for name in subset: 1399 print(name, file=fp) 1400 1401 output = self.run_tests("-v", "--matchfile", filename, testname) 1402 methods = self.parse_methods(output) 1403 subset = ['test_method1', 'test_method3'] 1404 self.assertEqual(methods, subset) 1405 1406 def test_env_changed(self): 1407 code = textwrap.dedent(""" 1408 import unittest 1409 1410 class Tests(unittest.TestCase): 1411 def test_env_changed(self): 1412 open("env_changed", "w").close() 1413 """) 1414 testname = self.create_test(code=code) 1415 1416 # don't fail by default 1417 output = self.run_tests(testname) 1418 self.check_executed_tests(output, [testname], 1419 env_changed=testname, stats=1) 1420 1421 # fail with --fail-env-changed 1422 output = self.run_tests("--fail-env-changed", testname, 1423 exitcode=EXITCODE_ENV_CHANGED) 1424 self.check_executed_tests(output, [testname], env_changed=testname, 1425 fail_env_changed=True, stats=1) 1426 1427 # rerun 1428 output = self.run_tests("--rerun", testname) 1429 self.check_executed_tests(output, [testname], 1430 env_changed=testname, 1431 rerun=Rerun(testname, 1432 match=None, 1433 success=True), 1434 stats=2) 1435 1436 def test_rerun_fail(self): 1437 # FAILURE then FAILURE 1438 code = textwrap.dedent(""" 1439 import unittest 1440 1441 class Tests(unittest.TestCase): 1442 def test_succeed(self): 1443 return 1444 1445 def test_fail_always(self): 1446 # test that always fails 1447 self.fail("bug") 1448 """) 1449 testname = self.create_test(code=code) 1450 1451 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1452 self.check_executed_tests(output, [testname], 1453 rerun=Rerun(testname, 1454 "test_fail_always", 1455 success=False), 1456 stats=TestStats(3, 2)) 1457 1458 def test_rerun_success(self): 1459 # FAILURE then SUCCESS 1460 marker_filename = os.path.abspath("regrtest_marker_filename") 1461 self.addCleanup(os_helper.unlink, marker_filename) 1462 self.assertFalse(os.path.exists(marker_filename)) 1463 1464 code = textwrap.dedent(f""" 1465 import os.path 1466 import unittest 1467 1468 marker_filename = {marker_filename!r} 1469 1470 class Tests(unittest.TestCase): 1471 def test_succeed(self): 1472 return 1473 1474 def test_fail_once(self): 1475 if not os.path.exists(marker_filename): 1476 open(marker_filename, "w").close() 1477 self.fail("bug") 1478 """) 1479 testname = self.create_test(code=code) 1480 1481 # FAILURE then SUCCESS => exit code 0 1482 output = self.run_tests("--rerun", testname, exitcode=0) 1483 self.check_executed_tests(output, [testname], 1484 rerun=Rerun(testname, 1485 match="test_fail_once", 1486 success=True), 1487 stats=TestStats(3, 1)) 1488 os_helper.unlink(marker_filename) 1489 1490 # with --fail-rerun, exit code EXITCODE_RERUN_FAIL 1491 # on "FAILURE then SUCCESS" state. 1492 output = self.run_tests("--rerun", "--fail-rerun", testname, 1493 exitcode=EXITCODE_RERUN_FAIL) 1494 self.check_executed_tests(output, [testname], 1495 rerun=Rerun(testname, 1496 match="test_fail_once", 1497 success=True), 1498 stats=TestStats(3, 1)) 1499 os_helper.unlink(marker_filename) 1500 1501 def test_rerun_setup_class_hook_failure(self): 1502 # FAILURE then FAILURE 1503 code = textwrap.dedent(""" 1504 import unittest 1505 1506 class ExampleTests(unittest.TestCase): 1507 @classmethod 1508 def setUpClass(self): 1509 raise RuntimeError('Fail') 1510 1511 def test_success(self): 1512 return 1513 """) 1514 testname = self.create_test(code=code) 1515 1516 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1517 self.check_executed_tests(output, testname, 1518 failed=[testname], 1519 rerun=Rerun(testname, 1520 match="ExampleTests", 1521 success=False), 1522 stats=0) 1523 1524 def test_rerun_teardown_class_hook_failure(self): 1525 # FAILURE then FAILURE 1526 code = textwrap.dedent(""" 1527 import unittest 1528 1529 class ExampleTests(unittest.TestCase): 1530 @classmethod 1531 def tearDownClass(self): 1532 raise RuntimeError('Fail') 1533 1534 def test_success(self): 1535 return 1536 """) 1537 testname = self.create_test(code=code) 1538 1539 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1540 self.check_executed_tests(output, testname, 1541 failed=[testname], 1542 rerun=Rerun(testname, 1543 match="ExampleTests", 1544 success=False), 1545 stats=2) 1546 1547 def test_rerun_setup_module_hook_failure(self): 1548 # FAILURE then FAILURE 1549 code = textwrap.dedent(""" 1550 import unittest 1551 1552 def setUpModule(): 1553 raise RuntimeError('Fail') 1554 1555 class ExampleTests(unittest.TestCase): 1556 def test_success(self): 1557 return 1558 """) 1559 testname = self.create_test(code=code) 1560 1561 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1562 self.check_executed_tests(output, testname, 1563 failed=[testname], 1564 rerun=Rerun(testname, 1565 match=None, 1566 success=False), 1567 stats=0) 1568 1569 def test_rerun_teardown_module_hook_failure(self): 1570 # FAILURE then FAILURE 1571 code = textwrap.dedent(""" 1572 import unittest 1573 1574 def tearDownModule(): 1575 raise RuntimeError('Fail') 1576 1577 class ExampleTests(unittest.TestCase): 1578 def test_success(self): 1579 return 1580 """) 1581 testname = self.create_test(code=code) 1582 1583 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1584 self.check_executed_tests(output, [testname], 1585 failed=[testname], 1586 rerun=Rerun(testname, 1587 match=None, 1588 success=False), 1589 stats=2) 1590 1591 def test_rerun_setup_hook_failure(self): 1592 # FAILURE then FAILURE 1593 code = textwrap.dedent(""" 1594 import unittest 1595 1596 class ExampleTests(unittest.TestCase): 1597 def setUp(self): 1598 raise RuntimeError('Fail') 1599 1600 def test_success(self): 1601 return 1602 """) 1603 testname = self.create_test(code=code) 1604 1605 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1606 self.check_executed_tests(output, testname, 1607 failed=[testname], 1608 rerun=Rerun(testname, 1609 match="test_success", 1610 success=False), 1611 stats=2) 1612 1613 def test_rerun_teardown_hook_failure(self): 1614 # FAILURE then FAILURE 1615 code = textwrap.dedent(""" 1616 import unittest 1617 1618 class ExampleTests(unittest.TestCase): 1619 def tearDown(self): 1620 raise RuntimeError('Fail') 1621 1622 def test_success(self): 1623 return 1624 """) 1625 testname = self.create_test(code=code) 1626 1627 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1628 self.check_executed_tests(output, testname, 1629 failed=[testname], 1630 rerun=Rerun(testname, 1631 match="test_success", 1632 success=False), 1633 stats=2) 1634 1635 def test_rerun_async_setup_hook_failure(self): 1636 # FAILURE then FAILURE 1637 code = textwrap.dedent(""" 1638 import unittest 1639 1640 class ExampleTests(unittest.IsolatedAsyncioTestCase): 1641 async def asyncSetUp(self): 1642 raise RuntimeError('Fail') 1643 1644 async def test_success(self): 1645 return 1646 """) 1647 testname = self.create_test(code=code) 1648 1649 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1650 self.check_executed_tests(output, testname, 1651 rerun=Rerun(testname, 1652 match="test_success", 1653 success=False), 1654 stats=2) 1655 1656 def test_rerun_async_teardown_hook_failure(self): 1657 # FAILURE then FAILURE 1658 code = textwrap.dedent(""" 1659 import unittest 1660 1661 class ExampleTests(unittest.IsolatedAsyncioTestCase): 1662 async def asyncTearDown(self): 1663 raise RuntimeError('Fail') 1664 1665 async def test_success(self): 1666 return 1667 """) 1668 testname = self.create_test(code=code) 1669 1670 output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST) 1671 self.check_executed_tests(output, testname, 1672 failed=[testname], 1673 rerun=Rerun(testname, 1674 match="test_success", 1675 success=False), 1676 stats=2) 1677 1678 def test_no_tests_ran(self): 1679 code = textwrap.dedent(""" 1680 import unittest 1681 1682 class Tests(unittest.TestCase): 1683 def test_bug(self): 1684 pass 1685 """) 1686 testname = self.create_test(code=code) 1687 1688 output = self.run_tests(testname, "-m", "nosuchtest", 1689 exitcode=EXITCODE_NO_TESTS_RAN) 1690 self.check_executed_tests(output, [testname], 1691 run_no_tests=testname, 1692 stats=0, filtered=True) 1693 1694 def test_no_tests_ran_skip(self): 1695 code = textwrap.dedent(""" 1696 import unittest 1697 1698 class Tests(unittest.TestCase): 1699 def test_skipped(self): 1700 self.skipTest("because") 1701 """) 1702 testname = self.create_test(code=code) 1703 1704 output = self.run_tests(testname) 1705 self.check_executed_tests(output, [testname], 1706 stats=TestStats(1, skipped=1)) 1707 1708 def test_no_tests_ran_multiple_tests_nonexistent(self): 1709 code = textwrap.dedent(""" 1710 import unittest 1711 1712 class Tests(unittest.TestCase): 1713 def test_bug(self): 1714 pass 1715 """) 1716 testname = self.create_test(code=code) 1717 testname2 = self.create_test(code=code) 1718 1719 output = self.run_tests(testname, testname2, "-m", "nosuchtest", 1720 exitcode=EXITCODE_NO_TESTS_RAN) 1721 self.check_executed_tests(output, [testname, testname2], 1722 run_no_tests=[testname, testname2], 1723 stats=0, filtered=True) 1724 1725 def test_no_test_ran_some_test_exist_some_not(self): 1726 code = textwrap.dedent(""" 1727 import unittest 1728 1729 class Tests(unittest.TestCase): 1730 def test_bug(self): 1731 pass 1732 """) 1733 testname = self.create_test(code=code) 1734 other_code = textwrap.dedent(""" 1735 import unittest 1736 1737 class Tests(unittest.TestCase): 1738 def test_other_bug(self): 1739 pass 1740 """) 1741 testname2 = self.create_test(code=other_code) 1742 1743 output = self.run_tests(testname, testname2, "-m", "nosuchtest", 1744 "-m", "test_other_bug", exitcode=0) 1745 self.check_executed_tests(output, [testname, testname2], 1746 run_no_tests=[testname], 1747 stats=1, filtered=True) 1748 1749 @support.cpython_only 1750 def test_uncollectable(self): 1751 try: 1752 import _testcapi 1753 except ImportError: 1754 raise unittest.SkipTest("requires _testcapi") 1755 code = textwrap.dedent(r""" 1756 import _testcapi 1757 import gc 1758 import unittest 1759 1760 @_testcapi.with_tp_del 1761 class Garbage: 1762 def __tp_del__(self): 1763 pass 1764 1765 class Tests(unittest.TestCase): 1766 def test_garbage(self): 1767 # create an uncollectable object 1768 obj = Garbage() 1769 obj.ref_cycle = obj 1770 obj = None 1771 """) 1772 testname = self.create_test(code=code) 1773 1774 output = self.run_tests("--fail-env-changed", testname, 1775 exitcode=EXITCODE_ENV_CHANGED) 1776 self.check_executed_tests(output, [testname], 1777 env_changed=[testname], 1778 fail_env_changed=True, 1779 stats=1) 1780 1781 def test_multiprocessing_timeout(self): 1782 code = textwrap.dedent(r""" 1783 import time 1784 import unittest 1785 try: 1786 import faulthandler 1787 except ImportError: 1788 faulthandler = None 1789 1790 class Tests(unittest.TestCase): 1791 # test hangs and so should be stopped by the timeout 1792 def test_sleep(self): 1793 # we want to test regrtest multiprocessing timeout, 1794 # not faulthandler timeout 1795 if faulthandler is not None: 1796 faulthandler.cancel_dump_traceback_later() 1797 1798 time.sleep(60 * 5) 1799 """) 1800 testname = self.create_test(code=code) 1801 1802 output = self.run_tests("-j2", "--timeout=1.0", testname, 1803 exitcode=EXITCODE_BAD_TEST) 1804 self.check_executed_tests(output, [testname], 1805 failed=testname, stats=0) 1806 self.assertRegex(output, 1807 re.compile('%s timed out' % testname, re.MULTILINE)) 1808 1809 def test_unraisable_exc(self): 1810 # --fail-env-changed must catch unraisable exception. 1811 # The exception must be displayed even if sys.stderr is redirected. 1812 code = textwrap.dedent(r""" 1813 import unittest 1814 import weakref 1815 from test.support import captured_stderr 1816 1817 class MyObject: 1818 pass 1819 1820 def weakref_callback(obj): 1821 raise Exception("weakref callback bug") 1822 1823 class Tests(unittest.TestCase): 1824 def test_unraisable_exc(self): 1825 obj = MyObject() 1826 ref = weakref.ref(obj, weakref_callback) 1827 with captured_stderr() as stderr: 1828 # call weakref_callback() which logs 1829 # an unraisable exception 1830 obj = None 1831 self.assertEqual(stderr.getvalue(), '') 1832 """) 1833 testname = self.create_test(code=code) 1834 1835 output = self.run_tests("--fail-env-changed", "-v", testname, 1836 exitcode=EXITCODE_ENV_CHANGED) 1837 self.check_executed_tests(output, [testname], 1838 env_changed=[testname], 1839 fail_env_changed=True, 1840 stats=1) 1841 self.assertIn("Warning -- Unraisable exception", output) 1842 self.assertIn("Exception: weakref callback bug", output) 1843 1844 def test_threading_excepthook(self): 1845 # --fail-env-changed must catch uncaught thread exception. 1846 # The exception must be displayed even if sys.stderr is redirected. 1847 code = textwrap.dedent(r""" 1848 import threading 1849 import unittest 1850 from test.support import captured_stderr 1851 1852 class MyObject: 1853 pass 1854 1855 def func_bug(): 1856 raise Exception("bug in thread") 1857 1858 class Tests(unittest.TestCase): 1859 def test_threading_excepthook(self): 1860 with captured_stderr() as stderr: 1861 thread = threading.Thread(target=func_bug) 1862 thread.start() 1863 thread.join() 1864 self.assertEqual(stderr.getvalue(), '') 1865 """) 1866 testname = self.create_test(code=code) 1867 1868 output = self.run_tests("--fail-env-changed", "-v", testname, 1869 exitcode=EXITCODE_ENV_CHANGED) 1870 self.check_executed_tests(output, [testname], 1871 env_changed=[testname], 1872 fail_env_changed=True, 1873 stats=1) 1874 self.assertIn("Warning -- Uncaught thread exception", output) 1875 self.assertIn("Exception: bug in thread", output) 1876 1877 def test_print_warning(self): 1878 # bpo-45410: The order of messages must be preserved when -W and 1879 # support.print_warning() are used. 1880 code = textwrap.dedent(r""" 1881 import sys 1882 import unittest 1883 from test import support 1884 1885 class MyObject: 1886 pass 1887 1888 def func_bug(): 1889 raise Exception("bug in thread") 1890 1891 class Tests(unittest.TestCase): 1892 def test_print_warning(self): 1893 print("msg1: stdout") 1894 support.print_warning("msg2: print_warning") 1895 # Fail with ENV CHANGED to see print_warning() log 1896 support.environment_altered = True 1897 """) 1898 testname = self.create_test(code=code) 1899 1900 # Expect an output like: 1901 # 1902 # test_threading_excepthook (test.test_x.Tests) ... msg1: stdout 1903 # Warning -- msg2: print_warning 1904 # ok 1905 regex = (r"test_print_warning.*msg1: stdout\n" 1906 r"Warning -- msg2: print_warning\n" 1907 r"ok\n") 1908 for option in ("-v", "-W"): 1909 with self.subTest(option=option): 1910 cmd = ["--fail-env-changed", option, testname] 1911 output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED) 1912 self.check_executed_tests(output, [testname], 1913 env_changed=[testname], 1914 fail_env_changed=True, 1915 stats=1) 1916 self.assertRegex(output, regex) 1917 1918 def test_unicode_guard_env(self): 1919 guard = os.environ.get(setup.UNICODE_GUARD_ENV) 1920 self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set") 1921 if guard.isascii(): 1922 # Skip to signify that the env var value was changed by the user; 1923 # possibly to something ASCII to work around Unicode issues. 1924 self.skipTest("Modified guard") 1925 1926 def test_cleanup(self): 1927 dirname = os.path.join(self.tmptestdir, "test_python_123") 1928 os.mkdir(dirname) 1929 filename = os.path.join(self.tmptestdir, "test_python_456") 1930 open(filename, "wb").close() 1931 names = [dirname, filename] 1932 1933 cmdargs = ['-m', 'test', 1934 '--tempdir=%s' % self.tmptestdir, 1935 '--cleanup'] 1936 self.run_python(cmdargs) 1937 1938 for name in names: 1939 self.assertFalse(os.path.exists(name), name) 1940 1941 @unittest.skipIf(support.is_wasi, 1942 'checking temp files is not implemented on WASI') 1943 def test_leak_tmp_file(self): 1944 code = textwrap.dedent(r""" 1945 import os.path 1946 import tempfile 1947 import unittest 1948 1949 class FileTests(unittest.TestCase): 1950 def test_leak_tmp_file(self): 1951 filename = os.path.join(tempfile.gettempdir(), 'mytmpfile') 1952 with open(filename, "wb") as fp: 1953 fp.write(b'content') 1954 """) 1955 testnames = [self.create_test(code=code) for _ in range(3)] 1956 1957 output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames, 1958 exitcode=EXITCODE_ENV_CHANGED) 1959 self.check_executed_tests(output, testnames, 1960 env_changed=testnames, 1961 fail_env_changed=True, 1962 parallel=True, 1963 stats=len(testnames)) 1964 for testname in testnames: 1965 self.assertIn(f"Warning -- {testname} leaked temporary " 1966 f"files (1): mytmpfile", 1967 output) 1968 1969 def test_worker_decode_error(self): 1970 # gh-109425: Use "backslashreplace" error handler to decode stdout. 1971 if sys.platform == 'win32': 1972 encoding = locale.getencoding() 1973 else: 1974 encoding = sys.stdout.encoding 1975 if encoding is None: 1976 encoding = sys.__stdout__.encoding 1977 if encoding is None: 1978 self.skipTest("cannot get regrtest worker encoding") 1979 1980 nonascii = bytes(ch for ch in range(128, 256)) 1981 corrupted_output = b"nonascii:%s\n" % (nonascii,) 1982 # gh-108989: On Windows, assertion errors are written in UTF-16: when 1983 # decoded each letter is follow by a NUL character. 1984 assertion_failed = 'Assertion failed: tstate_is_alive(tstate)\n' 1985 corrupted_output += assertion_failed.encode('utf-16-le') 1986 try: 1987 corrupted_output.decode(encoding) 1988 except UnicodeDecodeError: 1989 pass 1990 else: 1991 self.skipTest(f"{encoding} can decode non-ASCII bytes") 1992 1993 expected_line = corrupted_output.decode(encoding, 'backslashreplace') 1994 1995 code = textwrap.dedent(fr""" 1996 import sys 1997 import unittest 1998 1999 class Tests(unittest.TestCase): 2000 def test_pass(self): 2001 pass 2002 2003 # bytes which cannot be decoded from UTF-8 2004 corrupted_output = {corrupted_output!a} 2005 sys.stdout.buffer.write(corrupted_output) 2006 sys.stdout.buffer.flush() 2007 """) 2008 testname = self.create_test(code=code) 2009 2010 output = self.run_tests("--fail-env-changed", "-v", "-j1", testname) 2011 self.check_executed_tests(output, [testname], 2012 parallel=True, 2013 stats=1) 2014 self.check_line(output, expected_line, regex=False) 2015 2016 def test_doctest(self): 2017 code = textwrap.dedent(r''' 2018 import doctest 2019 import sys 2020 from test import support 2021 2022 def my_function(): 2023 """ 2024 Pass: 2025 2026 >>> 1 + 1 2027 2 2028 2029 Failure: 2030 2031 >>> 2 + 3 2032 23 2033 >>> 1 + 1 2034 11 2035 2036 Skipped test (ignored): 2037 2038 >>> id(1.0) # doctest: +SKIP 2039 7948648 2040 """ 2041 2042 def load_tests(loader, tests, pattern): 2043 tests.addTest(doctest.DocTestSuite()) 2044 return tests 2045 ''') 2046 testname = self.create_test(code=code) 2047 2048 output = self.run_tests("--fail-env-changed", "-v", "-j1", testname, 2049 exitcode=EXITCODE_BAD_TEST) 2050 self.check_executed_tests(output, [testname], 2051 failed=[testname], 2052 parallel=True, 2053 stats=TestStats(1, 1, 0)) 2054 2055 def _check_random_seed(self, run_workers: bool): 2056 # gh-109276: When -r/--randomize is used, random.seed() is called 2057 # with the same random seed before running each test file. 2058 code = textwrap.dedent(r''' 2059 import random 2060 import unittest 2061 2062 class RandomSeedTest(unittest.TestCase): 2063 def test_randint(self): 2064 numbers = [random.randint(0, 1000) for _ in range(10)] 2065 print(f"Random numbers: {numbers}") 2066 ''') 2067 tests = [self.create_test(name=f'test_random{i}', code=code) 2068 for i in range(1, 3+1)] 2069 2070 random_seed = 856_656_202 2071 cmd = ["--randomize", f"--randseed={random_seed}"] 2072 if run_workers: 2073 # run as many worker processes than the number of tests 2074 cmd.append(f'-j{len(tests)}') 2075 cmd.extend(tests) 2076 output = self.run_tests(*cmd) 2077 2078 random.seed(random_seed) 2079 # Make the assumption that nothing consume entropy between libregrest 2080 # setup_tests() which calls random.seed() and RandomSeedTest calling 2081 # random.randint(). 2082 numbers = [random.randint(0, 1000) for _ in range(10)] 2083 expected = f"Random numbers: {numbers}" 2084 2085 regex = r'^Random numbers: .*$' 2086 matches = re.findall(regex, output, flags=re.MULTILINE) 2087 self.assertEqual(matches, [expected] * len(tests)) 2088 2089 def test_random_seed(self): 2090 self._check_random_seed(run_workers=False) 2091 2092 def test_random_seed_workers(self): 2093 self._check_random_seed(run_workers=True) 2094 2095 def test_python_command(self): 2096 code = textwrap.dedent(r""" 2097 import sys 2098 import unittest 2099 2100 class WorkerTests(unittest.TestCase): 2101 def test_dev_mode(self): 2102 self.assertTrue(sys.flags.dev_mode) 2103 """) 2104 tests = [self.create_test(code=code) for _ in range(3)] 2105 2106 # Custom Python command: "python -X dev" 2107 python_cmd = [sys.executable, '-X', 'dev'] 2108 # test.libregrtest.cmdline uses shlex.split() to parse the Python 2109 # command line string 2110 python_cmd = shlex.join(python_cmd) 2111 2112 output = self.run_tests("--python", python_cmd, "-j0", *tests) 2113 self.check_executed_tests(output, tests, 2114 stats=len(tests), parallel=True) 2115 2116 def test_unload_tests(self): 2117 # Test that unloading test modules does not break tests 2118 # that import from other tests. 2119 # The test execution order matters for this test. 2120 # Both test_regrtest_a and test_regrtest_c which are executed before 2121 # and after test_regrtest_b import a submodule from the test_regrtest_b 2122 # package and use it in testing. test_regrtest_b itself does not import 2123 # that submodule. 2124 # Previously test_regrtest_c failed because test_regrtest_b.util in 2125 # sys.modules was left after test_regrtest_a (making the import 2126 # statement no-op), but new test_regrtest_b without the util attribute 2127 # was imported for test_regrtest_b. 2128 testdir = os.path.join(os.path.dirname(__file__), 2129 'regrtestdata', 'import_from_tests') 2130 tests = [f'test_regrtest_{name}' for name in ('a', 'b', 'c')] 2131 args = ['-Wd', '-E', '-bb', '-m', 'test', '--testdir=%s' % testdir, *tests] 2132 output = self.run_python(args) 2133 self.check_executed_tests(output, tests, stats=3) 2134 2135 def check_add_python_opts(self, option): 2136 # --fast-ci and --slow-ci add "-u -W default -bb -E" options to Python 2137 try: 2138 import _testinternalcapi 2139 except ImportError: 2140 raise unittest.SkipTest("requires _testinternalcapi") 2141 code = textwrap.dedent(r""" 2142 import sys 2143 import unittest 2144 from test import support 2145 try: 2146 from _testinternalcapi import get_config 2147 except ImportError: 2148 get_config = None 2149 2150 # WASI/WASM buildbots don't use -E option 2151 use_environment = (support.is_emscripten or support.is_wasi) 2152 2153 class WorkerTests(unittest.TestCase): 2154 @unittest.skipUnless(get_config is None, 'need get_config()') 2155 def test_config(self): 2156 config = get_config()['config'] 2157 # -u option 2158 self.assertEqual(config['buffered_stdio'], 0) 2159 # -W default option 2160 self.assertTrue(config['warnoptions'], ['default']) 2161 # -bb option 2162 self.assertTrue(config['bytes_warning'], 2) 2163 # -E option 2164 self.assertTrue(config['use_environment'], use_environment) 2165 2166 def test_python_opts(self): 2167 # -u option 2168 self.assertTrue(sys.__stdout__.write_through) 2169 self.assertTrue(sys.__stderr__.write_through) 2170 2171 # -W default option 2172 self.assertTrue(sys.warnoptions, ['default']) 2173 2174 # -bb option 2175 self.assertEqual(sys.flags.bytes_warning, 2) 2176 2177 # -E option 2178 self.assertEqual(not sys.flags.ignore_environment, 2179 use_environment) 2180 """) 2181 testname = self.create_test(code=code) 2182 2183 # Use directly subprocess to control the exact command line 2184 cmd = [sys.executable, 2185 "-m", "test", option, 2186 f'--testdir={self.tmptestdir}', 2187 testname] 2188 proc = subprocess.run(cmd, 2189 stdout=subprocess.PIPE, 2190 stderr=subprocess.STDOUT, 2191 text=True) 2192 self.assertEqual(proc.returncode, 0, proc) 2193 2194 def test_add_python_opts(self): 2195 for opt in ("--fast-ci", "--slow-ci"): 2196 with self.subTest(opt=opt): 2197 self.check_add_python_opts(opt) 2198 2199 # gh-76319: Raising SIGSEGV on Android may not cause a crash. 2200 @unittest.skipIf(support.is_android, 2201 'raising SIGSEGV on Android is unreliable') 2202 def test_worker_output_on_failure(self): 2203 try: 2204 from faulthandler import _sigsegv 2205 except ImportError: 2206 self.skipTest("need faulthandler._sigsegv") 2207 2208 code = textwrap.dedent(r""" 2209 import faulthandler 2210 import unittest 2211 from test import support 2212 2213 class CrashTests(unittest.TestCase): 2214 def test_crash(self): 2215 print("just before crash!", flush=True) 2216 2217 with support.SuppressCrashReport(): 2218 faulthandler._sigsegv(True) 2219 """) 2220 testname = self.create_test(code=code) 2221 2222 # Sanitizers must not handle SIGSEGV (ex: for test_enable_fd()) 2223 env = dict(os.environ) 2224 option = 'handle_segv=0' 2225 support.set_sanitizer_env_var(env, option) 2226 2227 output = self.run_tests("-j1", testname, 2228 exitcode=EXITCODE_BAD_TEST, 2229 env=env) 2230 self.check_executed_tests(output, testname, 2231 failed=[testname], 2232 stats=0, parallel=True) 2233 if not support.MS_WINDOWS: 2234 exitcode = -int(signal.SIGSEGV) 2235 self.assertIn(f"Exit code {exitcode} (SIGSEGV)", output) 2236 self.check_line(output, "just before crash!", full=True, regex=False) 2237 2238 def test_verbose3(self): 2239 code = textwrap.dedent(r""" 2240 import unittest 2241 from test import support 2242 2243 class VerboseTests(unittest.TestCase): 2244 def test_pass(self): 2245 print("SPAM SPAM SPAM") 2246 """) 2247 testname = self.create_test(code=code) 2248 2249 # Run sequentially 2250 output = self.run_tests("--verbose3", testname) 2251 self.check_executed_tests(output, testname, stats=1) 2252 self.assertNotIn('SPAM SPAM SPAM', output) 2253 2254 # -R option needs a debug build 2255 if support.Py_DEBUG: 2256 # Check for reference leaks, run in parallel 2257 output = self.run_tests("-R", "3:3", "-j1", "--verbose3", testname) 2258 self.check_executed_tests(output, testname, stats=1, parallel=True) 2259 self.assertNotIn('SPAM SPAM SPAM', output) 2260 2261 def test_xml(self): 2262 code = textwrap.dedent(r""" 2263 import unittest 2264 from test import support 2265 2266 class VerboseTests(unittest.TestCase): 2267 def test_failed(self): 2268 print("abc \x1b def") 2269 self.fail() 2270 """) 2271 testname = self.create_test(code=code) 2272 2273 # Run sequentially 2274 filename = os_helper.TESTFN 2275 self.addCleanup(os_helper.unlink, filename) 2276 2277 output = self.run_tests(testname, "--junit-xml", filename, 2278 exitcode=EXITCODE_BAD_TEST) 2279 self.check_executed_tests(output, testname, 2280 failed=testname, 2281 stats=TestStats(1, 1, 0)) 2282 2283 # Test generated XML 2284 with open(filename, encoding="utf8") as fp: 2285 content = fp.read() 2286 2287 testsuite = ElementTree.fromstring(content) 2288 self.assertEqual(int(testsuite.get('tests')), 1) 2289 self.assertEqual(int(testsuite.get('errors')), 0) 2290 self.assertEqual(int(testsuite.get('failures')), 1) 2291 2292 testcase = testsuite[0][0] 2293 self.assertEqual(testcase.get('status'), 'run') 2294 self.assertEqual(testcase.get('result'), 'completed') 2295 self.assertGreater(float(testcase.get('time')), 0) 2296 for out in testcase.iter('system-out'): 2297 self.assertEqual(out.text, r"abc \x1b def") 2298 2299 2300class TestUtils(unittest.TestCase): 2301 def test_format_duration(self): 2302 self.assertEqual(utils.format_duration(0), 2303 '0 ms') 2304 self.assertEqual(utils.format_duration(1e-9), 2305 '1 ms') 2306 self.assertEqual(utils.format_duration(10e-3), 2307 '10 ms') 2308 self.assertEqual(utils.format_duration(1.5), 2309 '1.5 sec') 2310 self.assertEqual(utils.format_duration(1), 2311 '1.0 sec') 2312 self.assertEqual(utils.format_duration(2 * 60), 2313 '2 min') 2314 self.assertEqual(utils.format_duration(2 * 60 + 1), 2315 '2 min 1 sec') 2316 self.assertEqual(utils.format_duration(3 * 3600), 2317 '3 hour') 2318 self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1), 2319 '3 hour 2 min') 2320 self.assertEqual(utils.format_duration(3 * 3600 + 1), 2321 '3 hour 1 sec') 2322 2323 def test_normalize_test_name(self): 2324 normalize = normalize_test_name 2325 self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'), 2326 'test_access') 2327 self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True), 2328 'ChownFileTests') 2329 self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True), 2330 'test_success') 2331 self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True)) 2332 self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True)) 2333 2334 def test_get_signal_name(self): 2335 for exitcode, expected in ( 2336 (-int(signal.SIGINT), 'SIGINT'), 2337 (-int(signal.SIGSEGV), 'SIGSEGV'), 2338 (128 + int(signal.SIGABRT), 'SIGABRT'), 2339 (3221225477, "STATUS_ACCESS_VIOLATION"), 2340 (0xC00000FD, "STATUS_STACK_OVERFLOW"), 2341 ): 2342 self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode) 2343 2344 def test_format_resources(self): 2345 format_resources = utils.format_resources 2346 ALL_RESOURCES = utils.ALL_RESOURCES 2347 self.assertEqual( 2348 format_resources(("network",)), 2349 'resources (1): network') 2350 self.assertEqual( 2351 format_resources(("audio", "decimal", "network")), 2352 'resources (3): audio,decimal,network') 2353 self.assertEqual( 2354 format_resources(ALL_RESOURCES), 2355 'resources: all') 2356 self.assertEqual( 2357 format_resources(tuple(name for name in ALL_RESOURCES 2358 if name != "cpu")), 2359 'resources: all,-cpu') 2360 self.assertEqual( 2361 format_resources((*ALL_RESOURCES, "tzdata")), 2362 'resources: all,tzdata') 2363 2364 def test_match_test(self): 2365 class Test: 2366 def __init__(self, test_id): 2367 self.test_id = test_id 2368 2369 def id(self): 2370 return self.test_id 2371 2372 # Restore patterns once the test completes 2373 patterns = get_match_tests() 2374 self.addCleanup(set_match_tests, patterns) 2375 2376 test_access = Test('test.test_os.FileTests.test_access') 2377 test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') 2378 test_copy = Test('test.test_shutil.TestCopy.test_copy') 2379 2380 # Test acceptance 2381 with support.swap_attr(support, '_test_matchers', ()): 2382 # match all 2383 set_match_tests([]) 2384 self.assertTrue(match_test(test_access)) 2385 self.assertTrue(match_test(test_chdir)) 2386 2387 # match all using None 2388 set_match_tests(None) 2389 self.assertTrue(match_test(test_access)) 2390 self.assertTrue(match_test(test_chdir)) 2391 2392 # match the full test identifier 2393 set_match_tests([(test_access.id(), True)]) 2394 self.assertTrue(match_test(test_access)) 2395 self.assertFalse(match_test(test_chdir)) 2396 2397 # match the module name 2398 set_match_tests([('test_os', True)]) 2399 self.assertTrue(match_test(test_access)) 2400 self.assertTrue(match_test(test_chdir)) 2401 self.assertFalse(match_test(test_copy)) 2402 2403 # Test '*' pattern 2404 set_match_tests([('test_*', True)]) 2405 self.assertTrue(match_test(test_access)) 2406 self.assertTrue(match_test(test_chdir)) 2407 2408 # Test case sensitivity 2409 set_match_tests([('filetests', True)]) 2410 self.assertFalse(match_test(test_access)) 2411 set_match_tests([('FileTests', True)]) 2412 self.assertTrue(match_test(test_access)) 2413 2414 # Test pattern containing '.' and a '*' metacharacter 2415 set_match_tests([('*test_os.*.test_*', True)]) 2416 self.assertTrue(match_test(test_access)) 2417 self.assertTrue(match_test(test_chdir)) 2418 self.assertFalse(match_test(test_copy)) 2419 2420 # Multiple patterns 2421 set_match_tests([(test_access.id(), True), (test_chdir.id(), True)]) 2422 self.assertTrue(match_test(test_access)) 2423 self.assertTrue(match_test(test_chdir)) 2424 self.assertFalse(match_test(test_copy)) 2425 2426 set_match_tests([('test_access', True), ('DONTMATCH', True)]) 2427 self.assertTrue(match_test(test_access)) 2428 self.assertFalse(match_test(test_chdir)) 2429 2430 # Test rejection 2431 with support.swap_attr(support, '_test_matchers', ()): 2432 # match the full test identifier 2433 set_match_tests([(test_access.id(), False)]) 2434 self.assertFalse(match_test(test_access)) 2435 self.assertTrue(match_test(test_chdir)) 2436 2437 # match the module name 2438 set_match_tests([('test_os', False)]) 2439 self.assertFalse(match_test(test_access)) 2440 self.assertFalse(match_test(test_chdir)) 2441 self.assertTrue(match_test(test_copy)) 2442 2443 # Test '*' pattern 2444 set_match_tests([('test_*', False)]) 2445 self.assertFalse(match_test(test_access)) 2446 self.assertFalse(match_test(test_chdir)) 2447 2448 # Test case sensitivity 2449 set_match_tests([('filetests', False)]) 2450 self.assertTrue(match_test(test_access)) 2451 set_match_tests([('FileTests', False)]) 2452 self.assertFalse(match_test(test_access)) 2453 2454 # Test pattern containing '.' and a '*' metacharacter 2455 set_match_tests([('*test_os.*.test_*', False)]) 2456 self.assertFalse(match_test(test_access)) 2457 self.assertFalse(match_test(test_chdir)) 2458 self.assertTrue(match_test(test_copy)) 2459 2460 # Multiple patterns 2461 set_match_tests([(test_access.id(), False), (test_chdir.id(), False)]) 2462 self.assertFalse(match_test(test_access)) 2463 self.assertFalse(match_test(test_chdir)) 2464 self.assertTrue(match_test(test_copy)) 2465 2466 set_match_tests([('test_access', False), ('DONTMATCH', False)]) 2467 self.assertFalse(match_test(test_access)) 2468 self.assertTrue(match_test(test_chdir)) 2469 2470 # Test mixed filters 2471 with support.swap_attr(support, '_test_matchers', ()): 2472 set_match_tests([('*test_os', False), ('test_access', True)]) 2473 self.assertTrue(match_test(test_access)) 2474 self.assertFalse(match_test(test_chdir)) 2475 self.assertTrue(match_test(test_copy)) 2476 2477 set_match_tests([('*test_os', True), ('test_access', False)]) 2478 self.assertFalse(match_test(test_access)) 2479 self.assertTrue(match_test(test_chdir)) 2480 self.assertFalse(match_test(test_copy)) 2481 2482 def test_sanitize_xml(self): 2483 sanitize_xml = utils.sanitize_xml 2484 2485 # escape invalid XML characters 2486 self.assertEqual(sanitize_xml('abc \x1b\x1f def'), 2487 r'abc \x1b\x1f def') 2488 self.assertEqual(sanitize_xml('nul:\x00, bell:\x07'), 2489 r'nul:\x00, bell:\x07') 2490 self.assertEqual(sanitize_xml('surrogate:\uDC80'), 2491 r'surrogate:\udc80') 2492 self.assertEqual(sanitize_xml('illegal \uFFFE and \uFFFF'), 2493 r'illegal \ufffe and \uffff') 2494 2495 # no escape for valid XML characters 2496 self.assertEqual(sanitize_xml('a\n\tb'), 2497 'a\n\tb') 2498 self.assertEqual(sanitize_xml('valid t\xe9xt \u20ac'), 2499 'valid t\xe9xt \u20ac') 2500 2501 2502if __name__ == '__main__': 2503 unittest.main() 2504