# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import json
import os
import sys
import textwrap


from typ import main
from typ import test_case
from typ import Host
from typ import VERSION
from typ.fakes import test_result_server_fake


is_python3 = bool(sys.version_info.major == 3)

if is_python3:  # pragma: python3
    # pylint: disable=redefined-builtin,invalid-name
    unicode = str

d = textwrap.dedent


PASS_TEST_PY = """
import unittest
class PassingTest(unittest.TestCase):
    def test_pass(self):
        pass
"""


PASS_TEST_FILES = {'pass_test.py': PASS_TEST_PY}


FAIL_TEST_PY = """
import unittest
class FailingTest(unittest.TestCase):
    def test_fail(self):
        self.fail()
"""


FAIL_TEST_FILES = {'fail_test.py': FAIL_TEST_PY}


OUTPUT_TEST_PY = """
import sys
import unittest

class PassTest(unittest.TestCase):
    def test_out(self):
        sys.stdout.write("hello on stdout\\n")
        sys.stdout.flush()

    def test_err(self):
        sys.stderr.write("hello on stderr\\n")

class FailTest(unittest.TestCase):
    def test_out_err_fail(self):
        sys.stdout.write("hello on stdout\\n")
        sys.stdout.flush()
        sys.stderr.write("hello on stderr\\n")
        self.fail()
"""


OUTPUT_TEST_FILES = {'output_test.py': OUTPUT_TEST_PY}


SF_TEST_PY = """
import sys
import unittest

class SkipMethods(unittest.TestCase):
    @unittest.skip('reason')
    def test_reason(self):
        self.fail()

    @unittest.skipIf(True, 'reason')
    def test_skip_if_true(self):
        self.fail()

    @unittest.skipIf(False, 'reason')
    def test_skip_if_false(self):
        self.fail()


class SkipSetup(unittest.TestCase):
    def setUp(self):
        self.skipTest('setup failed')

    def test_notrun(self):
        self.fail()


@unittest.skip('skip class')
class SkipClass(unittest.TestCase):
    def test_method(self):
        self.fail()

class SetupClass(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        sys.stdout.write('in setupClass\\n')
        sys.stdout.flush()
        assert False, 'setupClass failed'

    def test_method1(self):
        pass

    def test_method2(self):
        pass

class ExpectedFailures(unittest.TestCase):
    @unittest.expectedFailure
    def test_fail(self):
        self.fail()

    @unittest.expectedFailure
    def test_pass(self):
        pass
"""


SF_TEST_FILES = {'sf_test.py': SF_TEST_PY}


LOAD_TEST_PY = """
import unittest
def load_tests(_, _2, _3):
    class BaseTest(unittest.TestCase):
        pass

    def method_fail(self):
        self.fail()

    def method_pass(self):
        pass

    setattr(BaseTest, "test_fail", method_fail)
    setattr(BaseTest, "test_pass", method_pass)
    suite = unittest.TestSuite()
    suite.addTest(BaseTest("test_fail"))
    suite.addTest(BaseTest("test_pass"))
    return suite
"""


LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
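
# Each *_TEST_PY string above is a small, self-contained unittest module, and
# each *_TEST_FILES dict maps a relative filename to those contents. The tests
# below hand these dicts to MainTestCase.check(files=...), which sets the
# files up for the run and returns a (ret, out, err, files) tuple; the
# returned `files` also holds anything the run wrote back (see, e.g., the
# full_results.json handling in TestCli.test_fail_then_pass).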


path_to_main = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
    'runner.py')


class TestCli(test_case.MainTestCase):
    prog = [sys.executable, path_to_main]
    files_to_ignore = ['*.pyc']

    def test_bad_arg(self):
        self.check(['--bad-arg'], ret=2, out='',
                   rerr='.*: error: unrecognized arguments: --bad-arg\n')
        self.check(['-help'], ret=2, out='',
                   rerr=(".*: error: argument -h/--help: "
                         "ignored explicit argument 'elp'\n"))

    def test_bad_metadata(self):
        self.check(['--metadata', 'foo'], ret=2, err='',
                   out='Error: malformed --metadata "foo"\n')

    def test_basic(self):
        self.check([], files=PASS_TEST_FILES,
                   ret=0,
                   out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                        '1 test run, 0 failures.\n'), err='')

    def test_coverage(self):
        try:
            import coverage  # pylint: disable=W0612
            files = {
                'pass_test.py': PASS_TEST_PY,
                'fail_test.py': FAIL_TEST_PY,
            }
            self.check(['-c', 'pass_test'], files=files, ret=0, err='',
                       out=d("""\
                           [1/1] pass_test.PassingTest.test_pass passed
                           1 test run, 0 failures.

                           Name        Stmts   Miss  Cover
                           -------------------------------
                           fail_test       4      4     0%
                           pass_test       4      0   100%
                           -------------------------------
                           TOTAL           8      4    50%
                           """))
        except ImportError:  # pragma: no cover
            # We can never cover this line, since running coverage means
            # that import will succeed.
            self.check(['-c'], files=PASS_TEST_FILES, ret=1,
                       out='Error: coverage is not installed\n', err='')

    def test_debugger(self):
        if sys.version_info.major == 3:  # pragma: python3
            return
        else:  # pragma: python2
            _, out, _, _ = self.check(['-d'], stdin='quit()\n',
                                      files=PASS_TEST_FILES, ret=0, err='')
            self.assertIn('(Pdb) ', out)

    def test_dryrun(self):
        self.check(['-n'], files=PASS_TEST_FILES, ret=0, err='',
                   out=d("""\
                       [1/1] pass_test.PassingTest.test_pass passed
                       1 test run, 0 failures.
230 """)) 231 232 def test_error(self): 233 files = {'err_test.py': d("""\ 234 import unittest 235 class ErrTest(unittest.TestCase): 236 def test_err(self): 237 foo = bar 238 """)} 239 _, out, _, _ = self.check([''], files=files, ret=1, err='') 240 self.assertIn('[1/1] err_test.ErrTest.test_err failed unexpectedly', 241 out) 242 self.assertIn('1 test run, 1 failure', out) 243 244 def test_fail(self): 245 _, out, _, _ = self.check([], files=FAIL_TEST_FILES, ret=1, err='') 246 self.assertIn('fail_test.FailingTest.test_fail failed unexpectedly', 247 out) 248 249 def test_fail_then_pass(self): 250 files = {'fail_then_pass_test.py': d("""\ 251 import unittest 252 count = 0 253 class FPTest(unittest.TestCase): 254 def test_count(self): 255 global count 256 count += 1 257 if count == 1: 258 self.fail() 259 """)} 260 _, out, _, files = self.check(['--retry-limit', '3', 261 '--write-full-results-to', 262 'full_results.json'], 263 files=files, ret=0, err='') 264 self.assertIn('Retrying failed tests (attempt #1 of 3)', out) 265 self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out) 266 self.assertIn('1 test run, 0 failures.\n', out) 267 results = json.loads(files['full_results.json']) 268 self.assertEqual( 269 results['tests'][ 270 'fail_then_pass_test']['FPTest']['test_count']['actual'], 271 'FAIL PASS') 272 273 def test_failures_are_not_elided(self): 274 _, out, _, _ = self.check(['--terminal-width=20'], 275 files=FAIL_TEST_FILES, ret=1, err='') 276 self.assertIn('[1/1] fail_test.FailingTest.test_fail failed ' 277 'unexpectedly:\n', out) 278 279 def test_file_list(self): 280 files = PASS_TEST_FILES 281 self.check(['-f', '-'], files=files, stdin='pass_test\n', ret=0) 282 self.check(['-f', '-'], files=files, stdin='pass_test.PassingTest\n', 283 ret=0) 284 self.check(['-f', '-'], files=files, 285 stdin='pass_test.PassingTest.test_pass\n', 286 ret=0) 287 files = {'pass_test.py': PASS_TEST_PY, 288 'test_list.txt': 'pass_test.PassingTest.test_pass\n'} 289 self.check(['-f', 'test_list.txt'], files=files, ret=0) 290 291 def test_find(self): 292 files = PASS_TEST_FILES 293 self.check(['-l'], files=files, ret=0, 294 out='pass_test.PassingTest.test_pass\n') 295 self.check(['-l', 'pass_test'], files=files, ret=0, err='', 296 out='pass_test.PassingTest.test_pass\n') 297 self.check(['-l', 'pass_test.py'], files=files, ret=0, err='', 298 out='pass_test.PassingTest.test_pass\n') 299 self.check(['-l', './pass_test.py'], files=files, ret=0, err='', 300 out='pass_test.PassingTest.test_pass\n') 301 self.check(['-l', '.'], files=files, ret=0, err='', 302 out='pass_test.PassingTest.test_pass\n') 303 self.check(['-l', 'pass_test.PassingTest.test_pass'], files=files, 304 ret=0, err='', 305 out='pass_test.PassingTest.test_pass\n') 306 self.check(['-l', '.'], files=files, ret=0, err='', 307 out='pass_test.PassingTest.test_pass\n') 308 309 def test_find_from_subdirs(self): 310 files = { 311 'foo/__init__.py': '', 312 'foo/pass_test.py': PASS_TEST_PY, 313 'bar/__init__.py': '', 314 'bar/tmp': '', 315 316 } 317 self.check(['-l', '../foo/pass_test.py'], files=files, cwd='bar', 318 ret=0, err='', 319 out='foo.pass_test.PassingTest.test_pass\n') 320 self.check(['-l', 'foo'], files=files, cwd='bar', 321 ret=0, err='', 322 out='foo.pass_test.PassingTest.test_pass\n') 323 self.check(['-l', '--path', '../foo', 'pass_test'], 324 files=files, cwd='bar', ret=0, err='', 325 out='pass_test.PassingTest.test_pass\n') 326 327 def test_help(self): 328 self.check(['--help'], ret=0, rout='.*', err='') 329 330 def 

    def test_import_failure_missing_file(self):
        self.check(['-l', 'foo'], ret=1, err='',
                   rout='Failed to load "foo".*')

    def test_import_failure_missing_package(self):
        files = {'foo.py': d("""\
            import unittest
            import package_that_does_not_exist

            class ImportFailureTest(unittest.TestCase):
                def test_case(self):
                    pass
            """)}
        self.check(['-l', 'foo.py'], files=files, ret=1, err='',
                   rout=('Failed to load "foo.py": No module named '
                         '\'?package_that_does_not_exist\'?\n'))

    def test_import_failure_no_tests(self):
        files = {'foo.py': 'import unittest'}
        self.check(['-l', 'foo.bar'], files=files, ret=1, err='',
                   rout='Failed to load "foo.bar":.*')

    def test_import_failure_syntax_error(self):
        files = {'syn_test.py': d("""\
            import unittest

            class SyntaxErrorTest(unittest.TestCase):
                def test_syntax_error_in_test(self):
                    syntax error
            """)}
        _, out, _, _ = self.check([], files=files, ret=1, err='')
        self.assertIn('Failed to import test module: syn_test', out)
        self.assertIn((' syntax error\n'
                       ' ^\n'
                       'SyntaxError: invalid syntax\n'), out)

    def test_interrupt(self):
        files = {'interrupt_test.py': d("""\
            import unittest
            class Foo(unittest.TestCase):
                def test_interrupt(self):
                    raise KeyboardInterrupt()
            """)}
        self.check(['-j', '1'], files=files, ret=130, out='',
                   err='interrupted, exiting\n')

    def test_isolate(self):
        self.check(['--isolate', '*test_pass*'], files=PASS_TEST_FILES, ret=0,
                   out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                        '1 test run, 0 failures.\n'), err='')

    def test_load_tests_failure(self):
        files = {'foo_test.py': d("""\
            import unittest

            def load_tests(_, _2, _3):
                raise ValueError('this should fail')
            """)}
        self.check([], files=files, ret=1, err='',
                   out=('foo_test.load_tests() failed: this should fail\n'))

    def test_load_tests_single_worker(self):
        files = LOAD_TEST_FILES
        _, out, _, _ = self.check(['-j', '1', '-v'], files=files, ret=1,
                                  err='')
        self.assertIn('[1/2] load_test.BaseTest.test_fail failed', out)
        self.assertIn('[2/2] load_test.BaseTest.test_pass passed', out)
        self.assertIn('2 tests run, 1 failure.\n', out)

    def test_load_tests_multiple_workers(self):
        _, out, _, _ = self.check([], files=LOAD_TEST_FILES, ret=1, err='')

        # The output for this test is nondeterministic since we may run
        # two tests in parallel. So, we just test that some of the substrings
        # we care about are present.
        self.assertIn('test_pass passed', out)
        self.assertIn('test_fail failed', out)
        self.assertIn('2 tests run, 1 failure.\n', out)

    def test_missing_builder_name(self):
        self.check(['--test-results-server', 'localhost'], ret=2,
                   out=('Error: --builder-name must be specified '
                        'along with --test-result-server\n'
                        'Error: --master-name must be specified '
                        'along with --test-result-server\n'
                        'Error: --test-type must be specified '
                        'along with --test-result-server\n'), err='')

    def test_ninja_status_env(self):
        self.check(['-v', 'output_test.PassTest.test_out'],
                   files=OUTPUT_TEST_FILES, aenv={'NINJA_STATUS': 'ns: '},
                   out=d("""\
                       ns: output_test.PassTest.test_out passed
                       1 test run, 0 failures.
424 """), err='') 425 426 def test_output_for_failures(self): 427 _, out, _, _ = self.check(['output_test.FailTest'], 428 files=OUTPUT_TEST_FILES, 429 ret=1, err='') 430 self.assertIn('[1/1] output_test.FailTest.test_out_err_fail ' 431 'failed unexpectedly:\n' 432 ' hello on stdout\n' 433 ' hello on stderr\n', out) 434 435 def test_quiet(self): 436 self.check(['-q'], files=PASS_TEST_FILES, ret=0, err='', out='') 437 438 def test_retry_limit(self): 439 _, out, _, _ = self.check(['--retry-limit', '2'], 440 files=FAIL_TEST_FILES, ret=1, err='') 441 self.assertIn('Retrying failed tests', out) 442 lines = out.splitlines() 443 self.assertEqual(len([l for l in lines 444 if 'test_fail failed unexpectedly:' in l]), 445 3) 446 447 def test_skip(self): 448 self.check(['--skip', '*test_fail*'], files=FAIL_TEST_FILES, ret=1, 449 out='No tests to run.\n', err='') 450 451 files = {'fail_test.py': FAIL_TEST_PY, 452 'pass_test.py': PASS_TEST_PY} 453 self.check(['-j', '1', '--skip', '*test_fail*'], files=files, ret=0, 454 out=('[1/2] fail_test.FailingTest.test_fail was skipped\n' 455 '[2/2] pass_test.PassingTest.test_pass passed\n' 456 '2 tests run, 0 failures.\n'), err='') 457 458 # This tests that we print test_started updates for skipped tests 459 # properly. It also tests how overwriting works. 460 _, out, _, _ = self.check(['-j', '1', '--overwrite', '--skip', 461 '*test_fail*'], files=files, ret=0, 462 err='', universal_newlines=False) 463 464 # We test this string separately and call out.strip() to 465 # avoid the trailing \r\n we get on windows, while keeping 466 # the \r's elsewhere in the string. 467 self.assertMultiLineEqual( 468 out.strip(), 469 ('[0/2] fail_test.FailingTest.test_fail\r' 470 ' \r' 471 '[1/2] fail_test.FailingTest.test_fail was skipped\r' 472 ' \r' 473 '[1/2] pass_test.PassingTest.test_pass\r' 474 ' \r' 475 '[2/2] pass_test.PassingTest.test_pass passed\r' 476 ' \r' 477 '2 tests run, 0 failures.')) 478 479 def test_skips_and_failures(self): 480 _, out, _, _ = self.check(['-j', '1', '-v', '-v'], files=SF_TEST_FILES, 481 ret=1, err='') 482 483 # We do a bunch of assertIn()'s to work around the non-portable 484 # tracebacks. 
        self.assertIn(('[1/9] sf_test.ExpectedFailures.test_fail failed:\n'
                       ' Traceback '), out)
        self.assertIn(('[2/9] sf_test.ExpectedFailures.test_pass '
                       'passed unexpectedly'), out)
        self.assertIn(('[3/9] sf_test.SetupClass.test_method1 '
                       'failed unexpectedly:\n'
                       ' in setupClass\n'), out)
        self.assertIn(('[4/9] sf_test.SetupClass.test_method2 '
                       'failed unexpectedly:\n'
                       ' in setupClass\n'), out)
        self.assertIn(('[5/9] sf_test.SkipClass.test_method was skipped:\n'
                       ' skip class\n'), out)
        self.assertIn(('[6/9] sf_test.SkipMethods.test_reason was skipped:\n'
                       ' reason\n'), out)
        self.assertIn(('[7/9] sf_test.SkipMethods.test_skip_if_false '
                       'failed unexpectedly:\n'
                       ' Traceback'), out)
        self.assertIn(('[8/9] sf_test.SkipMethods.test_skip_if_true '
                       'was skipped:\n'
                       ' reason\n'
                       '[9/9] sf_test.SkipSetup.test_notrun was skipped:\n'
                       ' setup failed\n'
                       '9 tests run, 4 failures.\n'), out)

    def test_skip_and_all(self):
        # --all should override --skip
        self.check(['-l', '--skip', '*test_pass'],
                   files=PASS_TEST_FILES, ret=1, err='',
                   out='No tests to run.\n')
        self.check(['-l', '--all', '--skip', '*test_pass'],
                   files=PASS_TEST_FILES, ret=0, err='',
                   out='pass_test.PassingTest.test_pass\n')

    def test_skip_decorators_and_all(self):
        _, out, _, _ = self.check(['--all', '-j', '1', '-v', '-v'],
                                  files=SF_TEST_FILES, ret=1, err='')
        self.assertIn('sf_test.SkipClass.test_method failed', out)
        self.assertIn('sf_test.SkipMethods.test_reason failed', out)
        self.assertIn('sf_test.SkipMethods.test_skip_if_true failed', out)
        self.assertIn('sf_test.SkipMethods.test_skip_if_false failed', out)

        # --all does not override explicit calls to skipTest(), only
        # the decorators.
        self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)

    def test_subdir(self):
        files = {
            'foo/__init__.py': '',
            'foo/bar/__init__.py': '',
            'foo/bar/pass_test.py': PASS_TEST_PY
        }
        self.check(['foo/bar'], files=files, ret=0, err='',
                   out=d("""\
                       [1/1] foo.bar.pass_test.PassingTest.test_pass passed
                       1 test run, 0 failures.
540 """)) 541 542 def test_timing(self): 543 self.check(['-t'], files=PASS_TEST_FILES, ret=0, err='', 544 rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed ' 545 r'\d+.\d+s\n' 546 r'1 test run in \d+.\d+s, 0 failures.')) 547 548 def test_test_results_server(self): 549 server = test_result_server_fake.start() 550 self.assertNotEqual(server, None, 'could not start fake server') 551 552 try: 553 self.check(['--test-results-server', 554 '%s:%d' % server.server_address, 555 '--master-name', 'fake_master', 556 '--builder-name', 'fake_builder', 557 '--test-type', 'typ_tests', 558 '--metadata', 'foo=bar'], 559 files=PASS_TEST_FILES, ret=0, err='', 560 out=('[1/1] pass_test.PassingTest.test_pass passed\n' 561 '1 test run, 0 failures.\n')) 562 563 finally: 564 posts = server.stop() 565 566 self.assertEqual(len(posts), 1) 567 payload = posts[0][2].decode('utf8') 568 self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"}', 569 payload) 570 self.assertTrue(payload.endswith('--\r\n')) 571 self.assertNotEqual(server.log.getvalue(), '') 572 573 def test_test_results_server_error(self): 574 server = test_result_server_fake.start(code=500) 575 self.assertNotEqual(server, None, 'could not start fake server') 576 577 try: 578 self.check(['--test-results-server', 579 '%s:%d' % server.server_address, 580 '--master-name', 'fake_master', 581 '--builder-name', 'fake_builder', 582 '--test-type', 'typ_tests', 583 '--metadata', 'foo=bar'], 584 files=PASS_TEST_FILES, ret=1, err='', 585 out=('[1/1] pass_test.PassingTest.test_pass passed\n' 586 '1 test run, 0 failures.\n' 587 'Uploading the JSON results raised ' 588 '"HTTP Error 500: Internal Server Error"\n')) 589 590 finally: 591 _ = server.stop() 592 593 def test_test_results_server_not_running(self): 594 self.check(['--test-results-server', 'localhost:99999', 595 '--master-name', 'fake_master', 596 '--builder-name', 'fake_builder', 597 '--test-type', 'typ_tests', 598 '--metadata', 'foo=bar'], 599 files=PASS_TEST_FILES, ret=1, err='', 600 rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed\n' 601 '1 test run, 0 failures.\n' 602 'Uploading the JSON results raised .*\n')) 603 604 def test_verbose_2(self): 605 self.check(['-vv', '-j', '1', 'output_test.PassTest'], 606 files=OUTPUT_TEST_FILES, ret=0, 607 out=d("""\ 608 [1/2] output_test.PassTest.test_err passed: 609 hello on stderr 610 [2/2] output_test.PassTest.test_out passed: 611 hello on stdout 612 2 tests run, 0 failures. 613 """), err='') 614 615 def test_verbose_3(self): 616 self.check(['-vvv', '-j', '1', 'output_test.PassTest'], 617 files=OUTPUT_TEST_FILES, ret=0, 618 out=d("""\ 619 [0/2] output_test.PassTest.test_err queued 620 [1/2] output_test.PassTest.test_err passed: 621 hello on stderr 622 [1/2] output_test.PassTest.test_out queued 623 [2/2] output_test.PassTest.test_out passed: 624 hello on stdout 625 2 tests run, 0 failures. 
626 """), err='') 627 628 def test_version(self): 629 self.check('--version', ret=0, out=(VERSION + '\n')) 630 631 def test_write_full_results_to(self): 632 _, _, _, files = self.check(['--write-full-results-to', 633 'results.json'], files=PASS_TEST_FILES) 634 self.assertIn('results.json', files) 635 results = json.loads(files['results.json']) 636 self.assertEqual(results['interrupted'], False) 637 self.assertEqual(results['path_delimiter'], '.') 638 self.assertEqual(results['tests'], 639 {u'pass_test': { 640 u'PassingTest': { 641 u'test_pass': { 642 u'actual': u'PASS', 643 u'expected': u'PASS', 644 } 645 } 646 }}) 647 648 def test_write_trace_to(self): 649 _, _, _, files = self.check(['--write-trace-to', 'trace.json'], 650 files=PASS_TEST_FILES) 651 self.assertIn('trace.json', files) 652 trace_obj = json.loads(files['trace.json']) 653 self.assertEqual(trace_obj['otherData'], {}) 654 self.assertEqual(len(trace_obj['traceEvents']), 5) 655 event = trace_obj['traceEvents'][0] 656 self.assertEqual(event['name'], 'pass_test.PassingTest.test_pass') 657 self.assertEqual(event['ph'], 'X') 658 self.assertEqual(event['tid'], 1) 659 self.assertEqual(event['args']['expected'], ['Pass']) 660 self.assertEqual(event['args']['actual'], 'Pass') 661 662 663class TestMain(TestCli): 664 prog = [] 665 666 def make_host(self): 667 return Host() 668 669 def call(self, host, argv, stdin, env): 670 stdin = unicode(stdin) 671 host.stdin = io.StringIO(stdin) 672 if env: 673 host.getenv = env.get 674 host.capture_output() 675 orig_sys_path = sys.path[:] 676 orig_sys_modules = list(sys.modules.keys()) 677 678 try: 679 ret = main(argv + ['-j', '1'], host) 680 finally: 681 out, err = host.restore_output() 682 modules_to_unload = [] 683 for k in sys.modules: 684 if k not in orig_sys_modules: 685 modules_to_unload.append(k) 686 for k in modules_to_unload: 687 del sys.modules[k] 688 sys.path = orig_sys_path 689 690 return ret, out, err 691 692 def test_debugger(self): 693 # TODO: this test seems to hang under coverage. 694 pass 695