#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=protected-access

import datetime
import os
import sys
import tempfile
import unittest
import unittest.mock as mock

from pyfakefs import fake_filesystem_unittest  # pylint:disable=import-error

from flake_suppressor_common import common_typing as ct
from flake_suppressor_common import expectations
from flake_suppressor_common import unittest_utils as uu


# Note for all tests in this class: We can safely check the contents of the
# file at the end despite potentially having multiple added lines because
# Python 3.7+ guarantees that dictionaries remember insertion order, so there
# is no risk of the order of modification changing.
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class IterateThroughResultsForUserUnittest(fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self._new_stdout = open(os.devnull, 'w')
    self.setUpPyfakefs()
    self._expectations = uu.UnitTestExpectationProcessor()
    # Redirect stdout since the tested function prints a lot.
    self._old_stdout = sys.stdout
    sys.stdout = self._new_stdout

    self._input_patcher = mock.patch.object(expectations.ExpectationProcessor,
                                            'PromptUserForExpectationAction')
    self._input_mock = self._input_patcher.start()
    self.addCleanup(self._input_patcher.stop)

    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win']): ['a'],
                tuple(['mac']): ['b'],
            },
            'bar_test': {
                tuple(['win']): ['c'],
            },
        },
    }

    self.expectation_file = os.path.join(uu.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
                                         'pixel_expectations.txt')
    uu.CreateFile(self, self.expectation_file)
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    self._expectation_file_patcher = mock.patch.object(
        uu.UnitTestExpectationProcessor, 'GetExpectationFileForSuite')
    self._expectation_file_mock = self._expectation_file_patcher.start()
    self._expectation_file_mock.return_value = self.expectation_file
    self.addCleanup(self._expectation_file_patcher.stop)

  def tearDown(self) -> None:
    sys.stdout = self._old_stdout
    self._new_stdout.close()

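  # These tests drive IterateThroughResultsForUser via the mocked
  # PromptUserForExpectationAction, whose return value acts as an
  # (expectation, bug) pair: (None, None) leaves the file untouched, while
  # e.g. ('Failure', 'crbug.com/1') appends a "crbug.com/1 ... [ Failure ]"
  # line for each failing configuration in |self.result_map|.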
  def testIterateThroughResultsForUserIgnoreNoGroupByTags(self) -> None:
    """Tests that everything appears to function with ignore and no group."""
    self._input_mock.return_value = (None, None)
    self._expectations.IterateThroughResultsForUser(self.result_map, False,
                                                    True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserIgnoreGroupByTags(self) -> None:
    """Tests that everything appears to function with ignore and grouping."""
    self._input_mock.return_value = (None, None)
    self._expectations.IterateThroughResultsForUser(self.result_map, True, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserRetryNoGroupByTags(self) -> None:
    """Tests that everything appears to function with retry and no group."""
    self._input_mock.return_value = ('RetryOnFailure', '')
    self._expectations.IterateThroughResultsForUser(self.result_map, False,
                                                    True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
[ win ] foo_test [ RetryOnFailure ]
[ mac ] foo_test [ RetryOnFailure ]
[ win ] bar_test [ RetryOnFailure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserRetryGroupByTags(self) -> None:
    """Tests that everything appears to function with retry and grouping."""
    self._input_mock.return_value = ('RetryOnFailure', 'crbug.com/1')
    self._expectations.IterateThroughResultsForUser(self.result_map, True, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
crbug.com/1 [ win ] foo_test [ RetryOnFailure ]
crbug.com/1 [ win ] bar_test [ RetryOnFailure ]
[ mac ] some_test [ Failure ]
crbug.com/1 [ mac ] foo_test [ RetryOnFailure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserFailNoGroupByTags(self) -> None:
    """Tests that everything appears to function with failure and no group."""
    self._input_mock.return_value = ('Failure', 'crbug.com/1')
    self._expectations.IterateThroughResultsForUser(self.result_map, False,
                                                    True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
crbug.com/1 [ win ] foo_test [ Failure ]
crbug.com/1 [ mac ] foo_test [ Failure ]
crbug.com/1 [ win ] bar_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserFailGroupByTags(self) -> None:
    """Tests that everything appears to function with failure and grouping."""
    self._input_mock.return_value = ('Failure', '')
    self._expectations.IterateThroughResultsForUser(self.result_map, True, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ win ] foo_test [ Failure ]
[ win ] bar_test [ Failure ]
[ mac ] some_test [ Failure ]
[ mac ] foo_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testIterateThroughResultsForUserNoIncludeAllTags(self) -> None:
    """Tests that everything appears to function without including all tags"""
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win', 'win10']): ['a'],
                tuple(['mac']): ['b'],
            },
            'bar_test': {
                tuple(['win']): ['c'],
            },
        },
    }
    self._input_mock.return_value = ('RetryOnFailure', '')
    self._expectations.IterateThroughResultsForUser(self.result_map, False,
                                                    False)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
[ win10 ] foo_test [ RetryOnFailure ]
[ mac ] foo_test [ RetryOnFailure ]
[ win ] bar_test [ RetryOnFailure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)


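# The 0.02 and 0.5 arguments passed to IterateThroughResultsWithThresholds
# below are flake-rate thresholds: per the comments on |result_counts| in each
# test, a test whose flake rate is below the lower threshold is ignored, one
# between the two thresholds gets RetryOnFailure, and one at or above the
# upper threshold gets Failure. The rate is the number of failures recorded in
# |self.result_map| divided by the total run count in |result_counts|.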
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class IterateThroughResultsWithThresholdsUnittest(
    fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self.setUpPyfakefs()
    self._expectations = uu.UnitTestExpectationProcessor()
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win']): ['a'],
                tuple(['mac']): ['b'],
            },
            'bar_test': {
                tuple(['win']): ['c'],
            },
        },
    }

    self.expectation_file = os.path.join(uu.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
                                         'pixel_expectations.txt')
    uu.CreateFile(self, self.expectation_file)
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    self._expectation_file_patcher = mock.patch.object(
        uu.UnitTestExpectationProcessor, 'GetExpectationFileForSuite')
    self._expectation_file_mock = self._expectation_file_patcher.start()
    self._expectation_file_mock.return_value = self.expectation_file
    self.addCleanup(self._expectation_file_patcher.stop)

  def testGroupByTags(self) -> None:
    """Tests that threshold-based expectations work when grouping by tags."""
    result_counts = {
        tuple(['win']): {
            # We expect this to be ignored since it has a 1% flake rate.
            'foo_test': 100,
            # We expect this to be RetryOnFailure since it has a 25% flake rate.
            'bar_test': 4,
        },
        tuple(['mac']): {
            # We expect this to be Failure since it has a 50% flake rate.
            'foo_test': 2
        }
    }
    self._expectations.IterateThroughResultsWithThresholds(
        self.result_map, True, result_counts, 0.02, 0.5, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ win ] bar_test [ RetryOnFailure ]
[ mac ] some_test [ Failure ]
[ mac ] foo_test [ Failure ]
[ android ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testNoGroupByTags(self) -> None:
    """Tests that threshold-based expectations work when not grouping by tags"""
    result_counts = {
        tuple(['win']): {
            # We expect this to be ignored since it has a 1% flake rate.
            'foo_test': 100,
            # We expect this to be RetryOnFailure since it has a 25% flake rate.
            'bar_test': 4,
        },
        tuple(['mac']): {
            # We expect this to be Failure since it has a 50% flake rate.
            'foo_test': 2
        }
    }
    self._expectations.IterateThroughResultsWithThresholds(
        self.result_map, False, result_counts, 0.02, 0.5, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
[ mac ] foo_test [ Failure ]
[ win ] bar_test [ RetryOnFailure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

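  # When the final include_all_tags argument is False, only the most specific
  # tag from each group appears in the emitted expectation (win10 rather than
  # win below); see the FilterToMostSpecificTypTags tests further down.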
  def testNoIncludeAllTags(self) -> None:
    """Tests that threshold-based expectations work when filtering tags."""
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win', 'win10']): ['a'],
                tuple(['mac']): ['b'],
            },
            'bar_test': {
                tuple(['win', 'win10']): ['c'],
            },
        },
    }

    result_counts = {
        tuple(['win', 'win10']): {
            # We expect this to be ignored since it has a 1% flake rate.
            'foo_test': 100,
            # We expect this to be RetryOnFailure since it has a 25% flake rate.
            'bar_test': 4,
        },
        tuple(['mac']): {
            # We expect this to be Failure since it has a 50% flake rate.
            'foo_test': 2
        }
    }
    self._expectations.IterateThroughResultsWithThresholds(
        self.result_map, False, result_counts, 0.02, 0.5, False)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
[ mac ] foo_test [ Failure ]
[ win10 ] bar_test [ RetryOnFailure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)


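# A suppression is only generated below when a config meets both thresholds:
# failures in at least |build_fail_total_number_threshold| builds and failures
# spanning at least |build_fail_consecutive_day_threshold| consecutive days
# (see the per-test comments in setUp).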
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class CreateExpectationsForAllResultsUnittest(
    fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self.setUpPyfakefs()
    self._expectations = uu.UnitTestExpectationProcessor()
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/1111',
                                       datetime.date(2022, 1, 2), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/2222',
                                       datetime.date(2022, 1, 1), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/3333',
                                       datetime.date(2022, 1, 4), False,
                                       ['Pass']),
                ],
                tuple(['mac']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/1111',
                                       datetime.date(2022, 1, 5), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/2222',
                                       datetime.date(2022, 1, 6), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/3333',
                                       datetime.date(2022, 1, 3), False,
                                       ['Pass']),
                ],
            },
            'bar_test': {
                tuple(['win']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/4444',
                                       datetime.date(2022, 1, 9), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/5555',
                                       datetime.date(2022, 1, 8), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/6666',
                                       datetime.date(2022, 1, 7), False,
                                       ['Pass']),
                ],
            },
            'baz_test': {
                # This test config fails on fewer than 2 consecutive days, and
                # thus should not exist in the output.
                tuple(['win']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/7777',
                                       datetime.date(2021, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/8888',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/9999',
                                       datetime.date(2023, 1, 10), False,
                                       ['Pass']),
                ],
                tuple(['mac']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/7777',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/8888',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                ],
            },
            'wpt_test': {
                # Test for the same test in all builders being over threshold.
                tuple(['win']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/1234',
                                       datetime.date(2021, 1, 10), False,
                                       ['Pass']),
                ],
                tuple(['mac']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/2345',
                                       datetime.date(2022, 1, 11), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/3456',
                                       datetime.date(2022, 1, 12), False,
                                       ['Pass']),
                ],
            },
        },
    }
    self.build_fail_total_number_threshold = 3
    self.build_fail_consecutive_day_threshold = 2

    self.expectation_file = os.path.join(uu.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
                                         'pixel_expectations.txt')
    uu.CreateFile(self, self.expectation_file)
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure Pass ]
[ mac ] some_test [ Failure Pass ]
[ android ] some_test [ Failure Pass ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    self._expectation_file_patcher = mock.patch.object(
        uu.UnitTestExpectationProcessor, 'GetExpectationFileForSuite')
    self._expectation_file_mock = self._expectation_file_patcher.start()
    self._expectation_file_mock.return_value = self.expectation_file
    self.addCleanup(self._expectation_file_patcher.stop)

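  # Per the comments in setUp: foo_test and bar_test fail in three distinct
  # builds on consecutive days and should be suppressed, baz_test never fails
  # on two consecutive days under one config and should be skipped, and
  # wpt_test only crosses the thresholds when its results from all builders
  # are considered together.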
  def testGroupByTags(self) -> None:
    """Tests that threshold-based expectations work when grouping by tags."""
    self._expectations.CreateExpectationsForAllResults(
        self.result_map, True, True, self.build_fail_total_number_threshold,
        self.build_fail_consecutive_day_threshold)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure Pass ]
[ win ] foo_test [ Failure Pass ]
[ win ] bar_test [ Failure Pass ]
[ win ] wpt_test [ Failure Pass ]
[ mac ] some_test [ Failure Pass ]
[ mac ] foo_test [ Failure Pass ]
[ mac ] wpt_test [ Failure Pass ]
[ android ] some_test [ Failure Pass ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testNoGroupByTags(self) -> None:
    """Tests that threshold-based expectations work when not grouping by tags"""
    self._expectations.CreateExpectationsForAllResults(
        self.result_map, False, True, self.build_fail_total_number_threshold,
        self.build_fail_consecutive_day_threshold)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure Pass ]
[ mac ] some_test [ Failure Pass ]
[ android ] some_test [ Failure Pass ]
[ win ] foo_test [ Failure Pass ]
[ mac ] foo_test [ Failure Pass ]
[ win ] bar_test [ Failure Pass ]
[ win ] wpt_test [ Failure Pass ]
[ mac ] wpt_test [ Failure Pass ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testNoIncludeAllTags(self) -> None:
    """Tests that threshold-based expectations work when filtering tags."""
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win', 'win10']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/1111',
                                       datetime.date(2022, 1, 2), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/2222',
                                       datetime.date(2022, 1, 1), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/3333',
                                       datetime.date(2022, 1, 4), False,
                                       ['Pass']),
                ],
                tuple(['mac']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/1111',
                                       datetime.date(2022, 1, 5), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/2222',
                                       datetime.date(2022, 1, 6), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/3333',
                                       datetime.date(2022, 1, 3), False,
                                       ['Pass']),
                ],
            },
            'bar_test': {
                tuple(['win', 'win10']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/4444',
                                       datetime.date(2022, 1, 9), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/5555',
                                       datetime.date(2022, 1, 8), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/6666',
                                       datetime.date(2022, 1, 7), False,
                                       ['Pass']),
                ],
            },
            'baz_test': {
                # This test config fails on fewer than 2 consecutive days, and
                # thus should not exist in the output.
                tuple(['win']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/7777',
                                       datetime.date(2021, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/8888',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/9999',
                                       datetime.date(2023, 1, 10), False,
                                       ['Pass']),
                ],
                tuple(['mac']): [
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/7777',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                    ct.ResultTupleType(ct.ResultStatus.FAIL,
                                       'http://ci.chromium.org/b/8888',
                                       datetime.date(2022, 1, 10), False,
                                       ['Pass']),
                ],
            },
        },
    }
    self._expectations.CreateExpectationsForAllResults(
        self.result_map, False, False, self.build_fail_total_number_threshold,
        self.build_fail_consecutive_day_threshold)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure Pass ]
[ mac ] some_test [ Failure Pass ]
[ android ] some_test [ Failure Pass ]
[ win10 ] foo_test [ Failure Pass ]
[ mac ] foo_test [ Failure Pass ]
[ win10 ] bar_test [ Failure Pass ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)


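# FindFailuresInSameTest returns (typ tags, failure count) pairs for the same
# test under other configurations, while FindFailuresInSameConfig returns
# ('suite.test', failure count) pairs for other tests under the same
# configuration, as exercised below.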
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class FindFailuresInSameConditionUnittest(unittest.TestCase):
  def setUp(self) -> None:
    self._expectations = uu.UnitTestExpectationProcessor()
    self.result_map = {
        'pixel_integration_test': {
            'foo_test': {
                tuple(['win']): ['a'],
                tuple(['mac']): ['a', 'b'],
            },
            'bar_test': {
                tuple(['win']): ['a', 'b', 'c'],
                tuple(['mac']): ['a', 'b', 'c', 'd'],
            },
        },
        'webgl_conformance_integration_test': {
            'foo_test': {
                tuple(['win']): ['a', 'b', 'c', 'd', 'e'],
                tuple(['mac']): ['a', 'b', 'c', 'd', 'e', 'f'],
            },
            'bar_test': {
                tuple(['win']): ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
                tuple(['mac']): ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
            },
        },
    }

  def testFindFailuresInSameTest(self) -> None:
    other_failures = self._expectations.FindFailuresInSameTest(
        self.result_map, 'pixel_integration_test', 'foo_test', tuple(['win']))
    self.assertEqual(other_failures, [(tuple(['mac']), 2)])

  def testFindFailuresInSameConfig(self) -> None:
    typ_tag_ordered_result_map = self._expectations._ReorderMapByTypTags(
        self.result_map)
    other_failures = self._expectations.FindFailuresInSameConfig(
        typ_tag_ordered_result_map, 'pixel_integration_test', 'foo_test',
        tuple(['win']))
    expected_other_failures = [
        ('pixel_integration_test.bar_test', 3),
        ('webgl_conformance_integration_test.foo_test', 5),
        ('webgl_conformance_integration_test.bar_test', 7),
    ]
    self.assertEqual(len(other_failures), len(expected_other_failures))
    self.assertEqual(set(other_failures), set(expected_other_failures))


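# ModifyFileForResult is called below with a (suite, test, typ tags, bug,
# expectation) argument list followed by two booleans that appear to be the
# same group-by-tags and include-all-tags flags used elsewhere in this file;
# with grouping enabled it inserts next to an existing block whose tags
# match, otherwise it appends to the end of the file.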
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class ModifyFileForResultUnittest(fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self.setUpPyfakefs()
    self._expectations = uu.UnitTestExpectationProcessor()
    self.expectation_file = os.path.join(uu.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
                                         'expectation.txt')
    uu.CreateFile(self, self.expectation_file)
    self._expectation_file_patcher = mock.patch.object(
        uu.UnitTestExpectationProcessor, 'GetExpectationFileForSuite')
    self._expectation_file_mock = self._expectation_file_patcher.start()
    self.addCleanup(self._expectation_file_patcher.stop)
    self._expectation_file_mock.return_value = self.expectation_file

  def testNoGroupByTags(self) -> None:
    """Tests that not grouping by tags appends to the end."""
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]

[ mac ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)
    self._expectations.ModifyFileForResult('some_file', 'some_test',
                                           ('win', 'win10'), '', 'Failure',
                                           False, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]

[ mac ] some_test [ Failure ]
[ win win10 ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testGroupByTagsNoMatch(self) -> None:
    """Tests that grouping by tags but finding no match appends to the end."""
    expectation_file_contents = uu.TAG_HEADER + """\
[ mac ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)
    self._expectations.ModifyFileForResult('some_file', 'some_test',
                                           ('win', 'win10'), '', 'Failure',
                                           True, True)
    expected_contents = uu.TAG_HEADER + """\
[ mac ] some_test [ Failure ]
[ win win10 ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)

  def testGroupByTagsMatch(self) -> None:
    """Tests that grouping by tags and finding a match adds mid-file."""
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]

[ mac ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)
    self._expectations.ModifyFileForResult('some_file', 'foo_test',
                                           ('win', 'win10'), '', 'Failure',
                                           True, True)
    expected_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ win ] foo_test [ Failure ]

[ mac ] some_test [ Failure ]
"""
    with open(self.expectation_file) as infile:
      self.assertEqual(infile.read(), expected_contents)


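# Per the least/middle/most naming in the fixtures below, tags that appear
# later within a '# tags: [ ... ]' group are treated as more specific, and
# FilterToMostSpecificTypTags keeps only the most specific supplied tag from
# each group.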
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class FilterToMostSpecificTagTypeUnittest(fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self._expectations = uu.UnitTestExpectationProcessor()
    self.setUpPyfakefs()
    with tempfile.NamedTemporaryFile(delete=False) as tf:
      self.expectation_file = tf.name

  def testBasic(self):
    """Tests that only the most specific tags are kept."""
    expectation_file_contents = """\
# tags: [ tag1_least_specific tag1_middle_specific tag1_most_specific ]
# tags: [ tag2_least_specific tag2_middle_specific tag2_most_specific ]"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    tags = ('tag1_least_specific', 'tag1_most_specific', 'tag2_middle_specific',
            'tag2_least_specific')
    filtered_tags = self._expectations.FilterToMostSpecificTypTags(
        tags, self.expectation_file)
    self.assertEqual(filtered_tags,
                     ('tag1_most_specific', 'tag2_middle_specific'))

  def testSingleTags(self) -> None:
    """Tests that functionality works as expected with single tags."""
    expectation_file_contents = """\
# tags: [ tag1_most_specific ]
# tags: [ tag2_most_specific ]"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    tags = ('tag1_most_specific', 'tag2_most_specific')
    filtered_tags = self._expectations.FilterToMostSpecificTypTags(
        tags, self.expectation_file)
    self.assertEqual(filtered_tags, tags)

  def testUnusedTags(self) -> None:
    """Tests that functionality works as expected with extra/unused tags."""
    expectation_file_contents = """\
# tags: [ tag1_least_specific tag1_middle_specific tag1_most_specific ]
# tags: [ tag2_least_specific tag2_middle_specific tag2_most_specific ]
# tags: [ some_unused_tag ]"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    tags = ('tag1_least_specific', 'tag1_most_specific', 'tag2_middle_specific',
            'tag2_least_specific')
    filtered_tags = self._expectations.FilterToMostSpecificTypTags(
        tags, self.expectation_file)
    self.assertEqual(filtered_tags,
                     ('tag1_most_specific', 'tag2_middle_specific'))

  def testMultiline(self) -> None:
    """Tests that functionality works when tags cover multiple lines."""
    expectation_file_contents = """\
# tags: [ tag1_least_specific
#         tag1_middle_specific
#         tag1_most_specific ]
# tags: [ tag2_least_specific
#         tag2_middle_specific tag2_most_specific ]"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    tags = ('tag1_least_specific', 'tag1_middle_specific', 'tag1_most_specific',
            'tag2_middle_specific', 'tag2_least_specific')
    filtered_tags = self._expectations.FilterToMostSpecificTypTags(
        tags, self.expectation_file)
    self.assertEqual(filtered_tags,
                     ('tag1_most_specific', 'tag2_middle_specific'))

  def testMissingTags(self) -> None:
    """Tests that a file not having all tags is an error."""
    expectation_file_contents = """\
# tags: [ tag1_least_specific tag1_middle_specific ]
# tags: [ tag2_least_specific tag2_middle_specific tag2_most_specific ]"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

    tags = ('tag1_least_specific', 'tag1_most_specific', 'tag2_middle_specific',
            'tag2_least_specific')
    with self.assertRaises(RuntimeError):
      self._expectations.FilterToMostSpecificTypTags(tags,
                                                     self.expectation_file)


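# FindBestInsertionLineForExpectation returns a (line number, matching tag
# set) pair; a line number of -1 and an empty set are returned when no
# existing expectation shares any of the given tags, as exercised below.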
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class FindBestInsertionLineForExpectationUnittest(
    fake_filesystem_unittest.TestCase):
  def setUp(self) -> None:
    self.setUpPyfakefs()
    self._expectations = uu.UnitTestExpectationProcessor()
    self.expectation_file = os.path.join(uu.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
                                         'expectation.txt')
    uu.CreateFile(self, self.expectation_file)
    expectation_file_contents = uu.TAG_HEADER + """\
[ win ] some_test [ Failure ]

[ mac ] some_test [ Failure ]

[ win release ] bar_test [ Failure ]
[ win ] foo_test [ Failure ]

[ chromeos ] some_test [ Failure ]
"""
    with open(self.expectation_file, 'w') as outfile:
      outfile.write(expectation_file_contents)

  def testNoMatchingTags(self) -> None:
    """Tests behavior when there are no expectations with matching tags."""
    insertion_line, tags = (
        self._expectations.FindBestInsertionLineForExpectation(
            tuple(['android']), self.expectation_file))
    self.assertEqual(insertion_line, -1)
    self.assertEqual(tags, set())

  def testMatchingTagsLastEntryChosen(self) -> None:
    """Tests that the last matching line is chosen."""
    insertion_line, tags = (
        self._expectations.FindBestInsertionLineForExpectation(
            tuple(['win']), self.expectation_file))
    # We expect "[ win ] foo_test [ Failure ]" to be chosen.
    expected_line = len(uu.TAG_HEADER.splitlines()) + 6
    self.assertEqual(insertion_line, expected_line)
    self.assertEqual(tags, set(['win']))

  def testMatchingTagsClosestMatchChosen(self) -> None:
    """Tests that the closest tag match is chosen."""
    insertion_line, tags = (
        self._expectations.FindBestInsertionLineForExpectation(
            ('win', 'release'), self.expectation_file))
    # We expect "[ win release ] bar_test [ Failure ]" to be chosen.
    expected_line = len(uu.TAG_HEADER.splitlines()) + 5
    self.assertEqual(insertion_line, expected_line)
    self.assertEqual(tags, set(['win', 'release']))


class AssertCheckoutIsUpToDateUnittest(unittest.TestCase):
  def setUp(self) -> None:
    self._expectations = uu.UnitTestExpectationProcessor()
    self._origin_patcher = mock.patch(
        'flake_suppressor_common.expectations.ExpectationProcessor.'
        'GetOriginExpectationFileContents')
    self._origin_mock = self._origin_patcher.start()
    self.addCleanup(self._origin_patcher.stop)
    self._local_patcher = mock.patch(
        'flake_suppressor_common.expectations.'
        'ExpectationProcessor.GetLocalCheckoutExpectationFileContents')
    self._local_mock = self._local_patcher.start()
    self.addCleanup(self._local_patcher.stop)

  def testContentsMatch(self) -> None:
    """Tests the happy path where the contents match."""
    self._origin_mock.return_value = {
        'foo.txt': 'foo_content',
        'bar.txt': 'bar_content',
    }
    self._local_mock.return_value = {
        'bar.txt': 'bar_content',
        'foo.txt': 'foo_content',
    }
    self._expectations.AssertCheckoutIsUpToDate()

  def testContentsDoNotMatch(self) -> None:
    """Tests that mismatched contents result in a failure."""
    self._origin_mock.return_value = {
        'foo.txt': 'foo_content',
        'bar.txt': 'bar_content',
    }
    # Differing keys.
    self._local_mock.return_value = {
        'bar.txt': 'bar_content',
        'foo2.txt': 'foo_content',
    }
    with self.assertRaises(RuntimeError):
      self._expectations.AssertCheckoutIsUpToDate()

    # Differing values.
    self._local_mock.return_value = {
        'bar.txt': 'bar_content',
        'foo.txt': 'foo_content2',
    }
    with self.assertRaises(RuntimeError):
      self._expectations.AssertCheckoutIsUpToDate()


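# OverFailedBuildThreshold appears to count distinct failed builds: the
# second under-threshold case below has three results but only two unique
# build URLs, so it still falls short of the threshold of 3.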
class OverFailedBuildThresholdUnittest(unittest.TestCase):
  def setUp(self) -> None:
    self.build_fail_total_number_threshold = 3

  def testOverThreshold(self) -> None:
    """Tests functionality when |result_tuple_list| passes
    |build_fail_total_number_threshold|.

    True is expected output on these inputs.
    """
    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2021, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/3333',
                           datetime.date(2023, 1, 1), False, ['Pass']),
    ]
    self.assertTrue(
        expectations.OverFailedBuildThreshold(
            result_tuple_list, self.build_fail_total_number_threshold))

  def testUnderThreshold(self) -> None:
    """Tests functionality when |result_tuple_list| cannot pass
    |build_fail_total_number_threshold|.

    False is expected output on these inputs.
    """
    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 2), False, ['Pass']),
    ]
    self.assertFalse(
        expectations.OverFailedBuildThreshold(
            result_tuple_list, self.build_fail_total_number_threshold))

    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 2), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 3), False, ['Pass']),
    ]
    self.assertFalse(
        expectations.OverFailedBuildThreshold(
            result_tuple_list, self.build_fail_total_number_threshold))


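# OverFailedBuildByDayThreshold requires failures on at least
# |build_fail_consecutive_day_threshold| consecutive days: below, three
# failures on the same day or on days 1, 2, and 4 do not satisfy a
# three-consecutive-day threshold, while days 1, 2, and 3 do.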
class OverFailedBuildByDayThresholdUnittest(unittest.TestCase):
  def setUp(self) -> None:
    self.build_fail_consecutive_day_threshold = 3

  def testOverThreshold(self) -> None:
    """Tests functionality when |result_tuple_list| passes
    |build_fail_consecutive_day_threshold|.

    True is expected output on these inputs.
    """
    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 2), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/3333',
                           datetime.date(2022, 1, 3), False, ['Pass']),
    ]
    self.assertTrue(
        expectations.OverFailedBuildByDayThreshold(
            result_tuple_list, self.build_fail_consecutive_day_threshold))

  def testUnderThreshold(self) -> None:
    """Tests functionality when |result_tuple_list| cannot pass
    |build_fail_consecutive_day_threshold|.

    False is expected output on these inputs.
    """
    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/3333',
                           datetime.date(2022, 1, 1), False, ['Pass']),
    ]
    self.assertFalse(
        expectations.OverFailedBuildByDayThreshold(
            result_tuple_list, self.build_fail_consecutive_day_threshold))

    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 2), False, ['Pass']),
    ]
    self.assertFalse(
        expectations.OverFailedBuildByDayThreshold(
            result_tuple_list, self.build_fail_consecutive_day_threshold))

    result_tuple_list = [
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/1111',
                           datetime.date(2022, 1, 1), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/2222',
                           datetime.date(2022, 1, 2), False, ['Pass']),
        ct.ResultTupleType(ct.ResultStatus.FAIL,
                           'http://ci.chromium.org/b/3333',
                           datetime.date(2022, 1, 4), False, ['Pass']),
    ]
    self.assertFalse(
        expectations.OverFailedBuildByDayThreshold(
            result_tuple_list, self.build_fail_consecutive_day_threshold))


if __name__ == '__main__':
  unittest.main(verbosity=2)