1#!/usr/bin/python 2# 3# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 4# Use of this source code is governed by a BSD-style license that can be 5# found in the LICENSE file. 6 7# pylint: disable-msg=C0111 8 9"""Unit tests for server/cros/dynamic_suite/job_status.py.""" 10 11import mox 12import shutil 13import tempfile 14import time 15import unittest 16import os 17import common 18 19from autotest_lib.server import frontend 20from autotest_lib.server.cros import host_lock_manager 21from autotest_lib.server.cros.dynamic_suite import host_spec 22from autotest_lib.server.cros.dynamic_suite import job_status 23from autotest_lib.server.cros.dynamic_suite.fakes import FakeHost, FakeJob 24from autotest_lib.server.cros.dynamic_suite.fakes import FakeStatus 25 26 27DEFAULT_WAITTIMEOUT_MINS = 60 * 4 28 29 30class StatusTest(mox.MoxTestBase): 31 """Unit tests for job_status.Status. 32 """ 33 34 35 def setUp(self): 36 super(StatusTest, self).setUp() 37 self.afe = self.mox.CreateMock(frontend.AFE) 38 self.tko = self.mox.CreateMock(frontend.TKO) 39 40 self.tmpdir = tempfile.mkdtemp(suffix=type(self).__name__) 41 42 43 def tearDown(self): 44 super(StatusTest, self).tearDown() 45 shutil.rmtree(self.tmpdir, ignore_errors=True) 46 47 48 def testGatherJobHostnamesAllRan(self): 49 """All entries for the job were assigned hosts.""" 50 job = FakeJob(0, []) 51 expected_hosts = ['host2', 'host1'] 52 entries = [{'status': 'Running', 53 'host': {'hostname': h}} for h in expected_hosts] 54 self.afe.run('get_host_queue_entries', job=job.id).AndReturn(entries) 55 self.mox.ReplayAll() 56 57 self.assertEquals(sorted(expected_hosts), 58 sorted(job_status.gather_job_hostnames(self.afe, 59 job))) 60 61 62 def testGatherJobHostnamesSomeRan(self): 63 """Not all entries for the job were assigned hosts.""" 64 job = FakeJob(0, []) 65 expected_hosts = ['host2', 'host1'] 66 entries = [{'status': 'Running', 67 'host': {'hostname': h}} for h in expected_hosts] 68 entries.append({'status': 'Running', 'host': None}) 69 self.afe.run('get_host_queue_entries', job=job.id).AndReturn(entries) 70 self.mox.ReplayAll() 71 72 self.assertEquals(sorted(expected_hosts + [None]), 73 sorted(job_status.gather_job_hostnames(self.afe, 74 job))) 75 76 77 def testGatherJobHostnamesSomeStillQueued(self): 78 """Not all entries for the job were Running, though all had hosts.""" 79 job = FakeJob(0, []) 80 expected_hosts = ['host2', 'host1'] 81 entries = [{'status': 'Running', 82 'host': {'hostname': h}} for h in expected_hosts] 83 entries[-1]['status'] = 'Queued' 84 self.afe.run('get_host_queue_entries', job=job.id).AndReturn(entries) 85 self.mox.ReplayAll() 86 87 self.assertTrue(expected_hosts[-1] not in 88 job_status.gather_job_hostnames(self.afe, job)) 89 90 91 def testWaitForJobToStart(self): 92 """Ensure we detect when a job has started running.""" 93 self.mox.StubOutWithMock(time, 'sleep') 94 95 job = FakeJob(0, []) 96 self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([job]) 97 self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([]) 98 time.sleep(mox.IgnoreArg()).MultipleTimes() 99 self.mox.ReplayAll() 100 101 job_status.wait_for_jobs_to_start(self.afe, [job]) 102 103 104 def testWaitForMultipleJobsToStart(self): 105 """Ensure we detect when all jobs have started running.""" 106 self.mox.StubOutWithMock(time, 'sleep') 107 108 job0 = FakeJob(0, []) 109 job1 = FakeJob(1, []) 110 self.afe.get_jobs(id=job0.id, not_yet_run=True).AndReturn([job0]) 111 self.afe.get_jobs(id=job1.id, not_yet_run=True).AndReturn([job1]) 112 self.afe.get_jobs(id=job0.id, not_yet_run=True).AndReturn([]) 113 self.afe.get_jobs(id=job1.id, not_yet_run=True).AndReturn([job1]) 114 self.afe.get_jobs(id=job1.id, not_yet_run=True).AndReturn([]) 115 time.sleep(mox.IgnoreArg()).MultipleTimes() 116 self.mox.ReplayAll() 117 118 job_status.wait_for_jobs_to_start(self.afe, [job0, job1]) 119 120 121 def testWaitForJobToStartAlreadyStarted(self): 122 """Ensure we don't wait forever if a job already started.""" 123 job = FakeJob(0, []) 124 self.afe.get_jobs(id=job.id, not_yet_run=True).AndReturn([]) 125 self.mox.ReplayAll() 126 job_status.wait_for_jobs_to_start(self.afe, [job]) 127 128 129 def testWaitForJobToFinish(self): 130 """Ensure we detect when a job has finished.""" 131 self.mox.StubOutWithMock(time, 'sleep') 132 133 job = FakeJob(0, []) 134 self.afe.get_jobs(id=job.id, finished=True).AndReturn([]) 135 self.afe.get_jobs(id=job.id, finished=True).AndReturn([job]) 136 time.sleep(mox.IgnoreArg()).MultipleTimes() 137 self.mox.ReplayAll() 138 139 job_status.wait_for_jobs_to_finish(self.afe, [job]) 140 141 142 def testWaitForMultipleJobsToFinish(self): 143 """Ensure we detect when all jobs have stopped running.""" 144 self.mox.StubOutWithMock(time, 'sleep') 145 146 job0 = FakeJob(0, []) 147 job1 = FakeJob(1, []) 148 self.afe.get_jobs(id=job0.id, finished=True).AndReturn([]) 149 self.afe.get_jobs(id=job1.id, finished=True).AndReturn([]) 150 self.afe.get_jobs(id=job0.id, finished=True).AndReturn([]) 151 self.afe.get_jobs(id=job1.id, finished=True).AndReturn([job1]) 152 self.afe.get_jobs(id=job0.id, finished=True).AndReturn([job0]) 153 time.sleep(mox.IgnoreArg()).MultipleTimes() 154 self.mox.ReplayAll() 155 156 job_status.wait_for_jobs_to_finish(self.afe, [job0, job1]) 157 158 159 def testWaitForJobToFinishAlreadyFinished(self): 160 """Ensure we don't wait forever if a job already finished.""" 161 job = FakeJob(0, []) 162 self.afe.get_jobs(id=job.id, finished=True).AndReturn([job]) 163 self.mox.ReplayAll() 164 job_status.wait_for_jobs_to_finish(self.afe, [job]) 165 166 167 def expect_hosts_query_and_lock(self, jobs, manager, running_hosts, 168 do_lock=True): 169 """Expect asking for a job's hosts and, potentially, lock them. 170 171 job_status.gather_job_hostnames() should be mocked out prior to call. 172 173 @param jobs: a lists of FakeJobs with a valid ID. 174 @param manager: mocked out HostLockManager 175 @param running_hosts: list of FakeHosts that should be listed as 176 'Running'. 177 @param do_lock: If |manager| should expect |running_hosts| to get 178 added and locked. 179 @return nothing, but self.afe, job_status.gather_job_hostnames, and 180 manager will have expectations set. 181 """ 182 used_hostnames = [] 183 for job in jobs: 184 job_status.gather_job_hostnames( 185 mox.IgnoreArg(), job).InAnyOrder().AndReturn(job.hostnames) 186 used_hostnames.extend([h for h in job.hostnames if h]) 187 188 if used_hostnames: 189 self.afe.get_hosts(mox.SameElementsAs(used_hostnames), 190 status='Running').AndReturn(running_hosts) 191 if do_lock: 192 manager.lock([h.hostname for h in running_hosts]) 193 194 195 def testWaitForSingleJobHostsToRunAndGetLocked(self): 196 """Ensure we lock all running hosts as they're discovered.""" 197 self.mox.StubOutWithMock(time, 'sleep') 198 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 199 200 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 201 expected_hostnames=['host1', 'host0'] 202 expected_hosts = [FakeHost(h) for h in expected_hostnames] 203 job = FakeJob(7, hostnames=[None, None]) 204 205 time.sleep(mox.IgnoreArg()).MultipleTimes() 206 self.expect_hosts_query_and_lock([job], manager, [], False) 207 # First, only one test in the job has had a host assigned at all. 208 # Since no hosts are running, expect no locking. 209 job.hostnames = [None] + expected_hostnames[1:] 210 self.expect_hosts_query_and_lock([job], manager, [], False) 211 212 # Then, that host starts running, but no other tests have hosts. 213 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:]) 214 215 # The second test gets a host assigned, but it's not yet running. 216 # Since no new running hosts are found, no locking should happen. 217 job.hostnames = expected_hostnames 218 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:], 219 False) 220 # The second test's host starts running as well. 221 self.expect_hosts_query_and_lock([job], manager, expected_hosts) 222 223 # The last loop update; doesn't impact behavior. 224 job_status.gather_job_hostnames(mox.IgnoreArg(), 225 job).AndReturn(expected_hostnames) 226 self.mox.ReplayAll() 227 self.assertEquals( 228 sorted(expected_hostnames), 229 sorted(job_status.wait_for_and_lock_job_hosts(self.afe, 230 [job], 231 manager))) 232 233 234 def testWaitForAndLockWithTimeOutInStartJobs(self): 235 """If we experience a timeout, no locked hosts are returned""" 236 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 237 self.mox.StubOutWithMock(job_status, '_abort_jobs_if_timedout') 238 239 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 240 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(True) 241 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 242 expected_hostnames=['host1', 'host0'] 243 expected_hosts = [FakeHost(h) for h in expected_hostnames] 244 job = FakeJob(7, hostnames=[None, None]) 245 job_status.gather_job_hostnames(mox.IgnoreArg(), 246 job).AndReturn(expected_hostnames) 247 self.mox.ReplayAll() 248 self.assertFalse(job_status.wait_for_and_lock_job_hosts(self.afe, 249 [job], manager, wait_timeout_mins=DEFAULT_WAITTIMEOUT_MINS)) 250 251 252 def testWaitForAndLockWithTimedOutSubJobs(self): 253 """If we experience a timeout, no locked hosts are returned""" 254 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 255 self.mox.StubOutWithMock(job_status, '_abort_jobs_if_timedout') 256 257 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 258 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(True) 259 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 260 expected_hostnames=['host1', 'host0'] 261 expected_hosts = [FakeHost(h) for h in expected_hostnames] 262 job = FakeJob(7, hostnames=[None, None]) 263 job_status.gather_job_hostnames(mox.IgnoreArg(), 264 job).AndReturn(expected_hostnames) 265 self.mox.ReplayAll() 266 self.assertEquals(set(), 267 job_status.wait_for_and_lock_job_hosts(self.afe, [job], 268 manager, wait_timeout_mins=DEFAULT_WAITTIMEOUT_MINS)) 269 270 271 def testWaitForSingleJobHostsWithTimeout(self): 272 """Discover a single host for this job then timeout.""" 273 self.mox.StubOutWithMock(time, 'sleep') 274 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 275 self.mox.StubOutWithMock(job_status, '_abort_jobs_if_timedout') 276 277 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 278 expected_hostnames=['host1', 'host0'] 279 expected_hosts = [FakeHost(h) for h in expected_hostnames] 280 job = FakeJob(7, hostnames=[None, None]) 281 282 time.sleep(mox.IgnoreArg()).MultipleTimes() 283 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 284 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False) 285 self.expect_hosts_query_and_lock([job], manager, [], False) 286 287 # First, only one test in the job has had a host assigned at all. 288 # Since no hosts are running, expect no locking. 289 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 290 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False) 291 job.hostnames = [None] + expected_hostnames[1:] 292 self.expect_hosts_query_and_lock([job], manager, [], False) 293 294 # Then, that host starts running, but no other tests have hosts. 295 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 296 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False) 297 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:]) 298 299 # The second test gets a host assigned, but it's not yet running. 300 # Since no new running hosts are found, no locking should happen. 301 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 302 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(False) 303 job.hostnames = expected_hostnames 304 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:], 305 False) 306 307 # A timeout occurs, and only the locked hosts should be returned. 308 job_status._abort_jobs_if_timedout(mox.IgnoreArg(), mox.IgnoreArg(), 309 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(True) 310 311 # The last loop update; doesn't impact behavior. 312 job_status.gather_job_hostnames(mox.IgnoreArg(), 313 job).AndReturn(expected_hostnames) 314 self.mox.ReplayAll() 315 316 # Because of the timeout only one host is returned. 317 expect_timeout_hostnames = ['host0'] 318 self.assertEquals(sorted(expect_timeout_hostnames),sorted( 319 job_status.wait_for_and_lock_job_hosts(self.afe, 320 [job],manager, wait_timeout_mins=DEFAULT_WAITTIMEOUT_MINS))) 321 322 323 def testWaitForSingleJobHostsToRunAndGetLockedSerially(self): 324 """Lock running hosts as discovered, serially.""" 325 self.mox.StubOutWithMock(time, 'sleep') 326 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 327 328 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 329 expected_hostnames=['host1', 'host0'] 330 expected_hosts = [FakeHost(h) for h in expected_hostnames] 331 job = FakeJob(7, hostnames=[None, None]) 332 333 time.sleep(mox.IgnoreArg()).MultipleTimes() 334 self.expect_hosts_query_and_lock([job], manager, [], False) 335 # First, only one test in the job has had a host assigned at all. 336 # Since no hosts are running, expect no locking. 337 job.hostnames = [None] + expected_hostnames[1:] 338 self.expect_hosts_query_and_lock([job], manager, [], False) 339 340 # Then, that host starts running, but no other tests have hosts. 341 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:]) 342 343 # The second test gets a host assigned, but it's not yet running. 344 # Since no new running hosts are found, no locking should happen. 345 job.hostnames = expected_hostnames 346 self.expect_hosts_query_and_lock([job], manager, expected_hosts[1:], 347 False) 348 # The second test's host starts running as well, and the first stops. 349 self.expect_hosts_query_and_lock([job], manager, expected_hosts[:1]) 350 351 # The last loop update; doesn't impact behavior. 352 job_status.gather_job_hostnames(mox.IgnoreArg(), 353 job).AndReturn(expected_hostnames) 354 self.mox.ReplayAll() 355 self.assertEquals( 356 sorted(expected_hostnames), 357 sorted(job_status.wait_for_and_lock_job_hosts(self.afe, 358 [job], 359 manager))) 360 361 362 def testWaitForMultiJobHostsToRunAndGetLocked(self): 363 """Ensure we lock all running hosts for all jobs as discovered.""" 364 self.mox.StubOutWithMock(time, 'sleep') 365 self.mox.StubOutWithMock(job_status, 'gather_job_hostnames') 366 367 manager = self.mox.CreateMock(host_lock_manager.HostLockManager) 368 expected_hostnames = ['host1', 'host0', 'host2'] 369 expected_hosts = [FakeHost(h) for h in expected_hostnames] 370 job0 = FakeJob(0, hostnames=[]) 371 job1 = FakeJob(1, hostnames=[]) 372 373 time.sleep(mox.IgnoreArg()).MultipleTimes() 374 # First, only one test in either job has had a host assigned at all. 375 # Since no hosts are running, expect no locking. 376 job0.hostnames = [None, expected_hostnames[2]] 377 job1.hostnames = [None] 378 self.expect_hosts_query_and_lock([job0, job1], manager, [], False) 379 380 # Then, that host starts running, but no other tests have hosts. 381 self.expect_hosts_query_and_lock([job0, job1], manager, 382 expected_hosts[2:]) 383 384 # The test in the second job gets a host assigned, but it's not yet 385 # running. 386 # Since no new running hosts are found, no locking should happen. 387 job1.hostnames = expected_hostnames[1:2] 388 self.expect_hosts_query_and_lock([job0, job1], manager, 389 expected_hosts[2:], False) 390 391 # The second job's test's host starts running as well. 392 self.expect_hosts_query_and_lock([job0, job1], manager, 393 expected_hosts[1:]) 394 395 # All three hosts across both jobs are now running. 396 job0.hostnames = [expected_hostnames[0], expected_hostnames[2]] 397 self.expect_hosts_query_and_lock([job0, job1], manager, expected_hosts) 398 399 # The last loop update; doesn't impact behavior. 400 job_status.gather_job_hostnames(mox.IgnoreArg(), 401 job0).AndReturn(job0.hostnames) 402 job_status.gather_job_hostnames(mox.IgnoreArg(), 403 job1).AndReturn(job1.hostnames) 404 405 self.mox.ReplayAll() 406 self.assertEquals( 407 sorted(expected_hostnames), 408 sorted(job_status.wait_for_and_lock_job_hosts(self.afe, 409 [job0, job1], 410 manager))) 411 412 413 def expect_result_gathering(self, job): 414 self.afe.get_jobs(id=job.id, finished=True).AndReturn(job) 415 self.expect_yield_job_entries(job) 416 417 418 def expect_yield_job_entries(self, job): 419 entries = [s.entry for s in job.statuses] 420 self.afe.run('get_host_queue_entries', 421 job=job.id).AndReturn(entries) 422 if True not in map(lambda e: 'aborted' in e and e['aborted'], entries): 423 self.tko.get_job_test_statuses_from_db(job.id).AndReturn( 424 job.statuses) 425 426 427 def testWaitForResults(self): 428 """Should gather status and return records for job summaries.""" 429 jobs = [FakeJob(0, [FakeStatus('GOOD', 'T0', ''), 430 FakeStatus('GOOD', 'T1', '')]), 431 FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False), 432 FakeStatus('GOOD', 'T1', '')]), 433 FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no')]), 434 FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken')]), 435 FakeJob(4, [FakeStatus('ERROR', 'SERVER_JOB', 'server error'), 436 FakeStatus('GOOD', 'T0', '')]),] 437 438 # TODO: Write a better test for the case where we yield 439 # results for aborts vs cannot yield results because of 440 # a premature abort. Currently almost all client aborts 441 # have been converted to failures, and when aborts do happen 442 # they result in server job failures for which we always 443 # want results. 444 # FakeJob(5, [FakeStatus('ERROR', 'T0', 'gah', True)]), 445 # The next job shouldn't be recorded in the results. 446 # FakeJob(6, [FakeStatus('GOOD', 'SERVER_JOB', '')])] 447 448 for status in jobs[4].statuses: 449 status.entry['job'] = {'name': 'broken_infra_job'} 450 451 # To simulate a job that isn't ready the first time we check. 452 self.afe.get_jobs(id=jobs[0].id, finished=True).AndReturn([]) 453 # Expect all the rest of the jobs to be good to go the first time. 454 for job in jobs[1:]: 455 self.expect_result_gathering(job) 456 # Then, expect job[0] to be ready. 457 self.expect_result_gathering(jobs[0]) 458 # Expect us to poll twice. 459 self.mox.StubOutWithMock(time, 'sleep') 460 time.sleep(5) 461 time.sleep(5) 462 self.mox.ReplayAll() 463 464 results = [result for result in job_status.wait_for_results(self.afe, 465 self.tko, 466 jobs)] 467 for job in jobs[:6]: # the 'GOOD' SERVER_JOB shouldn't be there. 468 for status in job.statuses: 469 self.assertTrue(True in map(status.equals_record, results)) 470 471 472 def testWaitForChildResults(self): 473 """Should gather status and return records for job summaries.""" 474 parent_job_id = 54321 475 jobs = [FakeJob(0, [FakeStatus('GOOD', 'T0', ''), 476 FakeStatus('GOOD', 'T1', '')], 477 parent_job_id=parent_job_id), 478 FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False), 479 FakeStatus('GOOD', 'T1', '')], 480 parent_job_id=parent_job_id), 481 FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no')], 482 parent_job_id=parent_job_id), 483 FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken')], 484 parent_job_id=parent_job_id), 485 FakeJob(4, [FakeStatus('ERROR', 'SERVER_JOB', 'server error'), 486 FakeStatus('GOOD', 'T0', '')], 487 parent_job_id=parent_job_id),] 488 489 # TODO: Write a better test for the case where we yield 490 # results for aborts vs cannot yield results because of 491 # a premature abort. Currently almost all client aborts 492 # have been converted to failures and when aborts do happen 493 # they result in server job failures for which we always 494 # want results. 495 #FakeJob(5, [FakeStatus('ERROR', 'T0', 'gah', True)], 496 # parent_job_id=parent_job_id), 497 # The next job shouldn't be recorded in the results. 498 #FakeJob(6, [FakeStatus('GOOD', 'SERVER_JOB', '')], 499 # parent_job_id=12345)] 500 for status in jobs[4].statuses: 501 status.entry['job'] = {'name': 'broken_infra_job'} 502 503 # Expect one call to get a list of all child jobs. 504 self.afe.get_jobs(parent_job_id=parent_job_id).AndReturn(jobs[:6]) 505 506 # Have the first two jobs be finished by the first polling, 507 # and the remaining ones (not including #6) for the second polling. 508 self.afe.get_jobs(parent_job_id=parent_job_id, 509 finished=True).AndReturn([jobs[1]]) 510 self.expect_yield_job_entries(jobs[1]) 511 512 self.afe.get_jobs(parent_job_id=parent_job_id, 513 finished=True).AndReturn(jobs[:2]) 514 self.expect_yield_job_entries(jobs[0]) 515 516 self.afe.get_jobs(parent_job_id=parent_job_id, 517 finished=True).AndReturn(jobs[:6]) 518 for job in jobs[2:6]: 519 self.expect_yield_job_entries(job) 520 # Then, expect job[0] to be ready. 521 522 # Expect us to poll thrice 523 self.mox.StubOutWithMock(time, 'sleep') 524 time.sleep(5) 525 time.sleep(5) 526 time.sleep(5) 527 self.mox.ReplayAll() 528 529 results = [result for result in job_status.wait_for_child_results( 530 self.afe, 531 self.tko, 532 parent_job_id)] 533 for job in jobs[:6]: # the 'GOOD' SERVER_JOB shouldn't be there. 534 for status in job.statuses: 535 self.assertTrue(True in map(status.equals_record, results)) 536 537 538 def testYieldSubdir(self): 539 """Make sure subdir are properly set for test and non-test status.""" 540 job_tag = '0-owner/172.33.44.55' 541 job_name = 'broken_infra_job' 542 job = FakeJob(0, [FakeStatus('ERROR', 'SERVER_JOB', 'server error', 543 subdir='---', job_tag=job_tag), 544 FakeStatus('GOOD', 'T0', '', 545 subdir='T0.subdir', job_tag=job_tag)], 546 parent_job_id=54321) 547 for status in job.statuses: 548 status.entry['job'] = {'name': job_name} 549 self.expect_yield_job_entries(job) 550 self.mox.ReplayAll() 551 results = list(job_status._yield_job_results(self.afe, self.tko, job)) 552 for i in range(len(results)): 553 result = results[i] 554 if result.test_name.endswith('SERVER_JOB'): 555 expected_name = '%s_%s' % (job_name, job.statuses[i].test_name) 556 expected_subdir = job_tag 557 else: 558 expected_name = job.statuses[i].test_name 559 expected_subdir = os.path.join(job_tag, job.statuses[i].subdir) 560 self.assertEqual(results[i].test_name, expected_name) 561 self.assertEqual(results[i].subdir, expected_subdir) 562 563 564 def testGatherPerHostResults(self): 565 """Should gather per host results.""" 566 # For the 0th job, the 1st entry is more bad/specific. 567 # For all the others, it's the 0th that we expect. 568 jobs = [FakeJob(0, [FakeStatus('FAIL', 'T0', '', hostname='h0'), 569 FakeStatus('FAIL', 'T1', 'bad', hostname='h0')]), 570 FakeJob(1, [FakeStatus('ERROR', 'T0', 'err', False, 'h1'), 571 FakeStatus('GOOD', 'T1', '', hostname='h1')]), 572 FakeJob(2, [FakeStatus('TEST_NA', 'T0', 'no', hostname='h2')]), 573 FakeJob(3, [FakeStatus('FAIL', 'T0', 'broken', hostname='h3')]), 574 FakeJob(4, [FakeStatus('ERROR', 'T0', 'gah', True, 'h4')]), 575 FakeJob(5, [FakeStatus('GOOD', 'T0', 'Yay', hostname='h5')])] 576 # Method under test returns status available right now. 577 for job in jobs: 578 entries = map(lambda s: s.entry, job.statuses) 579 self.afe.run('get_host_queue_entries', 580 job=job.id).AndReturn(entries) 581 self.tko.get_job_test_statuses_from_db(job.id).AndReturn( 582 job.statuses) 583 self.mox.ReplayAll() 584 585 results = job_status.gather_per_host_results(self.afe, 586 self.tko, 587 jobs).values() 588 for status in [jobs[0].statuses[1]] + [j.statuses[0] for j in jobs[1:]]: 589 self.assertTrue(True in map(status.equals_hostname_record, results)) 590 591 592 def _prepareForReporting(self, results): 593 def callable(x): 594 pass 595 596 record_entity = self.mox.CreateMock(callable) 597 group = self.mox.CreateMock(host_spec.HostGroup) 598 599 statuses = {} 600 all_bad = True not in results.itervalues() 601 for hostname, result in results.iteritems(): 602 status = self.mox.CreateMock(job_status.Status) 603 status.record_all(record_entity).InAnyOrder('recording') 604 status.is_good().InAnyOrder('recording').AndReturn(result) 605 if not result: 606 status.test_name = 'test' 607 if not all_bad: 608 status.override_status('WARN').InAnyOrder('recording') 609 else: 610 group.mark_host_success(hostname).InAnyOrder('recording') 611 statuses[hostname] = status 612 613 return (statuses, group, record_entity) 614 615 616 def testRecordAndReportGoodResults(self): 617 """Record and report success across the board.""" 618 results = {'h1': True, 'h2': True} 619 (statuses, group, record_entity) = self._prepareForReporting(results) 620 group.enough_hosts_succeeded().AndReturn(True) 621 self.mox.ReplayAll() 622 623 success = job_status.check_and_record_reimage_results(statuses, 624 group, 625 record_entity) 626 self.assertTrue(success) 627 628 629 def testRecordAndReportOkayResults(self): 630 """Record and report success of at least one host.""" 631 results = {'h1': False, 'h2': True} 632 (statuses, group, record_entity) = self._prepareForReporting(results) 633 group.enough_hosts_succeeded().AndReturn(True) 634 self.mox.ReplayAll() 635 636 success = job_status.check_and_record_reimage_results(statuses, 637 group, 638 record_entity) 639 self.assertTrue(success) 640 641 642 def testRecordAndReportBadResults(self): 643 """Record and report failure across the board.""" 644 results = {'h1': False, 'h2': False} 645 (statuses, group, record_entity) = self._prepareForReporting(results) 646 group.enough_hosts_succeeded().AndReturn(False) 647 self.mox.ReplayAll() 648 649 success = job_status.check_and_record_reimage_results(statuses, 650 group, 651 record_entity) 652 self.assertFalse(success) 653 654 655if __name__ == '__main__': 656 unittest.main() 657