#!/usr/bin/python
#pylint: disable-msg=C0111

# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
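"""Unit tests for the scheduler's rdb host acquisition logic.

These tests exercise host/request matching on deps, acls and priorities,
host leasing and release, frontend job scheduling, and min_duts handling,
using the test helpers provided by rdb_testing_utils.
"""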
import collections

import common

from autotest_lib.client.common_lib import host_queue_entry_states
from autotest_lib.client.common_lib.test_utils import unittest
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.frontend.afe import models
from autotest_lib.frontend.afe import rdb_model_extensions
from autotest_lib.scheduler import rdb
from autotest_lib.scheduler import rdb_hosts
from autotest_lib.scheduler import rdb_lib
from autotest_lib.scheduler import rdb_requests
from autotest_lib.scheduler import rdb_testing_utils
from autotest_lib.server.cros import provision


class AssignmentValidator(object):
    """Utility class to check that priority inversion doesn't happen."""


    @staticmethod
    def check_acls_deps(host, request):
        """Check if a host and request match by comparing acls and deps.

        @param host: A dictionary representing attributes of the host.
        @param request: A request, as defined in rdb_requests.

        @return: True if the deps/acls of the request match the host.
        """
        # Unfortunately the host's labels are label names, not ids.
        request_deps = set([l.name for l in
                models.Label.objects.filter(id__in=request.deps)])
        return (set(host['labels']).intersection(request_deps) == request_deps
                and set(host['acls']).intersection(request.acls))

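    # Illustration of the matching rule above (not used by the tests): a host
    # with labels ['a', 'b'] and acls ['x'] satisfies a request whose deps
    # resolve to {'a'} and whose acls contain 'x', since every request dep
    # must be present on the host while only one acl needs to intersect.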

    @staticmethod
    def find_matching_host_for_request(hosts, request):
        """Find a host from the given list of hosts, matching the request.

        @param hosts: A list of dictionaries representing host attributes.
        @param request: The unsatisfied request.

        @return: A host, if a matching host is found from the input list.
        """
        if not hosts or not request:
            return None
        for host in hosts:
            if AssignmentValidator.check_acls_deps(host, request):
                return host


    @staticmethod
    def sort_requests(requests):
        """Sort the requests by priority.

        @param requests: Unordered requests.

        @return: A list of (request, count) tuples ordered by descending
            priority.
        """
        return sorted(collections.Counter(requests).items(),
                key=lambda request: request[0].priority, reverse=True)

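    # Note: collections.Counter collapses duplicate requests, so the result is
    # a list of (request, count) pairs; e.g. (with illustrative names)
    # [req_a, req_b, req_a] becomes [(req_a, 2), (req_b, 1)] when req_a has
    # the higher priority.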

    @staticmethod
    def verify_priority(request_queue, result):
        """Verify that no priority inversion occurred in the response.

        @param request_queue: The list of requests sent to the rdb.
        @param result: A dictionary mapping each request to the hosts
            assigned to it.

        @raises ValueError: If a lower priority request was assigned a host
            that could also have satisfied an unsatisfied higher priority
            request.
        """
        requests = AssignmentValidator.sort_requests(request_queue)
        for request, count in requests:
            hosts = result.get(request)
            # The request was completely satisfied.
            if hosts and len(hosts) == count:
                continue
            # Go through all hosts given to lower priority requests and
            # make sure we couldn't have allocated one of them for this
            # unsatisfied higher priority request.
            lower_requests = requests[requests.index((request, count))+1:]
            for lower_request, count in lower_requests:
                if (lower_request.priority < request.priority and
                    AssignmentValidator.find_matching_host_for_request(
                            result.get(lower_request), request)):
                    raise ValueError('Priority inversion occurred between '
                            'priorities %s and %s' %
                            (request.priority, lower_request.priority))

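    # Example of the inversion verify_priority catches: if a priority 0
    # request was handed a host whose deps/acls would also have satisfied an
    # unsatisfied priority 2 request, a ValueError is raised.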

    @staticmethod
    def priority_checking_response_handler(request_manager):
        """Fake response handler wrapper for any request_manager.

        Check that higher priority requests get a response before lower
        priority requests, by re-validating all the hosts assigned to lower
        priority requests against the unsatisfied higher priority ones.

        @param request_manager: A request_manager as defined in rdb_lib.

        @raises ValueError: If priority inversion is detected.
        """
        # First call the rdb to make its decisions, then sort the requests
        # by priority and make sure unsatisfied requests higher up in the list
        # could not have been satisfied by hosts assigned to requests lower
        # down in the list.
        result = request_manager.api_call(request_manager.request_queue)
        if not result:
            raise ValueError('Expected results but got none.')
        AssignmentValidator.verify_priority(
                request_manager.request_queue, result)
        for hosts in result.values():
            for host in hosts:
                yield host


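# The priority tests below typically wire the validator in by stubbing the
# request manager's response method, e.g.:
#
#   self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
#                      AssignmentValidator.priority_checking_response_handler)
#   self.check_hosts(rdb_lib.acquire_hosts(queue_entries))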
class BaseRDBTest(rdb_testing_utils.AbstractBaseRDBTester, unittest.TestCase):
    """Unit tests for the rdb's host acquisition logic."""

    _config_section = 'AUTOTEST_WEB'


    def testAcquireLeasedHostBasic(self):
        """Test that acquisition of a leased host doesn't happen.

        @raises AssertionError: If the one host that satisfies the request
            is acquired.
        """
        job = self.create_job(deps=set(['a']))
        host = self.db_helper.create_host('h1', deps=set(['a']))
        host.leased = 1
        host.save()
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 1 and hosts[0] is None)


    def testAcquireLeasedHostRace(self):
        """Test behaviour when hosts are leased just before acquisition.

        If a fraction of the hosts somehow get leased between finding and
        acquisition, the rdb should just return the remaining hosts for the
        request to use.

        @raises AssertionError: If both requests get a host successfully,
            since one host gets leased before the final attempt to lease both.
        """
        j1 = self.create_job(deps=set(['a']))
        j2 = self.create_job(deps=set(['a']))
        hosts = [self.db_helper.create_host('h1', deps=set(['a'])),
                 self.db_helper.create_host('h2', deps=set(['a']))]

        @rdb_hosts.return_rdb_host
        def local_find_hosts(host_query_manager, deps, acls):
            """Return a predetermined list of hosts, one of which is leased."""
            h1 = models.Host.objects.get(hostname='h1')
            h1.leased = 1
            h1.save()
            h2 = models.Host.objects.get(hostname='h2')
            return [h1, h2]

        self.god.stub_with(rdb.AvailableHostQueryManager, 'find_hosts',
                           local_find_hosts)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 2 and None in hosts)
        self.check_hosts(iter(hosts))


    def testHostReleaseStates(self):
        """Test that we will only release an unused host if it is in Ready.

        @raises AssertionError: If the host gets released in any other state.
        """
        host = self.db_helper.create_host('h1', deps=set(['x']))
        for state in rdb_model_extensions.AbstractHostModel.Status.names:
            host.status = state
            host.leased = 1
            host.save()
            self._release_unused_hosts()
            host = models.Host.objects.get(hostname='h1')
            self.assertTrue(host.leased == (state != 'Ready'))


    def testHostReleaseHQE(self):
        """Test that we will not release a ready host if it's being used.

        @raises AssertionError: If the host is released even though it has
            been assigned to an active hqe.
        """
        # Create a host and lease it out in Ready.
        host = self.db_helper.create_host('h1', deps=set(['x']))
        host.status = 'Ready'
        host.leased = 1
        host.save()

        # Create a job and give its hqe the leased host.
        job = self.create_job(deps=set(['x']))
        self.db_helper.add_host_to_job(host, job.id)
        hqe = models.HostQueueEntry.objects.get(job_id=job.id)

        # Activate the hqe by setting its state.
        hqe.status = host_queue_entry_states.ACTIVE_STATUSES[0]
        hqe.save()

        # Make sure the hqe's host isn't released, even if it's in Ready.
        self._release_unused_hosts()
        host = models.Host.objects.get(hostname='h1')
        self.assertTrue(host.leased == 1)


    def testBasicDepsAcls(self):
        """Test a basic deps/acls request.

        Make sure that a basic request with deps and acls finds a host from
        the ready pool that has matching labels and is in a matching aclgroup.

        @raises AssertionError: If the request doesn't find a host, since we
            insert a matching host in the ready pool.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)
        job = self.create_job(user='autotest_system', deps=deps, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.check_host_assignment(job.id, matching_host.id)
        self.assertTrue(matching_host.leased == 1)


    def testPreferredDeps(self):
        """Test that preferred deps are respected.

        If multiple hosts satisfy a job's deps, the one with the preferred
        label will be assigned to the job.

        @raises AssertionError: If a host without a preferred label is
                                assigned to the job instead of one with
                                a preferred label.
        """
        lumpy_deps = set(['board:lumpy'])
        stumpy_deps = set(['board:stumpy'])
        stumpy_deps_with_crosversion = set(
                ['board:stumpy', 'cros-version:lumpy-release/R41-6323.0.0'])

        acls = set(['a', 'b'])
        # Hosts lumpy1 and lumpy2 are created as a control group,
        # which ensures that if no preferred label is used, the host
        # with a smaller id will be chosen first. We need to make sure
        # stumpy2 was chosen because it has a cros-version label, not
        # because of other randomness.
        self.db_helper.create_host('lumpy1', deps=lumpy_deps, acls=acls)
        self.db_helper.create_host('lumpy2', deps=lumpy_deps, acls=acls)
        self.db_helper.create_host('stumpy1', deps=stumpy_deps, acls=acls)
        self.db_helper.create_host(
                    'stumpy2', deps=stumpy_deps_with_crosversion, acls=acls)
        job_1 = self.create_job(user='autotest_system',
                              deps=lumpy_deps, acls=acls)
        job_2 = self.create_job(user='autotest_system',
                              deps=stumpy_deps_with_crosversion, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_hosts = list(rdb_lib.acquire_hosts(queue_entries))
        assignment = {}
        for job, host in zip(queue_entries, matching_hosts):
            self.check_host_assignment(job.id, host.id)
            assignment[job.id] = host.hostname
        self.assertEqual(assignment[job_1.id], 'lumpy1')
        self.assertEqual(assignment[job_2.id], 'stumpy2')


    def testBadDeps(self):
        """Test that we find no hosts when only acls match.

        @raises AssertionError: If the request finds a host, since the only
            host in the ready pool will not have matching deps.
        """
        host_labels = set(['a'])
        job_deps = set(['b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=host_labels, acls=acls)
        job = self.create_job(user='autotest_system', deps=job_deps, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(not matching_host)


    def testBadAcls(self):
        """Test that we find no hosts when only deps match.

        @raises AssertionError: If the request finds a host, since the only
            host in the ready pool will not have matching acls.
        """
        deps = set(['a'])
        host_acls = set(['a'])
        job_acls = set(['b'])
        self.db_helper.create_host('h1', deps=deps, acls=host_acls)

        # Create the job as a new user who is only in the 'b' and 'Everyone'
        # aclgroups. Though there are several hosts in the Everyone group, the
        # one host that has the 'a' dep isn't.
        job = self.create_job(user='new_user', deps=deps, acls=job_acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(not matching_host)


    def testBasicPriority(self):
        """Test that priority inversion doesn't happen.

        Schedule 2 jobs with the same deps, acls and user, but different
        priorities, and confirm that the higher priority request gets the host.
        This confirmation happens through the AssignmentValidator.

        @raises AssertionError: If the unimportant request gets host h1 instead
            of the important request.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)
        important_job = self.create_job(user='autotest_system',
                deps=deps, acls=acls, priority=2)
        un_important_job = self.create_job(user='autotest_system',
                deps=deps, acls=acls, priority=0)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                AssignmentValidator.priority_checking_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testPriorityLevels(self):
        """Test that priority inversion doesn't happen.

        Increases a job's priority and makes several requests for hosts,
        checking that priority inversion doesn't happen.

        @raises AssertionError: If the unimportant job gets h1 while it is
            still unimportant, or doesn't get h1 after it becomes the
            most important job.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)

        # Create jobs that will bucket differently and confirm that jobs in an
        # earlier bucket get a host.
        first_job = self.create_job(user='autotest_system', deps=deps,
                acls=acls)
        important_job = self.create_job(user='autotest_system', deps=deps,
                acls=acls, priority=2)
        deps.pop()
        unimportant_job = self.create_job(user='someother_system', deps=deps,
                acls=acls, priority=1)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                AssignmentValidator.priority_checking_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

        # Elevate the priority of the unimportant job, so we now have
        # 2 jobs at the same priority.
        self.db_helper.increment_priority(job_id=unimportant_job.id)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        self._release_unused_hosts()
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

        # Elevate the priority of the unimportant job again, and confirm that
        # it now gets the host over the jobs that got it the last time.
        self.db_helper.increment_priority(job_id=unimportant_job.id)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        self._release_unused_hosts()
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testFrontendJobScheduling(self):
        """Test basic frontend job scheduling.

        @raises AssertionError: If the received host doesn't match the
            requested host, or the mismatching host is returned instead.
        """
        deps = set(['x', 'y'])
        acls = set(['a', 'b'])

        # Create 2 frontend jobs and only one matching host.
        matching_job = self.create_job(acls=acls, deps=deps)
        matching_host = self.db_helper.create_host('h1', acls=acls, deps=deps)
        mis_matching_job = self.create_job(acls=acls, deps=deps)
        mis_matching_host = self.db_helper.create_host(
                'h2', acls=acls, deps=deps.pop())
        self.db_helper.add_host_to_job(matching_host, matching_job.id)
        self.db_helper.add_host_to_job(mis_matching_host, mis_matching_job.id)

        # Check that only the matching host is returned, and that we get 'None'
        # for the second request.
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 2 and None in hosts)
        returned_host = [host for host in hosts if host].pop()
        self.assertTrue(matching_host.id == returned_host.id)


    def testFrontendJobPriority(self):
        """Test that frontend job scheduling doesn't ignore priorities.

        @raises ValueError: If the priorities of frontend jobs are ignored.
        """
        board = 'x'
        high_priority = self.create_job(priority=2, deps=set([board]))
        low_priority = self.create_job(priority=1, deps=set([board]))
        host = self.db_helper.create_host('h1', deps=set([board]))
        self.db_helper.add_host_to_job(host, low_priority.id)
        self.db_helper.add_host_to_job(host, high_priority.id)

        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        def local_response_handler(request_manager):
            """Confirms that a higher priority frontend job gets a host.

            @raises ValueError: If priority inversion happens and the job
                with priority 1 gets the host instead.
            """
            result = request_manager.api_call(request_manager.request_queue)
            if not result:
                raise ValueError('Expected the high priority request to '
                                 'get a host, but the result is empty.')
            for request, hosts in result.iteritems():
                if request.priority == 1:
                    raise ValueError('Priority of frontend job ignored.')
                if len(hosts) > 1:
                    raise ValueError('Multiple hosts returned against one '
                                     'frontend job scheduling request.')
                yield hosts[0]

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                           local_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testSuiteOrderedHostAcquisition(self):
        """Test that older suite jobs acquire hosts first.

        Make sure older suite jobs get hosts first, but not at the expense of
        higher priority jobs.

        @raises ValueError: If unexpected acquisitions occur, e.g.:
            suite_job_2 acquires the last 2 hosts instead of suite_job_1.
            isolated_important_job doesn't get any hosts.
            Any job acquires more hosts than necessary.
        """
        board = 'x'

        # Create 2 suites such that the later suite has an ordering of deps
        # that places it ahead of the earlier suite, if parent_job_id is
        # ignored.
        suite_without_dep = self.create_suite(num=2, priority=0, board=board)

        suite_with_dep = self.create_suite(num=1, priority=0, board=board)
        self.db_helper.add_deps_to_job(suite_with_dep[0], dep_names=list('y'))

        # Create an important job that should be ahead of the first suite,
        # because priority trumps parent_job_id and time of creation.
        isolated_important_job = self.create_job(priority=3, deps=set([board]))

        # Create 3 hosts, all with the deps to satisfy the last suite.
        for i in range(0, 3):
            self.db_helper.create_host('h%s' % i, deps=set([board, 'y']))

        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        def local_response_handler(request_manager):
            """Reorder requests and check host acquisition.

            @raises ValueError: If unexpected/no acquisitions occur.
            """
            if any([request for request in request_manager.request_queue
                    if request.parent_job_id is None]):
                raise ValueError('Parent_job_id can never be None.')

            # This will result in the ordering:
            # [suite_2_1, suite_1_*, suite_1_*, isolated_important_job]
            # The priority scheduling order should be:
            # [isolated_important_job, suite_1_*, suite_1_*, suite_2_1]
            # Since:
            #   a. the isolated_important_job is the most important.
            #   b. suite_1 was created before suite_2, regardless of deps.
            disorderly_queue = sorted(request_manager.request_queue,
                    key=lambda r: -r.parent_job_id)
            request_manager.request_queue = disorderly_queue
            result = request_manager.api_call(request_manager.request_queue)
            if not result:
                raise ValueError('Expected results but got none.')

            # Verify that the isolated_important_job got a host, and that the
            # first suite got both remaining free hosts.
            for request, hosts in result.iteritems():
                if request.parent_job_id == 0:
                    if len(hosts) > 1:
                        raise ValueError('First job acquired more hosts than '
                                'necessary. Response map: %s' % result)
                    continue
                if request.parent_job_id == 1:
                    if len(hosts) < 2:
                        raise ValueError('First suite job requests were not '
                                'satisfied. Response map: %s' % result)
                    continue
                # The second suite job got hosts instead of one of
                # the others. Either way this is a failure.
                raise ValueError('Unexpected host acquisition. '
                        'Response map: %s' % result)
            yield None

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                           local_response_handler)
        list(rdb_lib.acquire_hosts(queue_entries))


    def testConfigurations(self):
        """Test that configurations don't matter.

        @raises AssertionError: If the request doesn't find a host, which
            will happen if configurations are not stripped out.
        """
        self.god.stub_with(provision.Cleanup,
                           '_actions',
                           {'action': 'fakeTest'})
        job_labels = set(['action', 'a'])
        host_deps = set(['a'])
        db_host = self.db_helper.create_host('h1', deps=host_deps)
        self.create_job(user='autotest_system', deps=job_labels)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(matching_host.id == db_host.id)


class RDBMinDutTest(
        rdb_testing_utils.AbstractBaseRDBTester, unittest.TestCase):
    """Test min_duts handling in the AvailableHostRequestHandler."""

    _config_section = 'AUTOTEST_WEB'


    def min_dut_test_helper(self, num_hosts, suite_settings):
        """A helper function to test min_dut logic.

        @param num_hosts: Total number of hosts to create.
        @param suite_settings: A list of dictionaries specifying how suites
                               should be created and verified.
                E.g.  {'priority': 10, 'num_jobs': 3,
                       'min_duts': 2, 'expected_acquired': 1}
                       With this setting, a suite with 3 child jobs,
                       priority 10 and min_duts 2 will be created.
                       The suite is expected to acquire 1 dut.
        """
        acls = set(['fake_acl'])
        hosts = []
        for i in range(0, num_hosts):
            hosts.append(self.db_helper.create_host(
                'h%d' % i, deps=set(['board:lumpy']), acls=acls))
        suites = {}
        suite_min_duts = {}
        for setting in suite_settings:
            s = self.create_suite(num=setting['num_jobs'],
                                  priority=setting['priority'],
                                  board='board:lumpy', acls=acls)
            # An empty list is used to store the acquired hosts.
            suites[s['parent_job'].id] = (setting, [])
            suite_min_duts[s['parent_job'].id] = setting['min_duts']
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_hosts = rdb_lib.acquire_hosts(queue_entries, suite_min_duts)
        for host, queue_entry in zip(matching_hosts, queue_entries):
            if host:
                suites[queue_entry.job.parent_job_id][1].append(host)

        for setting, hosts in suites.itervalues():
            self.assertEqual(len(hosts), setting['expected_acquired'])

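    # The expectations below encode the intended min_duts behaviour: each
    # suite's minimum DUT count is satisfied first (in priority order), and
    # any leftover hosts are then handed out by priority up to a suite's job
    # count. E.g. in testHighPrioritySatisfied: 10 hosts, suite1 (priority 20,
    # 4 jobs, min_duts 2) and suite2 (priority 10, 7 jobs, min_duts 5) first
    # get 2 and 5 hosts; of the 3 hosts left, suite1 takes 2 more (capped at
    # its 4 jobs) and suite2 takes the last one, giving 4 and 6.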

    def testHighPriorityTakeAll(self):
        """Min duts not satisfied."""
        num_hosts = 1
        suite1 = {'priority': 20, 'num_jobs': 3, 'min_duts': 2,
                  'expected_acquired': 1}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 0}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testHighPriorityMinSatisfied(self):
        """High priority min duts satisfied."""
        num_hosts = 4
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 2}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testAllPrioritiesMinSatisfied(self):
        """Min duts satisfied."""
        num_hosts = 7
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 5}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testHighPrioritySatisfied(self):
        """Min duts satisfied, high priority suite satisfied."""
        num_hosts = 10
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 4}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 6}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testEqualPriorityFirstSuiteMinSatisfied(self):
        """Equal priority, earlier suite got min duts."""
        num_hosts = 4
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 20, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 2}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testEqualPriorityAllSuitesMinSatisfied(self):
        """Equal priority, all suites got min duts."""
        num_hosts = 7
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 20, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 5}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


if __name__ == '__main__':
    unittest.main()