• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2015 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import errno
6import os
7import re
8import shutil
9import signal
10import stat
11import subprocess
12import sys
13import tempfile
14import threading
15
16import logging
17# Turn the logging level to INFO before importing other autotest
18# code, to avoid having failed import logging messages confuse the
19# test_that user.
20logging.basicConfig(level=logging.INFO)
21
22import common
23from autotest_lib.client.common_lib.cros import dev_server, retry
24from autotest_lib.client.common_lib import logging_manager
25from autotest_lib.server.cros.dynamic_suite import suite, constants
26from autotest_lib.server.cros import provision
27from autotest_lib.server.hosts import factory
28from autotest_lib.server import autoserv_utils
29from autotest_lib.server import server_logging_config
30from autotest_lib.server import utils
31from autotest_lib.utils import labellib
32
33
# Handle on the currently-running autoserv subprocess; set/cleared by
# _run_autoserv() and used by sigint_handler() to forward SIGINT.
# None when no autoserv process is running.
_autoserv_proc = None
# Serializes sigint_handler() so duplicate signals are ignored.
_sigint_handler_lock = threading.Lock()

# Seconds sigint_handler() waits for autoserv to exit after forwarding
# SIGINT before killing it outright.
_AUTOSERV_SIGINT_TIMEOUT_SECONDS = 5
# Placeholder board/build labels used when the caller supplies none.
NO_BOARD = 'ad_hoc_board'
NO_BUILD = 'ad_hoc_build'
# Matches 'suite:<name>' TEST arguments; group 1 is the suite name.
_SUITE_REGEX = r'suite:(.*)'

# Test ssh private key shipped with the Chrome OS test image scripts;
# used as the default identity by add_ssh_identity().
_TEST_KEY_FILENAME = 'testing_rsa'
TEST_KEY_PATH = ('/mnt/host/source/src/scripts/mod_for_test_scripts/'
                  'ssh_keys/%s' % _TEST_KEY_FILENAME)

# Well-known location for the most recent results; not referenced in the
# visible portion of this file (presumably used by result-reporting code).
_LATEST_RESULTS_DIRECTORY = '/tmp/test_that_latest'
47
48
class TestThatRunError(Exception):
    """Raised if test_that encounters something unexpected while running.

    E.g. raised by _auto_detect_labels() when the remote host is missing
    from the local afe database.
    """
51
52
class TestThatProvisioningError(Exception):
    """Raised when it fails to provision the DUT to the requested build.

    Raised by run_provisioning_job() on a non-zero autoserv exit and
    caught by perform_local_run(), which aborts the run.
    """
55
56
def add_common_args(parser):
    """
    Register the command-line arguments shared by test_that and test_droid.

    @param parser: argparse.ArgumentParser object to add arguments to.
    """
    add = parser.add_argument
    add('tests', metavar='TEST', nargs='+',
        help='Run given test(s). Use suite:SUITE to specify '
             'test suite. Use e:[NAME_PATTERN] to specify a '
             'NAME-matching regular expression. Use '
             'f:[FILE_PATTERN] to specify a filename matching '
             'regular expression. Specified regular '
             'expressions will be implicitly wrapped in '
             '^ and $.')
    add('--fast', dest='fast_mode', action='store_true', default=False,
        help='Enable fast mode.  This will cause test_droid '
             'to skip time consuming steps like sysinfo and '
             'collecting crash information.')
    add('--args', metavar='ARGS',
        help='Whitespace separated argument string to pass '
             'through to test. Only supported for runs '
             'against a local DUT. '
             "e.g. --args='foo=bar cat=\"in a hat\"'.")
    add('--results_dir', metavar='RESULTS_DIR', default=None,
        help='Instead of storing results in a new subdirectory'
             ' of /tmp , store results in RESULTS_DIR. If '
             'RESULTS_DIR already exists, it will be deleted.')
    add('--pretend', action='store_true', default=False,
        help='Print autoserv commands that would be run, '
             'rather than running them.')
    add('--no-experimental', dest='no_experimental', action='store_true',
        default=False,
        help='When scheduling a suite, skip any tests marked '
             'as experimental. Applies only to tests scheduled'
             ' via suite:[SUITE].')
    add('--enforce-deps', dest='enforce_deps', action='store_true',
        default=False,
        help='Skip tests whose DEPENDENCIES can not '
             'be satisfied.')
    add('--debug', action='store_true',
        help='Include DEBUG level messages in stdout. Note: '
             'these messages will be included in output log '
             'file regardless. In addition, turn on autoserv '
             'verbosity.')
    add('--iterations', action='store', type=int, default=1,
        help='Number of times to run the tests specified.')
    add('--ssh_verbosity', action='store', type=int,
        choices=[0, 1, 2, 3], default=0,
        help='Verbosity level for ssh, between 0 and 3 '
             'inclusive.')
    add('--ssh_options', action='store', default=None,
        help='A string giving additional options to be '
             'added to ssh commands.')
111
112
class LocalSuite(suite.Suite):
    """Suite subclass that knows how to run and retry jobs locally."""

    def handle_local_result(self, job_id, results_dir, record):
        """
        Record a locally-run job's outcome and maybe schedule a retry.

        @param job_id: int ID of job
        @param results_dir: absolute path where test results were stored.
        @param record: callable that records job status

        @returns: new job_id if a job was scheduled for retry, None
                  otherwise.
        """
        logging.debug('Parsing test results for job %s', job_id)
        status_code = generate_report(results_dir, just_status_code=True)
        logging.debug('Handling result of job %s', job_id)
        logging.debug(self._retry_handler._retry_map)
        if status_code == 0:
            logging.debug('All tests for job %s succeeded, no retry', job_id)
            if self._retry_handler.job_present(job_id):
                self._retry_handler.set_attempted(job_id)
            return None
        # Non-zero status: retry only when retries are enabled and the
        # retry handler still allows one for this job.
        if not (self._job_retry
                and self._retry_handler._should_retry_local_job(job_id)):
            return None
        return self._retry_local_result(job_id, record)

    def _retry_local_result(self, job_id, record):
        """
        Schedule a retry of the test that ran as job |job_id|.

        @param job_id: int ID of job
        @param record: callable that records job status.
                 prototype:
                   record(base_job.status_log_entry)

        @returns: new job_id if a job was scheduled for retry, None
                  otherwise.
        """
        test = self._jobs_to_tests[job_id]
        logging.debug('Attempting to retry job %s, test %s', job_id, test.name)
        # Retries always run with fast mode off.
        test.fast = False
        retry_job = self._schedule_test(record=record, test=test,
                                        retry_for=job_id)
        return retry_job.id if retry_job else None

    def test_name_from_job(self, job_id):
        """Find the name of the test run by a job with a given job ID."""
        test = self._jobs_to_tests[job_id]
        return test.name if test else None
167
168
169
def fetch_local_suite(autotest_path, suite_predicate, afe, test_arg, remote,
                      build=NO_BUILD, board=NO_BOARD,
                      results_directory=None, no_experimental=False,
                      ignore_deps=True):
    """Create a LocalSuite from the given suite predicate.

    Satisfaction of dependencies is enforced by Suite.schedule() if
    ignore_deps is False. Note that this method assumes only one host,
    i.e. |remote|, was added to afe. Suite.schedule() will not
    schedule a job if none of the hosts in the afe (in our case,
    just one host |remote|) has a label that matches a requested
    test dependency.

    @param autotest_path: Absolute path to autotest (in sysroot or
                          custom autotest directory set by --autotest_dir).
    @param suite_predicate: callable that takes ControlData objects, and
                            returns True on those that should be in suite
    @param afe: afe object to schedule against (typically a directAFE)
    @param test_arg: String. An individual TEST command line argument, e.g.
                     'login_CryptohomeMounted' or 'suite:smoke'.
    @param remote: String representing the IP of the remote host.
    @param build: Build to schedule suite for.
    @param board: Board to schedule suite for.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.

    @raises ValueError: if no test matched |test_arg| (the message lists
                        similarly-named candidates).
    @returns: A LocalSuite object.
    """
    getter = suite.create_fs_getter(autotest_path)
    local_suite = LocalSuite.create_from_predicates(
            [suite_predicate],
            {provision.CROS_VERSION_PREFIX: build},
            constants.BOARD_PREFIX + board,
            dev_server.ImageServer(''), getter, afe=afe,
            ignore_deps=ignore_deps,
            results_dir=results_directory,
            forgiving_parser=False,
            job_retry=True)
    if not local_suite.tests:
        # Nothing matched; look for near-misses to suggest to the user.
        similarity_predicate, similarity_description = (
                get_predicate_for_possible_test_arg(test_arg))
        logging.error('No test found, searching for possible tests with %s',
                      similarity_description)
        possible_tests = suite.find_possible_tests(getter,
                                                   similarity_predicate)
        raise ValueError('Found no tests. Check your suite name, test name, '
                         'or test matching wildcard.\nDid you mean any of '
                         'following tests?\n  %s' % '\n  '.join(possible_tests))

    if not ignore_deps:
        # Log tests whose dependencies can't be satisfied by |remote|.
        host_labels = [label.name for label in
                       afe.get_labels(host__hostname=remote)]
        for test in local_suite.tests:
            if test.experimental and no_experimental:
                continue
            missing_deps = set(test.dependencies).difference(host_labels)
            if missing_deps:
                logging.warning('%s will be skipped, unsatisfiable '
                             'test dependencies: %s', test.name,
                             missing_deps)
    return local_suite
237
238
239def _run_autoserv(command, pretend=False):
240    """Run autoserv command.
241
242    Run the autoserv command and wait on it. Log the stdout.
243    Ensure that SIGINT signals are passed along to autoserv.
244
245    @param command: the autoserv command to run.
246    @returns: exit code of the command.
247
248    """
249    if not pretend:
250        logging.debug('Running autoserv command: %s', command)
251        global _autoserv_proc
252        _autoserv_proc = subprocess.Popen(command,
253                                          stdout=subprocess.PIPE,
254                                          stderr=subprocess.STDOUT)
255        # This incantation forces unbuffered reading from stdout,
256        # so that autoserv output can be displayed to the user
257        # immediately.
258        for message in iter(_autoserv_proc.stdout.readline, b''):
259            logging.info('autoserv| %s', message.strip())
260
261        _autoserv_proc.wait()
262        returncode = _autoserv_proc.returncode
263        _autoserv_proc = None
264    else:
265        logging.info('Pretend mode. Would run autoserv command: %s',
266                     command)
267        returncode = 0
268    return returncode
269
270
def run_provisioning_job(provision_label, host, autotest_path,
                         results_directory, fast_mode,
                         ssh_verbosity=0, ssh_options=None,
                         pretend=False, autoserv_verbose=False):
    """Shell out to autoserv to provision |host| to |provision_label|.

    @param provision_label: Label to provision the machine to.
    @param host: Hostname of DUT.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.

    @raises TestThatProvisioningError: if autoserv exits non-zero.
    @returns: Absolute path of directory where results were stored.
    """
    # TODO(fdeng): When running against a local DUT, autoserv
    # is still hitting the AFE in the lab.
    # provision_AutoUpdate checks the current build of DUT by
    # retrieving build info from AFE. crosbug.com/295178
    provision_results = os.path.join(results_directory, 'results-provision')
    provision_command = autoserv_utils.autoserv_run_job_command(
            os.path.join(autotest_path, 'server'),
            machines=host, job=None, verbose=autoserv_verbose,
            results_directory=provision_results,
            fast_mode=fast_mode, ssh_verbosity=ssh_verbosity,
            ssh_options=ssh_options,
            extra_args=['--provision', '--job-labels', provision_label],
            no_console_prefix=True)
    if _run_autoserv(provision_command, pretend) != 0:
        raise TestThatProvisioningError('Command returns non-zero code: %s ' %
                                        provision_command)
    return provision_results
309
310
def run_job(job, host, autotest_path, results_directory, fast_mode,
            id_digits=1, ssh_verbosity=0, ssh_options=None,
            args=None, pretend=False,
            autoserv_verbose=False, host_attributes=None):
    """
    Shell out to autoserv to run an individual test job.

    @param job: A Job object containing the control file contents and other
                relevant metadata for this test.
    @param host: Hostname of DUT to run test against.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param id_digits: The minimum number of digits that job ids should be
                      0-padded to when formatting as a string for results
                      directory.
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.
    @param host_attributes: Dict of host attributes to pass into autoserv,
                            or None for no attributes.

    @returns: a tuple, return code of the job and absolute path of directory
              where results were stored.
    """
    # None (rather than {}) as the default avoids the shared mutable
    # default-argument pitfall; treat None as an empty dict.
    if host_attributes is None:
        host_attributes = {}
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file.write(job.control_file)
        temp_file.flush()
        name_tail = job.name.split('/')[-1]
        results_directory = os.path.join(results_directory,
                                         'results-%0*d-%s' % (id_digits, job.id,
                                                              name_tail))
        # Drop experimental keyval in the keyval file in the job result folder.
        os.makedirs(results_directory)
        utils.write_keyval(results_directory,
                           {constants.JOB_EXPERIMENTAL_KEY: job.keyvals[
                                   constants.JOB_EXPERIMENTAL_KEY]})
        extra_args = [temp_file.name]
        if args:
            extra_args.extend(['--args', args])

        command = autoserv_utils.autoserv_run_job_command(
                os.path.join(autotest_path, 'server'),
                machines=host, job=job, verbose=autoserv_verbose,
                results_directory=results_directory,
                fast_mode=fast_mode, ssh_verbosity=ssh_verbosity,
                ssh_options=ssh_options,
                extra_args=extra_args,
                no_console_prefix=True,
                use_packaging=False,
                host_attributes=host_attributes)

        code = _run_autoserv(command, pretend)
        return code, results_directory
369
370
def setup_local_afe():
    """
    Setup a local afe database and return a direct_afe object to access it.

    @returns: A autotest_lib.frontend.afe.direct_afe instance.
    """
    # This import statement is delayed until now rather than running at
    # module load time, because it kicks off a local sqlite :memory: backed
    # database, and we don't need that unless we are doing a local run.
    # NOTE(review): import order presumably matters here —
    # setup_django_lite_environment appears to configure Django before
    # direct_afe is imported; confirm before reordering.
    from autotest_lib.frontend import setup_django_lite_environment
    from autotest_lib.frontend.afe import direct_afe
    return direct_afe.directAFE()
383
384
def get_predicate_for_test_arg(test):
    """
    Get a suite predicate function for a given command-line argument.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suite_match = re.match(_SUITE_REGEX, test)
    if suite_match:
        suitename = suite_match.group(1)
        return (suite.name_in_tag_predicate(suitename),
                'suite named %s' % suitename)
    name_match = re.match(r'e:(.*)', test)
    if name_match:
        pattern = '^%s$' % name_match.group(1)
        return (suite.test_name_matches_pattern_predicate(pattern),
                'suite to match name pattern %s' % pattern)
    file_match = re.match(r'f:(.*)', test)
    if file_match:
        pattern = '^%s$' % file_match.group(1)
        return (suite.test_file_matches_pattern_predicate(pattern),
                'suite to match file name pattern %s' % pattern)
    # No prefix recognized: treat the whole argument as an exact test name.
    return (suite.test_name_equals_predicate(test),
            'job named %s' % test)
412
413
def get_predicate_for_possible_test_arg(test):
    """
    Get a suite predicate that scores similarity against the given TEST arg.

    Used to suggest likely-intended tests when an exact lookup for |test|
    found nothing.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suite_match = re.match(_SUITE_REGEX, test)
    if suite_match:
        suitename = suite_match.group(1)
        return (suite.name_in_tag_similarity_predicate(suitename),
                'suite name similar to %s' % suitename)
    name_match = re.match(r'e:(.*)', test)
    if name_match:
        pattern = '^%s$' % name_match.group(1)
        return (suite.test_name_similarity_predicate(pattern),
                'job name similar to %s' % pattern)
    file_match = re.match(r'f:(.*)', test)
    if file_match:
        pattern = '^%s$' % file_match.group(1)
        return (suite.test_file_similarity_predicate(pattern),
                'suite to match file name similar to %s' % pattern)
    # No prefix recognized: compare against the raw argument.
    return (suite.test_name_similarity_predicate(test),
            'job name similar to %s' % test)
442
443
def add_ssh_identity(temp_directory, ssh_private_key=TEST_KEY_PATH):
    """Add an ssh identity to the agent.

    TODO (sbasi) b/26186193: Add support for test_droid and make TEST_KEY_PATH
    not Chrome OS specific.

    @param temp_directory: A directory to copy the |private key| into.
    @param ssh_private_key: Path to the ssh private key to use for testing.
    """
    # Add the testing key to the current ssh agent.
    # 'in' replaces dict.has_key(), which only exists on Python 2 and was
    # removed in Python 3; behavior is identical.
    if 'SSH_AGENT_PID' in os.environ:
        # Copy the testing key to the temp directory and make it NOT
        # world-readable. Otherwise, ssh-add complains.
        shutil.copy(ssh_private_key, temp_directory)
        key_copy_path = os.path.join(temp_directory,
                                     os.path.basename(ssh_private_key))
        os.chmod(key_copy_path, stat.S_IRUSR | stat.S_IWUSR)
        p = subprocess.Popen(['ssh-add', key_copy_path],
                             stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        p_out, _ = p.communicate()
        for line in p_out.splitlines():
            logging.info(line)
    else:
        logging.warning('There appears to be no running ssh-agent. Attempting '
                        'to continue without running ssh-add, but ssh commands '
                        'may fail.')
470
471
def _auto_detect_labels(afe, remote):
    """Automatically detect host labels and add them to the host in afe.

    Note that the label of board will not be auto-detected.
    This method assumes the host |remote| has already been added to afe.

    @param afe: A direct_afe object used to interact with local afe database.
    @param remote: The hostname of the remote device.

    @raises TestThatRunError: if |remote| is not present in the afe.
    """
    detected_labels = factory.create_host(remote).get_labels()
    # Board labels are deliberately excluded from auto-detection.
    non_board_labels = [label for label in detected_labels
                        if not label.startswith(constants.BOARD_PREFIX)]
    afe_label_names = []
    for label in non_board_labels:
        afe_label_names.append(afe.create_label(label).name)
    matching_hosts = afe.get_hosts(hostname=remote)
    if not matching_hosts:
        raise TestThatRunError('Unexpected error: %s has not '
                               'been added to afe.' % remote)
    matching_hosts[0].add_labels(afe_label_names)
495
496
def perform_local_run(afe, autotest_path, tests, remote, fast_mode,
                      build=NO_BUILD, board=NO_BOARD, args=None,
                      pretend=False, no_experimental=False,
                      ignore_deps=True,
                      results_directory=None, ssh_verbosity=0,
                      ssh_options=None,
                      autoserv_verbose=False,
                      iterations=1,
                      host_attributes=None):
    """Perform local run of tests.

    This method enforces satisfaction of test dependencies for tests that are
    run as a part of a suite.

    @param afe: A direct_afe object used to interact with local afe database.
    @param autotest_path: Absolute path of autotest installed in sysroot or
                          custom autotest path set by --autotest_dir.
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.
    @param iterations: int number of times to schedule tests.
    @param host_attributes: Dict of host attributes to pass into autoserv,
                            or None for no attributes.

    @returns: A list of return codes each job that has run. Or [1] if
              provision failed prior to running any jobs.
    """
    # None (rather than {}) as the default avoids the shared mutable
    # default-argument pitfall.
    if host_attributes is None:
        host_attributes = {}
    args = _set_default_servo_args(args)
    # Create host in afe, add board and build labels.
    cros_version_label = labellib.format_keyval_label(
        labellib.KeyvalLabel(labellib.Key.CROS_VERSION, build))

    build_label = afe.create_label(cros_version_label)
    board_label = afe.create_label(constants.BOARD_PREFIX + board)
    new_host = afe.create_host(remote)
    new_host.add_labels([build_label.name, board_label.name])
    if not ignore_deps:
        logging.info('Auto-detecting labels for %s', remote)
        _auto_detect_labels(afe, remote)
    # Provision the host to |build|.
    if build != NO_BUILD:
        logging.info('Provisioning %s...', cros_version_label)
        try:
            run_provisioning_job(cros_version_label, remote, autotest_path,
                                 results_directory, fast_mode,
                                 ssh_verbosity, ssh_options,
                                 pretend, autoserv_verbose)
        except TestThatProvisioningError as e:
            logging.error('Provisioning %s to %s failed, tests are aborted, '
                          'failure reason: %s',
                          remote, cros_version_label, e)
            return [1]

    # Create suites that will be scheduled.
    suites_and_descriptions = []
    for test in tests:
        (predicate, description) = get_predicate_for_test_arg(test)
        logging.info('Fetching suite for %s...', description)
        suite = fetch_local_suite(autotest_path, predicate, afe, test_arg=test,
                                  remote=remote,
                                  build=build, board=board,
                                  results_directory=results_directory,
                                  no_experimental=no_experimental,
                                  ignore_deps=ignore_deps)
        suites_and_descriptions.append((suite, description))

    jobs_to_suites = {}
    null_logger = lambda log_entry, log_in_subdir=False: None
    # Schedule the suites, looping over iterations if necessary.
    for iteration in range(iterations):
        if iteration > 0:
            logging.info('Repeating scheduling for iteration %d:', iteration)

        for suite, description in suites_and_descriptions:
            logging.info('Scheduling suite for %s...', description)
            ntests = suite.schedule(null_logger)
            # NOTE(review): the second count below always equals the first
            # (the comprehension filters nothing); looks like a leftover
            # from a retry-count computation.
            logging.debug('jobs: %s nonzero job_retries: %s',
                          len(suite._jobs_to_tests),
                          len([True for (job_id, test) in
                               suite._jobs_to_tests.items()]))
            logging.info('... scheduled %s job(s).', ntests)
            for job in suite.jobs:
                jobs_to_suites[job.id] = suite

    if not afe.get_jobs():
        logging.info('No jobs scheduled. End of local run.')
        return []

    last_job_id = afe.get_jobs()[-1].id
    job_id_digits = len(str(last_job_id))
    codes = []
    job_queue = afe.get_jobs()
    completed_job_ids = set()
    # Drain the queue; retries scheduled by handle_local_result() appear
    # as new not-yet-completed jobs on the next pass.
    while job_queue:
        logging.info('%s jobs in job queue', len(job_queue))
        for job in job_queue:
            suite = jobs_to_suites.get(job.id)
            if not suite:
                logging.error('Job %s not run, no associated suite.', job.id)
            else:
                logging.debug('Running job %s of test %s',
                              job.id, suite.test_name_from_job(job.id))
                code, abs_dir = run_job(
                    job, remote, autotest_path, results_directory,
                    fast_mode, job_id_digits, ssh_verbosity, ssh_options,
                    args, pretend, autoserv_verbose, host_attributes)
                codes.append(code)
                logging.debug("Code: %s, Results in %s", code, abs_dir)
                new_id = suite.handle_local_result(job.id, abs_dir,
                                                   null_logger)
                if new_id:
                    jobs_to_suites[new_id] = jobs_to_suites[job.id]
            completed_job_ids.add(job.id)
        all_jobs = afe.get_jobs(not_yet_run=True, running=True)
        new_jobs = set(job for job in all_jobs
                       if job.id not in completed_job_ids)
        logging.debug('%s incomplete jobs, %s jobs total',
                      len(new_jobs), len(all_jobs))
        job_queue = list(new_jobs)
    return codes
630
631
632def _set_default_servo_args(args):
633    """Add default servo arguments for backward compatibitlity.
634
635    See crbug.com/881006 for context.  Some servo related defaults were baked
636    into the autotest ServoHost code. These have now been deleted. A side effect
637    was that users of test_that relied on these defaults for some tests to work
638    magically in the chroot environment.
639
640    Current plan is to add back these defaults to test_that invocations for
641    backwards compatibility of these use cases. There is no planned removal date
642    for this hack.
643
644    @return modified args str.
645    """
646    # args is a str with whitespace separated key=value arguments.
647    # Avoid parsing args here (to avoid adding another implicit constraint on
648    # the exact args format) by adding defaults only in the obvious cases where
649    # relevant keys are entirely missing.
650    if args is None:
651        args = ''
652    if 'servo_host' not in args:
653        args += ' servo_host=localhost'
654    if 'servo_port' not in args:
655        args += ' servo_port=9999'
656    return args
657
658
def sigint_handler(signum, stack_frame):
    #pylint: disable-msg=C0111
    """Handle SIGINT or SIGTERM to a local test_that run.

    This handler sends a SIGINT to the running autoserv process,
    if one is running, giving it up to 5 seconds to clean up and exit. After
    the timeout elapses, autoserv is killed. In either case, after autoserv
    exits then this process exits with status 1.

    @param signum: Signal number received (unused).
    @param stack_frame: Current stack frame (unused).
    """
    # If multiple signals arrive before handler is unset, ignore duplicates
    # (non-blocking acquire fails for every caller after the first).
    if not _sigint_handler_lock.acquire(False):
        return
    try:
        # Ignore future signals by unsetting handler.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)

        logging.warning('Received SIGINT or SIGTERM. Cleaning up and exiting.')
        if _autoserv_proc:
            logging.warning('Sending SIGINT to autoserv process. Waiting up '
                            'to %s seconds for cleanup.',
                            _AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            _autoserv_proc.send_signal(signal.SIGINT)
            # retry.timeout wraps _autoserv_proc.wait with a deadline; the
            # first element of the returned tuple indicates a timeout.
            timed_out, _ = retry.timeout(_autoserv_proc.wait,
                    timeout_sec=_AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            if timed_out:
                _autoserv_proc.kill()
                logging.warning('Timed out waiting for autoserv to handle '
                                'SIGINT. Killed autoserv.')
    finally:
        # Releasing just before exiting is redundant (the process dies
        # either way) but harmless.
        _sigint_handler_lock.release() # this is not really necessary?
        sys.exit(1)
691
692
def create_results_directory(results_directory=None, board_name=None):
    """Create a results directory.

    If no directory is specified this method will create and return a
    temp directory to hold results. If a directory name is specified this
    method will create a directory at the given path, provided it doesn't
    already exist.

    @param results_directory: The path to the results_directory to create.
    @param board_name: Board name to embed in an auto-generated directory
                       name. Only used when results_directory is None.

    @return results_directory: A path to the results_directory, ready for use.
    """
    if results_directory is None:
        # Create a results_directory as subdir of /tmp
        dirname_prefix = 'test_that_results_'
        if board_name is not None:
            dirname_prefix += (board_name + '_')
        results_directory = tempfile.mkdtemp(prefix=dirname_prefix)
    else:
        # Delete results_directory if it already exists. A missing directory
        # (ENOENT) is fine; any other failure is a real error.
        try:
            shutil.rmtree(results_directory)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Create results_directory if it does not exist. Tolerate EEXIST in
        # case something recreated it between the rmtree and here.
        try:
            os.makedirs(results_directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    return results_directory
726
def generate_report(directory,
                    whitelist_chrome_crashes=False,
                    just_status_code=False, html_report=False):
    """Parse the test result files in the given directory into a report

    @param directory: string, the absolute path of the directory to look in
    @param whitelist_chrome_crashes: boolean, ignore Chrome crashes in the
    report. Default: False, report Chrome crashes.
    @param just_status_code: boolean, skip the report and only parse the files
    to determine whether there were failures. Default: False, generate report.
    @param html_report: boolean, also emit an HTML report into |directory|.

    @return the exit status of the report generator (non-zero on failure).
    """
    reporter = os.path.join(os.path.dirname(__file__), 'generate_test_report')
    # Experimental test results do not influence the exit code.
    cmd = [reporter, '--ignore_experimental_tests']
    if html_report:
        cmd += ['--html', '--html-report-dir=%s' % directory]
    if whitelist_chrome_crashes:
        cmd += ['--whitelist_chrome_crashes']
    if just_status_code:
        cmd += ['--just_status_code']
    cmd.append(directory)
    # First run prints to the console and yields the authoritative status.
    status_code = subprocess.call(cmd)
    if not just_status_code:
        # Second run captures the same report into test_report.log.
        log_path = os.path.join(directory, 'test_report.log')
        with open(log_path, 'w') as report_log:
            subprocess.call(cmd, stdout=report_log)
    return status_code
756
757
def perform_run_from_autotest_root(autotest_path, argv, tests, remote,
                                   build=NO_BUILD, board=NO_BOARD, args=None,
                                   pretend=False, no_experimental=False,
                                   ignore_deps=True,
                                   results_directory=None, ssh_verbosity=0,
                                   ssh_options=None,
                                   iterations=1, fast_mode=False, debug=False,
                                   whitelist_chrome_crashes=False,
                                   host_attributes=None):
    """
    Perform a test_that run, from the |autotest_path|.

    This function is to be called from test_that/test_droid's main() script,
    when tests are executed from the |autotest_path|. It handles all stages
    of a test run that come after the bootstrap into |autotest_path|.

    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param iterations: int number of times to schedule tests.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param debug: Logging and autoserv verbosity.
    @param whitelist_chrome_crashes: If True, whitelist chrome crashes.
    @param host_attributes: Dict of host attributes to pass into autoserv.
                            Defaults to None, treated as an empty dict.

    @returns: A return code that test_that should exit with.
    """
    # Avoid the mutable-default-argument pitfall: use a fresh dict per call
    # rather than one shared dict for the lifetime of the module.
    if host_attributes is None:
        host_attributes = {}

    if results_directory is None or not os.path.exists(results_directory):
        raise ValueError('Expected valid results directory, got %s' %
                          results_directory)

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=results_directory,
            use_console=True,
            verbose=debug,
            debug_log_name='test_that')
    logging.info('Began logging to %s', results_directory)

    logging.debug('test_that command line was: %s', argv)

    # Route SIGINT/SIGTERM through our handler so a running autoserv child
    # gets a chance to clean up before we exit.
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    afe = setup_local_afe()
    codes = perform_local_run(afe, autotest_path, tests, remote, fast_mode,
                      build, board,
                      args=args,
                      pretend=pretend,
                      no_experimental=no_experimental,
                      ignore_deps=ignore_deps,
                      results_directory=results_directory,
                      ssh_verbosity=ssh_verbosity,
                      ssh_options=ssh_options,
                      autoserv_verbose=debug,
                      iterations=iterations,
                      host_attributes=host_attributes)
    if pretend:
        logging.info('Finished pretend run. Exiting.')
        return 0

    final_result = generate_report(
        results_directory,
        whitelist_chrome_crashes=whitelist_chrome_crashes, html_report=True)
    # Refresh the "latest results" symlink; a missing link is not an error.
    try:
        os.unlink(_LATEST_RESULTS_DIRECTORY)
    except OSError:
        pass
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(_LATEST_RESULTS_DIRECTORY))
    if any(codes):
        logging.error('Autoserv encountered unexpected errors '
                      'when executing jobs.')
        # Ensure a non-zero exit even if the report itself parsed cleanly.
        final_result = final_result or 1
    os.symlink(link_target, _LATEST_RESULTS_DIRECTORY)
    logging.info('Finished running tests. Results can be found in %s or %s',
                 results_directory, _LATEST_RESULTS_DIRECTORY)
    return final_result
853