# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import errno
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import threading

import logging
# Turn the logging level to INFO before importing other autotest
# code, to avoid having failed import logging messages confuse the
# test_that user.
logging.basicConfig(level=logging.INFO)

import common
from autotest_lib.client.common_lib.cros import dev_server, retry
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.server.cros.dynamic_suite import suite, constants
from autotest_lib.server.cros import provision
from autotest_lib.server.hosts import factory
from autotest_lib.server import autoserv_utils
from autotest_lib.server import server_logging_config
from autotest_lib.server import utils
from autotest_lib.utils import labellib


_autoserv_proc = None
_sigint_handler_lock = threading.Lock()

_AUTOSERV_SIGINT_TIMEOUT_SECONDS = 5
NO_BOARD = 'ad_hoc_board'
NO_BUILD = 'ad_hoc_build'
_SUITE_REGEX = r'suite:(.*)'

_TEST_KEY_FILENAME = 'testing_rsa'
TEST_KEY_PATH = ('/mnt/host/source/src/scripts/mod_for_test_scripts/'
                 'ssh_keys/%s' % _TEST_KEY_FILENAME)

_LATEST_RESULTS_DIRECTORY = '/tmp/test_that_latest'


class TestThatRunError(Exception):
    """Raised if test_that encounters something unexpected while running."""


class TestThatProvisioningError(Exception):
    """Raised when it fails to provision the DUT to the requested build."""


def add_common_args(parser):
    """
    Add common arguments for both test_that and test_droid to their parser.

    @param parser: argparse.ArgumentParser object to add arguments to.
    """
    parser.add_argument('tests', nargs='+', metavar='TEST',
                        help='Run given test(s). Use suite:SUITE to specify '
                             'test suite. Use e:[NAME_PATTERN] to specify a '
                             'NAME-matching regular expression. Use '
                             'f:[FILE_PATTERN] to specify a filename-matching '
                             'regular expression. Specified regular '
                             'expressions will be implicitly wrapped in '
                             '^ and $.')
    parser.add_argument('--fast', action='store_true', dest='fast_mode',
                        default=False,
                        help='Enable fast mode. This will cause the test '
                             'runner to skip time-consuming steps like '
                             'sysinfo and collecting crash information.')
    parser.add_argument('--args', metavar='ARGS',
                        help='Whitespace-separated argument string to pass '
                             'through to test. Only supported for runs '
                             'against a local DUT. '
                             "e.g. --args='foo=bar cat=\"in a hat\"'.")
    parser.add_argument('--results_dir', metavar='RESULTS_DIR', default=None,
                        help='Instead of storing results in a new subdirectory'
                             ' of /tmp, store results in RESULTS_DIR. If '
                             'RESULTS_DIR already exists, it will be deleted.')
    parser.add_argument('--pretend', action='store_true', default=False,
                        help='Print autoserv commands that would be run, '
                             'rather than running them.')
    parser.add_argument('--no-experimental', action='store_true',
                        default=False, dest='no_experimental',
                        help='When scheduling a suite, skip any tests marked '
                             'as experimental. Applies only to tests scheduled'
                             ' via suite:[SUITE].')
    parser.add_argument('--enforce-deps', action='store_true',
                        default=False, dest='enforce_deps',
                        help='Skip tests whose DEPENDENCIES cannot '
                             'be satisfied.')
    parser.add_argument('--debug', action='store_true',
                        help='Include DEBUG level messages in stdout. Note: '
                             'these messages will be included in the output '
                             'log file regardless. In addition, turn on '
                             'autoserv verbosity.')
    parser.add_argument('--iterations', action='store', type=int, default=1,
                        help='Number of times to run the tests specified.')
    parser.add_argument('--ssh_verbosity', action='store', type=int,
                        choices=[0, 1, 2, 3], default=0,
                        help='Verbosity level for ssh, between 0 and 3 '
                             'inclusive.')
    parser.add_argument('--ssh_options', action='store', default=None,
                        help='A string giving additional options to be '
                             'added to ssh commands.')
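
# Illustrative usage sketch (not part of the module's runtime flow): a front
# end such as test_that's main() is expected to wire these flags up roughly
# like this, assuming a plain argparse.ArgumentParser:
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     add_common_args(parser)
#     arguments = parser.parse_args(['--fast', '--iterations', '2',
#                                    'suite:smoke'])
#     # arguments.tests == ['suite:smoke']; arguments.fast_mode is True.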


def fetch_local_suite(autotest_path, suite_predicate, afe, test_arg, remote,
                      build=NO_BUILD, board=NO_BOARD,
                      results_directory=None, no_experimental=False,
                      ignore_deps=True):
    """Create a suite from the given suite predicate.

    Satisfaction of dependencies is enforced by Suite.schedule() if
    ignore_deps is False. Note that this method assumes only one host,
    i.e. |remote|, was added to afe. Suite.schedule() will not
    schedule a job if none of the hosts in the afe (in our case,
    just one host |remote|) has a label that matches a requested
    test dependency.

    @param autotest_path: Absolute path to autotest (in sysroot or
                          custom autotest directory set by --autotest_dir).
    @param suite_predicate: Callable that takes ControlData objects and
                            returns True for those that should be in the
                            suite.
    @param afe: afe object to schedule against (typically a directAFE)
    @param test_arg: String. An individual TEST command line argument, e.g.
                     'login_CryptohomeMounted' or 'suite:smoke'.
    @param remote: String representing the IP of the remote host.
    @param build: Build to schedule suite for.
    @param board: Board to schedule suite for.
    @param results_directory: Absolute path of directory to store results in
                              (results will be stored in a subdirectory of
                              this).
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.

    @returns: A suite.Suite object.

    """
    fs_getter = suite.create_fs_getter(autotest_path)
    devserver = dev_server.ImageServer('')
    my_suite = suite.Suite.create_from_predicates([suite_predicate],
            {provision.CROS_VERSION_PREFIX: build},
            constants.BOARD_PREFIX + board,
            devserver, fs_getter, afe=afe,
            ignore_deps=ignore_deps,
            results_dir=results_directory, forgiving_parser=False)
    if not my_suite.tests:
        (similarity_predicate, similarity_description) = (
                get_predicate_for_possible_test_arg(test_arg))
        logging.error('No test found, searching for possible tests with %s',
                      similarity_description)
        possible_tests = suite.find_possible_tests(fs_getter,
                                                   similarity_predicate)
        raise ValueError('Found no tests. Check your suite name, test name, '
                         'or test matching wildcard.\nDid you mean any of '
                         'the following tests?\n  %s'
                         % '\n  '.join(possible_tests))

    if not ignore_deps:
        # Log tests whose dependencies can't be satisfied.
        labels = [label.name for label in
                  afe.get_labels(host__hostname=remote)]
        for test in my_suite.tests:
            if test.experimental and no_experimental:
                continue
            unsatisfiable_deps = set(test.dependencies).difference(labels)
            if unsatisfiable_deps:
                logging.warning('%s will be skipped, unsatisfiable '
                                'test dependencies: %s', test.name,
                                unsatisfiable_deps)
    return my_suite
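
# Minimal usage sketch (illustrative only; the IP and autotest path are
# assumptions, and in practice perform_local_run() below does this wiring):
#
#     afe = setup_local_afe()
#     afe.create_host('172.22.75.1')
#     predicate, description = get_predicate_for_test_arg('suite:smoke')
#     my_suite = fetch_local_suite('/usr/local/autotest', predicate, afe,
#                                  test_arg='suite:smoke',
#                                  remote='172.22.75.1')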


def _run_autoserv(command, pretend=False):
    """Run autoserv command.

    Run the autoserv command and wait on it. Log the stdout.
    Ensure that SIGINT signals are passed along to autoserv.

    @param command: the autoserv command to run.
    @param pretend: If True, log the command that would be run instead of
                    running it.
    @returns: exit code of the command.

    """
    if not pretend:
        logging.debug('Running autoserv command: %s', command)
        global _autoserv_proc
        _autoserv_proc = subprocess.Popen(command,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.STDOUT)
        # This incantation forces unbuffered reading from stdout,
        # so that autoserv output can be displayed to the user
        # immediately.
        for message in iter(_autoserv_proc.stdout.readline, b''):
            logging.info('autoserv| %s', message.strip())

        _autoserv_proc.wait()
        returncode = _autoserv_proc.returncode
        _autoserv_proc = None
    else:
        logging.info('Pretend mode. Would run autoserv command: %s',
                     command)
        returncode = 0
    return returncode
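
# Example (sketch): in pretend mode the command is only logged, so a dry run
# returns 0 without spawning a process. The path below is illustrative:
#
#     rc = _run_autoserv(['/usr/local/autotest/server/autoserv', '--help'],
#                        pretend=True)
#     assert rc == 0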


def run_provisioning_job(provision_label, host, autotest_path,
                         results_directory, fast_mode,
                         ssh_verbosity=0, ssh_options=None,
                         pretend=False, autoserv_verbose=False):
    """Shell out to autoserv to run provisioning job.

    @param provision_label: Label to provision the machine to.
    @param host: Hostname of DUT.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in
                              (results will be stored in a subdirectory of
                              this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If True, pass the --verbose flag to autoserv.

    @returns: Absolute path of directory where results were stored.

    """
    # TODO(fdeng): When running against a local DUT, autoserv
    # is still hitting the AFE in the lab.
    # provision_AutoUpdate checks the current build of the DUT by
    # retrieving build info from the AFE. crosbug.com/295178
    results_directory = os.path.join(results_directory, 'results-provision')
    command = autoserv_utils.autoserv_run_job_command(
            os.path.join(autotest_path, 'server'),
            machines=host, job=None, verbose=autoserv_verbose,
            results_directory=results_directory,
            fast_mode=fast_mode, ssh_verbosity=ssh_verbosity,
            ssh_options=ssh_options,
            extra_args=['--provision', '--job-labels', provision_label],
            no_console_prefix=True)
    if _run_autoserv(command, pretend) != 0:
        raise TestThatProvisioningError('Command returned a non-zero exit '
                                        'code: %s' % command)
    return results_directory


def run_job(job, host, autotest_path, results_directory, fast_mode,
            id_digits=1, ssh_verbosity=0, ssh_options=None,
            args=None, pretend=False,
            autoserv_verbose=False, host_attributes={}):
    """
    Shell out to autoserv to run an individual test job.

    @param job: A Job object containing the control file contents and other
                relevant metadata for this test.
    @param host: Hostname of DUT to run test against.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in
                              (results will be stored in a subdirectory of
                              this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param id_digits: The minimum number of digits that job ids should be
                      0-padded to when formatting as a string for results
                      directory.
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If True, pass the --verbose flag to autoserv.
    @param host_attributes: Dict of host attributes to pass into autoserv.

    @returns: A tuple of the job's return code and the absolute path of the
              directory where results were stored.
    """
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file.write(job.control_file)
        temp_file.flush()
        name_tail = job.name.split('/')[-1]
        results_directory = os.path.join(results_directory,
                                         'results-%0*d-%s' % (id_digits, job.id,
                                                              name_tail))
        # Record the experimental keyval in the keyval file in the job
        # results folder.
        os.makedirs(results_directory)
        utils.write_keyval(results_directory,
                           {constants.JOB_EXPERIMENTAL_KEY: job.keyvals[
                                   constants.JOB_EXPERIMENTAL_KEY]})
        extra_args = [temp_file.name]
        if args:
            extra_args.extend(['--args', args])

        command = autoserv_utils.autoserv_run_job_command(
                os.path.join(autotest_path, 'server'),
                machines=host, job=job, verbose=autoserv_verbose,
                results_directory=results_directory,
                fast_mode=fast_mode, ssh_verbosity=ssh_verbosity,
                ssh_options=ssh_options,
                extra_args=extra_args,
                no_console_prefix=True,
                use_packaging=False,
                host_attributes=host_attributes)

        code = _run_autoserv(command, pretend)
        return code, results_directory
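
# Worked example of the results-directory naming above: with id_digits=2 and
# a job with id 7 named 'dummy_Pass', results land in
# '<results_directory>/results-07-dummy_Pass', since
#
#     'results-%0*d-%s' % (2, 7, 'dummy_Pass') == 'results-07-dummy_Pass'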


def setup_local_afe():
    """
    Set up a local afe database and return a direct_afe object to access it.

    @returns: An autotest_lib.frontend.afe.direct_afe instance.
    """
    # This import statement is delayed until now rather than running at
    # module load time, because it kicks off a local sqlite :memory: backed
    # database, and we don't need that unless we are doing a local run.
    from autotest_lib.frontend import setup_django_lite_environment
    from autotest_lib.frontend.afe import direct_afe
    return direct_afe.directAFE()


def get_predicate_for_test_arg(test):
    """
    Gets a suite predicate function for a given command-line argument.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suitematch = re.match(_SUITE_REGEX, test)
    name_pattern_match = re.match(r'e:(.*)', test)
    file_pattern_match = re.match(r'f:(.*)', test)
    if suitematch:
        suitename = suitematch.group(1)
        return (suite.name_in_tag_predicate(suitename),
                'suite named %s' % suitename)
    if name_pattern_match:
        pattern = '^%s$' % name_pattern_match.group(1)
        return (suite.test_name_matches_pattern_predicate(pattern),
                'suite to match name pattern %s' % pattern)
    if file_pattern_match:
        pattern = '^%s$' % file_pattern_match.group(1)
        return (suite.test_file_matches_pattern_predicate(pattern),
                'suite to match file name pattern %s' % pattern)
    return (suite.test_name_equals_predicate(test),
            'job named %s' % test)
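
# Examples of how the TEST argument forms map to predicates (the comments
# show the description string returned above):
#
#     get_predicate_for_test_arg('suite:smoke')   # suite named smoke
#     get_predicate_for_test_arg('e:login_.*')    # name pattern ^login_.*$
#     get_predicate_for_test_arg('f:.*dummy.*')   # file pattern ^.*dummy.*$
#     get_predicate_for_test_arg('dummy_Pass')    # job named dummy_Pass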


def get_predicate_for_possible_test_arg(test):
    """
    Gets a suite predicate function to calculate the similarity of the given
    test and possible tests.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suitematch = re.match(_SUITE_REGEX, test)
    name_pattern_match = re.match(r'e:(.*)', test)
    file_pattern_match = re.match(r'f:(.*)', test)
    if suitematch:
        suitename = suitematch.group(1)
        return (suite.name_in_tag_similarity_predicate(suitename),
                'suite name similar to %s' % suitename)
    if name_pattern_match:
        pattern = '^%s$' % name_pattern_match.group(1)
        return (suite.test_name_similarity_predicate(pattern),
                'job name similar to %s' % pattern)
    if file_pattern_match:
        pattern = '^%s$' % file_pattern_match.group(1)
        return (suite.test_file_similarity_predicate(pattern),
                'suite to match file name similar to %s' % pattern)
    return (suite.test_name_similarity_predicate(test),
            'job name similar to %s' % test)


def add_ssh_identity(temp_directory, ssh_private_key=TEST_KEY_PATH):
    """Add an ssh identity to the agent.

    TODO (sbasi) b/26186193: Add support for test_droid and make TEST_KEY_PATH
    not Chrome OS specific.

    @param temp_directory: A directory to copy the |ssh_private_key| into.
    @param ssh_private_key: Path to the ssh private key to use for testing.
    """
    # Add the testing key to the current ssh agent.
    if 'SSH_AGENT_PID' in os.environ:
        # Copy the testing key to the temp directory and make it NOT
        # world-readable. Otherwise, ssh-add complains.
        shutil.copy(ssh_private_key, temp_directory)
        key_copy_path = os.path.join(temp_directory,
                                     os.path.basename(ssh_private_key))
        os.chmod(key_copy_path, stat.S_IRUSR | stat.S_IWUSR)
        p = subprocess.Popen(['ssh-add', key_copy_path],
                             stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        p_out, _ = p.communicate()
        for line in p_out.splitlines():
            logging.info(line)
    else:
        logging.warning('There appears to be no running ssh-agent. Attempting '
                        'to continue without running ssh-add, but ssh commands '
                        'may fail.')
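
# Typical usage sketch (the temporary directory is owned and cleaned up by
# the caller; the default key is the Chrome OS testing key at TEST_KEY_PATH):
#
#     temp_dir = tempfile.mkdtemp()
#     try:
#         add_ssh_identity(temp_dir)
#     finally:
#         shutil.rmtree(temp_dir)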


def _auto_detect_labels(afe, remote):
    """Automatically detect host labels and add them to the host in afe.

    Note that the board label will not be auto-detected.
    This method assumes the host |remote| has already been added to afe.

    @param afe: A direct_afe object used to interact with local afe database.
    @param remote: The hostname of the remote device.

    """
    cros_host = factory.create_host(remote)
    labels_to_create = [label for label in cros_host.get_labels()
                        if not label.startswith(constants.BOARD_PREFIX)]
    labels_to_add_to_afe_host = []
    for label in labels_to_create:
        new_label = afe.create_label(label)
        labels_to_add_to_afe_host.append(new_label.name)
    hosts = afe.get_hosts(hostname=remote)
    if not hosts:
        raise TestThatRunError('Unexpected error: %s has not '
                               'been added to afe.' % remote)
    afe_host = hosts[0]
    afe_host.add_labels(labels_to_add_to_afe_host)


def perform_local_run(afe, autotest_path, tests, remote, fast_mode,
                      build=NO_BUILD, board=NO_BOARD, args=None,
                      pretend=False, no_experimental=False,
                      ignore_deps=True,
                      results_directory=None, ssh_verbosity=0,
                      ssh_options=None,
                      autoserv_verbose=False,
                      iterations=1,
                      host_attributes={}):
    """Perform a local run of tests.

    This method enforces satisfaction of test dependencies for tests that are
    run as a part of a suite.

    @param afe: A direct_afe object used to interact with local afe database.
    @param autotest_path: Absolute path of autotest installed in sysroot or
                          custom autotest path set by --autotest_dir.
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp.
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils.
    @param autoserv_verbose: If True, pass the --verbose flag to autoserv.
    @param iterations: int number of times to schedule tests.
    @param host_attributes: Dict of host attributes to pass into autoserv.

    @returns: A list of return codes, one for each job that has run, or [1]
              if provisioning failed before any jobs were run.
    """
    # Create host in afe, add board and build labels.
    cros_version_label = labellib.format_keyval_label(
        labellib.KeyvalLabel(labellib.Key.CROS_VERSION, build))

    build_label = afe.create_label(cros_version_label)
    board_label = afe.create_label(constants.BOARD_PREFIX + board)
    new_host = afe.create_host(remote)
    new_host.add_labels([build_label.name, board_label.name])
    if not ignore_deps:
        logging.info('Auto-detecting labels for %s', remote)
        _auto_detect_labels(afe, remote)
    # Provision the host to |build|.
    if build != NO_BUILD:
        logging.info('Provisioning %s...', cros_version_label)
        try:
            run_provisioning_job(cros_version_label, remote, autotest_path,
                                 results_directory, fast_mode,
                                 ssh_verbosity, ssh_options,
                                 pretend, autoserv_verbose)
        except TestThatProvisioningError as e:
            logging.error('Provisioning %s to %s failed, tests are aborted, '
                          'failure reason: %s',
                          remote, cros_version_label, e)
            return [1]

    # Create suites that will be scheduled.
    suites_and_descriptions = []
    for test in tests:
        (predicate, description) = get_predicate_for_test_arg(test)
        logging.info('Fetching suite for %s...', description)
        suite = fetch_local_suite(autotest_path, predicate, afe, test_arg=test,
                                  remote=remote,
                                  build=build, board=board,
                                  results_directory=results_directory,
                                  no_experimental=no_experimental,
                                  ignore_deps=ignore_deps)
        suites_and_descriptions.append((suite, description))

    # Schedule the suites, looping over iterations if necessary.
    for iteration in range(iterations):
        if iteration > 0:
            logging.info('Repeating scheduling for iteration %d:', iteration)

        for suite, description in suites_and_descriptions:
            logging.info('Scheduling suite for %s...', description)
            ntests = suite.schedule(
                    lambda log_entry, log_in_subdir=False: None)
            logging.info('... scheduled %s job(s).', ntests)

    if not afe.get_jobs():
        logging.info('No jobs scheduled. End of local run.')
        return []

    last_job_id = afe.get_jobs()[-1].id
    job_id_digits = len(str(last_job_id))
    codes = []
    job_queue = afe.get_jobs()
    completed_job_ids = set()
    while job_queue:
        for job in job_queue:
            code, _ = run_job(job, remote, autotest_path, results_directory,
                              fast_mode, job_id_digits, ssh_verbosity,
                              ssh_options, args, pretend, autoserv_verbose,
                              host_attributes)
            completed_job_ids.add(job.id)
            codes.append(code)
        new_jobs = set(job for job in afe.get_jobs(not_yet_run=True,
                                                   running=True)
                       if job.id not in completed_job_ids)
        job_queue = list(new_jobs)
    return codes


def sigint_handler(signum, stack_frame):
    #pylint: disable-msg=C0111
    """Handle SIGINT or SIGTERM to a local test_that run.

    This handler sends a SIGINT to the running autoserv process, if one is
    running, giving it up to 5 seconds to clean up and exit. If that timeout
    elapses, autoserv is killed. In either case, once autoserv exits, this
    process exits with status 1.
    """
    # If multiple signals arrive before the handler is unset, ignore the
    # duplicates.
    if not _sigint_handler_lock.acquire(False):
        return
    try:
        # Ignore future signals by unsetting handler.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)

        logging.warning('Received SIGINT or SIGTERM. Cleaning up and exiting.')
        if _autoserv_proc:
            logging.warning('Sending SIGINT to autoserv process. Waiting up '
                            'to %s seconds for cleanup.',
                            _AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            _autoserv_proc.send_signal(signal.SIGINT)
            timed_out, _ = retry.timeout(_autoserv_proc.wait,
                    timeout_sec=_AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            if timed_out:
                _autoserv_proc.kill()
                logging.warning('Timed out waiting for autoserv to handle '
                                'SIGINT. Killed autoserv.')
    finally:
        # Not strictly necessary since we exit below, but release the lock
        # for consistency.
        _sigint_handler_lock.release()
        sys.exit(1)
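
# These handlers are installed by perform_run_from_autotest_root() below:
#
#     signal.signal(signal.SIGINT, sigint_handler)
#     signal.signal(signal.SIGTERM, sigint_handler)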


def create_results_directory(results_directory=None):
    """Create a results directory.

    If no directory is specified, this method will create and return a
    temp directory to hold results. If a directory name is specified, this
    method will delete any directory that already exists at that path and
    then create a fresh directory there.

    @param results_directory: The path to the results_directory to create.

    @return results_directory: A path to the results_directory, ready for use.
    """
    if results_directory is None:
        # Create a results_directory as subdir of /tmp.
        results_directory = tempfile.mkdtemp(prefix='test_that_results_')
    else:
        # Delete results_directory if it already exists.
        try:
            shutil.rmtree(results_directory)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Create results_directory if it does not exist.
        try:
            os.makedirs(results_directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    return results_directory
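
# Behavior sketch (paths are illustrative): with no argument a fresh
# /tmp/test_that_results_* directory is created; with an explicit path any
# existing directory there is removed and recreated empty.
#
#     results_dir = create_results_directory()
#     results_dir = create_results_directory('/tmp/my_test_that_run')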


def perform_run_from_autotest_root(autotest_path, argv, tests, remote,
                                   build=NO_BUILD, board=NO_BOARD, args=None,
                                   pretend=False, no_experimental=False,
                                   ignore_deps=True,
                                   results_directory=None, ssh_verbosity=0,
                                   ssh_options=None,
                                   iterations=1, fast_mode=False, debug=False,
                                   whitelist_chrome_crashes=False,
                                   host_attributes={}):
    """
    Perform a test_that run, from the |autotest_path|.

    This function is to be called from test_that/test_droid's main() script,
    when tests are executed from the |autotest_path|. It handles all stages
    of a test run that come after the bootstrap into |autotest_path|.

    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp.
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils.
    @param iterations: int number of times to schedule tests.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param debug: If True, include DEBUG messages in stdout and pass autoserv
                  the --verbose flag.
    @param whitelist_chrome_crashes: If True, whitelist chrome crashes.
    @param host_attributes: Dict of host attributes to pass into autoserv.

    @returns: A return code that test_that should exit with.
    """
    if results_directory is None or not os.path.exists(results_directory):
        raise ValueError('Expected valid results directory, got %s' %
                         results_directory)

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=results_directory,
            use_console=True,
            verbose=debug,
            debug_log_name='test_that')
    logging.info('Began logging to %s', results_directory)

    logging.debug('test_that command line was: %s', argv)

    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    afe = setup_local_afe()
    codes = perform_local_run(afe, autotest_path, tests, remote, fast_mode,
                      build, board,
                      args=args,
                      pretend=pretend,
                      no_experimental=no_experimental,
                      ignore_deps=ignore_deps,
                      results_directory=results_directory,
                      ssh_verbosity=ssh_verbosity,
                      ssh_options=ssh_options,
                      autoserv_verbose=debug,
                      iterations=iterations,
                      host_attributes=host_attributes)
    if pretend:
        logging.info('Finished pretend run. Exiting.')
        return 0

    test_report_command = [os.path.join(os.path.dirname(__file__),
                                        'generate_test_report')]
    # Experimental test results do not influence the exit code.
    test_report_command.append('--ignore_experimental_tests')
    if whitelist_chrome_crashes:
        test_report_command.append('--whitelist_chrome_crashes')
    test_report_command.append(results_directory)
    final_result = subprocess.call(test_report_command)
    with open(os.path.join(results_directory, 'test_report.log'),
              'w') as report_log:
        subprocess.call(test_report_command, stdout=report_log)
    try:
        os.unlink(_LATEST_RESULTS_DIRECTORY)
    except OSError:
        pass
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(_LATEST_RESULTS_DIRECTORY))
    if any(codes):
        logging.error('Autoserv encountered unexpected errors '
                      'when executing jobs.')
        final_result = final_result or 1
    os.symlink(link_target, _LATEST_RESULTS_DIRECTORY)
    logging.info('Finished running tests. Results can be found in %s or %s',
                 results_directory, _LATEST_RESULTS_DIRECTORY)
    return final_result