#!/usr/bin/python -u
# Copyright 2007-2008 Martin J. Bligh <mbligh@google.com>, Google Inc.
# Released under the GPL v2

"""
Run a control file through the server side engine
"""

import datetime
import contextlib
import getpass
import logging
import os
import re
import signal
import socket
import sys
import traceback
import time
import urllib2


import common
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import utils

try:
    from chromite.lib import metrics
except ImportError:
    metrics = utils.metrics_mock

try:
    from autotest_lib.puppylab import results_mocker
except ImportError:
    results_mocker = None

_CONFIG = global_config.global_config


# Number of seconds to wait before returning if testing mode is enabled
TESTING_MODE_SLEEP_SECS = 1


from autotest_lib.server import frontend
from autotest_lib.server import server_logging_config
from autotest_lib.server import server_job, utils, autoserv_parser, autotest
from autotest_lib.server import utils as server_utils
from autotest_lib.server import site_utils
from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
from autotest_lib.site_utils import job_directories
from autotest_lib.site_utils import job_overhead
from autotest_lib.site_utils import lxc
from autotest_lib.site_utils.lxc import utils as lxc_utils
from autotest_lib.client.common_lib import pidfile, logging_manager


# Control segment to stage server-side package.
STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE = server_job._control_segment_path(
        'stage_server_side_package')

# Command line to start servod in a moblab.
START_SERVOD_CMD = 'sudo start servod BOARD=%s PORT=%s'
STOP_SERVOD_CMD = 'sudo stop servod'
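# e.g. START_SERVOD_CMD % ('link', 9999) expands to
# 'sudo start servod BOARD=link PORT=9999' ('link' is an illustrative board).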

def log_alarm(signum, frame):
    logging.error("Received SIGALRM. Exiting.")
    sys.exit(1)


def _get_machines(parser):
    """Get a list of machine names from command line arg -m or a file.

    @param parser: Parser for the command line arguments.

    @return: A list of machine names from command line arg -m or the
             machines file specified in the command line arg -M.
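
    Example (illustrative): parsing '-m host1,host2 host2' yields the
    deduplicated, sorted list ['host1', 'host2'].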
    """
    if parser.options.machines:
        machines = parser.options.machines.replace(',', ' ').strip().split()
    else:
        machines = []
    machines_file = parser.options.machines_file
    if machines_file:
        machines = []
        with open(machines_file, 'r') as mf:
            machine_lines = mf.readlines()
        for m in machine_lines:
            # remove comments, spaces
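            # e.g. 'host1  # in lab A' becomes 'host1'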
            m = re.sub('#.*', '', m).strip()
            if m:
                machines.append(m)
        logging.debug('Read list of machines from file: %s', machines_file)
        logging.debug('Machines: %s', ','.join(machines))

    if machines:
        for machine in machines:
            if not machine or re.search(r'\s', machine):
                parser.parser.error("Invalid machine: %s" % str(machine))
        machines = list(set(machines))
        machines.sort()
    return machines


def _stage_ssp(parser):
    """Stage server-side package.

    This function calls a control segment to stage the server-side package
    based on the job and the autoserv command line options. The detailed
    implementation can differ for each host type. Currently, only CrosHost
    defines a stage_server_side_package function.
    The script returns None if no server-side package is available. However,
    it may raise an exception if it fails for reasons other than the artifact
    (the server-side package) not being found.

    @param parser: Command line arguments parser passed in the autoserv process.

    @return: (ssp_url, error_msg), where
              ssp_url is a url to the autotest server-side package. None if
              server-side package is not supported.
              error_msg is a string indicating the failures. None if server-
              side package is staged successfully.
    """
    machines_list = _get_machines(parser)
    machines_list = server_job.get_machine_dicts(
            machines_list, parser.options.lab, parser.options.host_attributes)

    # If test_source_build is not specified, default to using the server-side
    # test code from the build specified in --image.
    namespace = {'machines': machines_list,
                 'image': (parser.options.test_source_build or
                           parser.options.image),}
    script_locals = {}
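    # The control segment below runs via execfile() and is expected to set
    # 'ssp_url' and 'error_msg' in script_locals, matching the @return above.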
    execfile(STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE, namespace, script_locals)
    return script_locals['ssp_url'], script_locals['error_msg']


def _run_with_ssp(job, container_name, job_id, results, parser, ssp_url,
                  job_folder, machines):
    """Run the server job with server-side packaging.

    @param job: The server job object.
    @param container_name: Name of the container to run the test.
    @param job_id: ID of the test job.
    @param results: Folder to store results. This could be different from
                    parser.options.results:
                    parser.options.results can be set to None for results to be
                    stored in a temp folder.
                    results can be None if the autoserv run requires no logging.
    @param parser: Command line parser that contains the options.
    @param ssp_url: url of the staged server-side package.
    @param job_folder: Name of the job result folder.
    @param machines: A list of machines to run the test.
    """
    bucket = lxc.ContainerBucket()
    control = (parser.args[0] if len(parser.args) > 0 and parser.args[0] != ''
               else None)
    try:
        dut_name = machines[0] if len(machines) >= 1 else None
        test_container = bucket.setup_test(container_name, job_id, ssp_url,
                                           results, control=control,
                                           job_folder=job_folder,
                                           dut_name=dut_name)
    except Exception as e:
        job.record('FAIL', None, None,
                   'Failed to setup container for test: %s. Check logs in '
                   'ssp_logs folder for more details.' % e)
        raise

    args = sys.argv[:]
    args.remove('--require-ssp')
    # --parent_job_id is only useful when autoserv runs on the host, not in a
    # container. Including this argument will cause tests to fail for builds
    # before CL 286265 was merged.
    if '--parent_job_id' in args:
        index = args.index('--parent_job_id')
        args.remove('--parent_job_id')
        # Remove the parent job id value from the command line args.
        del args[index]

    # A dictionary of paths to replace in the command line. Each key is a path
    # to be replaced by the corresponding value.
    paths_to_replace = {}
    # Replace the control file path with the one in container.
    if control:
        container_control_filename = os.path.join(
                lxc.CONTROL_TEMP_PATH, os.path.basename(control))
        paths_to_replace[control] = container_control_filename
    # Update result directory with the one in container.
    container_result_dir = os.path.join(lxc.RESULT_DIR_FMT % job_folder)
    if parser.options.results:
        paths_to_replace[parser.options.results] = container_result_dir
    # Update parse_job directory with the one in container. The assumption is
    # that the result folder to be parsed is always the same as the results_dir.
    if parser.options.parse_job:
        paths_to_replace[parser.options.parse_job] = container_result_dir

    args = [paths_to_replace.get(arg, arg) for arg in args]
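    # Note: the substitution above is exact-match per argument; a path embedded
    # in a larger argument (e.g. a hypothetical '--results=/path' form) would
    # not be rewritten.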

    # Append --use-existing-results: the results directory is already created
    # and mounted in the container. Passing this arg avoids an exception.
    if '--use-existing-results' not in args:
        args.append('--use-existing-results')

    # Make sure autoserv running in the container uses a different pid file.
    if '--pidfile-label' not in args:
        args.extend(['--pidfile-label', 'container_autoserv'])

    cmd_line = ' '.join(["'%s'" % arg if ' ' in arg else arg for arg in args])
    logging.info('Run command in container: %s', cmd_line)
    success = False
    try:
        test_container.attach_run(cmd_line)
        success = True
    except Exception as e:
        # If the test run inside the container fails without generating any
        # log, write a message to status.log to help troubleshooting.
        debug_files = os.listdir(os.path.join(results, 'debug'))
        if not debug_files:
            job.record('FAIL', None, None,
                       'Failed to run test inside the container: %s. Check '
                       'logs in ssp_logs folder for more details.' % e)
        raise
    finally:
        metrics.Counter(
            'chromeos/autotest/experimental/execute_job_in_ssp').increment(
                fields={'success': success})
        test_container.destroy()


def correct_results_folder_permission(results):
    """Make sure the results folder has the right permission settings.

    For tests running with server-side packaging, the results folder is owned
    by root. This must be changed to the user running the autoserv process, so
    the parsing job can access the results folder.
    TODO(dshi): crbug.com/459344 Remove this function when the test container
    can be an unprivileged container.

    @param results: Path to the results folder.

    """
    if not results:
        return

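    # 'sudo -n' runs non-interactively: it fails immediately rather than
    # prompting for a password if passwordless sudo is not configured.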
    utils.run('sudo -n chown -R %s "%s"' % (os.getuid(), results))
    utils.run('sudo -n chgrp -R %s "%s"' % (os.getgid(), results))


def _start_servod(machine):
    """Try to start servod in moblab if it's not already running, or if it is
    running with a different board or port.

    @param machine: Name of the dut used for test.
    """
    if not utils.is_moblab():
        return

    logging.debug('Trying to start servod.')
    try:
        afe = frontend.AFE()
        board = server_utils.get_board_from_afe(machine, afe)
        hosts = afe.get_hosts(hostname=machine)
        servo_host = hosts[0].attributes.get('servo_host', None)
        servo_port = hosts[0].attributes.get('servo_port', 9999)
        if servo_host not in ['localhost', '127.0.0.1']:
            logging.warn('Starting servod is aborted. The dut\'s servo_host '
                         'attribute is not set to localhost.')
            return
    except (urllib2.HTTPError, urllib2.URLError):
        # Ignore the error if the RPC failed to get the board.
        logging.error('Failed to get board name from AFE. Starting servod is '
                      'aborted.')
        return

    try:
        pid = utils.run('pgrep servod').stdout
        cmd_line = utils.run('ps -fp %s' % pid).stdout
        if ('--board %s' % board in cmd_line and
            '--port %s' % servo_port in cmd_line):
            logging.debug('Servod is already running with given board and port.'
                          ' There is no need to restart servod.')
            return
        logging.debug('Servod is running with different board or port. '
                      'Stopping existing servod.')
        utils.run(STOP_SERVOD_CMD)
    except error.CmdError:
        # servod is not running.
        pass

    try:
        utils.run(START_SERVOD_CMD % (board, servo_port))
        logging.debug('Servod is started')
    except error.CmdError as e:
        logging.error('Servod failed to start, error: %s', e)


def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    @param pid_file_manager: PidFileManager used to monitor the autoserv process
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url to server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging. '
                     'However, no drone supporting server-side packaging was '
                     'found. The test will be executed in a drone without '
                     'server-side packaging support.')

    # send stdin to /dev/null
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create a separate process group if the process is not a process group
    # leader. This allows the autoserv process to keep running after the
    # caller process (drone manager call) exits.
    if os.getpid() != os.getpgid(0):
        os.setsid()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
            parser.options.results)
    container_name = (lxc.TEST_CONTAINER_NAME_FMT %
                      (job_or_task_id, time.time(), os.getpid()))
    job_folder = job_directories.get_job_folder_name(parser.options.results)

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug('Destroy container %s before aborting the autoserv '
                          'process.', container_name)
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get(container_name)
                if container:
                    container.destroy()
                else:
                    logging.debug('Container %s is not found.', container_name)
            except:
                # Handle any exception so the autoserv process can be aborted.
                logging.exception('Failed to destroy container %s.',
                                  container_name)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

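        # SIGKILL cannot be caught or ignored, so the whole process group is
        # guaranteed to die here rather than re-entering this SIGTERM handler.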
        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not available to
    # be imported in the chroot as part of VMTest, so try/except it.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOU's generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we receive a SIGALRM, be loud about it and exit.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server side tests that call shell scripts often depend on $USER being set
    # but depending on how you launch your autotest scheduler it may not be set.
    os.environ['USER'] = getpass.getuser()

    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging
    host_attributes = parser.options.host_attributes
    in_lab = bool(parser.options.lab)

    # can't be both a client and a server side test
    if client and server:
        parser.parser.error("Cannot specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    is_special_task = (verify or repair or cleanup or collect_crashinfo or
                       provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
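        # e.g. ssh_verbosity == 2 yields '-vv'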
    else:
        ssh_verbosity_flag = ''

    # We have a control file unless it's just a verify/repair/cleanup job
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    kwargs = {'group_name': group_name, 'tag': execution_tag,
              'disable_sysinfo': parser.options.disable_sysinfo}
    if parser.options.parent_job_id:
        kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        kwargs['control_filename'] = control_filename
    if host_attributes:
        kwargs['host_attributes'] = host_attributes
    kwargs['in_lab'] = in_lab
    job = server_job.server_job(control, parser.args[1:], results, label,
                                user, machines, client, parse_job,
                                ssh_user, ssh_port, ssh_pass,
                                ssh_verbosity_flag, ssh_options,
                                test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
    auto_start_servod = _CONFIG.get_config_value(
            'AUTOSERV', 'auto_start_servod', type=bool, default=False)

    site_utils.SetupTsMonGlobalState('autoserv', indirect=False,
                                     short_lived=True)
    try:
        try:
            if repair:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                job.repair(job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(job, container_name, job_or_task_id,
                                      results, parser, ssp_url, job_folder,
                                      machines)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    if collect_crashinfo:
                        # Update the ownership of files in result folder. If the
                        # job to collect crashinfo was running inside container
                        # (SSP) and crashed before correcting folder permission,
                        # the result folder might have wrong permission setting.
                        try:
                            correct_results_folder_permission(results)
                        except:
                            # Ignore any error as the user may not have root
                            # permission to run sudo command.
                            pass
                    metric_name = ('chromeos/autotest/experimental/'
                                   'autoserv_job_run_duration')
                    f = {'in_container': utils.is_in_container(),
                         'success': False}
                    with metrics.SecondsTimer(metric_name, fields=f) as c:
                        job.run(install_before, install_after,
                                verify_job_repo_url=verify_job_repo_url,
                                only_collect_crashinfo=collect_crashinfo,
                                skip_crash_collection=skip_crash_collection,
                                job_labels=job_labels,
                                use_packaging=(not no_use_packaging))
                        c['success'] = True

        finally:
            job.close()
            # A special task doesn't run parse, so the result summary needs to
            # be built here.
            if results and (repair or verify or reset or cleanup or provision):
                site_utils.collect_result_sizes(results)
    except:
        exit_code = 1
        traceback.print_exc()
    finally:
        metrics.Flush()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)


def record_autoserv(options, duration_secs):
    """Record autoserv end-to-end time in metadata db.

    @param options: parser options.
    @param duration_secs: How long autoserv has taken, in secs.
    """
    # Get machine hostname
    machines = options.machines.replace(
            ',', ' ').strip().split() if options.machines else []
    num_machines = len(machines)
    if num_machines > 1:
        # Skip the case where an atomic group is used.
        return
    elif num_machines == 0:
        machines.append('hostless')

    # Determine the status that will be reported.
    s = job_overhead.STATUS
    task_mapping = {
            'reset': s.RESETTING, 'verify': s.VERIFYING,
            'provision': s.PROVISIONING, 'repair': s.REPAIRING,
            'cleanup': s.CLEANING, 'collect_crashinfo': s.GATHERING}
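    # Filtering a dict iterates over its keys, so 'match' holds the names of
    # the task options that are set on the command line.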
    match = filter(lambda task: getattr(options, task, False) == True,
                   task_mapping)
    status = task_mapping[match[0]] if match else s.RUNNING
    is_special_task = status not in [s.RUNNING, s.GATHERING]
    job_or_task_id = job_directories.get_job_id_or_task_id(options.results)
    job_overhead.record_state_duration(
            job_or_task_id, machines[0], status, duration_secs,
            is_special_task=is_special_task)


def main():
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
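            # e.g. 'results.2016-08-15-12.00.00' (illustrative timestamp)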
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            # 'error_msg', not 'error', to avoid shadowing the error module.
            error_msg = ("Error: results directory already exists: %s\n" %
                         results)
            sys.stderr.write(error_msg)
            sys.exit(1)

        # Now that we have verified that there's no leftover results dir from
        # previous jobs, let's create the result dir, since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # If the job requires running with a server-side package, try to stage the
    # server-side package first. If staging fails because the autotest server
    # package does not exist, fall back to running the job without server-side
    # packaging. If the option warn_no_ssp is specified, autoserv is running in
    # a drone that does not support SSP, so there is no need to stage the
    # server-side package.
    ssp_url = None
    ssp_url_warning = False
    if not parser.options.warn_no_ssp and parser.options.require_ssp:
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have an autotest server package. Fall back to not
        # using the server-side package. Logging is postponed until logging
        # has been set up.
        ssp_url_warning = not ssp_url

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, autoserv is running in a drone
    # that does not support SSP, and a warning will be logged. Therefore, it
    # should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
                'Autoserv is required to run with server-side packaging. '
                'However, no server-side package can be found based on '
                '`--image`, host attribute job_repo_url or host OS version '
                'label. It could be that the build to test is older than the '
                'minimum version that supports server-side packaging. The test '
                'will be executed without using server-side packaging. '
                'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s", results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists and
            not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Initialize test_name so it is always defined; it is referenced later
    # when faking results in test mode.
    test_name = None
    try:
        # Take the first argument as control file name, get the test name from
        # the control file.
        if (len(parser.args) > 0 and parser.args[0] != '' and
            parser.options.machines):
            try:
                test_name = control_data.parse_control(parser.args[0],
                                                       raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug('Failed to retrieve test name from control file.')
                test_name = None
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # which label autoserv is invoked with by looking through the logs of a
    # test for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any([ex in parser.options.label
                      for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                            test_name if test_name else 'unknown-test',
                            parser.options.results, machine
                            ).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s', exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
