• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (c) 2017 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""
6Convenience functions for use by tests or whomever.
7
8There's no really good way to do this, as this isn't a class we can do
9inheritance with, just a collection of static methods.
10"""
11
12# pylint: disable=missing-docstring
13
import StringIO
import errno
import functools
import inspect
import itertools
import logging
import os
import pickle
import random
import re
import resource
import select
import shutil
import signal
import socket
import string
import struct
import subprocess
import textwrap
import time
import urllib2
import urlparse
import uuid
import warnings
37
38try:
39    import hashlib
40except ImportError:
41    import md5
42    import sha
43
44import common
45
46from autotest_lib.client.common_lib import env
47from autotest_lib.client.common_lib import error
48from autotest_lib.client.common_lib import global_config
49from autotest_lib.client.common_lib import logging_manager
50from autotest_lib.client.common_lib import metrics_mock_class
51from autotest_lib.client.cros import constants
52
53from autotest_lib.client.common_lib.lsbrelease_utils import *
54
55
def deprecated(func):
    """Decorator which marks a function as deprecated.

    Calling the wrapped function emits a DeprecationWarning (routed to
    logging.warning by the module-level custom_warning_handler) before
    invoking the original function unchanged.

    @param func: The callable to mark as deprecated.
    @return: A wrapper exposing func's name, docstring and attributes.
    """
    # functools.wraps copies __name__/__doc__/__dict__ like the old manual
    # code did, and additionally preserves __module__.
    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
67
68
69class _NullStream(object):
70    def write(self, data):
71        pass
72
73
74    def flush(self):
75        pass
76
77
# Sentinel: tee the subprocess stream into the logging system.
TEE_TO_LOGS = object()
# Shared do-nothing stream returned for streams that were not requested.
_the_null_stream = _NullStream()

# Sentinel: discard the subprocess stream entirely (like > /dev/null).
DEVNULL = object()

# Default logging levels used when tee'ing subprocess output to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')
94
def custom_warning_handler(message, category, filename, lineno, file=None,
                           line=None):
    """Route Python warnings into logging at WARNING level.

    The |file| argument is accepted for API compatibility but ignored.
    """
    formatted = warnings.formatwarning(message, category, filename, lineno,
                                       line)
    logging.warning(formatted)

# Install the handler so every warnings.warn() call lands in the logs.
warnings.showwarning = custom_warning_handler
102
def get_stream_tee_file(stream, level, prefix=''):
    """Map a caller-supplied stream spec to the object output is written to.

    @param stream: None, DEVNULL, TEE_TO_LOGS or a file-like object.
    @param level: Logging level used when |stream| is TEE_TO_LOGS.
    @param prefix: Prefix prepended to tee'd log lines.
    @return: A writable file-like object, or None meaning "discard".
    """
    # The three sentinels are mutually exclusive, so check order is free.
    if stream is DEVNULL:
        return None
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
111
112
113def _join_with_nickname(base_string, nickname):
114    if nickname:
115        return '%s BgJob "%s" ' % (base_string, nickname)
116    return base_string
117
118
119# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
120# master-ssh connection process, while fixing underlying
121# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A subprocess started in the background with caller-driven I/O pumping.

    The subprocess is launched in __init__; its stdout/stderr pipes are
    drained by process_output() (normally driven by join_bg_jobs /
    _wait_for_commands) and the collected output is stored on self.result
    by cleanup().
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stderr_level: A logging level value. If stderr_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stderr output will be logged at. Ignored
                             otherwise.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the master-ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        # Unjoinable jobs are never process_output()'d, so their pipes would
        # fill up and stall the child; both streams must be discarded.
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, DEFAULT_STDOUT_LEVEL,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            # Copy so the caller's dict (or os.environ) is never mutated.
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # A list is passed to Popen verbatim; anything else is run through
        # a bash shell as a single command string.
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        # The /dev/null handle only needs to live across the Popen call;
        # the child keeps its own inherited copy of the fd.
        with open('/dev/null', 'w') as devnull:
            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=True)

        self._cleanup_called = False
        # In-memory accumulators for the subprocess output; None when the
        # corresponding stream is discarded via DEVNULL.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else StringIO.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else StringIO.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        # Pipe is None when the stream was redirected to /dev/null at Popen
        # time; nothing to drain.
        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            # Zero-timeout select: loop only while data is immediately
            # available; an empty read means EOF.
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even if a tee/close raised, so retries warn instead
            # of double-closing.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        # Restore default SIGPIPE handling in the child (Python ignores it
        # by default). Skipped under mod_wsgi, presumably because signal
        # manipulation is restricted there -- see env.IN_MOD_WSGI.
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
313
314
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    # '!L' is an unsigned 32-bit integer in network byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
318
319
def long_to_ip(number):
    """Convert a 32-bit integer back into a dotted-quad IPv4 string."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
323
324
def create_subnet_mask(bits):
    """Return the 32-bit integer netmask for a /|bits| IPv4 subnet."""
    # |bits| ones shifted into the high end of a 32-bit word.
    return ((1 << bits) - 1) << (32 - bits)
327
328
def format_ip_with_mask(ip, mask_bits):
    """Return |ip| masked to its subnet, in 'a.b.c.d/bits' CIDR form."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
332
333
def normalize_hostname(alias):
    """Resolve |alias| to its canonical hostname via forward + reverse DNS."""
    return socket.gethostbyaddr(socket.gethostbyname(alias))[0]
337
338
def get_ip_local_port_range():
    """Return the kernel's (lower, upper) ephemeral port range as ints."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    return (int(match.group(1)), int(match.group(2)))
343
344
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral port range via the sysctl proc file."""
    line = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', line)
348
349
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline.

    The file handle is now closed deterministically (the previous version
    left it open until garbage collection).
    """
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()
352
353
def read_file(filename):
    """Return the entire contents of |filename| as a string."""
    with open(filename) as f:
        return f.read()
360
361
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    @return: The selected field as a string, or None (with a message
             printed) when no line starts with |linestart|.
    """
    # The lookbehind anchors the match to the first line beginning with
    # |linestart|; group(1) is the rest of that line.
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find != None:
        return re.split("%s" % sep, find.group(1))[param]
    else:
        print "There is no line which starts with %s in data." % linestart
        return None
384
385
def write_one_line(filename, line):
    """Overwrite |filename| with |line|, ensuring a single trailing newline."""
    data = str(line).rstrip('\n') + '\n'
    open_write_close(filename, data)
388
389
def open_write_close(filename, data):
    """Write |data| to |filename| (truncating), closing the file on all paths."""
    with open(filename, 'w') as f:
        f.write(data)
396
397
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.
    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Resolve the relative path against the supplied base directory.
        path = os.path.join(base_dir, path)
    if os.path.isfile(path):
        return path
    raise error.TestFail('ERROR: Unable to find %s' % path)
417
418
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    @return: The table as a multi-line, left-aligned string.
    """
    if type(header) is list:
        header = tuple(header)
    lengths = []
    # Seed the per-column widths with the header widths, if given.
    if header:
        for column in header:
            lengths.append(len(column))
    # Scan every cell to find the widest entry of each column.
    for row in matrix:
        for i, column in enumerate(row):
            # NOTE(review): python 2 idiom -- widths are measured on the
            # UTF-8 byte encoding, so multi-byte characters may over-pad.
            column = unicode(column).encode("utf-8")
            cl = len(column)
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                # Row has more columns than seen so far; grow the list.
                lengths.append(cl)

    lengths = tuple(lengths)
    # Build one "%-<width>s " specifier per column, newline-terminated.
    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
460
461
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.

    @return: Dict mapping keys to int, float or string values; empty dict
             when the file does not exist.
    @raises ValueError: On a malformed (non-comment, non-blank) line.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' guarantees the file is closed even when a malformed line
    # raises ValueError (the previous code leaked the handle then).
    with open(path) as f:
        for line in f:
            # Strip trailing comments and whitespace; skip blank lines.
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            # Coerce values that look like ints or floats; everything
            # else is kept as the raw string.
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
501
502
def write_keyval(path, dictionary, type_tag=None):
    """
    Append key-value pairs from |dictionary| to a keyval file.

    The file is opened in append mode, so existing content is neither
    overwritten nor reparsed. Keys are written in sorted order.

    If type_tag is None, keys must consist of alphanumerics, dashes,
    underscores and dots. Otherwise type_tag must be 'attr' or 'perf'
    and every key must carry a '{type_tag}' suffix.

    @param path: full path of the file to be written (or a directory,
            in which case '<path>/keyval' is used)
    @param dictionary: the items to write
    @param type_tag: see text above
    @raises ValueError: on an invalid type_tag or key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    # Build the key validator; note the file is (historically) opened
    # before the tag is validated.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    elif type_tag in ('attr', 'perf'):
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape(type_tag))
    else:
        raise ValueError('Invalid type tag: %s' % type_tag)
    try:
        for key in sorted(dictionary):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
536
537
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
543
544
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # urllib2.urlopen here takes no timeout argument, so the global socket
    # default timeout is swapped in for the duration of the call.
    previous_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        # Always restore the process-wide default, even on failure.
        socket.setdefaulttimeout(previous_timeout)
555
556
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # The urllib2 response object predates context-manager support,
        # so only the destination file uses 'with'.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
570
571
def hash(type, input=None):
    """
    Return a new md5 or sha1 hash object, optionally pre-fed with |input|.

    Encapsulates hash-object creation so it behaves identically on
    python 2.4 (md5/sha modules) and python 2.6 (hashlib) without
    deprecation warnings. Only 'md5' and 'sha1' are accepted, even though
    newer hashlib supports more algorithms.

    @param type: Hash algorithm name, 'md5' or 'sha1'.
    @param input: Optional string used to update the new hash object.
    @raises ValueError: For any other |type|.
    """
    if type not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        digest = hashlib.new(type)
    except NameError:
        # hashlib is missing on python 2.4; fall back to the legacy modules.
        digest = md5.new() if type == 'md5' else sha.new()

    if input:
        digest.update(input)

    return digest
600
601
def get_file(src, dest, permissions=None):
    """Copy |src| (a local path or http/ftp URL) to |dest|.

    @param src: Source path or URL.
    @param dest: Destination file path.
    @param permissions: Optional mode bits applied to dest via os.chmod.
    @return: dest, or None when src and dest are the same path.
    """
    if src == dest:
        return

    fetch = urlretrieve if is_url(src) else shutil.copyfile
    fetch(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
615
616
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        # Plain local file: just resolve it against the source directory.
        return os.path.join(srcdir, src)
    # URL: download into destdir under the URL's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
635
636
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: Directory whose installed version is tracked via a
            pickled '.version' file inside it.
    @param preserve_srcdir: If True, never delete srcdir before reinstall.
    @param new_version: Version identifier to compare against and record.
    @param install: Callable invoked with *args/**dargs when out of date.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # 'with' closes the handle (previously leaked); binary mode is
        # protocol-0 compatible and required by python 3 pickle.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # Record the new version only if install() actually produced srcdir.
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
661
662
def get_stderr_level(stderr_is_expected):
    """Pick the log level for stderr: expected stderr logs like stdout."""
    return DEFAULT_STDOUT_LEVEL if stderr_is_expected else DEFAULT_STDERR_LEVEL
667
668
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=(), nickname=None, ignore_timeout=False,
        env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same level
            as stdout
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    # A string for |args| would silently be iterated character by character,
    # so reject it explicitly.
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # In some cases, command will actually be a list
    # (For example, see get_user_hash in client/cros/cryptohome.py.)
    # So, to cover that case, detect if it's a string or not and convert it
    # into one if necessary.
    if not isinstance(command, basestring):
        command = ' '.join([sh_quote_word(arg) for arg in command])

    # Shell-quote each extra argument and append it to the command line.
    command = ' '.join([command] + [sh_quote_word(arg) for arg in args])
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    try:
        # Launch as a single BgJob and block until it finishes (or times out).
        bg_job = join_bg_jobs(
            (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                   stderr_level=get_stderr_level(stderr_is_expected),
                   nickname=nickname, env=env, extra_paths=extra_paths),),
            timeout)[0]
    except error.CmdTimeoutError:
        if not ignore_timeout:
            raise
        return None

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
741
742
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: List of command strings to launch concurrently.
    @param timeout: Collective timeout in seconds for all jobs.
    @param ignore_status: If False, raise CmdError for any non-zero exit.
    @param stdout_tee: Tee destination shared by all jobs (see BgJob).
    @param stderr_tee: Likewise for stderr.
    @param nicknames: Optional list of logging nicknames, paired
            positionally with commands. (Was a mutable default argument.)

    @return: a list of CmdResult objects
    """
    if nicknames is None:
        nicknames = []
    bg_jobs = []
    for (command, nickname) in itertools.izip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the failing job's own command; the old code re-used the
            # leaked loop variable and always named the last command launched.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
770
771
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return (job.sp, job.result)
777
778
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    @param bg_jobs: Sequence of BgJob objects to wait on.
    @param timeout: Optional collective timeout in seconds for all jobs.

    @return: The same list of bg_jobs objects that was passed in.
    @raises error.InvalidBgJobCall: If any job is marked unjoinable.
    @raises error.CmdTimeoutError: If the jobs did not all finish in time.
    """
    if any(bg_job.unjoinable for bg_job in bg_jobs):
        raise error.InvalidBgJobCall(
                'join_bg_jobs cannot be called for unjoinable bg_job')

    timeout_error = False
    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Drain any remaining stdout and stderr after the wait loop ends.
            bg_job.process_output(stdout=True,final_read=True)
            bg_job.process_output(stdout=False,final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)


    return bg_jobs
814
815
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # fds polled for reading (job stdout/stderr) and writing (job stdin).
    read_list = []
    write_list = []
    # Maps each polled fd back to its job: read fds map to
    # (bg_job, is_stdout), write fds map to the bg_job itself.
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            # v[0] is the error number (Python 2 select.error tuple form).
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        # Reap any jobs that exited since the last pass.
        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            # Everything exited before the deadline: not a timeout.
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
923
924
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # Some other I/O problem; propagate it.
            raise
        # The stat file disappeared: the process is gone.
        return False

    # Field 2 of /proc/<pid>/stat is the process state; 'Z' means zombie.
    return stat_line.split()[2] != 'Z'
942
943
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to five seconds for the process to disappear.
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
962
963
def nuke_subprocess(subproc):
    """Terminate a subprocess.Popen, escalating from SIGTERM to SIGKILL.

    @param subproc: the subprocess.Popen object to kill.
    @return the process exit status, or None if it could not be killed.
    """
    # Nothing to do if the subprocess already exited.
    status = subproc.poll()
    if status is not None:
        return status

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
976
977
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """
    Kill a process id via an escalating series of signals.

    @param pid: the pid to kill.
    @param signal_queue: signals to try, in order, until one of them
            terminates the process.

    @raise error.AutoservPidAlreadyDeadError: if there is no /proc entry
            for the pid (i.e. the process is already dead).
    @raise error.AutoservRunError: if no signal in signal_queue managed
            to terminate the process.
    """
    if not os.path.exists('/proc/%d/' % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Interpolate the pid into the message; the old code passed it as a
        # second constructor argument, so it never appeared in the %s slot.
        raise error.AutoservPidAlreadyDeadError(
                'Could not kill nonexistent pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
994
995
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param command: command line string to run.
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1010
1011
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run commands in parallel and return their exit statuses.

    @param commands: list of command strings to run.
    @param timeout: timeout applied to the whole set of commands.
    @param ignore_status: passed through to run_parallel.

    @return list of exit statuses, in the same order as commands.
    """
    jobs = run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                        stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [job.exit_status for job in jobs]
1018
1019
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                     args=args)
    else:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     args=args)
    out = result.stdout
    # Trim a single trailing newline, if present.
    if out.endswith('\n'):
        out = out[:-1]
    return out
1050
1051
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """
    Run commands in parallel and return their stdout outputs.

    @param commands: list of command strings to run.
    @param timeout: timeout applied to the whole set of commands.
    @param ignore_status: do not raise on non-zero exit status.
    @param retain_output: also tee each command's stdout/stderr to the logs.

    @return list of stdout strings, one per command, each with a single
            trailing newline (if any) removed.
    """
    if retain_output:
        bg_jobs = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        bg_jobs = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    # Strip one trailing newline from each output. (The previous code
    # compared a slice of the *list* against '\n', which never matched,
    # so outputs always kept their trailing newlines.)
    return [job.stdout[:-1] if job.stdout.endswith('\n') else job.stdout
            for job in bg_jobs]
1065
1066
def strip_unicode(input):
    """Recursively replace unicode strings in input with byte strings.

    Plain lists and dicts are rebuilt element by element (dict keys are
    also converted via str()); any other type — including subclasses of
    list/dict, which the exact type() checks deliberately exclude — is
    returned unchanged.
    """
    if type(input) == list:
        return [strip_unicode(element) for element in input]
    elif type(input) == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input.items())
    elif type(input) == unicode:
        return str(input)
    else:
        return input
1079
1080
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: the callable to time.
    @param args: positional arguments passed through to function.
    @param dargs: keyword arguments passed through to function.

    @return tuple (cpu_percent, function_return_value).
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage.  Use the named ru_utime/ru_stime fields
    # rather than slicing zip(...) — the old zip(...)[:2] relied on zip
    # returning a list (Python 2 only) and obscured which fields were used.
    s_user = self_post.ru_utime - self_pre.ru_utime
    s_system = self_post.ru_stime - self_pre.ru_stime
    c_user = child_post.ru_utime - child_pre.ru_utime
    c_system = child_post.ru_stime - child_pre.ru_stime
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
1101
1102
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    If specified, run_function should return a CmdResult object and throw a
    CmdError exception.
    If run_function is anything other than utils.run(), it is used to
    execute the commands. By default (when set to utils.run()) this will
    just examine os.uname()[4].
    """
    if run_function == run:
        # Common (local) case: inspect uname directly, no subprocess needed.
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise, use the run_function in case it hits a remote machine.
    machine = run_function('/bin/uname -m').stdout.rstrip()
    return 'i386' if re.match(r'i\d86$', machine) else machine
1122
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).

    @param run_function: used to execute the commands; defaults to utils.run
            and may target a remote machine.
    """
    archs = {
        'arm': 'ELF 32-bit.*, ARM,',
        'i386': 'ELF 32-bit.*, Intel 80386,',
        'x86_64': 'ELF 64-bit.*, x86-64,',
    }

    cmd = 'file --brief --dereference /bin/sh'
    filestr = run_function(cmd).stdout.rstrip()
    for a, regex in archs.iteritems():
        if re.match(regex, filestr):
            return a

    # Fall back to the kernel architecture, queried through the same
    # run_function. (Previously this always queried the local machine,
    # even when run_function targeted a remote one.)
    return get_arch(run_function)
1140
1141
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    cpuinfo = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    sibling_counts = [int(value) for value in
                      re.findall(r'^siblings\s*:\s*(\d+)\s*$', cpuinfo, re.M)]
    if not sibling_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # Every socket is expected to report the same sibling count.
    if min(sibling_counts) != max(sibling_counts):
        raise error.TestError('Number of siblings differ %r' %
                              sibling_counts)
    return sibling_counts[0]
1160
1161
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Path exists only in dest; nothing to merge.
        return
    if not os.path.exists(dest):
        # Path exists only in src; copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
    elif os.path.isfile(src) and os.path.isfile(dest):
        # A file in both trees: append src's contents to dest.
        with open(dest, "a") as destfile, open(src) as srcfile:
            destfile.write(srcfile.read())
    elif os.path.isdir(src) and os.path.isdir(dest):
        # A directory in both trees: merge the children recursively.
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    # Otherwise src and dest both exist but are incompatible types; skip.
1197
1198
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        if type(self) == type(other):
            return (self.command == other.command
                    and self.exit_status == other.exit_status
                    and self.stdout == other.stdout
                    and self.stderr == other.stderr
                    and self.duration == other.duration)
        else:
            return NotImplemented


    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define __ne__
        # explicitly to keep == and != consistent.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(str(self.command)), self.exit_status,
                self.duration, stdout, stderr))
1251
1252
class run_randomly:
    """Collects queued test invocations and runs them in random order.

    With run_sequentially=True the queue is instead drained FIFO, which
    is useful for debugging control files.
    """

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one fn(*args, **dargs) invocation for a later run()."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued invocations one at a time and apply fn to each."""
        while self.test_list:
            # Draw a random index first; sequential mode then overrides it
            # (matching the original call order of random.randint).
            chosen = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                chosen = 0
            args, dargs = self.test_list.pop(chosen)
            fn(*args, **dargs)
1272
1273
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module.rsplit(".", 1)[-1]

    if not modulefile:
        modulefile = short_module + ".py"

    sitefile = os.path.join(os.path.dirname(path), modulefile)
    if os.path.exists(sitefile):
        return __import__(module, {}, {}, [short_module])
    return dummy
1295
1296
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    module = import_site_module(path, module, modulefile=modulefile)
    if not module:
        return dummy

    # Sentinel object so we can distinguish "attribute missing" from any
    # legitimate attribute value (including None).
    missing = object()
    symbol = getattr(module, name, missing)
    return dummy if symbol is missing else symbol
1323
1324
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not a subclass of baseclass: mix baseclass in and return the result.
    return type(classname, (site_class, baseclass), {})
1355
1356
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """

    # A function is just another module attribute, so delegate to the
    # generic symbol importer.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1374
1375
def _get_pid_path(program_name):
    """Return the absolute path of <program_name>.pid in the autotest root.

    The autotest root is taken to be two directories above this file.
    """
    autotest_root = os.path.join(os.path.dirname(__file__), "..", "..")
    return os.path.abspath(os.path.join(autotest_root,
                                        "%s.pid" % program_name))
1380
1381
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1394
1395
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        # Only ignore the error when the file is already gone.
        if os.path.exists(pidfile_path):
            raise
1408
1409
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    with open(pidfile_path, 'r') as pidfile:
        try:
            pid = int(pidfile.readline())
        except IOError:
            # The file vanished between the existence check and the read.
            if not os.path.exists(pidfile_path):
                return None
            raise

    return pid
1434
1435
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 1 of the stat line is "(comm)"; strip the parentheses.
    return get_field(read_file(stat_path), 1)[1:-1]
1446
1447
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1459
1460
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # No pid file (or pid 0): nothing to signal.
        return
    signal_pid(pid, sig)
1471
1472
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # Count the longest run of leading components common to both paths.
    # (The old version hand-maintained an index with xrange and -1/+1
    # adjustments; this zip-based scan is equivalent and Python 3 safe.)
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # Climb out of the uncommon tail of reference with "..", then descend
    # into the uncommon tail of path.
    relative = ['..'] * (len(ref_list) - common) + path_list[common:]
    return os.path.join(*relative)
1507
1508
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so the later substitutions don't
    # double-escape the backslashes they introduce.
    for char, escaped in (("\\", "\\\\"),
                          ("$", r'\$'),
                          ('"', r'\"'),
                          ('`', r'\`')):
        command = command.replace(char, escaped)
    return command
1529
1530
def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string, so single quotes can safely protect any character except a single
    quote itself, which is emitted as the sequence '\'' meaning:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers, and it nests correctly:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param whitelist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    for char in text:
        if char not in whitelist:
            break
    else:
        # Every character is whitelisted; no quoting required.
        return text
    return "'" + text.replace("'", r"'\''") + "'"
1559
1560
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Forward the standard cross-compilation variables when they are set.
    for env_var, flag in (('CHOST', '--host='),
                          ('CBUILD', '--build='),
                          ('CTARGET', '--target=')):
        if env_var in os.environ:
            args.append(flag + os.environ[env_var])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1579
1580
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @param make: the make binary to invoke.
    @param timeout: timeout in seconds, passed through to system().
    @param ignore_status: passed through to system().
    """
    command = ' '.join([make, os.environ.get('MAKEOPTS', ''), extra])
    return system(command, timeout=timeout, ignore_status=ignore_status)
1589
1590
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(x, y):
        # Three-way comparison; equivalent to the Python-2-only cmp().
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-pad the shorter component so e.g. "2" sorts before "10".
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All compared components equal: the longer version is the greater.
    return _cmp(len(ax), len(ay))
1624
1625
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # Renamed from 'dict', which shadowed the builtin of the same name.
    args_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            args_dict[match.group(1).lower()] = match.group(2)
        else:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return args_dict
1646
1647
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        # Returns the bound port number on success, None if the bind failed.
        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(('', port))
            return s.getsockname()[1]
        except socket.error:
            return None
        finally:
            s.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    port = None
    while not port:
        # Ask the OS for an unused TCP port, then confirm the same port
        # is also free for UDP.
        candidate = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if candidate and try_bind(candidate, socket.SOCK_DGRAM,
                                  socket.IPPROTO_UDP):
            port = candidate
    return port
1678
1679
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    prompt = "%s INFO | %s (y/n) " % (
            time.strftime("%H:%M:%S", time.localtime()), question)
    return raw_input(prompt)
1692
1693
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.

    @param address: the MSR number to read.
    @param cpu: which CPU's msr device to read (default 0).
    """
    # Open in binary, unbuffered mode: the msr device yields raw 8-byte
    # values. The previous text-mode 'r' worked on POSIX Python 2 only
    # (unbuffered text opens are rejected on Python 3).
    with open('/dev/cpu/%s/msr' % cpu, 'rb', 0) as fd:
        fd.seek(address)
        return struct.unpack('=Q', fd.read(8))[0]
1701
1702
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Poll func() until its return value satisfies a condition.

    With no conditions configured (|expected_value|, |min_threshold| and
    |max_threshold| all None) the first sampled value is returned
    immediately. Otherwise func() is polled every 0.1s until the value
    equals |expected_value|, falls to or below |min_threshold|, or rises
    to or above |max_threshold|, whichever condition is configured.

    Polling stops after |timeout_sec| regardless of the conditions.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @return: The most recent return value of func().
    """
    def _satisfied(value):
        # No conditions at all: any sample satisfies.
        if (expected_value is None and min_threshold is None
                and max_threshold is None):
            return True
        if expected_value is not None and value == expected_value:
            return True
        if min_threshold is not None and value <= min_threshold:
            return True
        return max_threshold is not None and value >= max_threshold

    deadline = time.time() + timeout_sec
    while True:
        value = func()
        if _satisfied(value) or time.time() >= deadline:
            return value
        time.sleep(0.1)
1748
1749
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Poll func() until it returns something other than |old_value|.

    The return value is sampled every 0.1s; polling stops as soon as a
    value differing from |old_value| is observed, or after |timeout_sec|.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        if value != old_value or time.time() >= deadline:
            return value
        time.sleep(0.1)
1780
1781
# Shared handle to the parsed autotest global configuration.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names that are treated as equivalent to the local machine.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'

# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24))
# Populated at import time by _setup_restricted_subnets() below.
RESTRICTED_SUBNETS = []
1804
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS.restricted_subnets config."""
    configured = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    # TODO(dshi): Remove the code to split subnet with `:` after R51 is
    # off stable channel, and update shadow config to use `/` as
    # delimiter for consistency.
    for entry in configured:
        delimiter = '/' if '/' in entry else ':'
        ip, mask_bits = entry.split(delimiter)
        RESTRICTED_SUBNETS.append((ip, int(mask_bits)))

_setup_restricted_subnets()
1817
# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string: `\d` in a non-raw literal is an invalid escape sequence on
# newer Python versions.
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1823
1824
def get_built_in_ethernet_nic_name():
    """Gets the moblab public network interface.

    eth0 is the default; when eth0 turns out to be a USB dongle and a
    non-USB eth1 exists, eth1 is used instead.
    """
    try:
        eth0_link = run('readlink -f /sys/class/net/eth0')
        if eth0_link.exit_status == 0 and 'usb' in eth0_link.stdout:
            eth1_link = run('readlink -f /sys/class/net/eth1')
            if eth1_link.exit_status == 0 and 'usb' not in eth1_link.stdout:
                logging.info('Eth0 is a USB dongle, use eth1 as moblab nic.')
                return _MOBLAB_ETH_1
    except error.CmdError:
        # readlink is not supported.
        logging.info('No readlink available, use eth0 as moblab nic.')
    return _MOBLAB_ETH_0
1843
1844
def ping(host, deadline=None, tries=None, timeout=60):
    """Attempt to ping |host|.

    Shells out to 'ping' for an IPv4 address or 'ping6' for an IPv6
    address, trying to reach |host| for up to |timeout| seconds, and
    returns the exit code of the ping command.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.
    Specifying |deadline| or |tries| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @return exit code of ping command.
    """
    # Two colons in the host string mark a literal IPv6 address.
    ping_cmd = 'ping6' if re.search(r':.*:', host) else 'ping'

    args = [host]
    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)

    result = run(ping_cmd, args=args, verbose=True, ignore_status=True,
                 timeout=timeout, stdout_tee=TEE_TO_LOGS,
                 stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1879
1880
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    # Combine the short host name with the configured dns zone.
    fqdn = '%s.%s' % (hostname.split('.')[0], dns_zone)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
1895
1896
def host_could_be_in_afe(hostname):
    """Check if the host could be in Autotest Front End.

    Reports whether a host could be in AFE without actually consulting
    AFE; some systems are in the lab zone but not managed by AFE.

    @param hostname: The hostname to check.
    @returns True if hostname is in lab zone, and does not match *-dev-*
    """
    # Check for '-dev-' first so a match skips the DNS lookup entirely,
    # which keeps us resilient to lab failures.
    if '-dev-' in hostname:
        return False
    return host_is_in_lab_zone(hostname)
1911
1912
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """

    # Aborted jobs have no views.
    if not job_views:
        return None

    for view in job_views:
        attributes = view.get('attributes')
        # Membership test directly on the dict; no need to build .keys().
        if attributes and constants.CHROME_VERSION in attributes:
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
1944
1945
def get_default_interface_mac_address():
    """Returns the default moblab MAC address."""
    nic = get_built_in_ethernet_nic_name()
    return get_interface_mac_address(nic)
1950
1951
def get_interface_mac_address(interface):
    """Return the MAC address of a given interface.

    @param interface: Interface to look up the MAC address of.
    """
    output = run(
            'ip addr show %s | grep link/ether' % interface).stdout
    # The output will be in the format of:
    # 'link/ether <mac> brd ff:ff:ff:ff:ff:ff'
    # so the MAC address is the second whitespace-separated field.
    return output.split()[1]
1962
1963
def get_moblab_id():
    """Gets the moblab random id.

    The random id is cached in a file on disk; the first call creates it.

    @returns the moblab random id.
    """
    id_path = '/home/moblab/.moblab_id'
    try:
        if not os.path.exists(id_path):
            fresh_id = uuid.uuid1().hex
            with open(id_path, 'w') as id_file:
                id_file.write('%s' % fresh_id)
            return fresh_id
        with open(id_path, 'r') as id_file:
            return id_file.read()
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(id_path, 'r') as id_file:
            return id_file.read()
1989
1990
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    Normally this is the results_storage_server from the global_config.
    Moblab partners instead offload to a subdirectory of their image
    storage bucket, keyed by the Moblab device's MAC address and id.

    @returns gsuri to offload test results to.
    """
    if not is_moblab():
        # For non-moblab, use results_storage_server or default.
        return DEFAULT_OFFLOAD_GSURI

    # For moblab, use results_storage_server or image_storage_server as bucket
    # name and mac-address/moblab_id as path.
    bucket = DEFAULT_OFFLOAD_GSURI
    if not bucket:
        bucket = "%sresults/" % CONFIG.get_config_value('CROS', 'image_storage_server')

    mac = get_interface_mac_address(get_built_in_ethernet_nic_name())
    return '%s%s/%s/' % (bucket, mac, get_moblab_id())
2016
2017
2018# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2019# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    canned_acls = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    gsutil_bin = 'gsutil'
    acl_cmd = None
    if acl in canned_acls:
        cp_cmd = '%s cp -a %s %s %s' % (gsutil_bin, acl, local_file,
                                        remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        cp_cmd = '%s cp -a private %s %s' % (gsutil_bin, local_file,
                                             remote_file)
        acl_cmd = '%s setacl %s %s' % (gsutil_bin, acl, remote_file)

    if not result_dir:
        run(cp_cmd, timeout=transfer_timeout, verbose=True)
        if acl_cmd:
            run(acl_cmd, timeout=acl_timeout, verbose=True)
        return True

    # Tracing requested: bracket each gsutil call with markers in the
    # 'tracing' file and tee command output into it.
    with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
        ftrace.write('Preamble\n')
        run(cp_cmd, timeout=transfer_timeout, verbose=True,
            stdout_tee=ftrace, stderr_tee=ftrace)
        if acl_cmd:
            ftrace.write('\nACL setting\n')
            # Apply the passed in ACL xml file to the uploaded object.
            run(acl_cmd, timeout=acl_timeout, verbose=True,
                stdout_tee=ftrace, stderr_tee=ftrace)
        ftrace.write('Postamble\n')
        return True
2071
2072
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output('gsutil ls %s' % uri_pattern)
    return [line.rstrip() for line in listing.splitlines() if line]
2086
2087
def nuke_pids(pid_list, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Queue of signals to send the PID's to terminate them.
        Note: a tuple default is used instead of a mutable list default.

    @return: A mapping of the signal name to the number of processes it
        was sent to.
    """
    sig_count = {}
    # Though this is slightly hacky it beats hardcoding names anyday.
    # .items() works on both Python 2 and 3, unlike iteritems().
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        if sig == signal.SIGKILL:
            # SIGKILL cannot be ignored; no point escalating further.
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # BUG FIX: the format arguments must be a tuple applied to the
            # whole string; the original passed get_process_name(pid) as a
            # second argument to append() and raised TypeError.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2128
2129
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    if host in _LOCAL_HOST_LIST:
        return socket.gethostname()
    return host
2139
2140
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    This method will convert all socket timeouts to
    TimeoutExceptions, so we can use it in conjunction
    with the rpc retry decorator and continue to handle
    other URLErrors as we see fit.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    except urllib2.URLError as e:
        # isinstance is the idiomatic type check (covers subclasses too),
        # rather than comparing type() identity.
        if isinstance(e.reason, socket.timeout):
            raise error.TimeoutException(str(e))
        raise
    finally:
        # Always restore the process-wide default timeout.
        socket.setdefaulttimeout(old_timeout)
2171
2172
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string: `\d` in a non-raw literal is an invalid escape sequence
    # on newer Python versions.
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    ver = match.group(0) if match else version_string
    milestone = match.group(1) if match else ''
    return ver, milestone
2190
2191
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost.

    @raise socket.gaierror: If server name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2209
2210
def is_puppylab_vm(server):
    """Check if server is a virtual machine in puppylab.

    In the virtual machine testing environment (i.e., puppylab), each
    shard VM has a hostname like localhost:<port>.

    @param server: Server name to check.

    @return True if given server is a virtual machine in puppylab.

    """
    # TODO(mkryu): This is a puppylab specific hack. Please update
    # this method if you have a better solution.
    match = re.match(r'(.+):\d+', server)
    return bool(match) and match.group(1) in _LOCAL_HOST_LIST
2229
2230
def get_function_arg_value(func, arg_name, args, kwargs):
    """Get the value of the given argument for the function.

    @param func: Function being called with given arguments.
    @param arg_name: Name of the argument to look for value.
    @param args: arguments for function to be called.
    @param kwargs: keyword arguments for function to be called.

    @return: The value of the given argument for the function.

    @raise ValueError: If the argument is not listed function arguemnts.
    @raise KeyError: If no value is found for the given argument.
    """
    if arg_name in kwargs:
        return kwargs[arg_name]

    # inspect.getargspec was removed in Python 3.11; prefer the compatible
    # getfullargspec (same .args/.defaults attributes) when available.
    if hasattr(inspect, 'getfullargspec'):
        argspec = inspect.getfullargspec(func)
    else:
        argspec = inspect.getargspec(func)
    index = argspec.args.index(arg_name)
    try:
        return args[index]
    except IndexError:
        try:
            # The argument can use a default value. Reverse the default value
            # so argument with default value can be counted from the last to
            # the first.
            return argspec.defaults[::-1][len(argspec.args) - index - 1]
        except IndexError:
            raise KeyError('Argument %s is not given a value. argspec: %s, '
                           'args:%s, kwargs:%s' %
                           (arg_name, argspec, args, kwargs))
2261
2262
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    init_exe = os.readlink('/proc/1/exe')
    return os.path.basename(init_exe) == 'systemd'
2269
2270
def version_match(build_version, release_version, update_url=''):
    """Compare release versino from lsb-release with cros-version label.

    build_version is a string based on build name. It is prefixed with builder
    info and branch ID, e.g., lumpy-release/R43-6809.0.0. It may not include
    builder info, e.g., lumpy-release, in which case, update_url shall be passed
    in to determine if the build is a trybot or pgo-generate build.
    release_version is retrieved from lsb-release.
    These two values might not match exactly.

    The method is designed to compare version for following 6 scenarios with
    samples of build version and expected release version:
    1. trybot non-release build (paladin, pre-cq or test-ap build).
    build version:   trybot-lumpy-paladin/R27-3837.0.0-b123
    release version: 3837.0.2013_03_21_1340

    2. trybot release build.
    build version:   trybot-lumpy-release/R27-3837.0.0-b456
    release version: 3837.0.0

    3. buildbot official release build.
    build version:   lumpy-release/R27-3837.0.0
    release version: 3837.0.0

    4. non-official paladin rc build.
    build version:   lumpy-paladin/R27-3878.0.0-rc7
    release version: 3837.0.0-rc7

    5. chrome-perf build.
    build version:   lumpy-chrome-perf/R28-3837.0.0-b2996
    release version: 3837.0.0

    6. pgo-generate build.
    build version:   lumpy-release-pgo-generate/R28-3837.0.0-b2996
    release version: 3837.0.0-pgo-generate

    TODO: This logic has a bug if a trybot paladin build failed to be
    installed in a DUT running an older trybot paladin build with same
    platform number, but different build number (-b###). So to conclusively
    determine if a tryjob paladin build is imaged successfully, we may need
    to find out the date string from update url.

    @param build_version: Build name for cros version, e.g.
                          peppy-release/R43-6809.0.0 or R43-6809.0.0
    @param release_version: Release version retrieved from lsb-release,
                            e.g., 6809.0.0
    @param update_url: Update url which include the full builder information.
                       Default is set to empty string.

    @return: True if the values match, otherwise returns False.
    """
    # Release, CQ and PFQ builds: the cros-version label simply ends with
    # the release version from lsb-release.
    if build_version.endswith(release_version):
        return True

    # Strip the milestone prefix (R#-) and build-number suffix (-b#), then
    # drop the builder prefix, e.g., trybot-lumpy-paladin/.
    stripped = re.sub(r'(R\d+-|-b\d+)', '', build_version).split('/')[-1]

    trybot_re = r'.*trybot-.+-(paladin|pre-cq|test-ap|toolchain)'
    is_trybot_non_release_build = bool(
            re.match(trybot_re, build_version) or
            re.match(trybot_re, update_url))

    # Trybot non-release builds carry a date stamp in release_version;
    # normalize it to 0 for comparison.
    release_no_date = re.sub(r'\d{4}_\d{2}_\d{2}_\d+', '0', release_version)
    has_date_string = release_version != release_no_date

    pgo_re = r'.+-pgo-generate'
    is_pgo_generate_build = bool(
            re.match(pgo_re, build_version) or
            re.match(pgo_re, update_url))

    # Strip |-pgo-generate| from release_version for comparison.
    release_no_pgo = release_version.replace('-pgo-generate', '')
    has_pgo_generate = release_version != release_no_pgo

    if is_trybot_non_release_build:
        if not has_date_string:
            logging.error('A trybot paladin or pre-cq build is expected. '
                          'Version "%s" is not a paladin or pre-cq  build.',
                          release_version)
            return False
        return stripped == release_no_date

    if is_pgo_generate_build:
        if not has_pgo_generate:
            logging.error('A pgo-generate build is expected. Version '
                          '"%s" is not a pgo-generate build.',
                          release_version)
            return False
        return stripped == release_no_pgo

    if has_date_string:
        logging.error('Unexpected date found in a non trybot paladin or '
                      'pre-cq build.')
        return False
    # Versioned build, i.e., rc or release build.
    return stripped == release_version
2372
2373
def get_real_user():
    """Get the real user that runs the script.

    Checks the SUDO_USER environment variable first (set when running
    under sudo), falling back to USER otherwise.

    @return: The user name that runs the script.

    """
    sudo_user = os.environ.get('SUDO_USER')
    return sudo_user if sudo_user else os.environ.get('USER')
2388
2389
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if has_systemd():
        # systemctl show prints 'MainPID=0' if the service is not running.
        result = run('systemctl show -p MainPID %s' %
                                    service_name, ignore_status=True)
        return int(result.stdout.split('=')[1])

    # Upstart: parse the pid out of 'status <service>' output.
    result = run('status %s' % service_name,
                                    ignore_status=True)
    if 'start/running' in result.stdout:
        return int(result.stdout.split()[3])
    return 0
2408
2409
def control_service(service_name, action='start', ignore_status=True):
    """Controls a service. It can be used to start, stop or restart
    a service.

    @param service_name: string service to be restarted.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    if action not in ('start', 'stop', 'restart'):
        raise ValueError('Unknown action supplied as parameter.')

    cmd = '%s %s' % (action, service_name)
    if has_systemd():
        cmd = 'systemctl ' + cmd
    return system(cmd, ignore_status=ignore_status)
2429
2430
def restart_service(service_name, ignore_status=True):
    """Restarts a service.

    @param service_name: string service to be restarted.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='restart',
                           ignore_status=ignore_status)
2441
2442
def start_service(service_name, ignore_status=True):
    """Starts a service.

    @param service_name: string service to be started.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='start',
                           ignore_status=ignore_status)
2453
2454
def stop_service(service_name, ignore_status=True):
    """Stops a service.

    @param service_name: string service to be stopped.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='stop',
                           ignore_status=ignore_status)
2465
2466
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        # 'sudo -n' fails instead of prompting when a password is needed.
        run('sudo -n true')
        return False
    except error.CmdError:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning('sudo command requires password.')
        return True
2479
2480
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    # LXC containers show '/lxc/' in the init process's cgroup file.
    result = run('grep -q "/lxc/" /proc/1/cgroup',
                            verbose=False, ignore_status=True)
    if result.exit_status == 0:
        return True

    # lxd/lxc containers set the "container" environment variable.
    return os.environ.get('container') == 'lxc'
2496
2497
def is_flash_installed():
    """
    Check for the Adobe Flash binary, which is only distributed with
    internal builds.
    """
    pepper_dir = '/opt/google/chrome/pepper'
    return (os.path.exists(os.path.join(pepper_dir, 'libpepflashplayer.so'))
        and os.path.exists(os.path.join(pepper_dir, 'pepper-flash.info')))
2504
2505
def verify_flash_installed():
    """Raise TestNAError unless the Adobe Flash binary is installed.

    The Adobe Flash binary is only distributed with internal builds.
    Warn users of public builds of the extra dependency.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2513
2514
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IP addresses are in the same subnet with given mask bits.

    The two IP addresses are string of IPv4, e.g., '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Netmask with the top mask_bits bits set. Written without the
    # Python-2-only `2L` long literal so the expression stays valid
    # under Python 3 as well; ints auto-promote to long in Python 2.
    mask = ((1 << mask_bits) - 1) << (32 - mask_bits)
    ip_1_num = struct.unpack('!I', socket.inet_aton(ip_1))[0]
    ip_2_num = struct.unpack('!I', socket.inet_aton(ip_2))[0]
    return ip_1_num & mask == ip_2_num & mask
2531
2532
def get_ip_address(hostname):
    """Resolve a hostname to its IP address.

    @param hostname: Hostname of a DUT.

    @return: The IP address of given hostname. None if hostname is empty
             or resolution fails.
    """
    if not hostname:
        return None
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror as e:
        logging.error('Failed to get IP address of %s, error: %s.', hostname, e)
2546
2547
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    @raise ValueError: If neither `servers` nor `server_ip_map` is given.
    """
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = server_ip_map.keys()
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    matched_servers = []
    for server in servers:
        # Only fall back to a DNS lookup when the pre-built map has no
        # entry. The previous code passed get_ip_address(server) as the
        # default to dict.get(), which resolved every server eagerly and
        # defeated the purpose of the pre-built map.
        server_ip = server_ip_map.get(server)
        if server_ip is None:
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2576
2577
def get_restricted_subnet(hostname, restricted_subnets=RESTRICTED_SUBNETS):
    """Find the restricted subnet containing the given host, if any.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
             subnet, or None if the host is not in any restricted subnet.
    """
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for subnet_ip, mask_bits in restricted_subnets:
        if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
            return (subnet_ip, mask_bits)
    return None
2594
2595
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    Prefers the ssid configured for the most specific subnet containing the
    device's IP. If the host cannot be resolved or no subnet matches, the
    default setting in CLIENT/wireless_ssid is returned.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    fallback_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                            default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return fallback_ssid

    # All subnet-specific wireless ssid entries in the global config.
    ssid_entries = CONFIG.get_config_value_regex('CLIENT',
                                                 WIRELESS_SSID_PATTERN)

    # Several subnets may contain the host; keep the one with the highest
    # maskbit, i.e., the most specific match.
    best_ssid = fallback_ssid
    best_maskbit = -1
    for config_key, ssid in ssid_entries.items():
        # Keys filtered by WIRELESS_SSID_PATTERN have the format
        # wireless_ssid_[subnet_ip]/[maskbit], for example:
        # wireless_ssid_192.168.0.1/24
        key_match = re.match(WIRELESS_SSID_PATTERN, config_key)
        subnet_ip, maskbit = key_match.groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
                is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid = ssid
            best_maskbit = maskbit
    return best_ssid
2633
2634
def parse_launch_control_build(build_name):
    """Split a Launch Control build name into its components.

    @param build_name: Name of a Launch Control build, should be formated as
                       branch/target/build_id

    @return: Tuple of branch, target, build_id
    @raise ValueError: If the build_name is not correctly formated.
    """
    components = build_name.split('/')
    branch, target, build_id = components
    return branch, target, build_id
2646
2647
def parse_android_target(target):
    """Split an Android build target into board and build type.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of board, build_type
    @raise ValueError: If the target is not correctly formated.
    """
    android_board, android_build_type = target.split('-')
    return android_board, android_build_type
2658
2659
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    The Launch Control target has the format of build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug. This method extracts the build target
    and type from the target name.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'eng'), or
             (None, None) if the target does not match the expected format.
    """
    parsed = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not parsed:
        return None, None
    return parsed.group('build_target'), parsed.group('build_type')
2676
2677
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        _, target, _ = parse_launch_control_build(build)
        build_target, _ = parse_launch_control_target(target)
        # A non-None build_target means the target part parsed cleanly.
        return bool(build_target)
    except ValueError:
        # parse_launch_control_build failed on a malformed build name.
        return False
2697
2698
def which(exec_file):
    """Finds an executable file.

    If the file name contains a path component, it is checked as-is.
    Otherwise, we check with each of the path components found in the system
    PATH prepended. This behavior is similar to the 'which' command-line tool.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    if os.path.dirname(exec_file):
        # Explicit path component: check that single location only.
        if os.access(exec_file, os.X_OK):
            return exec_file
        return None
    search_dirs = []
    sys_path = os.environ.get('PATH')
    if sys_path:
        search_dirs = sys_path.split(os.pathsep)
    for directory in search_dirs:
        candidate = os.path.join(directory, exec_file)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
2718
2719
class TimeoutError(error.TestError):
    """Raised by poll_for_condition() when the condition never became true."""
2723
2724
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition becomes true.

    @param condition: function taking no args and returning bool
    @param exception: exception to throw if condition doesn't become true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The true value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    end_time = time.time() + timeout
    while True:
        result = condition()
        if result:
            return result
        # Give up once another full sleep would overshoot the deadline.
        if time.time() + sleep_interval > end_time:
            if exception:
                logging.error(exception)
                raise exception

            if desc:
                message = 'Timed out waiting for condition: ' + desc
            else:
                message = 'Timed out waiting for unnamed condition'
            logging.error(message)
            raise TimeoutError(message)

        time.sleep(sleep_interval)
2761
2762
class metrics_mock(metrics_mock_class.mock_class_base):
    """Stand-in for the chromite metrics module when chromite is absent."""
2766