• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (c) 2017 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""
6Convenience functions for use by tests or whomever.
7
8There's no really good way to do this, as this isn't a class we can do
9inheritance with, just a collection of static methods.
10"""
11
12# pylint: disable=missing-docstring
13
import StringIO
import collections
import datetime
import errno
import functools
import inspect
import itertools
import logging
import os
import pickle
import Queue
import random
import re
import resource
import select
import shutil
import signal
import socket
import string
import struct
import subprocess
import textwrap
import threading
import time
import urllib2
import urlparse
import uuid
import warnings
41
42try:
43    import hashlib
44except ImportError:
45    import md5
46    import sha
47
48import common
49
50from autotest_lib.client.common_lib import env
51from autotest_lib.client.common_lib import error
52from autotest_lib.client.common_lib import global_config
53from autotest_lib.client.common_lib import logging_manager
54from autotest_lib.client.common_lib import metrics_mock_class
55from autotest_lib.client.cros import constants
56
57# pylint: disable=wildcard-import
58from autotest_lib.client.common_lib.lsbrelease_utils import *
59
60
def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    # functools.wraps copies __name__, __doc__ and __dict__ like the old
    # hand-rolled version did, and additionally preserves __module__.
    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
72
73
74class _NullStream(object):
75    def write(self, data):
76        pass
77
78
79    def flush(self):
80        pass
81
82
# Sentinel: tee a command's output stream into the logging system.
TEE_TO_LOGS = object()
# Shared do-nothing stream handed out when no tee target is supplied.
_the_null_stream = _NullStream()

# Sentinel: discard a command's output stream entirely.
DEVNULL = object()

# Default logging levels for teed stdout/stderr output.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')
99
100def custom_warning_handler(message, category, filename, lineno, file=None,
101                           line=None):
102    """Custom handler to log at the WARNING error level. Ignores |file|."""
103    logging.warning(warnings.formatwarning(message, category, filename, lineno,
104                                           line))
105
106warnings.showwarning = custom_warning_handler
107
def get_stream_tee_file(stream, level, prefix=''):
    """Map a tee target to the stream that output should be written to.

    None -> shared null sink; DEVNULL -> None (no stream at all);
    TEE_TO_LOGS -> a LoggingFile at |level| with |prefix|; anything else
    is assumed file-like and returned unchanged.
    """
    # The three sentinel checks are mutually exclusive identity tests, so
    # their order does not matter.
    if stream is DEVNULL:
        return None
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
116
117
118def _join_with_nickname(base_string, nickname):
119    if nickname:
120        return '%s BgJob "%s" ' % (base_string, nickname)
121    return base_string
122
123
124# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
125# master-ssh connection process, while fixing underlying
126# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A subprocess started in the background with tee-able output streams.

    The process is launched in __init__; callers then drain its output via
    process_output() (usually from join_bg_jobs()/_wait_for_commands()) and
    finish with cleanup().
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the master-ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        # Unjoinable jobs are never drained, so their output must go to
        # /dev/null; anything else would fill the pipe buffers and stall.
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            # Copy so the caller's env dict (or os.environ) is not mutated.
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # A list is executed directly; a string goes through a bash shell.
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        # The devnull handle is only needed by Popen while it duplicates the
        # fd into the child, so it can be closed as soon as Popen returns.
        with open('/dev/null', 'w') as devnull:
            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=True)

        self._cleanup_called = False
        # In-memory accumulators for the child's output; None when the
        # corresponding stream is being discarded via DEVNULL.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else StringIO.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else StringIO.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        # Select the pipe, accumulator buffer and tee for the chosen stream.
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        # Stream was sent to DEVNULL; nothing to read.
        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            # select() with a zero timeout polls for readability; an empty
            # os.read() result means EOF.
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even if flushing/closing raised, so a retry does not
            # double-close the pipes.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        # Popen preexec hook: restore default SIGPIPE disposition in the
        # child. Skipped under mod_wsgi (env.IN_MOD_WSGI) — presumably
        # because signal manipulation is restricted there; confirm.
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
320
321
def ip_to_long(ip):
    """Convert a dotted-quad IP string to its 32-bit integer value."""
    packed = socket.inet_aton(ip)
    # '!L' decodes an unsigned long in network byte order.
    (value,) = struct.unpack('!L', packed)
    return value
325
326
def long_to_ip(number):
    """Convert a 32-bit integer back into dotted-quad IP notation."""
    # Inverse of ip_to_long: pack as network-byte-order unsigned long.
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
330
331
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer for a prefix length of |bits|."""
    # All 32 bits set, minus the (32 - bits) low host bits.
    return ((1 << 32) - 1) ^ ((1 << (32 - bits)) - 1)
334
335
def format_ip_with_mask(ip, mask_bits):
    """Return |ip| masked down to its network address in CIDR notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
339
340
def normalize_hostname(alias):
    """Resolve |alias| to its canonical hostname (forward + reverse DNS)."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
344
345
def get_ip_local_port_range():
    """Return the kernel's ephemeral port range as an (low, high) tuple."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    return (int(match.group(1)), int(match.group(2)))
350
351
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral port range to [lower, upper]."""
    contents = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', contents)
355
356
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline."""
    # 'with' guarantees the file is closed even if readline() raises.
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
363
364
def read_file(filename):
    """Return the entire contents of |filename| as a single string."""
    # 'with' guarantees the file is closed even if read() raises.
    with open(filename) as f:
        return f.read()
371
372
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.

    @return The selected field as a string, or None when no line in |data|
            starts with |linestart|.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    # 'is not None' instead of '!= None' (PEP 8); the original also used a
    # py2-only bare print — route the message through logging like the rest
    # of this module.
    if find is not None:
        return re.split("%s" % sep, find.group(1))[param]
    logging.warning("There is no line which starts with %s in data.",
                    linestart)
    return None
395
396
def write_one_line(filename, line):
    """Write |line| to |filename| with exactly one trailing newline."""
    normalized = str(line).rstrip('\n') + '\n'
    open_write_close(filename, normalized)
399
400
def open_write_close(filename, data):
    """Write |data| to |filename| (truncating any existing contents)."""
    # 'with' guarantees the file is closed even if write() raises.
    with open(filename, 'w') as f:
        f.write(data)
407
408
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.

    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    candidate = path
    if base_dir is not None and not os.path.isabs(candidate):
        # Resolve the relative path against the supplied base directory.
        candidate = os.path.join(base_dir, candidate)
    if os.path.isfile(candidate):
        return candidate
    raise error.TestFail('ERROR: Unable to find %s' % candidate)
430
431
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)

    # Column widths start from the header (if any) and grow to fit cells.
    widths = [len(col) for col in header] if header else []
    for row in matrix:
        for i, cell in enumerate(row):
            cell = unicode(cell).encode("utf-8")
            if i < len(widths):
                if len(cell) > widths[i]:
                    widths[i] = len(cell)
            else:
                widths.append(len(cell))

    format_string = "".join("%-" + str(w) + "s " for w in widths) + "\n"

    lines = []
    if header:
        lines.append(format_string % header)
    for row in matrix:
        lines.append(format_string % tuple(row))
    return "".join(lines)
473
474
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.

    @return Dict of parsed keyvals; {} if the file does not exist.
    @raises ValueError on a malformed (non-keyval, non-comment) line.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' guarantees the file is closed even when a malformed line
    # raises ValueError (the original leaked the handle in that case).
    with open(path) as f:
        for line in f:
            # Strip comments and trailing whitespace.
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            # Coerce values that look numeric into int/float.
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
514
515
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above

    @raises ValueError for an invalid type_tag or key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Build the key validator before touching the file, so an invalid
    # type_tag no longer creates/opens the keyval file as a side effect.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)

    # 'with' closes the file even when a key fails validation mid-write.
    with open(path, 'a') as keyval:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
549
550
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
556
557
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # Temporarily install |timeout| as the process-wide socket default and
    # restore the previous value no matter how the open attempt ends.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
568
569
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url.

    @param url: Source URL (opened via this module's urlopen wrapper).
    @param filename: Local destination path, opened in binary write mode.
    @param data: Optional POST data forwarded to urlopen.
    @param timeout: Socket timeout in seconds for the transfer.
    """
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' closes the destination even if the copy fails; the source
        # (a urllib2 response, no context-manager support on py2) still
        # needs the explicit try/finally.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
583
584
def hash(hashtype, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented
    in order to encapsulate hash objects in a way that is compatible with
    python 2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    # pylint: disable=redefined-builtin
    if hashtype not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % hashtype)

    try:
        computed_hash = hashlib.new(hashtype)
    except NameError:
        # hashlib missing (python 2.4): fall back to the legacy modules.
        computed_hash = md5.new() if hashtype == 'md5' else sha.new()

    if input:
        computed_hash.update(input)
    return computed_hash
614
615
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    # Remote sources are downloaded; local ones are plain copies.
    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
629
630
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)

    # Fetch the URL into destdir, keeping the URL path's basename.
    url_path = urlparse.urlparse(src)[2]
    dest = os.path.join(destdir, os.path.basename(url_path))
    return get_file(src, dest)
649
650
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: Directory whose '.version' stamp is checked/updated.
    @param preserve_srcdir: If True, never delete srcdir before installing.
    @param new_version: Version value pickled into the stamp file.
    @param install: Callable invoked (with *args/**dargs) to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Binary mode: pickle streams are bytes. The original leaked the
        # handle and used text mode.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # Only stamp the version if install() actually produced srcdir.
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
675
676
def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
    """Pick the logging level for stderr: stdout's level when stderr output
    is expected, otherwise the (higher) default stderr level."""
    return stdout_level if stderr_is_expected else DEFAULT_STDERR_LEVEL
681
682
def run(command, timeout=None, ignore_status=False, stdout_tee=None,
        stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
        stdout_level=None, stderr_level=None, args=(), nickname=None,
        ignore_timeout=False, env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout unless this is DEVNULL).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same
            level as stdout
    @param stdout_level: logging level used if stdout_tee is TEE_TO_LOGS;
            if None, a default is used.
    @param stderr_level: like stdout_level but for stderr.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # |command| may arrive as a list (e.g. get_user_hash in
    # client/cros/cryptohome.py); flatten it into one shell string first.
    if not isinstance(command, basestring):
        command = ' '.join(sh_quote_word(part) for part in command)
    quoted_args = [sh_quote_word(arg) for arg in args]
    command = ' '.join([command] + quoted_args)

    # Fill in level defaults: expected stderr logs at stdout's level.
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    if stdout_level is None:
        stdout_level = DEFAULT_STDOUT_LEVEL
    if stderr_level is None:
        stderr_level = get_stderr_level(stderr_is_expected, stdout_level)

    job = BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                stdout_level=stdout_level, stderr_level=stderr_level,
                nickname=nickname, env=env, extra_paths=extra_paths)
    try:
        bg_job = join_bg_jobs((job,), timeout)[0]
    except error.CmdTimeoutError:
        if not ignore_timeout:
            raise
        return None

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
763
764
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects

    @raise CmdError: some command exited non-zero and ignore_status is False.
    """
    if nicknames is None:
        nicknames = []
    # izip_longest pads missing nicknames with None.
    bg_jobs = [BgJob(command, stdout_tee, stderr_tee,
                     stderr_level=get_stderr_level(ignore_status),
                     nickname=nickname)
               for command, nickname
               in itertools.izip_longest(commands, nicknames)]

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Bug fix: report the failing job's own command. The original
            # raised with the leaked loop variable |command|, which always
            # named the *last* command in the list.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
794
795
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return (job.sp, job.result)
801
802
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    for bg_job in bg_jobs:
        if bg_job.unjoinable:
            raise error.InvalidBgJobCall(
                    'join_bg_jobs cannot be called for unjoinable bg_job')

    timeout_error = False
    try:
        # We hold our ends of the stdin/stdout pipes, so ensure they get
        # closed no matter what happens while waiting.
        timeout_error = _wait_for_commands(bg_jobs, time.time(), timeout)

        # Drain whatever output remains on both streams of every job.
        for bg_job in bg_jobs:
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)

    return bg_jobs
838
839
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    Polls until either every job has exited (returns False) or the timeout
    elapses, in which case any still-running jobs are killed via
    nuke_subprocess() (returns True).

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.  None/0 waits forever.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # Pipes we still want to read from (each job's stdout/stderr).
    read_list = []
    # Pipes we still want to write to (stdin of jobs with pending input).
    write_list = []
    # Maps a pipe back to its job: stdout/stderr map to (job, is_stdout)
    # tuples, while stdin maps to the job object itself.
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            # exit_status already recorded on a previous pass; skip.
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
947
948
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        contents = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # Some other I/O problem; let the caller see it.
            raise
        # The stat file went away, so the process is gone.
        return False

    # Field 2 of /proc/<pid>/stat is the process state; 'Z' means zombie.
    return contents.split()[2] != 'Z'
966
967
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Give the process up to five seconds to go away.
    attempts_left = 5
    while attempts_left > 0:
        if not pid_is_alive(pid):
            return True
        time.sleep(1)
        attempts_left -= 1

    # The process is still alive
    return False
986
987
def nuke_subprocess(subproc):
    """Terminate subproc with escalating signals; return its exit status.

    Returns None if the process survived even SIGKILL.
    """
    # Check whether the subprocess already exited on its own.
    status = subproc.poll()
    if status is not None:
        return status

    # Still running: escalate from SIGTERM to SIGKILL.
    for sig in [signal.SIGTERM, signal.SIGKILL]:
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
1000
1001
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill a process by sending it an escalating series of signals.

    @param pid: The pid of the process to kill.
    @param signal_queue: Signals to send, in order, until one of them
            terminates the process.

    @raise error.AutoservPidAlreadyDeadError: If the pid has no /proc entry
            (i.e. the process is already dead).
    @raise error.AutoservRunError: If no signal in signal_queue terminated
            the process.
    """
    pid_path = '/proc/%d/'
    if not os.path.exists(pid_path % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Fix: the pid used to be passed logging-style as a second
        # constructor argument, so '%s' was never interpolated into the
        # exception message; format it explicitly instead.
        raise error.AutoservPidAlreadyDeadError(
                'Could not kill nonexistent pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
1018
1019
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its output to the logs.

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1034
1035
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
1042
1043
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    # Only tee the output to the logs when requested.
    if retain_output:
        tee_opts = {'stdout_tee': TEE_TO_LOGS, 'stderr_tee': TEE_TO_LOGS}
    else:
        tee_opts = {}
    out = run(command, timeout=timeout, ignore_status=ignore_status,
              args=args, **tee_opts).stdout
    # Drop a single trailing newline, if present.
    if out.endswith('\n'):
        out = out[:-1]
    return out
1074
1075
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return each command's stdout.

    @param commands: list of command strings to run in parallel.
    @param timeout: time limit in seconds applied to the whole set of jobs.
    @param ignore_status: do not raise an exception on non-zero exit codes.
    @param retain_output: set to True to also tee each command's
            stdout/stderr to the logging system.

    @return a list of stdout strings, one per command, each with a single
            trailing newline stripped (mirroring system_output()).
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS,
                               stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    # Fix: the old loop compared a *list* slice (out[-1:]) against '\n',
    # which could never match, so no newline was ever stripped. Strip one
    # trailing newline from each individual output instead.
    return [out[:-1] if out.endswith('\n') else out
            for out in (bg_job.stdout for bg_job in results)]
1090
1091
1092def strip_unicode(input_obj):
1093    if type(input_obj) == list:
1094        return [strip_unicode(i) for i in input_obj]
1095    elif type(input_obj) == dict:
1096        output = {}
1097        for key in input_obj.keys():
1098            output[str(key)] = strip_unicode(input_obj[key])
1099        return output
1100    elif type(input_obj) == unicode:
1101        return str(input_obj)
1102    else:
1103        return input_obj
1104
1105
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # ru_utime/ru_stime are the first two fields of a rusage struct; sum the
    # user and system CPU time consumed by us and our children.
    used = ((self_post.ru_utime - self_pre.ru_utime) +
            (self_post.ru_stime - self_pre.ru_stime) +
            (child_post.ru_utime - child_pre.ru_utime) +
            (child_post.ru_stime - child_pre.ru_stime))
    return used / elapsed, to_return
1126
1127
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    If specified, run_function should return a CmdResult object and throw a
    CmdError exception.
    If run_function is anything other than utils.run(), it is used to
    execute the commands. By default (when set to utils.run()) this will
    just examine os.uname()[4].
    """
    if run_function == run:
        # Common case: we are local, so just ask uname directly.
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise run uname through run_function, which may hit a remote host.
    machine = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', machine):
        machine = 'i386'
    return machine
1147
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).
    """
    # Map architecture name to the `file` signature of /bin/sh built for it.
    archs = {
        'arm': 'ELF 32-bit.*, ARM,',
        'arm64': 'ELF 64-bit.*, ARM aarch64,',
        'i386': 'ELF 32-bit.*, Intel 80386,',
        'x86_64': 'ELF 64-bit.*, x86-64,',
    }

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch, pattern in archs.items():
        if re.match(pattern, filestr):
            return arch

    # Nothing matched; fall back to the kernel architecture.
    return get_arch()
1166
1167
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    output = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    counts = [int(field) for field in
              re.findall(r'^siblings\s*:\s*(\d+)\s*$', output, re.M)]
    if not counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(counts) != max(counts):
        raise error.TestError('Number of siblings differ %r' %
                              counts)
    return counts[0]
1186
1187
def set_high_performance_mode(host=None):
    """
    Sets the kernel governor mode to the highest setting.
    Returns previous governor state.
    """
    previous_governors = get_scaling_governor_states(host)
    set_scaling_governors('performance', host)
    return previous_governors
1196
1197
def set_scaling_governors(value, host=None):
    """
    Sets all scaling governor to string value.
    Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.
    """
    paths = _get_cpufreq_paths('scaling_governor', host)
    if not paths:
        logging.info("Could not set governor states, as no files of the form "
                     "'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' "
                     "were found.")
    run_func = host.run if host else system
    for path in paths:
        logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value, path), ignore_status=True)
1214
1215
def _get_cpufreq_paths(filename, host=None):
    """
    Returns a list of paths to the governors.
    """
    run_func = host.run if host else run
    glob = '/sys/devices/system/cpu/cpu*/cpufreq/' + filename
    # Expand the glob via the shell; CPUs may come and go, causing these
    # paths to change at any time.
    try:
        paths = run_func('echo ' + glob, verbose=False).stdout.split()
    except error.CmdError:
        return []
    # An unexpanded glob echoes itself back, which means no real paths
    # matched (assuming 'cpu*' is not itself a real path).
    return [] if paths == [glob] else paths
1234
1235
def get_scaling_governor_states(host=None):
    """
    Returns a list of (performance governor path, current state) tuples.
    """
    run_func = host.run if host else run
    return [(path, run_func('head -n 1 %s' % path, verbose=False).stdout)
            for path in _get_cpufreq_paths('scaling_governor', host)]
1247
1248
def restore_scaling_governor_states(path_value_list, host=None):
    """
    Restores governor states. Inverse operation to get_scaling_governor_states.
    """
    run_func = host.run if host else system
    for path, value in path_value_list:
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value.rstrip('\n'), path),
                 ignore_status=True)
1258
1259
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Path exists only in dest; nothing to merge.
        return
    if not os.path.exists(dest):
        # Path exists only in src; copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
        return
    if os.path.isfile(src) and os.path.isfile(dest):
        # A file in both trees: append src's contents to dest.
        with open(dest, "a") as destfile:
            with open(src) as srcfile:
                destfile.write(srcfile.read())
    elif os.path.isdir(src) and os.path.isdir(dest):
        # A directory in both trees: merge the children recursively.
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    # Otherwise src and dest both exist but are incompatible types: skip.
1295
1296
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        # Only compare against other CmdResult instances; otherwise defer
        # to the other operand's comparison.
        if type(self) != type(other):
            return NotImplemented
        return ((self.command, self.exit_status, self.stdout,
                 self.stderr, self.duration) ==
                (other.command, other.exit_status, other.stdout,
                 other.stderr, other.duration))


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        # Only include stdout/stderr sections when they have content.
        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout
        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(str(self.command)), self.exit_status,
                   self.duration, stdout, stderr))
1349
1350
class run_randomly:
    """Collects (args, dargs) invocations and replays them in random order."""

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one invocation to be run later."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued invocations (randomly, unless sequential) into fn."""
        while self.test_list:
            # Always draw from the RNG so sequential runs consume the same
            # random state as randomized ones.
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1370
1371
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]
    if not modulefile:
        modulefile = short_module + ".py"

    site_path = os.path.join(os.path.dirname(path), modulefile)
    if not os.path.exists(site_path):
        return dummy
    return __import__(module, {}, {}, [short_module])
1393
1394
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    mod = import_site_module(path, module, modulefile=modulefile)
    if not mod:
        return dummy

    # Unique sentinel object distinguishes "missing" from a None symbol.
    sentinel = object()
    symbol = getattr(mod, name, sentinel)
    return dummy if symbol is sentinel else symbol
1421
1422
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not derived from baseclass: mix baseclass into the site class and
    # return the resulting type.
    return type(classname, (site_class, baseclass), {})
1453
1454
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just symbols, so delegate to the generic lookup.
    return import_site_symbol(path, module, funcname, dummy,
                              modulefile=modulefile)
1472
1473
1474def _get_pid_path(program_name):
1475    my_path = os.path.dirname(__file__)
1476    return os.path.abspath(os.path.join(my_path, "..", "..",
1477                                        "%s.pid" % program_name))
1478
1479
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1492
1493
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)
    try:
        os.remove(pidfile_path)
    except OSError:
        # Fine if the file was already gone; re-raise anything else.
        if os.path.exists(pidfile_path):
            raise
1506
1507
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(pidfile_path, 'r')
    try:
        try:
            return int(pidfile.readline())
        except IOError:
            # The file may have been removed between the exists() check
            # and the read; treat that as "no pid".
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()
1532
1533
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 1 of /proc/<pid>/stat holds the comm name wrapped in parens;
    # strip them off.
    return get_field(read_file(stat_path), 1)[1:-1]
1544
1545
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1557
1558
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        return
    signal_pid(pid, sig)
1569
1570
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed

    @return the relative path; '.' when path and reference resolve to the
        same directory.
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    for i in range(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1
    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    if not path_list:
        # Fix: path and reference are identical; os.path.join() with no
        # arguments raises TypeError, so return the current directory.
        return os.curdir

    return os.path.join(*path_list)
1605
1606
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so the escapes added for the other
    # characters are not themselves doubled.
    for special in ('\\', '$', '"', '`'):
        command = command.replace(special, '\\' + special)
    return command
1627
1628
def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string.  So, single quotes can safely quote any string of characters except
    a string with a single quote character.  A single quote character must be
    quoted with the sequence '\'' which translates to:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers.

    This is also safe for nesting, e.g. the following is a valid use:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param whitelist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    # Strings made only of known-safe characters need no quoting at all.
    needs_quoting = any(char not in whitelist for char in text)
    if not needs_quoting:
        return text
    return "'" + text.replace("'", r"'\''") + "'"
1657
1658
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Pick up the toolchain triples from the environment when present.
    for env_var, flag in (('CHOST', '--host='),
                          ('CBUILD', '--build='),
                          ('CTARGET', '--target=')):
        if env_var in os.environ:
            args.append(flag + os.environ[env_var])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1677
1678
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    command = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
    return system(command, timeout=timeout, ignore_status=ignore_status)
1687
1688
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    Components are split on '.' and '-' and compared pairwise after
    zero-filling both sides to equal width, so numeric components order
    numerically and alphanumeric suffixes order lexically.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    # Three-way compare that works on both Python 2 and Python 3
    # (the cmp() builtin was removed in Python 3).
    def _cmp(a, b):
        return (a > b) - (a < b)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the version with more components wins.
    return _cmp(len(ax), len(ay))
1722
1723
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    parser = re.compile(r'(\w+)[:=](.*)$')
    parsed = {}
    for item in args:
        matched = parser.match(item)
        if not matched:
            # Unparseable arguments are logged and dropped, not fatal.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", item, parser.pattern)
            continue
        parsed[matched.group(1).lower()] = matched.group(2)
    return parsed
1744
1745
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _bind_or_none(port, sock_type, sock_proto):
        # Returns the bound port number on success, None if unavailable.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    # On the 2.6 kernel, binding a UDP socket to port 0 returns the same
    # port over and over, so always let TCP pick the candidate first.
    while True:
        candidate = _bind_or_none(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # Accept the candidate only if UDP can bind it as well.
        if candidate and _bind_or_none(candidate, socket.SOCK_DGRAM,
                                       socket.IPPROTO_UDP):
            return candidate
1776
1777
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return raw_input("%s INFO | %s (y/n) " % (timestamp, question))
1790
1791
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.
    """
    # Open the per-CPU MSR device unbuffered; each MSR is a 64-bit value
    # read at the offset given by the MSR address.
    msr_dev = open('/dev/cpu/%s/msr' % cpu, 'r', 0)
    try:
        msr_dev.seek(address)
        return struct.unpack('=Q', msr_dev.read(8))[0]
    finally:
        msr_dev.close()
1799
1800
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Polls func() until one of the stopping criteria is satisfied.

    If |expected_value|, |min_threshold| and |max_threshold| are all unset,
    func() is called once and its value returned immediately.

    If |expected_value| is set, polls until func() returns that value.
    If |min_threshold| (|max_threshold|) is set, polls until the value
    reaches or falls below (rises above) the threshold.

    Polling stops after |timeout_sec| regardless of the criteria.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        no_criteria = (expected_value is None and min_threshold is None
                       and max_threshold is None)
        if no_criteria:
            return value
        if expected_value is not None and value == expected_value:
            return value
        if min_threshold is not None and value <= min_threshold:
            return value
        if max_threshold is not None and value >= max_threshold:
            return value
        if time.time() >= deadline:
            return value
        time.sleep(0.1)
1846
1847
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Polls func() until its return value differs from |old_value|.

    Polling stops after |timeout_sec| regardless.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    value = func()
    while value == old_value and time.time() < deadline:
        time.sleep(0.1)
        value = func()
    return value
1878
1879
# Shortcut to the process-wide global configuration object.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names/addresses treated as equivalent to the local machine.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'

# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24). Populated below by _setup_restricted_subnets().
RESTRICTED_SUBNETS = []
1902
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS.restricted_subnets config."""
    subnet_strings = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    # TODO(dshi): Remove the code to split subnet with `:` after R51 is
    # off stable channel, and update shadow config to use `/` as
    # delimiter for consistency.
    for entry in subnet_strings:
        delimiter = '/' if '/' in entry else ':'
        ip, mask_bits = entry.split(delimiter)
        RESTRICTED_SUBNETS.append((ip, int(mask_bits)))

_setup_restricted_subnets()
1915
# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string so the `\d` regex escape is explicit instead of relying on
# Python passing unknown string escapes through unchanged.
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1921
1922
def get_moblab_serial_number():
    """Gets a unique identifier for the moblab.

    The VPD serial number is the preferred identifier; the ethernet MAC
    address is used as a fallback when no serial number is present.
    """
    for vpd_key in ('serial_number', 'ethernet_mac'):
        try:
            cmd_result = run('sudo vpd -g %s' % vpd_key)
        except error.CmdError as e:
            # Try the next key if reading this VPD field failed.
            logging.error(str(e))
            logging.info(vpd_key)
            continue
        if cmd_result and cmd_result.stdout:
            return cmd_result.stdout
    return 'NoSerialNumber'
1938
1939
def ping(host, deadline=None, tries=None, timeout=60, user=None):
    """Attempt to ping |host|.

    Shell out to 'ping' if host is an IPv4 address or 'ping6' if host is an
    IPv6 address to try to reach |host| for |timeout| seconds.
    Returns exit code of ping.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.

    Specifying |deadline| or |tries| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @param user: if set, wrap the ping invocation in 'su' as this user.
    @return exit code of ping command.
    """
    # Two colon-separated groups means a literal IPv6 address.
    is_ipv6 = re.search(r':.*:', host) is not None
    cmd = 'ping6' if is_ipv6 else 'ping'

    args = [host]
    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)

    if user != None:
        args = [user, '-c', ' '.join([cmd] + args)]
        cmd = 'su'

    return run(cmd, args=args, verbose=True, ignore_status=True,
               timeout=timeout, stdout_tee=TEE_TO_LOGS,
               stderr_tee=TEE_TO_LOGS).exit_status
1978
1979
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    shortname = hostname.split('.')[0]
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    fqdn = '%s.%s' % (shortname, dns_zone)
    logging.debug('Checking if host %s is in lab zone.', fqdn)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
1995
1996
def in_moblab_ssp():
    """Detects if this execution is inside an SSP container on moblab."""
    config_says_moblab = CONFIG.get_config_value('SSP', 'is_moblab', type=bool,
                                                 default=False)
    return is_in_container() and config_says_moblab
2002
2003
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """
    # Aborted jobs have no views.
    if not job_views:
        return None

    # Return the first view that carries the chrome-version keyval.
    for view in job_views:
        attributes = view.get('attributes')
        if attributes and constants.CHROME_VERSION in attributes:
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
2035
2036
def get_moblab_id():
    """Gets the moblab random id.

    The random id file is cached on disk. If it does not exist, a new file is
    created the first time.

    @returns the moblab random id.
    """
    id_path = '/home/moblab/.moblab_id'
    try:
        if os.path.exists(id_path):
            with open(id_path, 'r') as id_file:
                moblab_id = id_file.read()
        else:
            # First run: generate an id and persist it for later calls.
            moblab_id = uuid.uuid1().hex
            with open(id_path, 'w') as id_file:
                id_file.write('%s' % moblab_id)
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(id_path, 'r') as id_file:
            moblab_id = id_file.read()
    return moblab_id
2062
2063
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    For the normal use case this is the results_storage_server in the
    global_config.

    However partners using Moblab will be offloading their results to a
    subdirectory of their image storage buckets. The subdirectory is
    determined by the MAC Address of the Moblab device.

    @returns gsuri to offload test results to.
    """
    # For non-moblab, use results_storage_server or default.
    if not is_moblab():  # pylint: disable=undefined-variable
        return DEFAULT_OFFLOAD_GSURI

    # For moblab, use results_storage_server or image_storage_server as bucket
    # name and mac-address/moblab_id as path.
    base_uri = DEFAULT_OFFLOAD_GSURI
    if not base_uri:
        image_server = CONFIG.get_config_value('CROS', 'image_storage_server')
        base_uri = "%sresults/" % image_server

    return '%s%s/%s/' % (base_uri, get_moblab_serial_number(), get_moblab_id())
2088
2089
2090# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2091# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    CANNED_ACLS = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    _GSUTIL_BIN = 'gsutil'
    acl_cmd = None
    if acl in CANNED_ACLS:
        # A canned ACL can be applied directly on the copy.
        cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        # Upload as private first; the ACL file is applied afterwards.
        cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)
    if not result_dir:
        # No tracing requested: just run the upload (and ACL step, if any).
        run(cmd, timeout=transfer_timeout, verbose=True)
        if acl_cmd:
            run(acl_cmd, timeout=acl_timeout, verbose=True)
        return True
    # Tracing requested: tee both commands' output into a 'tracing' file
    # inside result_dir.
    with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
        ftrace.write('Preamble\n')
        run(cmd, timeout=transfer_timeout, verbose=True,
                       stdout_tee=ftrace, stderr_tee=ftrace)
        if acl_cmd:
            ftrace.write('\nACL setting\n')
            # Apply the passed in ACL xml file to the uploaded object.
            run(acl_cmd, timeout=acl_timeout, verbose=True,
                           stdout_tee=ftrace, stderr_tee=ftrace)
        ftrace.write('Postamble\n')
        return True
2143
2144
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output(' '.join(['gsutil', 'ls', uri_pattern]))
    return [line.rstrip() for line in listing.splitlines() if line]
2158
2159
def nuke_pids(pid_list, signal_queue=None):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Queue of signals to send the PID's to terminate them.

    @return: A mapping of the signal name to the number of processes it
        was sent to.

    @raise error.AutoservRunError: if any processes are still alive after
        the whole signal queue has been sent.
    """
    if signal_queue is None:
        signal_queue = [signal.SIGTERM, signal.SIGKILL]
    sig_count = {}
    # Though this is slightly hacky it beats hardcoding names anyday.
    # (items() rather than the Python 2-only iteritems() so this also runs
    # under Python 3.)
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        if sig == signal.SIGKILL:
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # Bug fix: the original passed two arguments to append() and
            # applied '%' to pid alone, raising TypeError whenever a pid
            # survived all signals.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2202
2203
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    # Replace loopback-style names with the real host name.
    if host in _LOCAL_HOST_LIST:
        return socket.gethostname()
    return host
2213
2214
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    This method will convert all socket timeouts to
    TimeoutExceptions, so we can use it in conjunction
    with the rpc retry decorator and continue to handle
    other URLErrors as we see fit.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        try:
            return urllib2.urlopen(url, data=data)
        except urllib2.URLError as e:
            # Only translate genuine socket timeouts; re-raise the rest.
            if type(e.reason) is socket.timeout:
                raise error.TimeoutException(str(e))
            raise
    finally:
        # Always restore the process-wide default timeout.
        socket.setdefaulttimeout(saved_timeout)
2245
2246
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string for the regex so the backslash escapes are unambiguous.
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    ver = match.group(0) if match else version_string
    milestone = match.group(1) if match else ''
    return ver, milestone
2264
2265
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost.

    @raise socket.gaierror: If server name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2283
2284
2285def get_function_arg_value(func, arg_name, args, kwargs):
2286    """Get the value of the given argument for the function.
2287
2288    @param func: Function being called with given arguments.
2289    @param arg_name: Name of the argument to look for value.
2290    @param args: arguments for function to be called.
2291    @param kwargs: keyword arguments for function to be called.
2292
2293    @return: The value of the given argument for the function.
2294
2295    @raise ValueError: If the argument is not listed function arguemnts.
2296    @raise KeyError: If no value is found for the given argument.
2297    """
2298    if arg_name in kwargs:
2299        return kwargs[arg_name]
2300
2301    argspec = inspect.getargspec(func)
2302    index = argspec.args.index(arg_name)
2303    try:
2304        return args[index]
2305    except IndexError:
2306        try:
2307            # The argument can use a default value. Reverse the default value
2308            # so argument with default value can be counted from the last to
2309            # the first.
2310            return argspec.defaults[::-1][len(argspec.args) - index - 1]
2311        except IndexError:
2312            raise KeyError('Argument %s is not given a value. argspec: %s, '
2313                           'args:%s, kwargs:%s' %
2314                           (arg_name, argspec, args, kwargs))
2315
2316
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    # On systemd-managed hosts, PID 1's executable is systemd.
    init_exe = os.readlink('/proc/1/exe')
    return os.path.basename(init_exe) == 'systemd'
2323
2324
def get_real_user():
    """Get the real user that runs the script.

    Checks the SUDO_USER environment variable first (set when the script
    is run under sudo), and falls back to USER otherwise.

    @return: The user name that runs the script.

    """
    return os.environ.get('SUDO_USER') or os.environ.get('USER')
2339
2340
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if has_systemd():
        # systemctl show prints 'MainPID=0' if the service is not running.
        output = run('systemctl show -p MainPID %s' % service_name,
                     ignore_status=True).stdout
        return int(output.split('=')[1])
    # Upstart fallback: 'status <svc>' reports 'start/running, process NNN'.
    output = run('status %s' % service_name, ignore_status=True).stdout
    if 'start/running' in output:
        return int(output.split()[3])
    return 0
2359
2360
def control_service(service_name, action='start', ignore_status=True):
    """Controls a service. It can be used to start, stop or restart
    a service.

    @param service_name: string service to be restarted.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.

    @raise ValueError: if action is not start, stop or restart.
    """
    if action not in ('start', 'stop', 'restart'):
        raise ValueError('Unknown action supplied as parameter.')

    command = '%s %s' % (action, service_name)
    if has_systemd():
        command = 'systemctl ' + command
    return system(command, ignore_status=ignore_status)
2380
2381
def restart_service(service_name, ignore_status=True):
    """Restarts a service.

    Convenience wrapper around control_service() with the 'restart'
    action pre-bound.

    @param service_name: string service to be restarted.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='restart',
                           ignore_status=ignore_status)
2393
2394
def start_service(service_name, ignore_status=True):
    """Starts a service.

    Convenience wrapper around control_service() with the 'start'
    action pre-bound.

    @param service_name: string service to be started.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='start',
                           ignore_status=ignore_status)
2406
2407
def stop_service(service_name, ignore_status=True):
    """Stops a service.

    Convenience wrapper around control_service() with the 'stop'
    action pre-bound.

    @param service_name: string service to be stopped.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='stop',
                           ignore_status=ignore_status)
2419
2420
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        # 'sudo -n' fails immediately instead of prompting when a
        # password would be required.
        run('sudo -n true')
        return False
    except error.CmdError:
        # logging.warn() is a deprecated alias of logging.warning().
        logging.warning('sudo command requires password.')
        return True
2433
2434
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    # Classic lxc containers show up in init's cgroup path.
    result = run('grep -q "/lxc/" /proc/1/cgroup',
                 verbose=False, ignore_status=True)
    if result.exit_status == 0:
        return True

    # Check "container" environment variable for lxd/lxc containers.
    return os.environ.get('container') == 'lxc'
2450
2451
def is_flash_installed():
    """
    The Adobe Flash binary is only distributed with internal builds.
    """
    pepper_dir = '/opt/google/chrome/pepper'
    return (os.path.exists(os.path.join(pepper_dir, 'libpepflashplayer.so'))
            and os.path.exists(os.path.join(pepper_dir, 'pepper-flash.info')))
2458
2459
def verify_flash_installed():
    """
    The Adobe Flash binary is only distributed with internal builds.
    Warn users of public builds of the extra dependency.

    @raise error.TestNAError: if the Flash binary is not installed.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2467
2468
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IP addresses are in the same subnet with given mask bits.

    The two IP addresses are string of IPv4, e.g., '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Build the netmask without the Python 2-only `2L` long literal, so
    # this expression is valid on both Python 2 and Python 3 (plain ints
    # auto-promote on Python 2).
    mask = ((2 << mask_bits - 1) - 1) << (32 - mask_bits)
    ip_1_num = struct.unpack('!I', socket.inet_aton(ip_1))[0]
    ip_2_num = struct.unpack('!I', socket.inet_aton(ip_2))[0]
    return ip_1_num & mask == ip_2_num & mask
2485
2486
def get_ip_address(hostname):
    """Get the IP address of given hostname.

    @param hostname: Hostname of a DUT.

    @return: The IP address of given hostname. None if failed to resolve
             hostname.
    """
    if not hostname:
        return None
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror as e:
        logging.error('Failed to get IP address of %s, error: %s.', hostname, e)
        return None
2500
2501
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    @raise ValueError: if neither servers nor server_ip_map is given.
    """
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = server_ip_map.keys()
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    matched_servers = []
    for server in servers:
        if server in server_ip_map:
            server_ip = server_ip_map[server]
        else:
            # Bug fix: dict.get(key, default) always evaluates the default,
            # so the original performed a DNS lookup for every server even
            # when the pre-built map already had its address, defeating the
            # map's stated performance purpose.
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2530
2531
def get_restricted_subnet(hostname, restricted_subnets=None):
    """Find the restricted subnet that contains the given hostname.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits) for the first matching
             restricted subnet, or None when the hostname cannot be resolved
             or no subnet matches.
    """
    if restricted_subnets is None:
        restricted_subnets = RESTRICTED_SUBNETS
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for subnet_ip, mask_bits in restricted_subnets:
        if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
            return (subnet_ip, mask_bits)
    return None
2550
2551
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    The method tries to locate the wireless ssid in the same subnet of given
    hostname first. If none is found, it returns the default setting in
    CLIENT/wireless_ssid.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                           default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return default_ssid

    # All subnet-specific ssid entries in the global config.
    ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)

    # Multiple subnets may match; prefer the most specific one, i.e., the
    # entry with the largest mask bit count.
    best_ssid = default_ssid
    best_maskbit = -1
    for key, ssid in ssids.items():
        # Config keys matched by WIRELESS_SSID_PATTERN look like
        # wireless_ssid_[subnet_ip]/[maskbit], e.g.,
        # wireless_ssid_192.168.0.1/24; pull out the subnet ip and mask bits.
        subnet_ip, maskbit = re.match(WIRELESS_SSID_PATTERN, key).groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
            is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid = ssid
            best_maskbit = maskbit
    return best_ssid
2589
2590
def parse_launch_control_build(build_name):
    """Split a Launch Control build name into its components.

    @param build_name: Name of a Launch Control build, should be formatted as
                       branch/target/build_id.

    @return: Tuple of (branch, target, build_id).
    @raise ValueError: If build_name does not have exactly three
            '/'-separated components.
    """
    components = build_name.split('/')
    branch, target, build_id = components
    return branch, target, build_id
2602
2603
def parse_android_target(target):
    """Split an Android build target into board and build type.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of (board, build_type).
    @raise ValueError: If the target does not have exactly two
            '-'-separated components.
    """
    components = target.split('-')
    board, build_type = components
    return board, build_type
2614
2615
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    The Launch Control target has the format of build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug. This method extracts the build target
    and type from the target name. The build type is the final dash-separated
    segment; everything before it is the build target.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'userdebug'), or
             (None, None) when the target has no dash separator.
    """
    match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not match:
        return None, None
    return match.group('build_target'), match.group('build_type')
2632
2633
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        _, target, _ = parse_launch_control_build(build)
        build_target, _ = parse_launch_control_target(target)
        # A parseable target with a real build_target means Launch Control.
        return bool(build_target)
    except ValueError:
        # The build name did not split into branch/target/build_id.
        return False
2653
2654
def which(exec_file):
    """Finds an executable file, similar to the 'which' command-line tool.

    If the file name contains a path component, it is checked as-is.
    Otherwise, each directory in the system PATH is prepended in turn and the
    first executable candidate is returned.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    if os.path.dirname(exec_file):
        # Explicit path given: accept it only if it is executable.
        return exec_file if os.access(exec_file, os.X_OK) else None
    search_path = os.environ.get('PATH')
    if not search_path:
        return None
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, exec_file)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
2674
2675
class TimeoutError(error.TestError):
    """Error raised when poll_for_condition() failed to poll within time.

    It may embed a reason (either a string or an exception object) so that
    the caller of poll_for_condition() can handle failure better.
    """

    def __init__(self, message=None, reason=None):
        """Constructor.

        It supports three invocations:
        1) TimeoutError()
        2) TimeoutError(message): with customized message.
        3) TimeoutError(message, reason): with message and reason for timeout.
        """
        self.reason = reason
        if reason:
            # Fold the reason into the message so it shows up in str().
            reason_text = 'Reason: ' + repr(reason)
            message = (message + '. ' + reason_text) if message else reason_text

        if message:
            super(TimeoutError, self).__init__(message)
        else:
            super(TimeoutError, self).__init__()
2703
2704
class Timer(object):
    """A synchronous countdown timer for timeout-bounded polling loops.

    Usage:
      timer = Timer(timeout_sec)
      while timer.sleep(sleep_interval):
        # do something...
    """
    def __init__(self, timeout):
        """Constructor.

        The countdown does not start until the first call to sleep().

        @param timeout: timer timeout in seconds.
        """
        self.timeout = timeout
        # 0 means "not started yet"; set to an absolute time on first sleep().
        self.deadline = 0

    def sleep(self, interval):
        """Checks if it has sufficient time to sleep; sleeps if so.

        It blocks for |interval| seconds if it has time to sleep.
        The very first call starts the timer and returns True immediately,
        without sleeping.

        @param interval: sleep interval in seconds.
        @return True if it has slept or just kicked off the timer. False
                otherwise.
        """
        current = time.time()
        if not self.deadline:
            # First call: start the countdown, don't sleep.
            self.deadline = current + self.timeout
            return True
        if current + interval >= self.deadline:
            # Sleeping again would overrun the deadline.
            return False
        time.sleep(interval)
        return True
2742
2743
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition is evaluated to true.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    deadline = time.time() + timeout
    while True:
        result = condition()
        if result:
            return result
        # Give up once another sleep would push us past the deadline.
        if time.time() + sleep_interval > deadline:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, result)
                raise exception # pylint: disable=raising-bad-type

            if desc:
                message = 'Timed out waiting for condition: ' + desc
            else:
                message = 'Timed out waiting for unnamed condition'
            logging.error(message)
            raise TimeoutError(message=message)

        time.sleep(sleep_interval)
2782
2783
def poll_for_condition_ex(condition, timeout=10, sleep_interval=0.1, desc=None):
    """Polls until a condition is evaluated to true or until timeout.

    Similar to poll_for_condition, except that it handles exceptions
    condition() raises. If timeout is not reached, the exception is dropped and
    we poll for the condition again after a sleep; otherwise, the exception is
    embedded into the raised TimeoutError.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of the condition

    @return The evaluated value that caused the poll loop to terminate.

    @raise TimeoutError. If condition() raised exception, it is embedded in
           raised TimeoutError.
    """
    timer = Timer(timeout)
    # Tracks why the most recent poll did not succeed: the exception
    # condition() raised, or None if it simply returned a falsy value.
    # The loop body always runs at least once because Timer.sleep()
    # returns True on its first call.
    while timer.sleep(sleep_interval):
        reason = None
        try:
            value = condition()
            if value:
                return value
        except BaseException as e:
            reason = e

    if desc is None:
        # Typo fix: previously read 'unamed condition'.
        desc = 'unnamed condition'
    if reason is None:
        # Typo fix: previously read 'condition evaluted as false'.
        reason = 'condition evaluated as false'
    to_raise = TimeoutError(message='Timed out waiting for ' + desc,
                            reason=reason)
    logging.error(str(to_raise))
    raise to_raise
2821
2822
def threaded_return(function):
    """
    Decorator that runs the wrapped function in a background thread while
    capturing its return value.

    @param function: function object to be run in the thread

    @return a threading.Thread object, that has already been started, is
            recording its result, and can be completed and its result
            fetched by calling .finish()
    """
    def _run_and_record(result_queue, *args, **kwargs):
        """
        Calls the decorated function as normal and pushes its return value
        onto the passed-in threadsafe queue.
        """
        result_queue.put(function(*args, **kwargs))

    def _finish(thread):
        """
        Blocks for the thread's result, then joins the thread and returns
        the result.
        """
        result = thread.get()
        thread.join()
        return result

    def wrapper(*args, **kwargs):
        """
        Creates the result queue, starts the thread, and bolts the
        result-fetching helpers onto the thread object.
        """
        result_queue = Queue.Queue()
        thread = threading.Thread(target=_run_and_record,
                                  args=(result_queue,) + args, kwargs=kwargs)
        thread.start()
        thread.result_queue = result_queue
        thread.get = result_queue.get
        thread.finish = lambda: _finish(thread)
        return thread

    # for the decorator
    return wrapper
2866
2867
@threaded_return
def background_sample_until_condition(
        function,
        condition=lambda: True,
        timeout=10,
        sleep_interval=1):
    """
    Records the value of the function until the condition is False or the
    timeout is reached. Runs as a background thread, so it's nonblocking.
    Usage might look something like:

    def function():
        return get_value()
    def condition():
        return self._keep_sampling

    # main thread
    sample_thread = utils.background_sample_until_condition(
        function=function,condition=condition)
    # do other work
    # ...
    self._keep_sampling = False
    # blocking call to get result and join the thread
    result = sample_thread.finish()

    @param function: function object, 0 args, to be continually polled
    @param condition: function object, 0 args, to say when to stop polling
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: number of seconds to wait in between polls

    @return a thread object that has already been started and is running in
            the background, whose run must be stopped with .finish(), which
            also returns a list of the results from the sample function
    """
    log = []

    # Pad the deadline by one sleep_interval so a sample taken right at the
    # timeout boundary is still recorded.
    end_time = datetime.datetime.now() + datetime.timedelta(
            seconds = timeout + sleep_interval)

    while condition() and datetime.datetime.now() < end_time:
        log.append(function())
        time.sleep(sleep_interval)
    return log
2911
2912
class metrics_mock(metrics_mock_class.mock_class_base):
    """Mock class standing in for metrics in case chromite is not installed."""
    pass
2916
2917
# One mount entry parsed from /proc/PID/mountinfo: the mount's root path,
# its mount point, and a list of optional-field tag names (the part of each
# tag before ':', e.g. 'master' or 'shared').
MountInfo = collections.namedtuple('MountInfo', ['root', 'mount_point', 'tags'])
2919
2920
def get_mount_info(process='self', mount_point=None):
    """Retrieves information about currently mounted file systems.

    @param mount_point: (optional) The mount point (a path).  If this is
                        provided, only information about the given mount point
                        is returned.  If this is omitted, info about all mount
                        points is returned.
    @param process: (optional) The process id (or the string 'self') of the
                    process whose mountinfo will be obtained.  If this is
                    omitted, info about the current process is returned.

    @return A generator yielding one MountInfo object for each relevant mount
            found in /proc/PID/mountinfo.
    """
    # Lines follow the format documented in the proc(5) manpage, e.g.:
    # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw
    # Field 3 is the root of the mount, field 4 the mount point, and fields
    # 6 onward hold optional tags (this is where shared mounts are
    # indicated), terminated by a lone '-'.
    with open('/proc/{}/mountinfo'.format(process)) as mountinfo_file:
        for line in mountinfo_file:
            fields = line.split()
            if mount_point is not None and fields[4] != mount_point:
                continue
            tags = []
            for field in fields[6:]:
                if field == '-':
                    break
                tags.append(field.split(':')[0])
            yield MountInfo(root=fields[3],
                            mount_point=fields[4],
                            tags=tags)
2958
2959
# Suffix appended to a lab DUT hostname to derive the hostname of its
# paired chart tablet, per the test-lab naming convention.
CHART_ADDRESS_SUFFIX = '-tablet'
2962
2963
def get_lab_chart_address(hostname):
    """Convert lab DUT hostname to address of camera box chart tablet.

    @param hostname: Hostname of a lab DUT.

    @return: The chart tablet address when running inside a container,
             None otherwise.
    """
    if not is_in_container():
        return None
    return hostname + CHART_ADDRESS_SUFFIX
2967