• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (c) 2017 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""
6Convenience functions for use by tests or whomever.
7
8There's no really good way to do this, as this isn't a class we can do
9inheritance with, just a collection of static methods.
10"""
11
12# pylint: disable=missing-docstring
13
14import StringIO
15import collections
16import datetime
17import errno
18import inspect
19import itertools
20import logging
21import os
22import pickle
23import Queue
24import random
25import re
26import resource
27import select
28import shutil
29import signal
30import socket
31import string
32import struct
33import subprocess
34import textwrap
35import threading
36import time
37import urllib2
38import urlparse
39import uuid
40import warnings
41
42try:
43    import hashlib
44except ImportError:
45    import md5
46    import sha
47
48import common
49
50from autotest_lib.client.common_lib import env
51from autotest_lib.client.common_lib import error
52from autotest_lib.client.common_lib import global_config
53from autotest_lib.client.common_lib import logging_manager
54from autotest_lib.client.common_lib import metrics_mock_class
55from autotest_lib.client.cros import constants
56
57# pylint: disable=wildcard-import
58from autotest_lib.client.common_lib.lsbrelease_utils import *
59
60
def deprecated(func):
    """Decorator marking *func* as deprecated.

    Each call to the wrapped function emits a DeprecationWarning and then
    delegates to the original implementation unchanged.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    # Preserve the wrapped function's identity for introspection.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
72
73
74class _NullStream(object):
75    def write(self, data):
76        pass
77
78
79    def flush(self):
80        pass
81
82
# Sentinel: route a command's output stream into the logging system.
TEE_TO_LOGS = object()
# Shared do-nothing stream handed out when no tee destination is given.
_the_null_stream = _NullStream()

# Sentinel: discard a command's output stream entirely.
DEVNULL = object()

# Default logging levels used when tee'ing subprocess output to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')
99
100def custom_warning_handler(message, category, filename, lineno, file=None,
101                           line=None):
102    """Custom handler to log at the WARNING error level. Ignores |file|."""
103    logging.warning(warnings.formatwarning(message, category, filename, lineno,
104                                           line))
105
106warnings.showwarning = custom_warning_handler
107
def get_stream_tee_file(stream, level, prefix=''):
    """Map a tee specification onto the actual stream object to write to.

    DEVNULL -> None (caller discards the data), None -> the shared null
    sink, TEE_TO_LOGS -> a LoggingFile at |level| with |prefix|, and any
    other value is assumed to be a file-like object and returned as-is.
    """
    if stream is DEVNULL:
        return None
    if stream is None:
        return _the_null_stream
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    return stream
116
117
118def _join_with_nickname(base_string, nickname):
119    if nickname:
120        return '%s BgJob "%s" ' % (base_string, nickname)
121    return base_string
122
123
# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
# master-ssh connection process, while fixing underlying
# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A background subprocess whose stdout/stderr are drained on demand.

    Wraps subprocess.Popen: the constructor starts the process without
    blocking; callers then pump output via process_output() and finish
    with cleanup() (normally both done through join_bg_jobs()).
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the master-ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        # Unjoinable jobs are never drained, so their output must be
        # discarded; anything else would fill the pipes and deadlock.
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            # Copy so the caller's dict (or os.environ) is never mutated.
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # String commands go through the shell; list commands exec directly.
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        with open('/dev/null', 'w') as devnull:
            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=True)

        self._cleanup_called = False
        # In-memory buffers accumulating output until cleanup() copies them
        # into self.result; None means the stream is being discarded.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else StringIO.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else StringIO.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        # Select which pipe/buffer/tee triple this call operates on.
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                # A zero-length read means the pipe hit EOF.
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even on failure so a retry does not double-close.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        """Popen preexec_fn: restore default SIGPIPE handling in the child."""
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
320
321
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    # inet_aton packs the address; '!L' decodes it as an unsigned long
    # in network byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
325
326
def long_to_ip(number):
    """Convert a 32-bit integer back to its dotted-quad IPv4 string."""
    # Inverse of ip_to_long: pack in network byte order, then format.
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
330
331
def create_subnet_mask(bits):
    """Return the 32-bit integer netmask for a /|bits| network prefix."""
    # Top |bits| bits set, remaining host bits clear.
    return (1 << 32) - (1 << (32 - bits))
334
335
def format_ip_with_mask(ip, mask_bits):
    """Return |ip| masked to its subnet, in 'a.b.c.d/bits' CIDR notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
339
340
def normalize_hostname(alias):
    """Resolve |alias| to its canonical hostname via forward + reverse DNS."""
    address = socket.gethostbyname(alias)
    return socket.gethostbyaddr(address)[0]
344
345
def get_ip_local_port_range():
    """Return (lower, upper) of the kernel's ephemeral local port range."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    return (int(match.group(1)), int(match.group(2)))
350
351
def set_ip_local_port_range(lower, upper):
    """Write a new ephemeral port range into the kernel's procfs knob."""
    value = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', value)
355
356
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline.

    @param filename: path of the file to read.
    @return: the first line as a string; '' for an empty file.
    """
    # 'with' guarantees the descriptor is closed even if readline() raises.
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
363
364
def read_file(filename):
    """Return the entire contents of |filename| as a single string.

    @param filename: path of the file to read.
    """
    # 'with' guarantees the descriptor is closed even if read() raises.
    with open(filename) as f:
        return f.read()
371
372
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String each parameter line must start with.
    @param sep: Separator between parameters regular expression.
    @return: The selected field as a string, or None when no line in
             |data| starts with |linestart|.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find is not None:
        return re.split("%s" % sep, find.group(1))[param]
    # Parenthesized so this is valid as both a py2 print statement and a
    # py3 print() call (the bare py2 statement breaks py3 byte-compilation).
    print("There is no line which starts with %s in data." % linestart)
    return None
395
396
def write_one_line(filename, line):
    """Overwrite |filename| with |line|, ensuring one trailing newline."""
    normalized = str(line).rstrip('\n') + '\n'
    open_write_close(filename, normalized)
399
400
def open_write_close(filename, data):
    """Write |data| to |filename|, truncating any existing contents.

    @param filename: path of the file to (over)write.
    @param data: string content to write.
    """
    # 'with' closes the file even if the write raises.
    with open(filename, 'w') as f:
        f.write(data)
407
408
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.

    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Anchor the relative path at the supplied base directory.
        path = os.path.join(base_dir, path)
    if os.path.isfile(path):
        return path
    raise error.TestFail('ERROR: Unable to find %s' % path)
430
431
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)

    # Widest cell seen so far in each column (header included).
    widths = [len(column) for column in header] if header else []
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            width = len(column)
            if i < len(widths):
                if width > widths[i]:
                    widths[i] = width
            else:
                widths.append(width)

    # One left-aligned conversion per column, then a newline.
    format_string = "".join("%-" + str(w) + "s " for w in widths) + "\n"

    rendered = []
    if header:
        rendered.append(format_string % header)
    for row in matrix:
        rendered.append(format_string % tuple(row))
    return "".join(rendered)
473
474
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    @return: Dict of key -> value; values that look like integers or
             floats are converted accordingly.
    @raises ValueError: if a non-comment, non-blank line does not match
                        the expected key=value format.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' closes the file even when a malformed line raises ValueError
    # (the old explicit close() was skipped in that case, leaking the fd).
    with open(path) as f:
        for line in f:
            # Strip comments and trailing whitespace; skip blank lines.
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
514
515
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @raises ValueError: for an unknown type_tag or an invalid key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Validate type_tag before opening the file: the old order opened
    # (and leaked) the file, creating it even when the tag was invalid.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)

    # 'with' guarantees the file is flushed/closed even on an invalid key.
    with open(path, 'a') as keyval:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
549
550
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
556
557
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition.

    @param url: URL to open.
    @param data: optional POST body forwarded to urllib2.urlopen.
    @param timeout: socket timeout in seconds applied just for this call.
    @return: the file-like response object from urllib2.urlopen.
    """

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    # This urllib2.urlopen has no per-call timeout parameter, so we
    # temporarily change the process-wide socket default and restore it
    # afterwards no matter what.  NOTE(review): not thread-safe — concurrent
    # callers see the temporary default.
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)
568
569
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url.

    @param url: source URL to download from.
    @param filename: local destination path, opened in binary mode.
    @param data: optional POST data forwarded to urlopen().
    @param timeout: socket timeout in seconds for the transfer.
    """
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' closes the destination even if the copy fails part-way;
        # the source (urllib2 response) still needs an explicit close().
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
583
584
def hash(hashtype, input=None):
    """Return a new hash object of type 'md5' or 'sha1'.

    Encapsulates hash-object creation so it behaves identically on
    python 2.4 (legacy md5/sha modules) and python 2.6+ (hashlib),
    without deprecation warnings.  Only md5 and sha1 are allowed so both
    implementations accept exactly the same set of inputs.

    @param hashtype: either 'md5' or 'sha1'.
    @param input: Optional input string that will be used to update the hash.
    @raises ValueError: if hashtype is not supported.
    """
    # pylint: disable=redefined-builtin
    if hashtype not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % hashtype)

    try:
        computed_hash = hashlib.new(hashtype)
    except NameError:
        # hashlib is missing (python 2.4); fall back to the legacy modules.
        if hashtype == 'md5':
            computed_hash = md5.new()
        else:
            computed_hash = sha.new()

    if input:
        computed_hash.update(input)
    return computed_hash
614
615
def get_file(src, dest, permissions=None):
    """Copy or download |src| to |dest|.

    @param src: local path or http/ftp URL (see is_url).
    @param dest: local destination path.
    @param permissions: optional mode bits applied to |dest| afterwards.
    @return: |dest|, or None when src and dest are already the same path.
    """
    if src != dest:
        if is_url(src):
            urlretrieve(src, dest)
        else:
            shutil.copyfile(src, dest)

        if permissions:
            os.chmod(dest, permissions)
        return dest
629
630
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Fetch the URL into destdir, keeping the URL path's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    dest = os.path.join(destdir, filename)
    return get_file(src, dest)
649
650
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: source tree whose installed version is tracked via a
                   pickled '.version' file inside it.
    @param preserve_srcdir: if True, never delete srcdir before reinstalling.
    @param new_version: version identifier to require (any picklable value).
    @param install: callable invoked to (re)install; receives *args/**dargs.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # 'with' avoids leaking the fd; binary mode is what pickle expects.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # Record the installed version only if install() produced srcdir.
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
675
676
def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
    """Pick the stderr log level: match stdout when stderr is expected."""
    return stdout_level if stderr_is_expected else DEFAULT_STDERR_LEVEL
681
682
def run(command, timeout=None, ignore_status=False, stdout_tee=None,
        stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
        stdout_level=None, stderr_level=None, args=(), nickname=None,
        ignore_timeout=False, env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout unless this is DEVNULL).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same level
            as stdout
    @param stdout_level: logging level used if stdout_tee is TEE_TO_LOGS;
            if None, a default is used.
    @param stderr_level: like stdout_level but for stderr.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True
    @rtype: CmdResult

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    # A string for |args| is almost certainly a caller bug (it would be
    # iterated character by character), so reject it outright.
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # In some cases, command will actually be a list
    # (For example, see get_user_hash in client/cros/cryptohome.py.)
    # So, to cover that case, detect if it's a string or not and convert it
    # into one if necessary.
    if not isinstance(command, basestring):
        command = ' '.join([sh_quote_word(arg) for arg in command])

    # Append the extra |args|, shell-quoted, to the command line.
    command = ' '.join([command] + [sh_quote_word(arg) for arg in args])

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    if stdout_level is None:
        stdout_level = DEFAULT_STDOUT_LEVEL
    if stderr_level is None:
        stderr_level = get_stderr_level(stderr_is_expected, stdout_level)

    try:
        # Single-element tuple: reuse join_bg_jobs for the timeout/drain/
        # cleanup logic and take the one joined job back out.
        bg_job = join_bg_jobs(
            (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                   stdout_level=stdout_level, stderr_level=stderr_level,
                   nickname=nickname, env=env, extra_paths=extra_paths),),
            timeout)[0]
    except error.CmdTimeoutError:
        if not ignore_timeout:
            raise
        return None

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
764
765
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: list of command strings to launch concurrently.
    @param timeout: overall timeout in seconds applied to the whole batch.
    @param ignore_status: if False, any non-zero exit raises error.CmdError.
    @param stdout_tee: tee destination shared by every job (see BgJob).
    @param stderr_tee: likewise for stderr.
    @param nicknames: optional list of logging nicknames zipped with
            commands; missing entries default to None.

    @return: a list of CmdResult objects
    """
    if nicknames is None:
        nicknames = []
    bg_jobs = []
    for (command, nickname) in itertools.izip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the failing job's own command — the old code raised
            # with the stale loop variable |command| from the launch loop,
            # always naming the last-launched command instead.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
795
796
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
802
803
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits up to |timeout| seconds for all jobs' processes to finish,
    drains their remaining output, then runs cleanup() on each.

    @param bg_jobs: sequence of joinable BgJob objects.
    @param timeout: optional overall timeout in seconds for all jobs.

    @return: the same list of bg_jobs objects that was passed in.
    @raises error.InvalidBgJobCall: if any job was created unjoinable.
    @raises error.CmdTimeoutError: if the jobs did not finish in time.
    """
    if any(bg_job.unjoinable for bg_job in bg_jobs):
        raise error.InvalidBgJobCall(
                'join_bg_jobs cannot be called for unjoinable bg_job')

    timeout_error = False
    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True,final_read=True)
            bg_job.process_output(stdout=False,final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it will
        # do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)


    return bg_jobs
839
840
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    Feeds queued string_stdin to each job in PIPE_BUF-safe chunks, drains
    stdout/stderr as it becomes readable, and polls each process for exit.
    Jobs still running when the timeout expires are killed via
    nuke_subprocess() before returning.

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    # Maps a pipe object back to its job: stdout/stderr pipes map to
    # (bg_job, is_stdout) tuples, stdin pipes map to the bg_job itself.
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            # exit_status doubles as the "already reaped" marker.
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
948
949
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        contents = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # Some other read failure; propagate it.
            raise
        # The stat file disappeared under us: the process is gone.
        return False

    # Field 3 of /proc/<pid>/stat is the process state; 'Z' means zombie.
    return contents.split()[2] != 'Z'
967
968
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll up to five times, one second apart, for the process to go away.
    attempts_left = 5
    while attempts_left > 0:
        if not pid_is_alive(pid):
            return True
        time.sleep(1)
        attempts_left -= 1

    # The process is still alive
    return False
987
988
def nuke_subprocess(subproc):
    """Forcibly terminate a subprocess.Popen, escalating SIGTERM -> SIGKILL.

    @param subproc: the subprocess.Popen to kill.
    @return the process's exit status, or None if it could not be killed.
    """
    # If it already terminated there is nothing to do.
    status = subproc.poll()
    if status is not None:
        return status

    # Escalate through increasingly forceful signals until one sticks.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
1001
1002
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill the process |pid| via an escalating series of signals.

    @param pid: process id to kill.
    @param signal_queue: signals to try, in order, until one terminates
            the process.

    @raise error.AutoservPidAlreadyDeadError: if /proc has no entry for pid.
    @raise error.AutoservRunError: if no signal terminated the process.
    """
    proc_path = '/proc/%d/' % pid
    if not os.path.exists(proc_path):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Interpolate pid into the message ourselves; the exception
        # constructor does not apply %-formatting to extra positional
        # arguments, so the old form raised with a literal '%s' in it.
        raise error.AutoservPidAlreadyDeadError(
                'Could not kill nonexistent pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
1019
1020
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param command: command line string to run.
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1035
1036
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run commands in parallel and return their exit statuses, in the
    same order as the respective list of commands."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
1043
1044
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                     args=args)
    else:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     args=args)
    output = result.stdout
    # Drop a single trailing newline, mirroring shell backtick semantics.
    if output.endswith('\n'):
        output = output[:-1]
    return output
1075
1076
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return each command's stdout.

    @param commands: list of command strings to run.
    @param timeout: shared time limit in seconds for the whole batch.
    @param ignore_status: do not raise on non-zero exit statuses.
    @param retain_output: if True, also tee stdout/stderr to the logs.

    @return list of stdout strings, one per command in order, each with a
            single trailing newline stripped (matching system_output()).
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS,
                               stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    out = [result.stdout for result in results]
    # Strip one trailing newline from each entry. The previous loop compared
    # a one-element LIST slice (out[-1:]) against the string '\n', which can
    # never be equal, so nothing was ever stripped — and had it matched, it
    # would have truncated the list instead of the strings.
    return [o[:-1] if o.endswith('\n') else o for o in out]
1091
1092
def strip_unicode(input_obj):
    """Recursively convert unicode strings (and dict keys) to native str.

    Lists and dicts are rebuilt with converted contents; any other type is
    returned unchanged.

    @param input_obj: arbitrarily nested lists/dicts/scalars.
    @return the same structure with unicode text coerced via str().
    """
    # 'unicode' only exists on Python 2; on Python 3 all str are already
    # unicode, so conversion is a no-op there (the old code raised
    # NameError on any scalar under Python 3).
    try:
        text_type = unicode
    except NameError:
        text_type = str

    if type(input_obj) == list:
        return [strip_unicode(element) for element in input_obj]
    elif type(input_obj) == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input_obj.items())
    elif type(input_obj) == text_type:
        return str(input_obj)
    else:
        return input_obj
1105
1106
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: callable to invoke and measure.
    @param args, dargs: arguments forwarded to the callable.

    @return (cpu_percent, function_return_value)
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage. The first two rusage fields are ru_utime
    # and ru_stime; materialize zip() before slicing so this also works on
    # Python 3, where zip() returns a non-subscriptable iterator.
    s_user, s_system = [a - b for a, b in list(zip(self_post, self_pre))[:2]]
    c_user, c_system = [a - b for a, b in list(zip(child_post, child_pre))[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
1127
1128
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    If run_function is the default utils.run, os.uname() is consulted
    directly. Otherwise run_function is used to execute the query (e.g. on
    a remote machine); it should return a CmdResult object and throw a
    CmdError exception, like utils.run().
    """
    # Short circuit from the common case: ask the local kernel directly.
    if run_function == run:
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise, use the run_function in case it hits a remote machine.
    machine = run_function('/bin/uname -m').stdout.rstrip()
    # Normalize i386/i486/i586/i686 to plain i386.
    return 'i386' if re.match(r'i\d86$', machine) else machine
1148
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).
    """
    # Architecture name -> pattern expected in `file` output for /bin/sh.
    # The patterns are mutually exclusive, so ordering is irrelevant.
    arch_patterns = (
        ('arm', 'ELF 32-bit.*, ARM,'),
        ('arm64', 'ELF 64-bit.*, ARM aarch64,'),
        ('i386', 'ELF 32-bit.*, Intel 80386,'),
        ('x86_64', 'ELF 64-bit.*, x86-64,'),
    )

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch, pattern in arch_patterns:
        if re.match(pattern, filestr):
            return arch

    # Nothing matched; fall back to the kernel architecture.
    return get_arch()
1167
1168
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    output = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    sibling_counts = [int(count) for count in
                      re.findall(r'^siblings\s*:\s*(\d+)\s*$', output, re.M)]
    if not sibling_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(sibling_counts) != max(sibling_counts):
        raise error.TestError('Number of siblings differ %r' %
                              sibling_counts)
    return sibling_counts[0]
1186
1187
def set_high_performance_mode(host=None):
    """
    Sets the kernel governor mode to the highest setting.
    Returns previous governor state.
    """
    previous_governors = get_scaling_governor_states(host)
    set_scaling_governors('performance', host)
    return previous_governors
1196
1197
def set_scaling_governors(value, host=None):
    """
    Sets all scaling governor to string value.
    Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.
    """
    paths = _get_cpufreq_paths('scaling_governor', host)
    if not paths:
        logging.info("Could not set governor states, as no files of the form "
                     "'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' "
                     "were found.")
    run_func = host.run if host else system
    for path in paths:
        logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value, path), ignore_status=True)
1214
1215
1216def _get_cpufreq_paths(filename, host=None):
1217    """
1218    Returns a list of paths to the governors.
1219    """
1220    run_func = host.run if host else run
1221    glob = '/sys/devices/system/cpu/cpu*/cpufreq/' + filename
1222    # Simple glob expansion; note that CPUs may come and go, causing these
1223    # paths to change at any time.
1224    cmd = 'echo ' + glob
1225    try:
1226        paths = run_func(cmd, verbose=False).stdout.split()
1227    except error.CmdError:
1228        return []
1229    # If the glob result equals itself, then we likely didn't match any real
1230    # paths (assuming 'cpu*' is not a real path).
1231    if paths == [glob]:
1232        return []
1233    return paths
1234
1235
def get_scaling_governor_states(host=None):
    """
    Returns a list of (performance governor path, current state) tuples.
    """
    run_func = host.run if host else run
    states = []
    for path in _get_cpufreq_paths('scaling_governor', host):
        current_value = run_func('head -n 1 %s' % path, verbose=False).stdout
        states.append((path, current_value))
    return states
1247
1248
def restore_scaling_governor_states(path_value_list, host=None):
    """
    Restores governor states. Inverse operation to get_scaling_governor_states.
    """
    run_func = host.run if host else system
    for path, value in path_value_list:
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value.rstrip('\n'), path),
                 ignore_status=True)
1258
1259
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Nothing to merge; the path exists only in dest.
        return
    if not os.path.exists(dest):
        # The path exists only in src: copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
    elif os.path.isfile(src) and os.path.isfile(dest):
        # A file in both trees: append src's contents to dest's.
        with open(dest, "a") as destfile:
            with open(src) as srcfile:
                destfile.write(srcfile.read())
    elif os.path.isdir(src) and os.path.isdir(dest):
        # A directory in both trees: merge entry by entry.
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    # Otherwise both paths exist but are incompatible (file vs dir): skip.
1295
1296
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        # Only comparable against another instance of the exact same type;
        # anything else defers to the other operand via NotImplemented.
        if type(self) != type(other):
            return NotImplemented
        return ((self.command, self.exit_status, self.stdout,
                 self.stderr, self.duration) ==
                (other.command, other.exit_status, other.stdout,
                 other.stderr, other.duration))


    def __repr__(self):
        # Wrap long command lines for readability in logs.
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(str(self.command)), self.exit_status,
                self.duration, stdout, stderr))
1349
1350
class run_randomly:
    """Collects queued test invocations and runs them in random order.

    With run_sequentially=True (useful when debugging control files) the
    tests run in the order they were added instead.
    """

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one test invocation (positional and keyword args)."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued tests one at a time and invoke fn with each."""
        while self.test_list:
            # Always consume the RNG so sequential/random runs stay
            # byte-compatible with the original behavior.
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1370
1371
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]
    site_file = modulefile or (short_module + ".py")

    # Only attempt the import when the site file is present next to the
    # caller's source file.
    if not os.path.exists(os.path.join(os.path.dirname(path), site_file)):
        return dummy
    return __import__(module, {}, {}, [short_module])
1393
1394
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # Sentinel distinguishing "attribute missing" from a None-valued symbol.
    missing = object()
    symbol = getattr(site_module, name, missing)
    return dummy if symbol is missing else symbol
1421
1422
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # The site class is not derived from baseclass: mix baseclass in so
    # callers still get the base interface.
    return type(classname, (site_class, baseclass), {})
1453
1454
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just module attributes; delegate to the symbol lookup.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1472
1473
1474def _get_pid_path(program_name):
1475    my_path = os.path.dirname(__file__)
1476    return os.path.abspath(os.path.join(my_path, "..", "..",
1477                                        "%s.pid" % program_name))
1478
1479
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1492
1493
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pid_path = _get_pid_path(program_name)
    try:
        os.remove(pid_path)
    except OSError:
        # Only swallow the error when the file is already gone.
        if os.path.exists(pid_path):
            raise
1506
1507
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(pidfile_path, 'r')
    try:
        try:
            return int(pidfile.readline())
        except IOError:
            # The file may have vanished between the existence check and
            # the read; treat that as "no pid file".
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()
1532
1533
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_file = "/proc/%d/stat" % pid
    if not os.path.exists(stat_file):
        return "Dead Pid"
    # Field 1 of the stat file is "(comm)"; strip the parentheses.
    return get_field(read_file(stat_file), 1)[1:-1]
1544
1545
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1557
1558
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # No pid file (or pid 0): nothing to signal.
        return
    signal_pid(pid, sig)
1569
1570
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    # Normalize (collapse double slashes etc.) and split into components,
    # dropping the empty component before the leading separator.
    path_parts = os.path.normpath(path).split(os.path.sep)[1:]
    ref_parts = os.path.normpath(reference).split(os.path.sep)[1:]

    # Count the longest run of leading components the two paths share.
    common = 0
    limit = min(len(path_parts), len(ref_parts))
    while common < limit and path_parts[common] == ref_parts[common]:
        common += 1

    # Go up one level for every reference component outside the shared
    # prefix, then descend into path's unshared components.
    up = ['..'] * (len(ref_parts) - common)
    return os.path.join(*(up + path_parts[common:]))
1605
1606
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Escape backslash first so later substitutions are not double-escaped.
    escaped = command.replace("\\", "\\\\")
    for special in ('$', '"', '`'):
        escaped = escaped.replace(special, '\\' + special)
    return escaped
1627
1628
def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string.  So, single quotes can safely quote any string of characters except
    a string with a single quote character.  A single quote character must be
    quoted with the sequence '\'' which translates to:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers, and safe for nesting, e.g.:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param whitelist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    # Strings made entirely of known-safe characters need no quoting.
    needs_quoting = any(char not in whitelist for char in text)
    if not needs_quoting:
        return text
    return "'" + text.replace("'", r"'\''") + "'"
1657
1658
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Forward the standard cross-compilation variables when present.
    for flag, env_var in (('--host', 'CHOST'),
                          ('--build', 'CBUILD'),
                          ('--target', 'CTARGET')):
        if env_var in os.environ:
            args.append('%s=%s' % (flag, os.environ[env_var]))
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1677
1678
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    return system('%s %s %s' % (make, makeopts, extra),
                  timeout=timeout, ignore_status=ignore_status)
1687
1688
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(a, b):
        # Replacement for the Python 2-only builtin cmp(); also runs on
        # Python 3.  True/False subtract to 1/0/-1.
        return (a > b) - (a < b)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-pad the shorter component so '2' < '10' compares numerically
        # while still handling alphanumeric suffixes like '1.2a'.
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the version with more components wins.
    return _cmp(len(ax), len(ay))
1722
1723
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    pattern = re.compile(r'(\w+)[:=](.*)$')
    parsed = {}
    for item in args:
        match = pattern.match(item)
        if match is None:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", item, pattern.pattern)
        else:
            parsed[match.group(1).lower()] = match.group(2)
    return parsed
1744
1745
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _bind_and_report(port, sock_type, sock_proto):
        # Returns the bound port number on success, None on failure.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
        except socket.error:
            return None
        else:
            return sock.getsockname()[1]
        finally:
            sock.close()

    # On the 2.6 kernel, binding a UDP socket to port 0 returns the same
    # port over and over, so ask TCP for a free port first.
    candidate = None
    while candidate is None:
        port = _bind_and_report(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if port and _bind_and_report(port, socket.SOCK_DGRAM,
                                     socket.IPPROTO_UDP):
            candidate = port
    return candidate
1776
1777
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if not auto:
        timestamp = time.strftime("%H:%M:%S", time.localtime())
        return raw_input("%s INFO | %s (y/n) " % (timestamp, question))
    logging.info("%s (y/n) y", question)
    return "y"
1790
1791
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.
    """
    msr_path = '/dev/cpu/%s/msr' % cpu
    # The msr device is seekable by register address; each MSR is 64 bits.
    with open(msr_path, 'r', 0) as msr_file:
        msr_file.seek(address)
        (value,) = struct.unpack('=Q', msr_file.read(8))
    return value
1799
1800
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Returns the value of func().  If |expected_value|, |min_threshold|, and
    |max_threshold| are not set, returns immediately.

    If |expected_value| is set, polls the return value until |expected_value| is
    reached, and returns that value.

    If either |max_threshold| or |min_threshold| is set, this function will
    will repeatedly call func() until the return value reaches or exceeds one of
    these thresholds.

    Polling will stop after |timeout_sec| regardless of these thresholds.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    no_criteria = (expected_value is None and
                   min_threshold is None and
                   max_threshold is None)
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        # Stop immediately when there is nothing to wait for, or when any
        # of the supplied stop conditions is satisfied.
        if no_criteria:
            return value
        if expected_value is not None and value == expected_value:
            return value
        if min_threshold is not None and value <= min_threshold:
            return value
        if max_threshold is not None and value >= max_threshold:
            return value
        if time.time() >= deadline:
            return value
        time.sleep(0.1)
1846
1847
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Returns the value of func().

    The function polls the return value until it is different from |old_value|,
    and returns that value.

    Polling will stop after |timeout_sec|.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    value = func()
    # Poll until the value changes or the deadline passes.
    while value == old_value and time.time() < deadline:
        time.sleep(0.1)
        value = func()
    return value
1878
1879
# Convenience alias for the process-wide global configuration object.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names/addresses that always refer to the machine this code runs on.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.  May be None when
# CROS.results_storage_server is not configured.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'

# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24))
# Populated at import time by _setup_restricted_subnets() below.
RESTRICTED_SUBNETS = []
1902
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS.restricted_subnets config."""
    configured = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    # TODO(dshi): Remove the code to split subnet with `:` after R51 is
    # off stable channel, and update shadow config to use `/` as
    # delimiter for consistency.
    for entry in configured:
        delimiter = '/' if '/' in entry else ':'
        ip, mask_bits = entry.split(delimiter)
        RESTRICTED_SUBNETS.append((ip, int(mask_bits)))

_setup_restricted_subnets()
1915
# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string so the `\d` escape is not an invalid string escape; the value
# is byte-identical to the previous non-raw literal.
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1921
1922
def get_moblab_serial_number():
    """Gets a unique identifier for the moblab.

    Serial number is the preferred identifier, use it if
    present, however fallback is the ethernet mac address.
    """
    for vpd_key in ('serial_number', 'ethernet_mac'):
        try:
            cmd_result = run('sudo vpd -g %s' % vpd_key)
        except error.CmdError as e:
            logging.error(str(e))
            logging.info(vpd_key)
        else:
            if cmd_result and cmd_result.stdout:
                return cmd_result.stdout
    return 'NoSerialNumber'
1938
1939
def ping(host, deadline=None, tries=None, timeout=60, user=None):
    """Attempt to ping |host|.

    Shell out to 'ping' if host is an IPv4 address or 'ping6' if host is an
    IPv6 address to try to reach |host| for |timeout| seconds.
    Returns exit code of ping.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.

    Specifying |deadline| or |tries| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @param user: if set, wrap the ping command in "su <user> -c '...'" so it
                 runs as that user.
    @return exit code of ping command.
    """
    args = [host]
    # A host with two ':'-separated groups is treated as a literal IPv6
    # address and pinged with ping6.
    cmd = 'ping6' if re.search(r':.*:', host) else 'ping'

    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)

    if user != None:
        # Re-wrap the whole command so 'su' runs it on behalf of |user|.
        args = [user, '-c', ' '.join([cmd] + args)]
        cmd = 'su'

    result = run(cmd, args=args, verbose=True,
                 ignore_status=True, timeout=timeout,
                 stderr_tee=TEE_TO_LOGS)

    rc = result.exit_status
    lines = result.stdout.splitlines()

    # rc=0: host reachable
    # rc=1: host unreachable
    # other: an error (do not abbreviate)
    if rc in (0, 1):
        # Report the two stats lines, as a single line.
        # [-2]: packets transmitted, 1 received, 0% packet loss, time 0ms
        # [-1]: rtt min/avg/max/mdev = 0.497/0.497/0.497/0.000 ms
        stats = lines[-2:]
        while '' in stats:
            stats.remove('')

        if stats or len(lines) < 2:
            logging.debug('[rc=%s] %s', rc, '; '.join(stats))
        else:
            logging.debug('[rc=%s] Ping output:\n%s',
                          rc, result.stdout)
    else:
        output = result.stdout.rstrip()
        if output:
            logging.debug('Unusual ping result (rc=%s):\n%s', rc, output)
        else:
            logging.debug('Unusual ping result (rc=%s).', rc)
    return rc
2004
2005
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    shortname = hostname.split('.')[0]
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    fqdn = '%s.%s' % (shortname, dns_zone)
    logging.debug('Checking if host %s is in lab zone.', fqdn)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
2021
2022
def host_is_in_power_lab(hostname):
    """Check if the hostname is in power lab.

    Example: chromeos1-power-host2.cros

    @param hostname: The hostname to check.
    @returns True if hostname match power lab hostname, otherwise False.
    """
    power_host_re = re.compile(
            r'chromeos\d+-power-host\d+(\.cros(\.corp(\.google\.com)?)?)?$')
    return bool(power_host_re.match(hostname))
2033
2034
def get_power_lab_wlan_hostname(hostname):
    """Return wlan hostname for host in power lab.

    Example: chromeos1-power-host2.cros -> chromeos1-power-host2-wlan.cros

    @param hostname: The hostname in power lab.
    @returns wlan hostname.
    """
    parts = hostname.split('.')
    parts[0] = parts[0] + '-wlan'
    return '.'.join(parts)
2046
2047
def in_moblab_ssp():
    """Detects if this execution is inside an SSP container on moblab.

    @returns True only when both the SSP.is_moblab config flag is set and
             the process is running inside a container.
    """
    moblab_configured = CONFIG.get_config_value('SSP', 'is_moblab', type=bool,
                                                default=False)
    return is_in_container() and moblab_configured
2053
2054
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """

    # Aborted jobs have no views.
    if not job_views:
        return None

    for view in job_views:
        attributes = view.get('attributes')
        # Test membership on the dict directly instead of building .keys().
        if attributes and constants.CHROME_VERSION in attributes:
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
2086
2087
def get_moblab_id():
    """Gets the moblab random id.

    The random id file is cached on disk. If it does not exist, a new file is
    created the first time.

    @returns the moblab random id.
    """
    id_path = '/home/moblab/.moblab_id'
    try:
        if not os.path.exists(id_path):
            # First run: mint a new id and persist it.
            random_id = uuid.uuid1().hex
            with open(id_path, 'w') as id_file:
                id_file.write('%s' % random_id)
            return random_id
        with open(id_path, 'r') as id_file:
            return id_file.read()
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(id_path, 'r') as id_file:
            return id_file.read()
2113
2114
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    For the normal use case this is the results_storage_server in the
    global_config.

    However partners using Moblab will be offloading their results to a
    subdirectory of their image storage buckets. The subdirectory is
    determined by the MAC Address of the Moblab device.

    @returns gsuri to offload test results to.
    """
    # For non-moblab, use results_storage_server or default.
    if not is_moblab():  # pylint: disable=undefined-variable
        return DEFAULT_OFFLOAD_GSURI

    # For moblab, use results_storage_server or image_storage_server as bucket
    # name and mac-address/moblab_id as path.
    base_uri = DEFAULT_OFFLOAD_GSURI
    if not base_uri:
        image_server = CONFIG.get_config_value('CROS', 'image_storage_server')
        base_uri = "%sresults/" % image_server

    return '%s%s/%s/' % (base_uri, get_moblab_serial_number(), get_moblab_id())
2139
2140
# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    CANNED_ACLS = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    _GSUTIL_BIN = 'gsutil'
    acl_cmd = None
    if acl in CANNED_ACLS:
        cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
        acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)

    if result_dir:
        # Trace the transfer into a file inside the result directory.
        with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
            ftrace.write('Preamble\n')
            run(cmd, timeout=transfer_timeout, verbose=True,
                           stdout_tee=ftrace, stderr_tee=ftrace)
            if acl_cmd:
                ftrace.write('\nACL setting\n')
                # Apply the passed in ACL xml file to the uploaded object.
                run(acl_cmd, timeout=acl_timeout, verbose=True,
                               stdout_tee=ftrace, stderr_tee=ftrace)
            ftrace.write('Postamble\n')
            return True

    run(cmd, timeout=transfer_timeout, verbose=True)
    if acl_cmd:
        run(acl_cmd, timeout=acl_timeout, verbose=True)
    return True
2194
2195
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output(' '.join(['gsutil', 'ls', uri_pattern]))
    return [line.rstrip() for line in listing.splitlines() if line]
2209
2210
def nuke_pids(pid_list, signal_queue=None):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Queue of signals to send the PID's to terminate them.

    @return: A mapping of the signal name to the number of processes it
        was sent to.
    """
    if signal_queue is None:
        signal_queue = [signal.SIGTERM, signal.SIGKILL]
    sig_count = {}
    # Though this is slightly hacky it beats hardcoding names anyday.
    # (.items() works identically on Python 2 and 3 here.)
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        if sig == signal.SIGKILL:
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # Fixed: the original passed two arguments to append() and a bare
            # pid to the %-format, raising TypeError instead of reporting.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2253
2254
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    if host in _LOCAL_HOST_LIST:
        return socket.gethostname()
    return host
2264
2265
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    Socket timeouts raised while opening |url| are converted into
    error.TimeoutException so callers (such as the rpc retry decorator)
    can handle them uniformly; any other URLError propagates unchanged.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        try:
            return urllib2.urlopen(url, data=data)
        except urllib2.URLError as e:
            if type(e.reason) is socket.timeout:
                raise error.TimeoutException(str(e))
            raise
    finally:
        socket.setdefaulttimeout(saved_timeout)
2296
2297
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string for the regex so the \d and \. escapes are explicit.
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    if match:
        return match.group(0), match.group(1)
    return version_string, ''
2315
2316
def parse_gs_uri_version(uri):
    """Pull out major.minor.sub from image URI

    @param uri: A GS URI for a bucket containing ChromeOS build artifacts
    @return: The build version as a string in the form 'major.minor.sub'

    """
    # Drop everything through the 'R<milestone>-'/'LATEST-' marker, then
    # trim any trailing slash.
    version = re.sub('.*(R[0-9]+|LATEST)-', '', uri)
    return version.strip('/')
2325
2326
def compare_gs_uri_build_versions(x, y):
    """Compares two bucket URIs by their version string

    @param x: A GS URI for a bucket containing ChromeOS build artifacts
    @param y: Another GS URI for a bucket containing ChromeOS build artifacts
    @return: 1 if x > y, -1 if x < y, and 0 if x == y

    """
    def _version_components(uri):
        # 'gs://.../R75-<major>.<minor>.<sub>' -> [major, minor, sub]
        return [int(part) for part in parse_gs_uri_version(uri).split('.')]

    for a, b in zip(_version_components(x), _version_components(y)):
        if a != b:
            return 1 if a > b else -1

    return 0
2350
2351
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost.

    @raise socket.gaierror: If server name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2369
2370
def get_function_arg_value(func, arg_name, args, kwargs):
    """Get the value of the given argument for the function.

    @param func: Function being called with given arguments.
    @param arg_name: Name of the argument to look for value.
    @param args: arguments for function to be called.
    @param kwargs: keyword arguments for function to be called.

    @return: The value of the given argument for the function.

    @raise ValueError: If the argument is not listed function arguments.
    @raise KeyError: If no value is found for the given argument.
    """
    if arg_name in kwargs:
        return kwargs[arg_name]

    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # when present and fall back to getargspec for older interpreters.
    # Both return objects with compatible .args and .defaults attributes.
    get_spec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    argspec = get_spec(func)
    index = argspec.args.index(arg_name)
    try:
        return args[index]
    except IndexError:
        try:
            # The argument can use a default value. Reverse the default value
            # so argument with default value can be counted from the last to
            # the first.
            return argspec.defaults[::-1][len(argspec.args) - index - 1]
        except IndexError:
            raise KeyError('Argument %s is not given a value. argspec: %s, '
                           'args:%s, kwargs:%s' %
                           (arg_name, argspec, args, kwargs))
2401
2402
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    init_binary = os.readlink('/proc/1/exe')
    return os.path.basename(init_binary) == 'systemd'
2409
2410
def get_real_user():
    """Get the real user that runs the script.

    The function check environment variable SUDO_USER for the user if the
    script is run with sudo. Otherwise, it returns the value of environment
    variable USER.

    @return: The user name that runs the script.

    """
    # SUDO_USER wins when present and non-empty; USER is the fallback.
    return os.environ.get('SUDO_USER') or os.environ.get('USER')
2425
2426
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if not has_systemd():
        # Upstart: 'status <service>' prints e.g.
        # '<service> start/running, process <pid>'.
        cmd_result = run('status %s' % service_name,
                                        ignore_status=True)
        if 'start/running' in cmd_result.stdout:
            return int(cmd_result.stdout.split()[3])
        return 0
    # systemctl show prints 'MainPID=0' if the service is not running.
    cmd_result = run('systemctl show -p MainPID %s' %
                                service_name, ignore_status=True)
    return int(cmd_result.stdout.split('=')[1])
2445
2446
def control_service(service_name, action='start', ignore_status=True):
    """Controls a service. It can be used to start, stop or restart
    a service.

    @param service_name: string service to be restarted.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    valid_actions = ('start', 'stop', 'restart')
    if action not in valid_actions:
        raise ValueError('Unknown action supplied as parameter.')

    command = action + ' ' + service_name
    if has_systemd():
        command = 'systemctl ' + command
    return system(command, ignore_status=ignore_status)
2466
2467
def restart_service(service_name, ignore_status=True):
    """Restarts a service by delegating to control_service.

    @param service_name: string service to be restarted.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='restart', ignore_status=ignore_status)
2479
2480
def start_service(service_name, ignore_status=True):
    """Starts a service by delegating to control_service.

    @param service_name: string service to be started.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='start', ignore_status=ignore_status)
2492
2493
def stop_service(service_name, ignore_status=True):
    """Stops a service by delegating to control_service.

    @param service_name: string service to be stopped.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='stop', ignore_status=ignore_status)
2505
2506
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        # 'sudo -n' fails instead of prompting when a password is needed.
        run('sudo -n true')
    except error.CmdError:
        logging.warn('sudo command requires password.')
        return True
    return False
2519
2520
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    lxc_check = run('grep -q "/lxc/" /proc/1/cgroup',
                            verbose=False, ignore_status=True)
    if lxc_check.exit_status == 0:
        return True

    # Check "container" environment variable for lxd/lxc containers.
    return os.environ.get('container') == 'lxc'
2536
2537
def is_flash_installed():
    """Check whether the Adobe Flash binary is present on disk.

    The Adobe Flash binary is only distributed with internal builds.
    """
    flash_files = ('/opt/google/chrome/pepper/libpepflashplayer.so',
                   '/opt/google/chrome/pepper/pepper-flash.info')
    return all(os.path.exists(path) for path in flash_files)
2544
2545
def verify_flash_installed():
    """Raise TestNAError unless the Adobe Flash binary is installed.

    The Adobe Flash binary is only distributed with internal builds; this
    warns users of public builds about the extra dependency.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2553
2554
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IP addresses are in the same subnet with given mask bits.

    The two IP addresses are strings of IPv4, e.g., '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Netmask with the top |mask_bits| bits set, e.g. mask_bits=24 gives
    # 0xffffff00. The previous expression used a Python 2 long literal
    # ((2L<<mask_bits-1)-1)<<(32-mask_bits)) and raised for mask_bits=0;
    # this form handles the full 0..32 range and is Python 3 compatible.
    mask = ((1 << mask_bits) - 1) << (32 - mask_bits)
    ip_1_num = struct.unpack('!I', socket.inet_aton(ip_1))[0]
    ip_2_num = struct.unpack('!I', socket.inet_aton(ip_2))[0]
    return ip_1_num & mask == ip_2_num & mask
2571
2572
def get_ip_address(hostname=None):
    """Get the IP address of given hostname or current machine.

    @param hostname: Hostname of a DUT, default value is None.

    @return: The IP address of given hostname. If hostname is not given then
             we'll try to query the IP address of the current machine and
             return. Returns None if the hostname lookup fails.
    """
    if hostname:
        try:
            return socket.gethostbyname(hostname)
        except socket.gaierror as e:
            logging.error(
                'Failed to get IP address of %s, error: %s.', hostname, e)
        return None
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # A UDP connect() sends no traffic; it only selects the local
        # interface that would route to the target, whose address we read
        # back from getsockname().
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # Close in finally so the socket is not leaked if connect() raises.
        s.close()
2594
2595
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    @raise ValueError: If neither `servers` nor `server_ip_map` is given.
    """
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = server_ip_map.keys()
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    matched_servers = []
    for server in servers:
        if server in server_ip_map:
            server_ip = server_ip_map[server]
        else:
            # Only resolve when the pre-built map lacks an entry. The old
            # `server_ip_map.get(server, get_ip_address(server))` form
            # evaluated get_ip_address() for every server even on a map
            # hit, defeating the purpose of the pre-built map.
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2624
2625
def get_restricted_subnet(hostname, restricted_subnets=None):
    """Get the restricted subnet of given hostname.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
             subnet, or None when the host IP cannot be resolved or no
             subnet matches.
    """
    subnets = (RESTRICTED_SUBNETS if restricted_subnets is None
               else restricted_subnets)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for subnet_ip, mask_bits in subnets:
        if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
            return (subnet_ip, mask_bits)
    return None
2644
2645
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    The method first looks for a wireless ssid configured for the subnet
    the given host belongs to. If none matches, it falls back to the
    default setting in CLIENT/wireless_ssid.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                           default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return default_ssid

    # All wireless ssid entries in the global config.
    ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)

    # Multiple subnets may match; prefer the strictest one, i.e. the one
    # with the highest mask bit count.
    best_ssid = default_ssid
    best_maskbit = -1
    for key, ssid in ssids.items():
        # Keys filtered by WIRELESS_SSID_PATTERN look like
        # wireless_ssid_[subnet_ip]/[maskbit], e.g.
        # wireless_ssid_192.168.0.1/24; extract the subnet ip and mask bit.
        subnet_ip, maskbit = re.match(WIRELESS_SSID_PATTERN, key).groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
            is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid = ssid
            best_maskbit = maskbit
    return best_ssid
2683
2684
def parse_launch_control_build(build_name):
    """Split a Launch Control build name into its three components.

    @param build_name: Name of a Launch Control build, formatted as
                       branch/target/build_id.

    @return: Tuple of (branch, target, build_id).
    @raise ValueError: If the build_name does not have exactly three
                       slash-separated components.
    """
    components = build_name.split('/')
    branch, target, build_id = components
    return branch, target, build_id
2696
2697
def parse_android_target(target):
    """Split an Android build target into board and build type.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of (board, build_type).
    @raise ValueError: If the target does not have exactly two
                       dash-separated components.
    """
    components = target.split('-')
    board, build_type = components
    return board, build_type
2708
2709
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    A Launch Control target has the format build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug; the build type is everything after
    the last dash.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'userdebug'), or
             (None, None) when the target does not match the pattern.
    """
    match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not match:
        return None, None
    return match.group('build_target'), match.group('build_type')
2726
2727
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        _, target, _ = parse_launch_control_build(build)
        build_target, _ = parse_launch_control_target(target)
        return bool(build_target)
    except ValueError:
        # Parsing failed, so this is not a Launch Control build name.
        return False
2747
2748
def which(exec_file):
    """Finds an executable file.

    If the file name contains a path component, it is checked as-is.
    Otherwise, we check with each of the path components found in the system
    PATH prepended. This behavior is similar to the 'which' command-line tool.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    def _is_executable_file(path):
        # os.access(X_OK) alone also matches executable *directories*;
        # require a regular file so a directory on PATH is never returned.
        return os.path.isfile(path) and os.access(path, os.X_OK)

    if os.path.dirname(exec_file):
        return exec_file if _is_executable_file(exec_file) else None
    sys_path = os.environ.get('PATH')
    prefix_list = sys_path.split(os.pathsep) if sys_path else []
    for prefix in prefix_list:
        path = os.path.join(prefix, exec_file)
        if _is_executable_file(path):
            return path
    return None
2768
2769
class TimeoutError(error.TestError):
    """Error raised when poll_for_condition() failed to poll within time.

    It may embed a reason (either a string or an exception object) so that
    the caller of poll_for_condition() can handle failure better.
    """

    def __init__(self, message=None, reason=None):
        """Constructor.

        It supports three invocations:
        1) TimeoutError()
        2) TimeoutError(message): with customized message.
        3) TimeoutError(message, reason): with message and reason for timeout.
        """
        self.reason = reason
        if reason:
            # Fold the reason into the message so str() shows both.
            suffix = 'Reason: ' + repr(reason)
            message = (message + '. ' + suffix) if message else suffix

        if message:
            super(TimeoutError, self).__init__(message)
        else:
            super(TimeoutError, self).__init__()
2797
2798
class Timer(object):
    """A synchronous timer to evaluate if timeout is reached.

    Usage:
      timer = Timer(timeout_sec)
      while timer.sleep(sleep_interval):
        # do something...
    """
    def __init__(self, timeout):
        """Constructor.

        Note that timer won't start until next() is called.

        @param timeout: timer timeout in seconds.
        """
        self.timeout = timeout
        self.deadline = 0

    def sleep(self, interval):
        """Checks if it has sufficient time to sleep; sleeps if so.

        It blocks for |interval| seconds if it has time to sleep.
        If timer is not ticked yet, kicks it off and returns True without
        sleep.

        @param interval: sleep interval in seconds.
        @return True if it has sleeped or just kicked off the timer. False
                otherwise.
        """
        now = time.time()
        if not self.deadline:
            # First call arms the timer; no sleeping on this call.
            self.deadline = now + self.timeout
            return True
        if now + interval >= self.deadline:
            return False
        time.sleep(interval)
        return True
2836
2837
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition is evaluated to true.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    start_time = time.time()
    while True:
        value = condition()
        if value:
            return value
        # Give up if the *next* sleep would push us past the timeout.
        if time.time() + sleep_interval - start_time > timeout:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            message = ('Timed out waiting for condition: ' + desc if desc
                       else 'Timed out waiting for unnamed condition')
            logging.error(message)
            raise TimeoutError(message=message)

        time.sleep(sleep_interval)
2876
2877
def poll_for_condition_ex(condition, timeout=10, sleep_interval=0.1, desc=None):
    """Polls until a condition is evaluated to true or until timeout.

    Similar to poll_for_condition, except that it handles exceptions
    condition() raises. If timeout is not reached, the exception is dropped
    and the condition is polled again after a sleep; otherwise, the last
    exception is embedded into the raised TimeoutError.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of the condition

    @return The evaluated value that caused the poll loop to terminate.

    @raise TimeoutError. If condition() raised an exception, it is embedded
           in the raised TimeoutError.
    """
    timer = Timer(timeout)
    # Initialize before the loop so `reason` is always bound even if the
    # loop body were never entered.
    reason = None
    while timer.sleep(sleep_interval):
        reason = None
        try:
            value = condition()
            if value:
                return value
        # NOTE: deliberately broad so any failure in condition() is retried
        # until timeout; this also swallows KeyboardInterrupt/SystemExit
        # raised during the poll window.
        except BaseException as e:
            reason = e

    if desc is None:
        desc = 'unnamed condition'
    if reason is None:
        reason = 'condition evaluated as false'
    to_raise = TimeoutError(message='Timed out waiting for ' + desc,
                            reason=reason)
    logging.error(str(to_raise))
    raise to_raise
2915
2916
def poll_till_condition_holds(condition,
                              exception=None,
                              timeout=10,
                              sleep_interval=0.1,
                              hold_interval=5,
                              desc=None):
    """Polls until a condition is evaluated to true for a period of time.

    This function checks that a condition remains true for the 'hold_interval'
    seconds after it first becomes true. If the condition becomes false
    subsequently, the timer is reset. This function will not detect if
    condition becomes false for any period of time less than the sleep_interval.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param hold_interval: time period for which the condition should hold true
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    start_time = time.time()
    # Timestamp of when the condition last *became* true; None while false.
    hold_start = None

    while True:
        value = condition()
        if value:
            if hold_start is None:
                hold_start = time.time()
            elif time.time() - hold_start > hold_interval:
                return value
        else:
            # Condition dropped; restart the hold window.
            hold_start = None

        # Stop early if not even a full hold window fits before timeout.
        time_remaining = timeout - (time.time() - start_time)
        if time_remaining < hold_interval:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            message = ('Timed out waiting for condition: ' + desc if desc
                       else 'Timed out waiting for unnamed condition')
            logging.error(message)
            raise TimeoutError(message=message)

        time.sleep(sleep_interval)
2974
2975
def shadowroot_query(element, action):
    """Recursively queries shadowRoot.

    @param element: element to query for.
    @param action: action to be performed on the element.

    @return JS functions to execute.

    """
    # /deep/ CSS query has been removed from ShadowDOM. The only way to access
    # elements now is to recursively query in each shadowRoot.
    shadowroot_script = """
    function deepQuerySelectorAll(root, targetQuery) {
        const elems = Array.prototype.slice.call(
            root.querySelectorAll(targetQuery[0]));
        const remaining = targetQuery.slice(1);
        if (remaining.length === 0) {
            return elems;
        }

        let res = [];
        for (let i = 0; i < elems.length; i++) {
            if (elems[i].shadowRoot) {
                res = res.concat(
                    deepQuerySelectorAll(elems[i].shadowRoot, remaining));
            }
        }
        return res;
    };
    var testing_element = deepQuerySelectorAll(document, %s);
    testing_element[0].%s;
    """
    # Substitute the element query and action directly into the template.
    return shadowroot_script % (element, action)
3010
3011
def threaded_return(function):
    """
    Decorator that makes a function return a started thread object which
    records the function's return value.

    @param function: function object to be run in the thread

    @return a threading.Thread object, that has already been started, is
            recording its result, and can be completed and its result
            fetched by calling .finish()
    """
    def _run_and_record(result_queue, *args, **kwargs):
        """Runs the wrapped function and stores its result in the queue."""
        result_queue.put(function(*args, **kwargs))

    def _finish(thread):
        """Fetches the stored result, then joins the thread."""
        result = thread.get()
        thread.join()
        return result

    def wrapper(*args, **kwargs):
        """Starts the worker thread and attaches result-fetching helpers."""
        result_queue = Queue.Queue()
        thread = threading.Thread(target=_run_and_record,
                                  args=(result_queue,) + args,
                                  kwargs=kwargs)
        thread.start()
        thread.result_queue = result_queue
        thread.get = result_queue.get
        thread.finish = lambda: _finish(thread)
        return thread

    return wrapper
3055
3056
@threaded_return
def background_sample_until_condition(
        function,
        condition=lambda: True,
        timeout=10,
        sleep_interval=1):
    """
    Records the value of the function until the condition is False or the
    timeout is reached. Runs as a background thread, so it's nonblocking.
    Usage might look something like:

    def function():
        return get_value()
    def condition():
        return self._keep_sampling

    # main thread
    sample_thread = utils.background_sample_until_condition(
        function=function,condition=condition)
    # do other work
    # ...
    self._keep_sampling = False
    # blocking call to get result and join the thread
    result = sample_thread.finish()

    @param function: function object, 0 args, to be continually polled
    @param condition: function object, 0 args, to say when to stop polling
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: number of seconds to wait in between polls

    @return a thread object that has already been started and is running in
            the background, whose run must be stopped with .finish(), which
            also returns a list of the results from the sample function
    """
    samples = []
    deadline = datetime.datetime.now() + datetime.timedelta(
            seconds=timeout + sleep_interval)

    while condition() and datetime.datetime.now() < deadline:
        samples.append(function())
        time.sleep(sleep_interval)
    return samples
3100
3101
class metrics_mock(metrics_mock_class.mock_class_base):
    """Mock class for metrics in case chromite is not installed.

    All behavior is inherited from metrics_mock_class.mock_class_base;
    this subclass only provides the expected name.
    """
    pass
3105
3106
3107MountInfo = collections.namedtuple('MountInfo', ['root', 'mount_point', 'tags'])
3108
3109
def get_mount_info(process='self', mount_point=None):
    """Retrieves information about currently mounted file systems.

    @param mount_point: (optional) The mount point (a path).  If this is
                        provided, only information about the given mount point
                        is returned.  If this is omitted, info about all mount
                        points is returned.
    @param process: (optional) The process id (or the string 'self') of the
                    process whose mountinfo will be obtained.  If this is
                    omitted, info about the current process is returned.

    @return A generator yielding one MountInfo object for each relevant mount
            found in /proc/PID/mountinfo.
    """
    with open('/proc/{}/mountinfo'.format(process)) as f:
        for line in f:
            # Lines are formatted per the proc(5) manpage, e.g.:
            # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root \
            #     rw,errors=continue
            # Relevant fields: 3 = root of the mount, 4 = mount point,
            # 6.. = optional tags (where shared mounts are indicated),
            # terminated by a lone '-' separator.
            fields = line.split()
            if mount_point is not None and fields[4] != mount_point:
                continue
            tags = []
            for tag in fields[6:]:
                if tag == '-':
                    break
                # Keep only the tag name, dropping any ':<id>' suffix.
                tags.append(tag.split(':')[0])
            yield MountInfo(root=fields[3],
                            mount_point=fields[4],
                            tags=tags)
3147
3148
# Appended suffix for chart tablet naming convention in test lab: a DUT
# "<host>" is paired with a chart tablet addressed as "<host>-tablet".
CHART_ADDRESS_SUFFIX = '-tablet'
3151
3152
def get_lab_chart_address(hostname):
    """Convert lab DUT hostname to address of camera box chart tablet.

    Returns None outside a container (i.e. outside the test lab).
    """
    if not is_in_container():
        return None
    return hostname + CHART_ADDRESS_SUFFIX
3156