• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Lint as: python2, python3
2# Copyright (c) 2017 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""
7Convenience functions for use by tests or whomever.
8
9There's no really good way to do this, as this isn't a class we can do
10inheritance with, just a collection of static methods.
11"""
12
13# pylint: disable=missing-docstring
14
15from __future__ import absolute_import
16from __future__ import division
17from __future__ import print_function
18
19import collections
20import datetime
21import errno
22import inspect
23import itertools
24import logging
25import os
26import pickle
27import random
28import re
29import resource
30import select
31import shutil
32import signal
33import socket
34import six
35from six.moves import input
36from six.moves import range
37from six.moves import urllib
38from six.moves import zip
39from six.moves import zip_longest
40import six.moves.urllib.parse
41import string
42import struct
43import subprocess
44import textwrap
45import threading
46import time
47import six.moves.queue
48import uuid
49import warnings
50
51try:
52    import hashlib
53except ImportError as e:
54    if six.PY2:
55        import md5
56        import sha
57    else:
58        raise ImportError("Broken hashlib imports %s", e)
59
60import common
61
62from autotest_lib.client.common_lib import env
63from autotest_lib.client.common_lib import error
64from autotest_lib.client.common_lib import global_config
65from autotest_lib.client.common_lib import logging_manager
66from autotest_lib.client.common_lib import metrics_mock_class
67from autotest_lib.client.cros import constants
68
69# pylint: disable=wildcard-import
70from autotest_lib.client.common_lib.lsbrelease_utils import *
71
72
def deprecated(func):
    """Decorator marking |func| as deprecated.

    Calling the wrapped function emits a DeprecationWarning and then
    delegates to |func| unchanged.

    @param func: The callable to mark as deprecated.
    @return A wrapper carrying |func|'s metadata (name, docstring, dict).
    """
    # Local import so the module-level import block stays untouched.
    import functools

    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
84
85
86class _NullStream(object):
87    def write(self, data):
88        pass
89
90
91    def flush(self):
92        pass
93
94
# Sentinel: route the stream into the logging system via logging_manager.
TEE_TO_LOGS = object()
# Shared sink used when a caller passes None for a stream.
_the_null_stream = _NullStream()

# Sentinel: discard the stream entirely (like redirecting to /dev/null).
DEVNULL = object()

# Default logging levels for tee'd subprocess output.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
_SHELL_QUOTING_ALLOWLIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=>|')
111
112def custom_warning_handler(message, category, filename, lineno, file=None,
113                           line=None):
114    """Custom handler to log at the WARNING error level. Ignores |file|."""
115    logging.warning(warnings.formatwarning(message, category, filename, lineno,
116                                           line))
117
118warnings.showwarning = custom_warning_handler
119
def get_stream_tee_file(stream, level, prefix=''):
    """Map a caller-supplied stream spec onto an actual writable object.

    None -> shared null sink; DEVNULL -> None (caller discards);
    TEE_TO_LOGS -> a LoggingFile at |level| with |prefix|; anything else
    is assumed to be file-like and returned unchanged.
    """
    if stream is DEVNULL:
        return None
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
128
129
130def _join_with_nickname(base_string, nickname):
131    if nickname:
132        return '%s BgJob "%s" ' % (base_string, nickname)
133    return base_string
134
135
136# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
137# ssh connection process, while fixing underlying
138# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A background subprocess whose stdout/stderr are pumped by the caller.

    Wraps subprocess.Popen: the constructor starts the process without
    blocking; process_output() drains the pipes; cleanup() flushes the
    tees, closes the pipes and fills in self.result.
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        # Unjoinable jobs are never pumped via process_output(), so their
        # output must be discarded or the OS pipe buffers could fill up.
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, six.string_types):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            # Copy so we never mutate the caller's dict (or os.environ).
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # A list is passed through to execv directly; a string goes through
        # the shell (bash, for consistent behavior across hosts).
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        with open('/dev/null', 'w') as devnull:
            # TODO b/169678884. close_fds was reverted to False, as there is a
            # large performance hit due to a docker + python2 bug. Eventually
            # update (everything) to python3. Moving this call to subprocess32
            # is also an option, but will require new packages to the drone/lxc
            # containers.

            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=False)
        self._cleanup_called = False
        # In-memory buffers accumulating output until cleanup() copies them
        # into self.result; None when the stream is being discarded.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else six.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else six.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        # Pipe is None when the stream was directed to DEVNULL at Popen time.
        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(self._read_data(pipe))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = self._read_data(pipe)
        buf.write(data)
        tee.write(data)

    def _read_data(self, pipe):
        """Read & return the data from the provided pipe.

        Handles the changes to pipe reading & iostring writing in python 2/3.
        In python2 the buffer (iostring) can take bytes, where in python3 it
        must be a string. Formatting bytes to string in python 2 vs 3 seems
        to be a bit different. In 3, .decode() is needed, however in 2 that
        results in unicode (not str), breaking downstream users.

        """

        data = os.read(pipe.fileno(), 1024)
        if isinstance(data, bytes) and six.PY3:
            return data.decode()
        return data

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even if a tee/close raised, so we never re-run this.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        # Popen preexec_fn: restore default SIGPIPE handling in the child so
        # it terminates normally on a broken pipe (python ignores SIGPIPE).
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
353
354
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer form."""
    packed = socket.inet_aton(ip)
    # '!L' is an unsigned 32-bit value in network byte order.
    (number,) = struct.unpack('!L', packed)
    return number
358
359
def long_to_ip(number):
    """Convert a 32-bit integer to its dotted-quad IPv4 string form."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
363
364
def create_subnet_mask(bits):
    """Return the integer IPv4 netmask for a prefix length of |bits|."""
    host_bits = 32 - bits
    # All 32 ones, with the low |host_bits| cleared.
    return (((1 << 32) - 1) >> host_bits) << host_bits
367
368
def format_ip_with_mask(ip, mask_bits):
    """Return |ip| masked down to its network address, in CIDR notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
372
373
def normalize_hostname(alias):
    """Resolve |alias| to its canonical hostname via forward+reverse DNS."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
377
378
def get_ip_local_port_range():
    """Return the (lower, upper) ephemeral port range from /proc."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
383
384
def set_ip_local_port_range(lower, upper):
    """Write the ephemeral port range (lower, upper) into /proc."""
    contents = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', contents)
388
389
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline.

    @param filename: Path of the file to read.
    @return The first line of the file, trailing newline stripped.
    """
    # 'with' guarantees the descriptor is closed even if readline() raises.
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
396
397
def read_file(filename):
    """Return the entire contents of |filename| as a string.

    @param filename: Path of the file to read.
    @return The full file contents.
    """
    # 'with' guarantees the descriptor is closed even if read() raises.
    with open(filename) as f:
        return f.read()
404
405
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    """
    # Look-behind anchors the match right after |linestart| at a line start.
    pattern = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    match = pattern.search(data)
    if match is None:
        print("There is no line which starts with %s in data." % linestart)
        return None
    fields = re.split("%s" % sep, match.group(1))
    return fields[param]
428
429
def write_one_line(filename, line):
    """Replace the contents of |filename| with |line|, newline-terminated."""
    normalized = str(line).rstrip('\n') + '\n'
    open_write_close(filename, normalized)
432
433
def open_write_close(filename, data):
    """Overwrite |filename| with |data|, closing the file even on error.

    @param filename: Path of the file to (over)write.
    @param data: String content to write.
    """
    # 'with' guarantees the descriptor is closed even if write() raises.
    with open(filename, 'w') as f:
        f.write(data)
440
441
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.

    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Resolve the relative path against the supplied base directory.
        path = os.path.join(base_dir, path)
    if not os.path.isfile(path):
        raise error.TestFail('ERROR: Unable to find %s' % path)
    return path
463
464
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)

    # Column widths start from the header (if any) and grow per cell.
    widths = []
    if header:
        for column in header:
            widths.append(len(column))
    for row in matrix:
        for i, column in enumerate(row):
            # Measure the utf-8 encoded form, as the original did.
            encoded = six.ensure_binary(six.text_type(column), "utf-8")
            width = len(encoded)
            if i < len(widths):
                if width > widths[i]:
                    widths[i] = width
            else:
                widths.append(width)

    widths = tuple(widths)
    format_string = ""
    for width in widths:
        format_string += "%-" + str(width) + "s "
    format_string += "\n"

    rendered = []
    if header:
        rendered.append(format_string % header)
    for row in matrix:
        rendered.append(format_string % tuple(row))

    return "".join(rendered)
506
507
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    @return Dict mapping key -> value; integer-looking values become int,
            float-looking values become float, all others stay str.
    @raises ValueError if a non-comment, non-blank line does not match the
            expected key=value format.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' ensures the file is closed even when a bad line raises below
    # (the old explicit close leaked the descriptor on that path).
    with open(path) as f:
        for line in f:
            # Strip comments and trailing whitespace.
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            # Raw strings: '\d' in a plain literal is an invalid escape
            # under python3.
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
547
548
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @raises ValueError for an invalid type_tag or a key that does not
            match the required pattern.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Validate type_tag *before* opening: the old code opened (and thus
    # created) the file first, leaking it when ValueError was raised.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)

    # Append mode preserves existing keyvals; 'with' guarantees close even
    # when an invalid key raises mid-write.
    with open(path, 'a') as keyval:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
582
583
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urllib.parse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
589
590
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # No per-call timeout parameter here, so temporarily swap the
    # process-wide default socket timeout and always restore it.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.request.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
601
602
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url.

    @param url: Source URL to fetch.
    @param filename: Local destination path (opened in binary mode).
    @param data: Optional POST data forwarded to urlopen().
    @param timeout: Socket timeout in seconds for the transfer.
    """
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' closes the destination even if the copy fails part way.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
616
617
def hash(hashtype, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    # pylint: disable=redefined-builtin
    if hashtype not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % hashtype)

    try:
        computed_hash = hashlib.new(hashtype)
    except NameError:
        # hashlib failed to import (ancient python2): fall back to the
        # legacy md5/sha modules imported at the top of the file.
        computed_hash = md5.new() if hashtype == 'md5' else sha.new()

    if input:
        try:
            # Unicode input must be encoded before hashing.
            computed_hash.update(input.encode())
        except UnicodeError:
            computed_hash.update(input)

    return computed_hash
651
652
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL.

    @param src: Local path or http/ftp URL of the source.
    @param dest: Local destination path.
    @param permissions: Optional mode bits applied to |dest| after the copy.
    @return |dest|, or None when src and dest are the same path.
    """
    if src == dest:
        # Nothing to do; note this early exit returns None, not |dest|.
        return

    fetch = urlretrieve if is_url(src) else shutil.copyfile
    fetch(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
666
667
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        # Plain file: just resolve it against the source directory.
        return os.path.join(srcdir, src)

    url_parts = six.moves.urllib.parse.urlparse(src)
    # Download into destdir under the URL's basename.
    filename = os.path.basename(url_parts[2])
    dest = os.path.join(destdir, filename)
    return get_file(src, dest)
686
687
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: Directory whose installed version is tracked.
    @param preserve_srcdir: If True, never delete srcdir before install().
    @param new_version: Version value compared (==) against the stored one.
    @param install: Callable invoked as install(*args, **dargs) to perform
            the (re)installation.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Pickle data is binary: 'rb' is required under python3 (the old
        # text-mode open broke there), and 'with' closes the handle.
        with open(versionfile, 'rb') as vf:
            old_version = pickle.load(vf)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as vf:
                pickle.dump(new_version, vf)
712
713
def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
    """Pick the logging level for a command's stderr output.

    Expected stderr is logged like stdout; unexpected stderr gets the
    (more severe) default stderr level.
    """
    return stdout_level if stderr_is_expected else DEFAULT_STDERR_LEVEL
718
719
def run(command, timeout=None, ignore_status=False, stdout_tee=None,
        stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
        stdout_level=None, stderr_level=None, args=(), nickname=None,
        ignore_timeout=False, env=None, extra_paths=None):
    """
    Run a command on the host and wait for it to complete.

    @param command: the command line string (a list is also accepted and is
            shell-quoted into a single string).
    @param timeout: seconds to allow the process to run before attempting
            to kill it; completion may take a few seconds longer than this
            when the process must be killed.
    @param ignore_status: if True, do not raise an exception regardless of
            the command's exit code.
    @param stdout_tee: optional file-like object receiving stdout as it is
            generated (data is still stored in result.stdout unless this
            is DEVNULL).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin for the executed process (file descriptor, real
            file object, or a string).
    @param stderr_is_expected: if True, stderr is logged at the stdout
            level rather than the (more severe) stderr default.
    @param stdout_level: logging level used when stdout_tee is TEE_TO_LOGS;
            a default is used when None.
    @param stderr_level: like stdout_level but for stderr.
    @param args: sequence of strings appended to the command, each escaped
            and passed as a separate argument.
    @param nickname: short string included in logging messages associated
            with this command.
    @param ignore_timeout: if True, a timeout returns None instead of
            raising CmdTimeoutError.
    @param env: dict of environment variables for the subprocess.
    @param extra_paths: optional string list prepended to PATH in env (or
            os.environ if env is not given).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True
    @rtype: CmdResult

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    if isinstance(args, six.string_types):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # |command| may arrive as a list (e.g. get_user_hash in
    # client/cros/cryptohome.py); flatten it into one quoted string first.
    if not isinstance(command, six.string_types):
        command = ' '.join(sh_quote_word(arg) for arg in command)

    extra_args = [sh_quote_word(arg) for arg in args]
    command = ' '.join([command] + extra_args)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    if stdout_level is None:
        stdout_level = DEFAULT_STDOUT_LEVEL
    if stderr_level is None:
        stderr_level = get_stderr_level(stderr_is_expected, stdout_level)

    try:
        bg_job = join_bg_jobs(
            (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                   stdout_level=stdout_level, stderr_level=stderr_level,
                   nickname=nickname, env=env, extra_paths=extra_paths),),
            timeout)[0]
    except error.CmdTimeoutError:
        if ignore_timeout:
            return None
        raise

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
801
802
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: List of command strings to execute concurrently.
    @param timeout: Shared time limit in seconds for the whole batch.
    @param ignore_status: If False, raise CmdError for any job that exits
            with a non-zero status.
    @param stdout_tee: See run(); shared by all jobs.
    @param stderr_tee: See run(); shared by all jobs.
    @param nicknames: Optional list of logging nicknames paired with
            commands positionally (missing entries become None).

    @return: a list of CmdResult objects
    """
    if nicknames is None:
        nicknames = []
    bg_jobs = []
    for (command, nickname) in zip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the failing job's own command; the old code raised with
            # the stale |command| loop variable (always the last command).
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
832
833
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
839
840
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Blocks until every job has finished (or the timeout fires), drains any
    remaining stdout/stderr, and closes our ends of the pipes.

    @param bg_jobs: iterable of joinable BgJob objects.
    @param timeout: optional overall timeout in seconds for all jobs.

    @return: the same list of bg_jobs objects that was passed in.

    @raise error.InvalidBgJobCall: if any job is unjoinable.
    @raise error.CmdTimeoutError: if the jobs did not complete in time.
    """
    for bg_job in bg_jobs:
        if bg_job.unjoinable:
            raise error.InvalidBgJobCall(
                    'join_bg_jobs cannot be called for unjoinable bg_job')

    timed_out = False
    try:
        # We hold ends of the stdin/stdout pipes, so make sure those fds are
        # closed no matter how we leave this function.
        start_time = time.time()
        timed_out = _wait_for_commands(bg_jobs, start_time, timeout)

        # Drain whatever output remains buffered on each job.
        for bg_job in bg_jobs:
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # Close our ends of the pipes to the subprocesses regardless of how
        # the wait went.
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timed_out:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)

    return bg_jobs
876
877
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    Feeds queued stdin data to the processes as they can accept it, reads
    stdout/stderr as it becomes available, and records each job's exit
    status and duration when it terminates. Jobs still running when the
    timeout expires are killed via nuke_subprocess().

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    reverse_dict = {}

    # Map each pipe fd back to its job (and, for output fds, which stream).
    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            # On Python 3, select.error is an alias of OSError, which is not
            # subscriptable; v.args[0] is the errno on both Python 2 and 3.
            # (The old code did v[0], which raises TypeError on Python 3.)
            if v.args[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
985
986
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.

    @param pid: process id, as an int or a numeric string.
    @return: True if the process exists and is not a zombie, False if its
            /proc stat file does not exist.
    @raise IOError: if the stat file exists but cannot be read.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat = read_one_line(path)
    except IOError:
        if not os.path.exists(path):
            # file went away
            return False
        raise

    # Field 2 of /proc/<pid>/stat is the command name wrapped in parentheses
    # and it may itself contain spaces (e.g. "(my prog)"), so naively
    # splitting the whole line on whitespace can return the wrong token.
    # Split after the *last* ')' instead; the first field after it is the
    # process state.
    return stat.rsplit(')', 1)[-1].split()[0] != 'Z'
1004
1005
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.

    @param pid: process id to signal.
    @param sig: signal number to deliver.
    @return: True if the process went away within ~5 seconds, else False.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll up to five times, one second apart, for the process to go away.
    attempts_left = 5
    while attempts_left > 0:
        if not pid_is_alive(pid):
            return True
        time.sleep(1)
        attempts_left -= 1

    # The process is still alive
    return False
1024
1025
def nuke_subprocess(subproc):
    """Terminate a subprocess.Popen, escalating from SIGTERM to SIGKILL.

    @param subproc: the subprocess.Popen object to kill.
    @return: the process's exit status if it died, otherwise None.
    """
    # Already dead? Just report its exit status.
    status = subproc.poll()
    if status is not None:
        return status

    # Escalate through progressively harsher signals until the process dies.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
1038
1039
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill a pid with an escalating series of signals.

    @param pid: the process id to kill.
    @param signal_queue: signals to try, in order, until one terminates the
            process.

    @raise error.AutoservPidAlreadyDeadError: if the pid has no /proc entry.
    @raise error.AutoservRunError: if no signal killed the process.
    """
    pid_path = '/proc/%d/'
    if not os.path.exists(pid_path % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Interpolate the pid into the message explicitly. (The old code
        # passed the pid as a second, logging-style exception argument, so
        # the '%s' placeholder was never actually substituted.)
        raise error.AutoservPidAlreadyDeadError(
                'Could not kill nonexistant pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
1056
1057
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param command: command line string to execute.
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1072
1073
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
1080
1081
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    run_kwargs = dict(timeout=timeout, ignore_status=ignore_status, args=args)
    if retain_output:
        run_kwargs.update(stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    out = run(command, **run_kwargs).stdout
    # Drop a single trailing newline, if present.
    return out[:-1] if out.endswith('\n') else out
1112
1113
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return each command's stdout.

    @param commands: list of command strings to run concurrently.
    @param timeout: overall timeout in seconds for the batch.
    @param ignore_status: do not raise on non-zero exit statuses.
    @param retain_output: also tee each command's output to the logs.

    @return: a list of stdout strings, one per command, each with a single
            trailing newline (if any) removed.
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    # Strip one trailing newline from each command's output. (The old code
    # looped over the list but tested `out[-1:]` -- a slice of the *list*,
    # never equal to '\n' -- so it never stripped anything.)
    return [out[:-1] if out.endswith('\n') else out
            for out in (result.stdout for result in results)]
1128
1129
def strip_unicode(input_obj):
    """Recursively convert unicode text inside an object to plain str.

    Lists and dicts are rebuilt with their contents converted (dict keys
    are passed through str()); unicode text becomes str; any other object
    is returned unchanged. Note the checks use exact type comparison, so
    subclasses of list/dict are deliberately left untouched.

    @param input_obj: the object to convert.
    @return: the converted copy, or the original object.
    """
    if type(input_obj) == list:
        return [strip_unicode(element) for element in input_obj]
    if type(input_obj) == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input_obj.items())
    if type(input_obj) == six.text_type:
        return str(input_obj)
    return input_obj
1142
1143
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: the callable to invoke and measure.
    @param args: positional arguments forwarded to the callable.
    @param dargs: keyword arguments forwarded to the callable.
    @return: (cpu_percent, function_return_value) tuple.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage. The first two fields of a struct_rusage are
    # ru_utime (user time) and ru_stime (system time). Note that zip()
    # returns an iterator on Python 3, so it must be materialized before
    # slicing -- the old code did zip(...)[:2], which raises TypeError there.
    s_user, s_system = [a - b for a, b in list(zip(self_post, self_pre))[:2]]
    c_user, c_system = [a - b for a, b in list(zip(child_post, child_pre))[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
1164
1165
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    If specified, run_function should return a CmdResult object and throw a
    CmdError exception. If run_function is anything other than utils.run(),
    it is used to execute the commands (e.g. on a remote machine). By
    default (when set to utils.run()) this just examines os.uname()[4],
    normalizing any i?86 variant to 'i386'.
    """

    # Short circuit for the common, local case.
    if run_function == run:
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise, use the run_function in case it hits a remote machine.
    machine = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', machine):
        return 'i386'
    return machine
1185
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).

    Inspects /bin/sh with file(1) and matches its ELF header description
    against known architecture signatures; falls back to get_arch() when
    no signature matches.
    """
    # (arch name, file(1) output pattern) pairs; patterns are mutually
    # exclusive, so iteration order does not affect the result.
    arch_patterns = (
        ('arm', 'ELF 32-bit.*, ARM,'),
        ('arm64', 'ELF 64-bit.*, ARM aarch64,'),
        ('i386', 'ELF 32-bit.*, Intel 80386,'),
        ('x86_64', 'ELF 64-bit.*, x86-64,'),
    )

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch, pattern in arch_patterns:
        if re.match(pattern, filestr):
            return arch

    return get_arch()
1204
1205
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.

    run_function is used to execute the commands. It defaults to utils.run()
    but a custom method (if provided) should be of the same schema as
    utils.run. It should return a CmdResult object and throw a CmdError
    exception.
    """
    siblings_out = run_function(
            'grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    counts = [int(match) for match in
              re.findall(r'^siblings\s*:\s*(\d+)\s*$', siblings_out, re.M)]
    if not counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(counts) != max(counts):
        raise error.TestError('Number of siblings differ %r' %
                              counts)
    return counts[0]
1223
1224
def set_high_performance_mode(host=None):
    """
    Sets the kernel governor mode to the highest setting.
    Returns previous governor state.

    @param host: optional remote host; defaults to the local machine.
    @return: list of (governor path, previous value) tuples, suitable for
            restore_scaling_governor_states().
    """
    previous_states = get_scaling_governor_states(host)
    set_scaling_governors('performance', host)
    return previous_states
1233
1234
def set_scaling_governors(value, host=None):
    """
    Sets all scaling governor to string value.
    Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.

    @param value: governor name to write to every CPU's cpufreq node.
    @param host: optional remote host; defaults to the local machine.
    """
    paths = _get_cpufreq_paths('scaling_governor', host)
    if not paths:
        logging.info("Could not set governor states, as no files of the form "
                     "'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' "
                     "were found.")
    run_func = host.run if host else system
    for path in paths:
        logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value, path), ignore_status=True)
1251
1252
def _get_cpufreq_paths(filename, host=None):
    """
    Returns a list of paths to the governors.

    @param filename: cpufreq file name, e.g. 'scaling_governor'.
    @param host: optional remote host; defaults to the local machine.
    @return: list of matching sysfs paths, possibly empty.
    """
    run_func = host.run if host else run
    pattern = '/sys/devices/system/cpu/cpu*/cpufreq/' + filename
    # Simple glob expansion; note that CPUs may come and go, causing these
    # paths to change at any time.
    try:
        paths = run_func('echo ' + pattern, verbose=False).stdout.split()
    except error.CmdError:
        return []
    # If the glob result equals itself, then we likely didn't match any real
    # paths (assuming 'cpu*' is not a real path).
    if paths == [pattern]:
        return []
    return paths
1271
1272
def get_scaling_governor_states(host=None):
    """
    Returns a list of (performance governor path, current state) tuples.

    @param host: optional remote host; defaults to the local machine.
    """
    run_func = host.run if host else run
    return [(path, run_func('head -n 1 %s' % path, verbose=False).stdout)
            for path in _get_cpufreq_paths('scaling_governor', host)]
1284
1285
def restore_scaling_governor_states(path_value_list, host=None):
    """
    Restores governor states. Inverse operation to get_scaling_governor_states.

    @param path_value_list: list of (path, value) tuples to write back.
    @param host: optional remote host; defaults to the local machine.
    """
    run_func = host.run if host else system
    for path, value in path_value_list:
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value.rstrip('\n'), path),
                 ignore_status=True)
1295
1296
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).

    @param src: source tree root path.
    @param dest: destination tree root path.
    """
    if not os.path.exists(src):
        return # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest.
        # Context managers replace the old nested try/finally blocks and
        # still guarantee both files are closed even if the copy raises.
        with open(dest, "a") as destfile:
            with open(src) as srcfile:
                destfile.write(srcfile.read())
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return
1332
1333
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        """Field-wise equality against another CmdResult.

        Returns NotImplemented for any other type so Python can try the
        reflected comparison.
        """
        if type(self) == type(other):
            return (self.command == other.command
                    and self.exit_status == other.exit_status
                    and self.stdout == other.stdout
                    and self.stderr == other.stderr
                    and self.duration == other.duration)
        else:
            return NotImplemented


    def __ne__(self, other):
        """Inverse of __eq__.

        Needed for Python 2, which does not derive != from == automatically
        (Python 3 does); without it, != fell back to identity comparison.
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


    def __repr__(self):
        # Indent the (possibly long) command under its label and wrap it.
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(str(self.command)), self.exit_status,
                self.duration, stdout, stderr))
1386
1387
class run_randomly:
    """Collects queued (args, kwargs) invocations and runs them in a random
    order, or in insertion order when run_sequentially is set."""

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one invocation to be executed later by run()."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued invocations one at a time and apply fn to each."""
        while self.test_list:
            # Always draw from the RNG (even when sequential) so the global
            # random state advances identically in both modes.
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1407
1408
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]
    site_file = modulefile or (short_module + ".py")

    # Only attempt the import if the site file sits next to the caller.
    if not os.path.exists(os.path.join(os.path.dirname(path), site_file)):
        return dummy
    return __import__(module, {}, {}, [short_module])
1430
1431
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # A private sentinel lets us distinguish "attribute missing" from any
    # legitimate attribute value, including None.
    missing = object()
    symbol = getattr(site_module, name, missing)
    return dummy if symbol is missing else symbol
1458
1459
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """

    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not derived from baseclass: synthesize a mixin type with baseclass
    # behind the site-specific class in the MRO.
    return type(classname, (site_class, baseclass), {})
1490
1491
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just module attributes, so symbol lookup covers this.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1509
1510
def _get_pid_path(program_name):
    """Return the absolute path of <program_name>.pid located two directory
    levels above this module (the main autotest directory)."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(
            os.path.join(base_dir, "..", "..", "%s.pid" % program_name))
1515
1516
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    # 'with' guarantees the file is closed even if the write raises,
    # replacing the old explicit try/finally.
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1529
1530
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pid_file = _get_pid_path(program_name)
    try:
        os.remove(pid_file)
    except OSError:
        # Fine if the file is already gone; anything else is a real error.
        if os.path.exists(pid_file):
            raise
1543
1544
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    # 'with' replaces the old nested try/finally and guarantees the file is
    # closed. (The old code also recomputed the path via _get_pid_path()
    # when opening; reuse the path computed above instead.)
    try:
        with open(pidfile_path, 'r') as pidfile:
            return int(pidfile.readline())
    except IOError:
        # The file vanished between the existence check and the read.
        if not os.path.exists(pidfile_path):
            return None
        raise
1569
1570
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 1 of the stat file is the command name wrapped in parentheses;
    # slice off the surrounding parens before returning it.
    return get_field(read_file(stat_path), 1)[1:-1]
1581
1582
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return False if pid is None else pid_is_alive(pid)
1594
1595
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        return
    signal_pid(pid, sig)
1606
1607
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed

    @return the relative path leading from reference to path
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    # os.path.relpath implements exactly the algorithm the old hand-rolled
    # code used (strip the longest common prefix, then prepend one '..' per
    # remaining reference component), and additionally handles the
    # path == reference case -- on which the old code crashed with
    # "os.path.join() needs at least one argument" -- by returning '.'.
    return os.path.relpath(os.path.normpath(path),
                           os.path.normpath(reference))
1642
1643
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so the backslashes introduced by the
    # later substitutions are not themselves doubled.
    for special, escaped in (("\\", "\\\\"), ("$", r'\$'),
                             ('"', r'\"'), ('`', r'\`')):
        command = command.replace(special, escaped)
    return command
1664
1665
def sh_quote_word(text, allowlist=_SHELL_QUOTING_ALLOWLIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string, so single quotes can safely wrap any text except text containing
    a single quote character. A single quote is emitted as the sequence '\''
    which translates to:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers, and is safe for nesting,
    e.g. the following is a valid use:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param allowlist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    for char in text:
        if char not in allowlist:
            break
    else:
        # Every character is known-safe; no quoting needed.
        return text
    return "'" + text.replace("'", r"'\''") + "'"
1694
1695
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    options = []
    # Forward the toolchain environment variables as configure flags.
    for env_var, flag in (('CHOST', '--host'),
                          ('CBUILD', '--build'),
                          ('CTARGET', '--target')):
        value = os.environ.get(env_var)
        if value is not None:
            options.append('%s=%s' % (flag, value))
    if extra:
        options.append(extra)

    system('%s %s' % (configure, ' '.join(options)))
1714
1715
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @param make: which make binary to invoke.
    @param timeout: seconds after which the make command is killed.
    @param ignore_status: whether to ignore a non-zero exit status.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    command = '%s %s %s' % (make, makeopts, extra)
    return system(command, timeout=timeout, ignore_status=ignore_status)
1724
1725
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(a, b):
        # The builtin cmp() was removed in Python 3; emulate it.
        return (a > b) - (a < b)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-pad to equal width so '2' < '10' compares numerically.
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the version with more components wins.
    return _cmp(len(ax), len(ay))
1759
1760
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    pattern = re.compile(r'(\w+)[:=](.*)$')
    parsed = {}
    for item in args:
        match = pattern.match(item)
        if not match:
            # Malformed arguments are skipped, not fatal.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", item, pattern.pattern)
            continue
        parsed[match.group(1).lower()] = match.group(2)
    return parsed
1781
1782
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _bind_and_report(port, sock_type, sock_proto):
        # Returns the bound port number, or None if binding failed.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    # On the 2.6 kernel, probing a UDP socket returns the same port over
    # and over, so always ask TCP first, then confirm the port on UDP.
    while True:
        candidate = _bind_and_report(0, socket.SOCK_STREAM,
                                     socket.IPPROTO_TCP)
        if candidate and _bind_and_report(candidate, socket.SOCK_DGRAM,
                                          socket.IPPROTO_UDP):
            return candidate
1813
1814
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return input("%s INFO | %s (y/n) " % (timestamp, question))
1827
1828
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.

    @param address: The MSR address to read.
    @param cpu: Index of the CPU whose MSR device node is read (default 0).

    @return The 64-bit MSR value as an integer.
    """
    # Open in binary mode: unbuffered I/O (buffering=0) is only allowed for
    # binary files on Python 3, and struct.unpack() requires bytes anyway.
    with open('/dev/cpu/%s/msr' % cpu, 'rb', 0) as fd:
        fd.seek(address)
        return struct.unpack('=Q', fd.read(8))[0]
1836
1837
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Returns the value of func().  If |expected_value|, |min_threshold|, and
    |max_threshold| are not set, returns immediately.

    If |expected_value| is set, polls the return value until |expected_value| is
    reached, and returns that value.

    If either |max_threshold| or |min_threshold| is set, this function will
    will repeatedly call func() until the return value reaches or exceeds one of
    these thresholds.

    Polling will stop after |timeout_sec| regardless of these thresholds.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    # With no stop criteria at all, the very first sample is returned.
    unconditional = (expected_value is None and min_threshold is None
                     and max_threshold is None)
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        if unconditional:
            return value
        if expected_value is not None and value == expected_value:
            return value
        if min_threshold is not None and value <= min_threshold:
            return value
        if max_threshold is not None and value >= max_threshold:
            return value
        if time.time() >= deadline:
            return value
        time.sleep(0.1)
1883
1884
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Returns the value of func().

    The function polls the return value until it is different from |old_value|,
    and returns that value.

    Polling will stop after |timeout_sec|.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        # Stop as soon as the value changes, or once the deadline passes.
        if value != old_value or time.time() >= deadline:
            return value
        time.sleep(0.1)
1915
1916
# Shared handle to the parsed global/shadow configuration for this module.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names/addresses that always refer to the local machine.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'

# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24))
# Populated at import time from the CROS.restricted_subnets config value.
RESTRICTED_SUBNETS = []
1939
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS.restricted_subnets config."""
    subnets = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    # TODO(dshi): Remove the code to split subnet with `:` after R51 is
    # off stable channel, and update shadow config to use `/` as
    # delimiter for consistency.
    for entry in subnets:
        delimiter = '/' if '/' in entry else ':'
        ip, mask_bits = entry.split(delimiter)
        RESTRICTED_SUBNETS.append((ip, int(mask_bits)))
1950
_setup_restricted_subnets()

# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string: '\d' in a regular literal is an invalid escape sequence and
# triggers a DeprecationWarning on Python 3.6+.
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1958
1959
def get_moblab_serial_number():
    """Gets a unique identifier for the moblab.

    Serial number is the prefered identifier, use it if
    present, however fallback is the ethernet mac address.
    """
    for vpd_key in ('serial_number', 'ethernet_mac'):
        try:
            cmd_result = run('sudo vpd -g %s' % vpd_key)
        except error.CmdError as e:
            # Log and fall through to the next VPD key.
            logging.error(str(e))
            logging.info(vpd_key)
            continue
        if cmd_result and cmd_result.stdout:
            return cmd_result.stdout
    return 'NoSerialNumber'
1975
1976
def ping(host,
         deadline=None,
         tries=None,
         timeout=60,
         ignore_timeout=False,
         user=None):
    """Attempt to ping |host|.

    Shell out to 'ping' if host is an IPv4 addres or 'ping6' if host is an
    IPv6 address to try to reach |host| for |timeout| seconds.
    Returns exit code of ping.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.

    Specifying |deadline| or |count| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @param ignore_timeout: If true, timeouts won't raise CmdTimeoutError.
    @param user: Run as a specific user
    @return exit code of ping command.
    """
    args = [host]
    # A literal IPv6 address contains at least two ':' separators; use
    # ping6 for those and plain ping otherwise.
    cmd = 'ping6' if re.search(r':.*:', host) else 'ping'

    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)

    if user != None:
        # Re-wrap the whole ping invocation as: su <user> -c "<ping ...>"
        args = [user, '-c', ' '.join([cmd] + args)]
        cmd = 'su'

    result = run(cmd,
                 args=args,
                 verbose=True,
                 ignore_status=True,
                 timeout=timeout,
                 ignore_timeout=ignore_timeout,
                 stderr_tee=TEE_TO_LOGS)

    # Sometimes the ping process times out even though a deadline is set. If
    # ignore_timeout is set, it will fall through to here instead of raising.
    if result is None:
        logging.debug('Unusual ping result (timeout)')
        # From man ping: If a packet count and deadline are both specified, and
        # fewer than count packets are received by the time the deadline has
        # arrived, it will also exit with code 1. On other error it exits with
        # code 2.
        return 1 if deadline and tries else 2

    rc = result.exit_status
    lines = result.stdout.splitlines()

    # rc=0: host reachable
    # rc=1: host unreachable
    # other: an error (do not abbreviate)
    if rc in (0, 1):
        # Report the two stats lines, as a single line.
        # [-2]: packets transmitted, 1 received, 0% packet loss, time 0ms
        # [-1]: rtt min/avg/max/mdev = 0.497/0.497/0.497/0.000 ms
        stats = lines[-2:]
        # Drop blank lines so only real statistics are joined below.
        while '' in stats:
            stats.remove('')

        if stats or len(lines) < 2:
            logging.debug('[rc=%s] %s', rc, '; '.join(stats))
        else:
            # Stats lines were all blank; fall back to the raw output.
            logging.debug('[rc=%s] Ping output:\n%s',
                          rc, result.stdout)
    else:
        output = result.stdout.rstrip()
        if output:
            logging.debug('Unusual ping result (rc=%s):\n%s', rc, output)
        else:
            logging.debug('Unusual ping result (rc=%s).', rc)
    return rc
2062
2063
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    shortname = hostname.split('.')[0]
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    fqdn = '%s.%s' % (shortname, dns_zone)
    logging.debug('Checking if host %s is in lab zone.', fqdn)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
2079
2080
def host_is_in_power_lab(hostname):
    """Check if the hostname is in power lab.

    Example: chromeos1-power-host2.cros

    @param hostname: The hostname to check.
    @returns True if hostname match power lab hostname, otherwise False.
    """
    power_host_re = re.compile(
            r'chromeos\d+-power-host\d+(\.cros(\.corp(\.google\.com)?)?)?$')
    return bool(power_host_re.match(hostname))
2091
2092
def get_power_lab_wlan_hostname(hostname):
    """Return wlan hostname for host in power lab.

    Example: chromeos1-power-host2.cros -> chromeos1-power-host2-wlan.cros

    @param hostname: The hostname in power lab.
    @returns wlan hostname.
    """
    # Append '-wlan' to the leading hostname component, keeping any
    # domain suffix untouched.
    first, dot, rest = hostname.partition('.')
    return '%s-wlan%s%s' % (first, dot, rest)
2104
2105
def in_moblab_ssp():
    """Detects if this execution is inside an SSP container on moblab."""
    moblab_per_config = CONFIG.get_config_value('SSP', 'is_moblab', type=bool,
                                                default=False)
    return moblab_per_config and is_in_container()
2111
2112
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """
    # Aborted jobs have no views.
    if not job_views:
        return None

    # Return the first view carrying a chrome version attribute.
    for view in job_views:
        attributes = view.get('attributes')
        if attributes and constants.CHROME_VERSION in list(attributes.keys()):
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
2144
2145
def get_moblab_id():
    """Gets the moblab random id.

    The random id file is cached on disk. If it does not exist, a new file is
    created the first time.

    @returns the moblab random id.
    """
    moblab_id_filepath = '/home/moblab/.moblab_id'
    try:
        if not os.path.exists(moblab_id_filepath):
            # First call: generate a fresh id and persist it.
            random_id = uuid.uuid1().hex
            with open(moblab_id_filepath, 'w') as moblab_id_file:
                moblab_id_file.write('%s' % random_id)
            return random_id
        with open(moblab_id_filepath, 'r') as moblab_id_file:
            return moblab_id_file.read()
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(moblab_id_filepath, 'r') as moblab_id_file:
            return moblab_id_file.read()
2171
2172
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    For the normal use case this is the results_storage_server in the
    global_config.

    However partners using Moblab will be offloading their results to a
    subdirectory of their image storage buckets. The subdirectory is
    determined by the MAC Address of the Moblab device.

    @returns gsuri to offload test results to.
    """
    # For non-moblab, use results_storage_server or default.
    if not is_moblab():  # pylint: disable=undefined-variable
        return DEFAULT_OFFLOAD_GSURI

    # For moblab, use results_storage_server or image_storage_server as bucket
    # name and mac-address/moblab_id as path.
    gsuri = DEFAULT_OFFLOAD_GSURI or (
            '%sresults/' % CONFIG.get_config_value('CROS',
                                                   'image_storage_server'))
    return '%s%s/%s/' % (gsuri, get_moblab_serial_number(), get_moblab_id())
2197
2198
2199# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2200# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    CANNED_ACLS = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    _GSUTIL_BIN = 'gsutil'
    acl_cmd = None
    if acl in CANNED_ACLS:
        # Canned ACLs can be applied directly during the copy.
        cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        # Upload as private first; the custom ACL is applied afterwards.
        cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)
    if not result_dir:
        # No tracing requested: just run the upload (plus ACL step, if any).
        run(cmd, timeout=transfer_timeout, verbose=True)
        if acl_cmd:
            run(acl_cmd, timeout=acl_timeout, verbose=True)
        return True
    # Tracing requested: tee all gsutil output into <result_dir>/tracing.
    with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
        ftrace.write('Preamble\n')
        run(cmd, timeout=transfer_timeout, verbose=True,
                       stdout_tee=ftrace, stderr_tee=ftrace)
        if acl_cmd:
            ftrace.write('\nACL setting\n')
            # Apply the passed in ACL xml file to the uploaded object.
            run(acl_cmd, timeout=acl_timeout, verbose=True,
                           stdout_tee=ftrace, stderr_tee=ftrace)
        ftrace.write('Postamble\n')
        return True
2252
2253
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output(' '.join(['gsutil', 'ls', uri_pattern]))
    return [line.rstrip() for line in listing.splitlines() if line]
2267
2268
def nuke_pids(pid_list, signal_queue=None):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Queue of signals to send the PID's to terminate them.

    @return: A mapping of the signal name to the number of processes it
        was sent to.

    @raise error.AutoservRunError: if any process survived the full
        signal queue.
    """
    if signal_queue is None:
        signal_queue = [signal.SIGTERM, signal.SIGKILL]
    sig_count = {}
    # Though this is slightly hacky it beats hardcoding names anyday.
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        if sig == signal.SIGKILL:
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # Fix: the format arguments were previously passed as a second
            # argument to append(), which raised TypeError instead of
            # recording the failure.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2311
2312
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    if host in _LOCAL_HOST_LIST:
        return socket.gethostname()
    return host
2322
2323
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    This method will convert all socket timeouts to
    TimeoutExceptions, so we can use it in conjunction
    with the rpc retry decorator and continue to handle
    other URLErrors as we see fit.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    previous_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.request.urlopen(url, data=data)
    except urllib.error.URLError as e:
        # Only genuine socket timeouts are converted; every other
        # URLError/HTTPError propagates unchanged.
        if type(e.reason) is socket.timeout:
            raise error.TimeoutException(str(e))
        raise
    finally:
        # Always restore the process-wide default timeout.
        socket.setdefaulttimeout(previous_timeout)
2354
2355
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string: '\d' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning on Python 3.6+).
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    ver = match.group(0) if match else version_string
    milestone = match.group(1) if match else ''
    return ver, milestone
2373
2374
def parse_gs_uri_version(uri):
    """Pull out major.minor.sub from image URI

    @param uri: A GS URI for a bucket containing ChromeOS build artifacts
    @return: The build version as a string in the form 'major.minor.sub'

    """
    # Strip everything up to and including the 'R<milestone>-' or
    # 'LATEST-' marker, then drop any trailing slashes.
    build_prefix = re.compile('.*(R[0-9]+|LATEST)-')
    return build_prefix.sub('', uri).strip('/')
2383
2384
def compare_gs_uri_build_versions(x, y):
    """Compares two bucket URIs by their version string

    @param x: A GS URI for a bucket containing ChromeOS build artifacts
    @param y: Another GS URI for a bucket containing ChromeOS build artifacts
    @return: 1 if x > y, -1 if x < y, and 0 if x == y

    """
    def _version_components(uri):
        # 'gs://.../R75-<major>.<minor>.<sub>' -> [major, minor, sub]
        return [int(part) for part in parse_gs_uri_version(uri).split('.')]

    for a, b in zip(_version_components(x), _version_components(y)):
        if a != b:
            return 1 if a > b else -1

    return 0
2408
2409
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost.

    @raise socket.gaierror: If server name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2427
2428
def get_function_arg_value(func, arg_name, args, kwargs):
    """Get the value of the given argument for the function.

    @param func: Function being called with given arguments.
    @param arg_name: Name of the argument to look for value.
    @param args: arguments for function to be called.
    @param kwargs: keyword arguments for function to be called.

    @return: The value of the given argument for the function.

    @raise ValueError: If the argument is not listed function arguemnts.
    @raise KeyError: If no value is found for the given argument.
    """
    if arg_name in kwargs:
        return kwargs[arg_name]

    # inspect.getargspec() was removed in Python 3.11; use
    # getfullargspec() when available (Python 3), keeping the old call
    # as a fallback for Python 2.
    if hasattr(inspect, 'getfullargspec'):
        argspec = inspect.getfullargspec(func)
    else:
        argspec = inspect.getargspec(func)
    index = argspec.args.index(arg_name)
    try:
        return args[index]
    except IndexError:
        try:
            # The argument can use a default value. Reverse the default value
            # so argument with default value can be counted from the last to
            # the first.
            return argspec.defaults[::-1][len(argspec.args) - index - 1]
        except IndexError:
            raise KeyError('Argument %s is not given a value. argspec: %s, '
                           'args:%s, kwargs:%s' %
                           (arg_name, argspec, args, kwargs))
2459
2460
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    init_binary = os.readlink('/proc/1/exe')
    return os.path.basename(init_binary) == 'systemd'
2467
2468
def get_real_user():
    """Get the real user that runs the script.

    The function check environment variable SUDO_USER for the user if the
    script is run with sudo. Otherwise, it returns the value of environment
    variable USER.

    @return: The user name that runs the script.

    """
    # Prefer SUDO_USER; an empty or missing value falls back to USER.
    return os.environ.get('SUDO_USER') or os.environ.get('USER')
2483
2484
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if has_systemd():
        # systemctl show prints 'MainPID=0' if the service is not running.
        result = run('systemctl show -p MainPID %s' % service_name,
                     ignore_status=True)
        return int(result.stdout.split('=')[1])
    # Upstart: 'status <svc>' prints e.g. 'svc start/running, process 1234'.
    result = run('status %s' % service_name, ignore_status=True)
    if 'start/running' not in result.stdout:
        return 0
    return int(result.stdout.split()[3])
2503
2504
def control_service(service_name, action='start', ignore_status=True):
    """Controls a service. It can be used to start, stop or restart
    a service.

    @param service_name: string service to be restarted.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.

    @raise ValueError: if action is not start, stop or restart.
    """
    if action not in ('start', 'stop', 'restart'):
        raise ValueError('Unknown action supplied as parameter.')

    command = '%s %s' % (action, service_name)
    if has_systemd():
        command = 'systemctl ' + command
    return system(command, ignore_status=ignore_status)
2524
2525
def restart_service(service_name, ignore_status=True):
    """Restart the given service.

    @param service_name: string service to be restarted.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='restart', ignore_status=ignore_status)
2537
2538
def start_service(service_name, ignore_status=True):
    """Start the given service.

    @param service_name: string service to be started.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='start', ignore_status=ignore_status)
2550
2551
def stop_service(service_name, ignore_status=True):
    """Stop the given service.

    @param service_name: string service to be stopped.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(
            service_name, action='stop', ignore_status=ignore_status)
2563
2564
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        run('sudo -n true')
        return False
    except error.CmdError:
        # logging.warn is a deprecated alias (removed in Python 3.13);
        # logging.warning is the supported spelling.
        logging.warning('sudo command requires password.')
        return True
2577
2578
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    # Classic lxc containers are visible in init's cgroup hierarchy.
    cgroup_check = run('grep -q "/lxc/" /proc/1/cgroup',
                       verbose=False, ignore_status=True)
    if cgroup_check.exit_status == 0:
        return True

    # lxd/lxc containers instead export a "container" environment variable.
    return os.environ.get('container') == 'lxc'
2594
2595
def is_flash_installed():
    """Check whether the Adobe Flash binary is present on this system.

    The Adobe Flash binary is only distributed with internal builds.
    """
    required_files = ('/opt/google/chrome/pepper/libpepflashplayer.so',
                      '/opt/google/chrome/pepper/pepper-flash.info')
    return all(os.path.exists(path) for path in required_files)
2602
2603
def verify_flash_installed():
    """Raise TestNAError unless the Adobe Flash binary is installed.

    The Adobe Flash binary is only distributed with internal builds.
    Warn users of public builds of the extra dependency.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2611
2612
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IPv4 addresses are in the same subnet.

    Both addresses are dotted-quad strings, e.g. '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Netmask with the top |mask_bits| bits set, e.g. 24 -> 0xffffff00.
    mask = ((2<<mask_bits-1) -1)<<(32-mask_bits)

    def _as_int(ip):
        """Convert a dotted-quad string to its 32-bit integer value."""
        return struct.unpack('!I', socket.inet_aton(ip))[0]

    return (_as_int(ip_1) & mask) == (_as_int(ip_2) & mask)
2629
2630
def get_ip_address(hostname=None):
    """Get the IP address of given hostname or current machine.

    @param hostname: Hostname of a DUT, default value is None.

    @return: The IP address of given hostname. If hostname is not given then
             we'll try to query the IP address of the current machine and
             return. Returns None if resolution of a given hostname fails.
    """
    if hostname:
        try:
            return socket.gethostbyname(hostname)
        except socket.gaierror as e:
            logging.error(
                'Failed to get IP address of %s, error: %s.', hostname, e)
            return None

    # Determine the outgoing IP by "connecting" a UDP socket towards a public
    # address; SOCK_DGRAM connect() sends no packets, it only picks a route.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # Close the socket even if connect() raises, to avoid leaking the fd.
        s.close()
2652
2653
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    @raise ValueError: If neither `servers` nor `server_ip_map` is given.
    """
    matched_servers = []
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = list(server_ip_map.keys())
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    for server in servers:
        # Only fall back to a (slow) name lookup when the pre-built map has
        # no entry; dict.get(k, default) would evaluate get_ip_address()
        # eagerly for every server, defeating the purpose of the map.
        if server in server_ip_map:
            server_ip = server_ip_map[server]
        else:
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2682
2683
def get_restricted_subnet(hostname, restricted_subnets=None):
    """Get the restricted subnet of given hostname.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
             subnet, or None if no subnet matches or the host IP is unknown.
    """
    if restricted_subnets is None:
        restricted_subnets = RESTRICTED_SUBNETS
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for subnet_ip, mask_bits in restricted_subnets:
        if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
            return subnet_ip, mask_bits
    return None
2702
2703
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    The method tries to locate the wireless ssid in the same subnet of given
    hostname first. If none is found, it returns the default setting in
    CLIENT/wireless_ssid.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                           default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return default_ssid

    # All wireless ssid entries found in the global config.
    ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)

    # Several subnets may match; prefer the most specific one, i.e. the
    # entry with the highest maskbit.
    best_ssid = default_ssid
    best_maskbit = -1
    for key, ssid in ssids.items():
        # Config keys matching WIRELESS_SSID_PATTERN look like
        # wireless_ssid_[subnet_ip]/[maskbit], e.g.
        # wireless_ssid_192.168.0.1/24; extract the subnet and mask bits.
        subnet_ip, maskbit = re.match(WIRELESS_SSID_PATTERN, key).groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
                is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid = ssid
            best_maskbit = maskbit
    return best_ssid
2741
2742
def parse_launch_control_build(build_name):
    """Get branch, target, build_id from the given Launch Control build_name.

    @param build_name: Name of a Launch Control build, should be formated as
                       branch/target/build_id

    @return: Tuple of branch, target, build_id
    @raise ValueError: If the build_name is not correctly formated.
    """
    components = build_name.split('/')
    # Tuple unpacking enforces the three-part format; anything else raises
    # ValueError for the caller.
    branch, target, build_id = components
    return branch, target, build_id
2754
2755
def parse_android_target(target):
    """Get board and build type from the given target.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of board, build_type
    @raise ValueError: If the target is not correctly formated.
    """
    # Unpacking the split result raises ValueError unless the target has
    # exactly one '-' separator.
    board, build_type = target.split('-')
    return board, build_type
2766
2767
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    The Launch Control target has the format of build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug. This method extracts the build target
    and type from the target name.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'userdebug'), or
             (None, None) if the target does not match the expected format.
    """
    match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not match:
        return None, None
    return match.group('build_target'), match.group('build_type')
2784
2785
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        _, target, _ = parse_launch_control_build(build)
    except ValueError:
        # The name is not branch/target/build_id shaped.
        return False
    build_target, _ = parse_launch_control_target(target)
    # A parsable target-type pair marks a Launch Control build.
    return bool(build_target)
2805
2806
def which(exec_file):
    """Finds an executable file.

    If the file name contains a path component, it is checked as-is.
    Otherwise, we check with each of the path components found in the system
    PATH prepended. This behavior is similar to the 'which' command-line tool.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    if os.path.dirname(exec_file):
        # An explicit path component is given; check it directly.
        return exec_file if os.access(exec_file, os.X_OK) else None
    sys_path = os.environ.get('PATH')
    if not sys_path:
        return None
    for directory in sys_path.split(os.pathsep):
        candidate = os.path.join(directory, exec_file)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
2826
2827
class TimeoutError(error.TestError):
    """Error raised when poll_for_condition() failed to poll within time.

    It may embed a reason (either a string or an exception object) so that
    the caller of poll_for_condition() can handle failure better.
    """

    def __init__(self, message=None, reason=None):
        """Constructor.

        It supports three invocations:
        1) TimeoutError()
        2) TimeoutError(message): with customized message.
        3) TimeoutError(message, reason): with message and reason for timeout.
        """
        self.reason = reason
        if reason:
            # Fold the reason into the message so it shows up in str(exc).
            reason_text = 'Reason: ' + repr(reason)
            message = (message + '. ' + reason_text) if message else reason_text

        if message:
            super(TimeoutError, self).__init__(message)
        else:
            super(TimeoutError, self).__init__()
2855
2856
class Timer(object):
    """A synchronous timer to evaluate if timout is reached.

    Usage:
      timer = Timer(timeout_sec)
      while timer.sleep(sleep_interval):
        # do something...
    """
    def __init__(self, timeout):
        """Constructor.

        Note that timer won't start until next() is called.

        @param timeout: timer timeout in seconds.
        """
        self.timeout = timeout
        # 0 means "not armed yet"; the first sleep() call sets the deadline.
        self.deadline = 0

    def sleep(self, interval):
        """Checks if it has sufficient time to sleep; sleeps if so.

        It blocks for |interval| seconds if it has time to sleep.
        If timer is not ticked yet, kicks it off and returns True without
        sleep.

        @param interval: sleep interval in seconds.
        @return True if it has sleeped or just kicked off the timer. False
                otherwise.
        """
        now = time.time()
        if not self.deadline:
            # First call: arm the timer, do not block.
            self.deadline = now + self.timeout
            return True
        if now + interval >= self.deadline:
            # Not enough time left for a full interval.
            return False
        time.sleep(interval)
        return True
2894
2895
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition is evaluated to true.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    deadline = time.time() + timeout
    while True:
        value = condition()
        if value:
            return value
        # Give up when there is no room for even one more sleep interval.
        if time.time() + sleep_interval > deadline:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            if desc:
                desc = 'Timed out waiting for condition: ' + desc
            else:
                desc = 'Timed out waiting for unnamed condition'
            logging.error(desc)
            raise TimeoutError(message=desc)

        time.sleep(sleep_interval)
2934
2935
def poll_for_condition_ex(condition, timeout=10, sleep_interval=0.1, desc=None):
    """Polls until a condition is evaluated to true or until timeout.

    Similar to poll_for_condition, except that it handles exceptions
    condition() raises. If timeout is not reached, the exception is dropped and
    poll for condition after a sleep; otherwise, the exception is embedded into
    TimeoutError to raise.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of the condition

    @return The evaluated value that caused the poll loop to terminate.

    @raise TimeoutError. If condition() raised exception, it is embedded in
           raised TimeoutError.
    """
    timer = Timer(timeout)
    # Holds the last exception condition() raised, if any, so it can be
    # reported in the TimeoutError.
    reason = None
    while timer.sleep(sleep_interval):
        reason = None
        try:
            value = condition()
            if value:
                return value
        except BaseException as e:
            reason = e

    if desc is None:
        # Fixed typo: was 'unamed condition'.
        desc = 'unnamed condition'
    if reason is None:
        # Fixed typo: was 'condition evaluted as false'.
        reason = 'condition evaluated as false'
    to_raise = TimeoutError(message='Timed out waiting for ' + desc,
                            reason=reason)
    logging.error(str(to_raise))
    raise to_raise
2973
2974
def poll_till_condition_holds(condition,
                              exception=None,
                              timeout=10,
                              sleep_interval=0.1,
                              hold_interval=5,
                              desc=None):
    """Polls until a condition is evaluated to true for a period of time

    This function checks that a condition remains true for the 'hold_interval'
    seconds after it first becomes true. If the condition becomes false
    subsequently, the timer is reset. This function will not detect if
    condition becomes false for any period of time less than the sleep_interval.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param hold_interval: time period for which the condition should hold true
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    start_time = time.time()
    # Timestamp of when the condition last became (and stayed) true, or None
    # while the condition is false.
    hold_started_at = None

    while True:
        value = condition()
        if not value:
            # Condition broke; restart the hold window from scratch.
            hold_started_at = None
        elif hold_started_at is None:
            hold_started_at = time.time()
        elif time.time() - hold_started_at > hold_interval:
            return value

        time_remaining = timeout - (time.time() - start_time)
        if time_remaining < hold_interval:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            if desc:
                desc = 'Timed out waiting for condition: ' + desc
            else:
                desc = 'Timed out waiting for unnamed condition'
            logging.error(desc)
            raise TimeoutError(message=desc)

        time.sleep(sleep_interval)
3032
3033
def shadowroot_query(element, action):
    """Builds JS that recursively queries shadowRoot for an element.

    @param element: element to query for.
    @param action: action to be performed on the element.

    @return JS functions to execute.

    """
    # /deep/ CSS query has been removed from ShadowDOM. The only way to access
    # elements now is to recursively query in each shadowRoot.
    shadowroot_script = """
    function deepQuerySelectorAll(root, targetQuery) {
        const elems = Array.prototype.slice.call(
            root.querySelectorAll(targetQuery[0]));
        const remaining = targetQuery.slice(1);
        if (remaining.length === 0) {
            return elems;
        }

        let res = [];
        for (let i = 0; i < elems.length; i++) {
            if (elems[i].shadowRoot) {
                res = res.concat(
                    deepQuerySelectorAll(elems[i].shadowRoot, remaining));
            }
        }
        return res;
    };
    var testing_element = deepQuerySelectorAll(document, %s);
    testing_element[0].%s;
    """
    # Interpolate the caller's query and action into the template.
    return shadowroot_script % (element, action)
3068
3069
def threaded_return(function):
    """
    Decorator to add to a function to get that function to return a thread
    object, but with the added benefit of storing its return value.

    @param function: function object to be run in the thread

    @return a threading.Thread object, that has already been started, is
            recording its result, and can be completed and its result
            fetched by calling .finish()
    """
    def _run_and_record(result_queue, *args, **kwargs):
        """
        Invokes the wrapped function and pushes its return value onto the
        supplied thread-safe queue.
        """
        result_queue.put(function(*args, **kwargs))

    def _finish(thread):
        """
        Fetches the thread's result (blocking until available) and joins
        the thread.
        """
        result = thread.get()
        thread.join()
        return result

    def wrapper(*args, **kwargs):
        """
        Starts the worker thread and attaches result-fetching helpers to
        the returned thread object.
        """
        result_queue = six.moves.queue.Queue()
        thread = threading.Thread(target=_run_and_record,
                                  args=(result_queue,) + args, kwargs=kwargs)
        thread.start()
        thread.result_queue = result_queue
        thread.get = result_queue.get
        thread.finish = lambda: _finish(thread)
        return thread

    # for the decorator
    return wrapper
3113
3114
@threaded_return
def background_sample_until_condition(
        function,
        condition=lambda: True,
        timeout=10,
        sleep_interval=1):
    """
    Records the value of the function until the condition is False or the
    timeout is reached. Runs as a background thread, so it's nonblocking.
    Usage might look something like:

    def function():
        return get_value()
    def condition():
        return self._keep_sampling

    # main thread
    sample_thread = utils.background_sample_until_condition(
        function=function,condition=condition)
    # do other work
    # ...
    self._keep_sampling = False
    # blocking call to get result and join the thread
    result = sample_thread.finish()

    @param function: function object, 0 args, to be continually polled
    @param condition: function object, 0 args, to say when to stop polling
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: number of seconds to wait in between polls

    @return a thread object that has already been started and is running in
            the background, whose run must be stopped with .finish(), which
            also returns a list of the results from the sample function
    """
    samples = []

    # Allow one extra sleep_interval so a sample taken right at the timeout
    # boundary is still recorded.
    deadline = datetime.datetime.now() + datetime.timedelta(
            seconds=timeout + sleep_interval)

    while condition() and datetime.datetime.now() < deadline:
        samples.append(function())
        time.sleep(sleep_interval)
    return samples
3158
3159
class metrics_mock(metrics_mock_class.mock_class_base):
    """Mock class for metrics in case chromite is not installed.

    Adds no behavior of its own; everything comes from
    metrics_mock_class.mock_class_base.
    """
    pass
3163
3164
# One parsed /proc/PID/mountinfo entry: the mount's root, its mount point,
# and the list of optional-field tag names (fields before the '-' separator).
MountInfo = collections.namedtuple('MountInfo', ['root', 'mount_point', 'tags'])
3166
3167
def get_mount_info(process='self', mount_point=None):
    """Retrieves information about currently mounted file systems.

    @param mount_point: (optional) The mount point (a path).  If this is
                        provided, only information about the given mount point
                        is returned.  If this is omitted, info about all mount
                        points is returned.
    @param process: (optional) The process id (or the string 'self') of the
                    process whose mountinfo will be obtained.  If this is
                    omitted, info about the current process is returned.

    @return A generator yielding one MountInfo object for each relevant mount
            found in /proc/PID/mountinfo.
    """
    with open('/proc/{}/mountinfo'.format(process)) as f:
        for line in f:
            # TODO b:169251326 terms below are set outside of this codebase
            # and should be updated when possible. ("master" -> "main")
            # Lines follow the proc(5) mountinfo format, e.g.:
            # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root \
            #     rw,errors=continue
            # Field 3 is the root of the mount, field 4 the mount point,
            # fields 6+ are optional tags (shared-mount info) terminated by
            # a lone '-'.
            fields = line.split()
            if mount_point is not None and fields[4] != mount_point:
                continue
            tags = []
            for field in fields[6:]:
                if field == '-':
                    break
                # Keep only the tag name, dropping the ':<id>' suffix.
                tags.append(field.split(':')[0])
            yield MountInfo(root=fields[3],
                            mount_point=fields[4],
                            tags=tags)
3207
3208
# Appended suffix for chart tablet naming convention in test lab
CHART_ADDRESS_SUFFIX = '-tablet'


def get_lab_chart_address(hostname):
    """Convert lab DUT hostname to address of camera box chart tablet.

    Only meaningful inside the lab container environment; returns None
    elsewhere.
    """
    if not is_in_container():
        return None
    return hostname + CHART_ADDRESS_SUFFIX
3216
3217
def cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Python 2 exposes the code object as func_code; mock objects may lack
    # __code__ entirely, so probe func_code first.
    if hasattr(func, "func_code"):
        code = func.func_code
    else:
        code = func.__code__
    flags = code.co_flags

    # CO_VARARGS (0x04): func takes *args, so every positional is accepted.
    if flags & 0x04:
        p_args = args
    else:
        p_args = ()

    # CO_VARKEYWORDS (0x08): func takes **kwargs, so every keyword is
    # accepted; otherwise keep only the names func declares.
    if flags & 0x08:
        p_dargs = dargs
    else:
        p_dargs = {param: dargs[param]
                   for param in get_nonstar_args(func)
                   if param in dargs}

    return p_args, p_dargs
3257
3258
def cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    picked_args, picked_dargs = cherry_pick_args(func, args, dargs)
    return func(*picked_args, **picked_dargs)
3264
3265
def get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to chose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    code = func.__code__
    # co_argcount counts only the plain positional parameters, so slicing
    # co_varnames by it drops *args, **kwargs and locals.
    return code.co_varnames[:code.co_argcount]
3277
# CRC-8 lookup table (polynomial 0x07). Built once at import time rather
# than on every crc8() call, which previously rebuilt the 256-entry list
# per invocation.
_CRC8_TABLE = [ 0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
                0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d,
                0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65,
                0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d,
                0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5,
                0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd,
                0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85,
                0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd,
                0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2,
                0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea,
                0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2,
                0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a,
                0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32,
                0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a,
                0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42,
                0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a,
                0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c,
                0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4,
                0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec,
                0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4,
                0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c,
                0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44,
                0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c,
                0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34,
                0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b,
                0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63,
                0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b,
                0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13,
                0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb,
                0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83,
                0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb,
                0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3,
              ]


def crc8(buf):
    """Calculate CRC8 for a given int list.

    This is a simple table-driven CRC8 (polynomial 0x07).

    Args:
      buf: A list of byte integer
    Returns:
      A crc value in integer
    Raises:
      error.TestError: if buf is not a list or contains non-integers.
    """
    if not isinstance(buf, list):
        raise error.TestError('buf should be an integer list.')
    if not all(isinstance(i, int) for i in buf):
        raise error.TestError('buf should contain integers only.')

    rv = 0
    for byte in buf:
        # Each step folds one input byte into the running CRC via the table.
        rv = _CRC8_TABLE[(rv ^ byte) & 0xff]
    return rv
3331