• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Lint as: python2, python3
2# Copyright (c) 2017 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""
7Convenience functions for use by tests or whomever.
8
9There's no really good way to do this, as this isn't a class we can do
10inheritance with, just a collection of static methods.
11"""
12
13# pylint: disable=missing-docstring
14
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import datetime
import errno
import functools
import inspect
import json
import logging
import os
import pickle
import random
import re
import resource
import select
import shutil
import signal
import socket
import six
from six.moves import input
from six.moves import range
from six.moves import urllib
from six.moves import zip
from six.moves import zip_longest
import six.moves.urllib.parse
import string
import struct
import subprocess
import textwrap
import threading
import time
import six.moves.queue
import uuid
import warnings
50
51try:
52    import hashlib
53except ImportError as e:
54    if six.PY2:
55        import md5
56        import sha
57    else:
58        raise ImportError("Broken hashlib imports %s", e)
59
60import common
61
62from autotest_lib.client.common_lib import env
63from autotest_lib.client.common_lib import error
64from autotest_lib.client.common_lib import global_config
65from autotest_lib.client.common_lib import logging_manager
66from autotest_lib.client.common_lib import metrics_mock_class
67from autotest_lib.client.cros import constants
68
69# pylint: disable=wildcard-import
70from autotest_lib.client.common_lib.lsbrelease_utils import *
71
72
def deprecated(func):
    """Decorator marking a function as deprecated.

    Calling the wrapped function emits a DeprecationWarning (which this
    module routes through logging via its custom warning handler) and then
    delegates to the original function unchanged.

    @param func: callable to mark as deprecated.
    @return: wrapper preserving the original function's metadata.
    """
    # functools.wraps copies __name__, __doc__ and __dict__ like the old
    # hand-rolled code did, and additionally __module__/__qualname__ and
    # sets __wrapped__, so introspection keeps working.
    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
84
85
class _NullStream(object):
    """Minimal file-like sink that silently discards everything written."""

    def write(self, data):
        """Discard |data|."""
        return None

    def flush(self):
        """No-op; nothing is ever buffered."""
        return None
93
94
# Sentinel: tee a stream into the logging system (see get_stream_tee_file,
# which maps it to a logging_manager.LoggingFile).
TEE_TO_LOGS = object()
# Shared do-nothing stream used when no tee destination was requested.
_the_null_stream = _NullStream()

# Sentinel: discard a stream entirely; the subprocess output is not kept.
DEVNULL = object()

DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
_SHELL_QUOTING_ALLOWLIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=>|')
111
def custom_warning_handler(message, category, filename, lineno, file=None,
                           line=None):
    """Custom handler to log at the WARNING error level. Ignores |file|.

    The signature matches warnings.showwarning so this function can be
    installed as the process-wide warning hook (see assignment below).
    """
    logging.warning(warnings.formatwarning(message, category, filename, lineno,
                                           line))

# Route all Python warnings through the logging system.
warnings.showwarning = custom_warning_handler
119
def get_stream_tee_file(stream, level, prefix=''):
    """Resolve a tee destination spec to the object that receives output.

    @param stream: None (discard via null stream), DEVNULL (no tee at all),
                   TEE_TO_LOGS (log through logging_manager), or any
                   file-like object (used as-is).
    @param level: logging level used when stream is TEE_TO_LOGS.
    @param prefix: log-line prefix used when stream is TEE_TO_LOGS.
    @return: a writable object, or None for DEVNULL.
    """
    if stream is DEVNULL:
        return None
    if stream is None:
        return _the_null_stream
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    return stream
128
129
def _join_with_nickname(base_string, nickname):
    """Append a BgJob nickname tag to |base_string| when one is given."""
    if not nickname:
        return base_string
    return '%s BgJob "%s" ' % (base_string, nickname)
134
135
# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
# ssh connection process, while fixing underlying
# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A subprocess started in the background whose output is read on demand.

    The subprocess is launched immediately in __init__ via subprocess.Popen;
    its stdout/stderr are only consumed when process_output() is called, and
    the pipes and in-memory buffers are flushed and released by cleanup().
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, six.string_types):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # A string command goes through /bin/bash; a list is exec'd directly.
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        with open('/dev/null', 'w') as devnull:
            # TODO b/169678884. close_fds was reverted to False, as there is a
            # large performance hit due to a docker + python2 bug. Eventually
            # update (everything) to python3. Moving this call to subprocess32
            # is also an option, but will require new packages to the drone/lxc
            # containers.

            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=False)
        self._cleanup_called = False
        # In-memory accumulators for subprocess output; None when discarded.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else six.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else six.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        # pipe is None when the stream was routed to DEVNULL at __init__.
        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(self._read_data(pipe))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = self._read_data(pipe)
        buf.write(data)
        tee.write(data)

    def _read_data(self, pipe):
        """Read & return the data from the provided pipe.

        Handles the changes to pipe reading & iostring writing in python 2/3.
        In python2 the buffer (iostring) can take bytes, where in python3 it
        must be a string. Formatting bytes to string in python 2 vs 3 seems
        to be a bit different. In 3, .decode() is needed, however in 2 that
        results in unicode (not str), breaking downstream users.

        """

        data = os.read(pipe.fileno(), 1024)
        if isinstance(data, bytes) and six.PY3:
            # On rare occasion, an invalid byte will be read, causing this to
            # crash. Ignoring these errors seems like the best option for now.
            return data.decode(errors='ignore')
        return data

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even if a flush/close failed, so the duplicate-call
            # guard above still works.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        """Popen preexec_fn: restore default SIGPIPE handling in the child."""
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
355
356
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its integer representation."""
    packed = socket.inet_aton(ip)
    # '!L' decodes the four bytes as an unsigned long in network byte order.
    (value,) = struct.unpack('!L', packed)
    return value
360
361
def long_to_ip(number):
    """Convert an integer back to its dotted-quad IPv4 string form."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
365
366
def create_subnet_mask(bits):
    """Return the integer netmask with the top |bits| bits set."""
    return ((1 << bits) - 1) << (32 - bits)
369
370
def format_ip_with_mask(ip, mask_bits):
    """Return |ip| masked to its network address in CIDR 'a.b.c.d/n' form."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
374
375
def normalize_hostname(alias):
    """Resolve |alias| to an address and return its canonical hostname."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
379
380
def get_ip_local_port_range():
    """Return the kernel's ephemeral port range as an (int, int) tuple."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    return (int(match.group(1)), int(match.group(2)))
385
386
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral port range to [lower, upper]."""
    line = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', line)
390
391
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline.

    @param filename: path of the file to read.
    @return: first line of the file, with any trailing '\n' stripped.
    """
    # Context manager replaces the manual try/finally close.
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
398
399
def read_file(filename):
    """Return the entire contents of |filename| as a string.

    @param filename: path of the file to read.
    @return: file contents.
    """
    # Context manager replaces the manual try/finally close.
    with open(filename) as f:
        return f.read()
406
407
def get_field(data, param, linestart="", sep=" "):
    """
    Extract one field from the first line of |data| starting with |linestart|.

    Example data:
         cpu   324 345 34  5 345
         cpu0  34  11  34 34  33
    With linestart='cpu0' the fields after the marker are indexed 0, 1, ...

    @param data: Text to parse.
    @param param: Index of the field after the linestart marker.
    @param linestart: Prefix identifying the line of interest.
    @param sep: Regular expression separating the fields.
    @return: The selected field as a string, or None when no line matches.
    """
    matcher = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    hit = matcher.search(data)
    if hit is None:
        print("There is no line which starts with %s in data." % linestart)
        return None
    return re.split("%s" % sep, hit.group(1))[param]
430
431
def write_one_line(filename, line):
    """Write |line|, normalized to end in exactly one newline, to filename."""
    normalized = str(line).rstrip('\n') + '\n'
    open_write_close(filename, normalized)
434
435
def open_write_close(filename, data, is_binary=False):
    """Overwrite |filename| with |data|.

    @param is_binary: when True, open the file in binary mode.
    """
    mode = 'wb' if is_binary else 'w'
    with open(filename, mode) as f:
        f.write(data)
443
444
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: Absolute or relative path of the file being located. A
        relative path is resolved against base_dir when one is given.

    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Assume the relative path is based in autotest directory.
        path = os.path.join(base_dir, path)
    if os.path.isfile(path):
        return path
    raise error.TestFail('ERROR: Unable to find %s' % path)
466
467
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    Values that look like integers or floats are converted; '#' starts a
    comment; a non-matching non-blank line raises ValueError.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    @return: dict of key to (int | float | str) value.
    @raises ValueError: on a line that does not match the keyval format.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' guarantees the file is closed even when a ValueError is raised
    # below (the old explicit close() leaked the handle on that path).
    with open(path) as f:
        for line in f:
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            # Raw strings: the old '^\d+$' literals trigger invalid-escape
            # warnings on modern Python.
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
507
508
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @raises ValueError: on an invalid type_tag or an invalid key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Validate type_tag before touching the file: the old code opened the
    # file first, so a bad tag both created/left the file open (handle leak)
    # and touched the filesystem needlessly.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)

    # 'with' guarantees the handle is closed even when a key fails
    # validation mid-write.
    with open(path, 'a') as keyval:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
542
543
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = six.moves.urllib.parse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
549
550
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # urlopen itself takes no timeout here, so temporarily adjust the
    # global socket default and restore it afterwards.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.request.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
561
562
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url.

    @param url: source URL.
    @param filename: local destination path, opened in binary write mode.
    @param data: optional POST data forwarded to urlopen.
    @param timeout: socket timeout in seconds for the transfer.
    """
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' replaces the inner try/finally for the destination file.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
576
577
def hash(hashtype, input=None):
    """
    Return a hash object of type md5 or sha1. This wrapper keeps behavior
    identical between python 2.4 (md5/sha modules) and python 2.6+
    (hashlib) without emitting deprecation warnings.

    Only 'md5' and 'sha1' are accepted, even though hashlib supports more,
    so that both code paths behave the same.

    @param input: Optional input string that will be used to update the hash.
    @raises ValueError: if hashtype is not 'md5' or 'sha1'.
    """
    # pylint: disable=redefined-builtin
    if hashtype not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % hashtype)

    try:
        computed_hash = hashlib.new(hashtype)
    except NameError:
        # hashlib failed to import (python 2.4): fall back to the legacy
        # md5/sha modules loaded at the top of this file.
        computed_hash = md5.new() if hashtype == 'md5' else sha.new()

    if input:
        try:
            computed_hash.update(input.encode())
        except UnicodeError:
            computed_hash.update(input)

    return computed_hash
611
612
def get_file(src, dest, permissions=None):
    """Fetch |src| (local path or remote URL) into |dest|.

    @param permissions: optional mode bits applied to dest after the copy.
    @return: dest on success, None when src and dest are the same path.
    """
    if src == dest:
        return

    fetch = urlretrieve if is_url(src) else shutil.copyfile
    fetch(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
626
627
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Fetch the remote file into destdir, named after the URL's last path
    # component.
    remote_path = six.moves.urllib.parse.urlparse(src)[2]
    dest = os.path.join(destdir, os.path.basename(remote_path))
    return get_file(src, dest)
646
647
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: directory whose installed version is tracked in a
            pickled '.version' file inside it.
    @param preserve_srcdir: when False, srcdir is removed before install.
    @param new_version: version value compared (==) against the stored one.
    @param install: callable invoked with *args/**dargs to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile) and os.path.getsize(versionfile) > 0:
        # 'with' fixes the old pickle.load(open(...)) handle leak.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
672
673
def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
    """Pick the log level for stderr: stdout's level when expected, else
    the default stderr level."""
    return stdout_level if stderr_is_expected else DEFAULT_STDERR_LEVEL
678
679
def run(command, timeout=None, ignore_status=False, stdout_tee=None,
        stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
        stdout_level=None, stderr_level=None, args=(), nickname=None,
        ignore_timeout=False, env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout unless this is DEVNULL).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same
            level as stdout; defaults to ignore_status.
    @param stdout_level: logging level used if stdout_tee is TEE_TO_LOGS;
            if None, a default is used.
    @param stderr_level: like stdout_level but for stderr.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True
    @rtype: CmdResult

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    if isinstance(args, six.string_types):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # Commands sometimes arrive as lists (e.g. get_user_hash in
    # client/cros/cryptohome.py); flatten them into one shell string.
    if not isinstance(command, six.string_types):
        command = ' '.join(sh_quote_word(part) for part in command)

    # Append any extra arguments, shell-quoted, to the command line.
    command = ' '.join([command] + [sh_quote_word(arg) for arg in args])

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    if stdout_level is None:
        stdout_level = DEFAULT_STDOUT_LEVEL
    if stderr_level is None:
        stderr_level = get_stderr_level(stderr_is_expected, stdout_level)

    try:
        started_job = BgJob(command, stdout_tee, stderr_tee, verbose,
                            stdin=stdin, stdout_level=stdout_level,
                            stderr_level=stderr_level, nickname=nickname,
                            env=env, extra_paths=extra_paths)
        bg_job = join_bg_jobs((started_job,), timeout)[0]
    except error.CmdTimeoutError:
        if ignore_timeout:
            return None
        raise

    if bg_job.result.exit_status and not ignore_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
761
762
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    if nicknames is None:
        nicknames = []
    for (command, nickname) in zip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Bug fix: report the failing job's own command. The old code
            # raised with the stale loop variable |command| from the
            # construction loop above, which always named the *last* command
            # regardless of which job actually failed.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
792
793
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
799
800
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits (up to |timeout| seconds) for all jobs, drains their remaining
    stdout/stderr, and always runs cleanup() on each job.

    Returns the same list of bg_jobs objects that was passed in.

    @raises error.InvalidBgJobCall: when any job is unjoinable.
    @raises error.CmdTimeoutError: when the jobs did not finish in time.
    """
    for job in bg_jobs:
        if job.unjoinable:
            raise error.InvalidBgJobCall(
                    'join_bg_jobs cannot be called for unjoinable bg_job')

    timed_out = False
    try:
        # We are holding ends of the stdin/stdout pipes, so make sure they
        # get closed no matter how the wait turns out.
        timed_out = _wait_for_commands(bg_jobs, time.time(), timeout)

        for job in bg_jobs:
            # Drain whatever output remains on stdout and stderr.
            job.process_output(stdout=True, final_read=True)
            job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for job in bg_jobs:
            job.cleanup()

    if timed_out:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it
        # will do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)

    return bg_jobs
836
837
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # File objects to poll for readability/writability, plus a map from
    # each file object back to its owning job (and, for the read ends, a
    # flag saying whether the fd is that job's stdout or stderr).
    read_list = []
    write_list = []
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            # Jobs with pending stdin data also need their stdin pipe
            # polled for writability.
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            if v.args[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            string_stdin = bg_job.string_stdin[:512]
            if isinstance(string_stdin, six.text_type):
                string_stdin = string_stdin.encode('utf-8', 'strict')
            file_obj.write(string_stdin)
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        # Poll every job that has not yet been seen to exit; the loop as a
        # whole returns once every job has an exit status recorded.
        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
948
949
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.

    Zombies are impossible to move between cgroups, etc., so they are
    treated as not alive. pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            raise
        # The stat file went away: the process is gone.
        return False

    # The third field of /proc/<pid>/stat is the process state; 'Z' marks
    # a zombie.
    return stat_line.split()[2] != 'Z'
967
968
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to ~5 seconds for the process to go away.
    attempts_left = 5
    while attempts_left:
        if not pid_is_alive(pid):
            return True
        time.sleep(1)
        attempts_left -= 1

    # The process is still alive
    return False
987
988
def nuke_subprocess(subproc):
    """Terminate a subprocess.Popen with escalating signals.

    Returns the exit status once the process has terminated (which may
    have already happened before this call), or None if it survived even
    SIGKILL.
    """
    # Nothing to do if the subprocess has already exited.
    status = subproc.poll()
    if status is not None:
        return status

    # The process has not terminated, so kill it via an escalating
    # series of signals: ask politely first, then force.
    for sig in [signal.SIGTERM, signal.SIGKILL]:
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
1001
1002
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill process |pid| via an escalating series of signals.

    @param pid: pid of the process to kill.
    @param signal_queue: signals to try in order until one terminates
            the process.

    @raises error.AutoservPidAlreadyDeadError if there is no /proc entry
            for the pid (i.e. the process is already dead).
    @raises error.AutoservRunError if no signal terminated the process.
    """
    proc_path = '/proc/%d/' % pid
    if not os.path.exists(proc_path):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # Bug fix: interpolate the pid into the message with % instead of
        # passing it as a stray second exception argument (exceptions do
        # not apply logging-style formatting to their args).
        raise error.AutoservPidAlreadyDeadError(
                'Could not kill nonexistant pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # No signal successfully terminated the process.
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
1019
1020
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param command: command line to execute.
    @param timeout: timeout in seconds.
    @param ignore_status: if False, raise an exception when the command's
            exit code is non-zero; if True, just return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1035
1036
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run |commands| in parallel and return their exit statuses,
    in the same order as the commands."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
1043
1044
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output, minus one trailing newline.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    # Only tee output to the logs when asked to retain it.
    tee_kwargs = {}
    if retain_output:
        tee_kwargs = dict(stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    out = run(command, timeout=timeout, ignore_status=ignore_status,
              args=args, **tee_kwargs).stdout
    # Trim a single trailing newline, if present.
    if out.endswith('\n'):
        out = out[:-1]
    return out
1075
1076
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run |commands| in parallel and return each command's stdout.

    @param commands: list of command lines to run.
    @param timeout: overall timeout in seconds for the batch.
    @param ignore_status: do not raise on non-zero exit statuses.
    @param retain_output: if True, also tee stdout/stderr to the logs.

    @return list of stdout strings, one per command, each with a single
            trailing newline (if any) removed.
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS,
                               stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    # Bug fix: the original compared a *list* slice (out[-1:]) against the
    # string '\n', which is always False, so trailing newlines were never
    # stripped. Strip one trailing newline per command's output, matching
    # system_output().
    outputs = [result.stdout for result in results]
    return [out[:-1] if out.endswith('\n') else out for out in outputs]
1091
1092
def strip_unicode(input_obj):
    """Recursively convert unicode strings in |input_obj| to native str.

    Lists and dicts are rebuilt with their contents (and dict keys)
    converted; any other type is returned unchanged.

    @param input_obj: object to convert.
    @return the converted object.
    """
    # Idiom fix: isinstance instead of type() equality, so subclasses of
    # list/dict/text are handled too; dict comprehension for the rebuild.
    if isinstance(input_obj, list):
        return [strip_unicode(item) for item in input_obj]
    elif isinstance(input_obj, dict):
        return {str(key): strip_unicode(value)
                for key, value in input_obj.items()}
    elif isinstance(input_obj, six.text_type):
        return str(input_obj)
    else:
        return input_obj
1105
1106
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: callable to invoke and measure.
    @return (cpu_percent, return_value_of_function).
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage.
    # Bug fix: the original sliced a zip() object (zip(...)[:2]), which
    # raises TypeError on Python 3 since zip returns an iterator. Use the
    # named rusage fields instead: ru_utime is user time, ru_stime is
    # system time (the first two fields of the rusage struct).
    s_user = self_post.ru_utime - self_pre.ru_utime
    s_system = self_post.ru_stime - self_pre.ru_stime
    c_user = child_post.ru_utime - child_pre.ru_utime
    c_system = child_post.ru_stime - child_pre.ru_stime
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
1127
1128
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    If run_function is left as the default utils.run, this just examines
    os.uname()[4]. Otherwise run_function is used to execute the command
    (e.g. on a remote machine); it should return a CmdResult object and
    throw a CmdError exception.
    """
    # Fast path: query the local kernel directly.
    if run_function == run:
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise run uname through the supplied function, in case it hits
    # a remote machine.
    arch = run_function('/bin/uname -m').stdout.rstrip()
    return 'i386' if re.match(r'i\d86$', arch) else arch
1148
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).
    """
    # Map of architecture name -> regex matched against `file /bin/sh`.
    arch_patterns = {
        'arm': 'ELF 32-bit.*, ARM,',
        'arm64': 'ELF 64-bit.*, ARM aarch64,',
        'i386': 'ELF 32-bit.*, Intel 80386,',
        'x86_64': 'ELF 64-bit.*, x86-64,',
    }

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch, pattern in arch_patterns.items():
        if re.match(pattern, filestr):
            return arch

    # No pattern matched; fall back to the kernel architecture.
    return get_arch()
1167
1168
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    grep_out = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    counts = [int(match) for match in
              re.findall(r'^siblings\s*:\s*(\d+)\s*$', grep_out, re.M)]
    if not counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(counts) != max(counts):
        raise error.TestError('Number of siblings differ %r' % counts)
    return counts[0]
1186
1187
def set_high_performance_mode(host=None):
    """
    Sets the kernel governor mode to the highest setting.
    Returns previous governor state.
    """
    previous_governors = get_scaling_governor_states(host)
    set_scaling_governors('performance', host)
    return previous_governors
1196
1197
def set_scaling_governors(value, host=None):
    """
    Sets every CPU's scaling governor to the string |value|.
    Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.
    """
    paths = _get_cpufreq_paths('scaling_governor', host)
    if not paths:
        logging.info("Could not set governor states, as no files of the form "
                     "'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' "
                     "were found.")
    run_func = host.run if host else system
    for path in paths:
        logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value, path), ignore_status=True)
1214
1215
def _get_cpufreq_paths(filename, host=None):
    """
    Returns a list of paths to the per-CPU cpufreq |filename| files.
    """
    run_func = host.run if host else run
    pattern = '/sys/devices/system/cpu/cpu*/cpufreq/' + filename
    # Simple glob expansion; note that CPUs may come and go, causing these
    # paths to change at any time.
    try:
        paths = run_func('echo ' + pattern, verbose=False).stdout.split()
    except error.CmdError:
        return []
    # If the glob expanded to itself, we likely didn't match any real
    # paths (assuming 'cpu*' is not a real path).
    if paths == [pattern]:
        return []
    return paths
1234
1235
def get_scaling_governor_states(host=None):
    """
    Returns a list of (performance governor path, current state) tuples.
    """
    run_func = host.run if host else run
    states = []
    for path in _get_cpufreq_paths('scaling_governor', host):
        value = run_func('head -n 1 %s' % path, verbose=False).stdout
        states.append((path, value))
    return states
1247
1248
def restore_scaling_governor_states(path_value_list, host=None):
    """
    Restores governor states. Inverse operation to get_scaling_governor_states.
    """
    run_func = host.run if host else system
    for path, value in path_value_list:
        # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
        run_func('echo %s > %s' % (value.rstrip('\n'), path),
                 ignore_status=True)
1258
1259
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).

    @param src: path of the source tree.
    @param dest: path of the destination tree; modified in place.
    """
    if not os.path.exists(src):
        return # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest.
        # Idiom fix: context managers replace the original nested
        # try/finally blocks and still guarantee both files are closed.
        with open(dest, "a") as destfile:
            with open(src) as srcfile:
                destfile.write(srcfile.read())
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return
1295
1296
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        # Only compare against other CmdResult instances; defer to the
        # other operand for anything else.
        if type(self) != type(other):
            return NotImplemented
        return (self.command == other.command
                and self.exit_status == other.exit_status
                and self.stdout == other.stdout
                and self.stderr == other.stderr
                and self.duration == other.duration)


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        # Only include the stdout/stderr sections when they are non-empty.
        out = self.stdout.rstrip()
        if out:
            out = "\nstdout:\n%s" % out

        err = self.stderr.rstrip()
        if err:
            err = "\nstderr:\n%s" % err

        header = wrapper.fill(str(self.command))
        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (header, self.exit_status, self.duration, out, err))
1349
1350
class run_randomly:
    """Collects queued test invocations and runs them in random order."""

    def __init__(self, run_sequentially=False):
        # run_sequentially is for debugging control files: when set, the
        # queued tests run in insertion order instead of being shuffled.
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one test invocation (its positional and keyword args)."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued tests one at a time and invoke fn on each."""
        while self.test_list:
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1370
1371
def import_site_module(path, module, placeholder=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param placeholder value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or placeholder

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module.rsplit('.', 1)[-1]
    modulefile = modulefile or short_module + '.py'

    # Only attempt the import when the site file is actually present next
    # to the caller's source file.
    site_file = os.path.join(os.path.dirname(path), modulefile)
    if not os.path.exists(site_file):
        return placeholder
    return __import__(module, {}, {}, [short_module])
1393
1394
def import_site_symbol(path, module, name, placeholder=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param placeholder value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or placeholder

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return placeholder

    # Unique sentinel so we can distinguish "attribute missing" from any
    # legitimate attribute value (including None).
    missing = object()
    symbol = getattr(site_module, name, missing)
    return placeholder if symbol is missing else symbol
1421
1422
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not derived from baseclass: mix baseclass in with the site specific
    # class object and return the result.
    return type(classname, (site_class, baseclass), {})
1453
1454
def import_site_function(path, module, funcname, placeholder, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        placeholder: function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or placeholder

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just attributes, so delegate to the symbol importer.
    return import_site_symbol(path, module, funcname, placeholder, modulefile)
1472
1473
1474def _get_pid_path(program_name):
1475    my_path = os.path.dirname(__file__)
1476    return os.path.abspath(os.path.join(my_path, "..", "..",
1477                                        "%s.pid" % program_name))
1478
1479
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    # Idiom fix: a 'with' block replaces the original try/finally and
    # still guarantees the file is closed even if the write fails.
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1492
1493
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        # Only propagate the error if the file is still there; a missing
        # file is exactly the state we wanted.
        if os.path.exists(pidfile_path):
            raise
1506
1507
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    # Cleanup: reuse the already-computed path (the original recomputed
    # it via a second _get_pid_path call) and let a context manager close
    # the file instead of nested try/finally.
    try:
        with open(pidfile_path, 'r') as pidfile:
            return int(pidfile.readline())
    except IOError:
        # The file may have vanished between the existence check and the
        # read; treat that the same as "no pid file".
        if not os.path.exists(pidfile_path):
            return None
        raise
1532
1533
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead Pid' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 1 (0-based) of /proc/<pid>/stat holds '(comm)'; the slice
    # strips the surrounding parentheses.
    return get_field(read_file(stat_path), 1)[1:-1]
1544
1545
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1557
1558
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # No pid file (or an empty/zero pid): nothing to signal.
        return
    signal_pid(pid, sig)
1569
1570
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    # Normalize (collapse double slashes, etc.) and split into components,
    # dropping the empty component produced by the leading separator.
    path_parts = os.path.normpath(path).split(os.path.sep)[1:]
    ref_parts = os.path.normpath(reference).split(os.path.sep)[1:]

    # Count the longest run of leading components shared by both paths.
    common = 0
    for p_comp, r_comp in zip(path_parts, ref_parts):
        if p_comp != r_comp:
            break
        common += 1

    # Climb out of every reference component that is not shared, then
    # descend along the remaining path components.
    ups = ['..'] * (len(ref_parts) - common)
    return os.path.join(*(ups + path_parts[common:]))
1605
1606
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be handled first so that the escapes added for the
    # other characters are not themselves re-escaped.
    for char in ('\\', '$', '"', '`'):
        command = command.replace(char, '\\' + char)
    return command
1627
1628
def sh_quote_word(text, allowlist=_SHELL_QUOTING_ALLOWLIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string, so single quotes can safely wrap any string of characters except
    one containing a single quote. A single quote character is emitted as
    the sequence '\'' which translates to:
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers, and it nests safely:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param allowlist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    needs_quoting = any(char not in allowlist for char in text)
    if not needs_quoting:
        return text
    return "'" + text.replace("'", r"'\''") + "'"
1657
1658
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    # Each recognized environment variable maps to one configure flag.
    args = ['--%s=%s' % (option, os.environ[var])
            for option, var in (('host', 'CHOST'),
                                ('build', 'CBUILD'),
                                ('target', 'CTARGET'))
            if var in os.environ]
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1677
1678
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @param make: the make binary to invoke.
    @param timeout: seconds after which the make command is killed.
    @param ignore_status: if True, do not raise on a non-zero exit code.
    @return: status code of the executed command.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    command = '%s %s %s' % (make, makeopts, extra)
    return system(command, timeout=timeout, ignore_status=ignore_status)
1687
1688
1689def _cmp(x, y):
1690    """
1691    Replacement for built-in function cmp that was removed in Python 3
1692
1693    Compare the two objects x and y and return an integer according to
1694    the outcome. The return value is negative if x < y, zero if x == y
1695    and strictly positive if x > y.
1696    """
1697
1698    return (x > y) - (x < y)
1699
1700
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Versions are split into components on '.' and '-'. Corresponding
    components are zero-padded to equal length before comparing, so
    "2" sorts before "10".

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    # Compare component-by-component until one string runs out.
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # zfill pads the shorter component with leading zeros so that
        # numeric components compare correctly as strings.
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components were equal; the version with more remaining
    # components is considered greater.
    return _cmp(len(ax), len(ay))
1734
1735
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    pattern = re.compile(r'(\w+)[:=](.*)$')
    parsed = {}
    for item in args:
        m = pattern.match(item)
        if m is None:
            # Malformed arguments are dropped with a warning rather than
            # aborting the whole run.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", item, pattern.pattern)
            continue
        parsed[m.group(1).lower()] = m.group(2)
    return parsed
1756
1757
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _attempt_bind(candidate, sock_type, sock_proto):
        # Returns the bound port number on success, None on failure.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', candidate))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    # On the 2.6 kernel, calling _attempt_bind() on a UDP socket returns
    # the same port over and over. So always try TCP first.
    while True:
        # Let the OS pick an unused TCP port, then verify it is also
        # free for UDP.
        tcp_port = _attempt_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if tcp_port and _attempt_bind(tcp_port, socket.SOCK_DGRAM,
                                      socket.IPPROTO_UDP):
            return tcp_port
1788
1789
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    prompt = "%s INFO | %s (y/n) " % (timestamp, question)
    return input(prompt)
1802
1803
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.

    @param address: MSR register address to read.
    @param cpu: CPU index whose /dev/cpu/<cpu>/msr device is read.
    """
    msr_path = '/dev/cpu/%s/msr' % cpu
    # Open unbuffered in binary mode; the MSR device is addressed by
    # seeking to the register offset and reading 8 bytes.
    with open(msr_path, 'rb', 0) as msr_file:
        msr_file.seek(address)
        raw = msr_file.read(8)
    # '=Q' -> native-endian unsigned 64-bit value.
    return struct.unpack('=Q', raw)[0]
1811
1812
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Returns the value of func().  If |expected_value|, |min_threshold|, and
    |max_threshold| are not set, returns immediately.

    If |expected_value| is set, polls the return value until |expected_value| is
    reached, and returns that value.

    If either |max_threshold| or |min_threshold| is set, this function will
    repeatedly call func() until the return value reaches or exceeds one of
    these thresholds.

    Polling will stop after |timeout_sec| regardless of these thresholds.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    def _satisfied(value):
        # With no conditions configured, the first sample is returned.
        if (expected_value is None and min_threshold is None
                and max_threshold is None):
            return True
        if expected_value is not None and value == expected_value:
            return True
        if min_threshold is not None and value <= min_threshold:
            return True
        if max_threshold is not None and value >= max_threshold:
            return True
        return False

    deadline = time.time() + timeout_sec
    while True:
        value = func()
        if _satisfied(value) or time.time() >= deadline:
            return value
        time.sleep(0.1)
1858
1859
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Returns the value of func().

    The function polls the return value until it is different from |old_value|,
    and returns that value.

    Polling will stop after |timeout_sec|.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        current = func()
        # Return as soon as the value changes, or once the deadline passes.
        if current != old_value or time.time() >= deadline:
            return current
        time.sleep(0.1)
1890
1891
# Module-wide handle to the parsed global configuration.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names/addresses that are treated as the local machine.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'
1909
1910
1911def _parse_subnet(subnet_str):
1912    """Parse a subnet string to a (ip, mask) tuple."""
1913    ip, mask = subnet_str.split('/')
1914    return ip, int(mask)
1915
1916
# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24))
# Populated at import time by _setup_restricted_subnets() below.
RESTRICTED_SUBNETS = []
1921
1922
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS/restricted_subnets config."""
    global RESTRICTED_SUBNETS
    subnet_strs = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    RESTRICTED_SUBNETS = [_parse_subnet(entry) for entry in subnet_strs]
1928
1929
# Populate RESTRICTED_SUBNETS from the global config at import time.
_setup_restricted_subnets()


# A two level list of subnets, e.g. '[["1.1.1.0/24","1.1.2.0/24"],
# ["1.2.1.0/24", "1.2.2.0/24"]]'. Each element of it is either a singleton list
# of a restricted subnet, or a list of subnets which can communicate with each
# other (i.e. p2p subnets).
# Populated at import time by _setup_all_subnets() below.
ALL_SUBNETS = []
1938
1939
def _setup_all_subnets():
    """Populate ALL_SUBNETS from the CROS/p2p_subnets config value plus all
    restricted subnets (each restricted subnet as a singleton group)."""
    raw = CONFIG.get_config_value('CROS',
                                  'p2p_subnets',
                                  default='[]')
    for group in json.loads(raw):
        ALL_SUBNETS.append([_parse_subnet(entry) for entry in group])

    # Make sure the restricted subnets have been loaded before copying them.
    if not RESTRICTED_SUBNETS:
        _setup_restricted_subnets()
    for restricted in RESTRICTED_SUBNETS:
        ALL_SUBNETS.append([restricted])
1952
1953
# Populate ALL_SUBNETS from the global config at import time.
_setup_all_subnets()
1955
1956
def get_all_restricted_subnets():
    """Returns all restricted subnets in a flat list, including subnets that
    are part of a p2p group.

    This helps us to check if a host is in a restricted subnet."""
    return [subnet for group in ALL_SUBNETS for subnet in group]
1967
1968
# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string: '\d' is an invalid escape sequence in a normal string literal
# (DeprecationWarning since Python 3.6, an error in newer versions).
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1974
1975
def get_moblab_serial_number():
    """Gets a unique identifier for the moblab.

    Serial number is the prefered identifier, use it if
    present, however fallback is the ethernet mac address.
    """
    for key in ('serial_number', 'ethernet_mac'):
        try:
            result = run('sudo vpd -g %s' % key)
        except error.CmdError as e:
            # Log and fall through to the next VPD key.
            logging.error(str(e))
            logging.info(key)
            continue
        if result and result.stdout:
            return result.stdout
    return 'NoSerialNumber'
1991
1992
def ping(host,
         deadline=None,
         tries=None,
         timeout=60,
         ignore_timeout=False,
         user=None,
         interface=None):
    """Attempt to ping |host|.

    Shell out to 'ping' if host is an IPv4 addres or 'ping6' if host is an
    IPv6 address to try to reach |host| for |timeout| seconds.
    Returns exit code of ping.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.

    Specifying |deadline| or |count| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @param ignore_timeout: If true, timeouts won't raise CmdTimeoutError.
    @param user: Run as a specific user
    @param interface: Run on a specific network interface
    @return exit code of ping command.
    """
    args = [host]
    # A host containing at least two ':' characters is treated as a literal
    # IPv6 address and pinged with ping6 instead.
    cmd = 'ping6' if re.search(r':.*:', host) else 'ping'

    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)
    if interface:
        args.append('-I%s' % interface)

    if user != None:
        # Wrap the entire ping invocation in 'su' so it runs as |user|.
        args = [user, '-c', ' '.join([cmd] + args)]
        cmd = 'su'

    result = run(cmd,
                 args=args,
                 verbose=True,
                 ignore_status=True,
                 timeout=timeout,
                 ignore_timeout=ignore_timeout,
                 stderr_tee=TEE_TO_LOGS)

    # Sometimes the ping process times out even though a deadline is set. If
    # ignore_timeout is set, it will fall through to here instead of raising.
    if result is None:
        logging.debug('Unusual ping result (timeout)')
        # From man ping: If a packet count and deadline are both specified, and
        # fewer than count packets are received by the time the deadline has
        # arrived, it will also exit with code 1. On other error it exits with
        # code 2.
        return 1 if deadline and tries else 2

    rc = result.exit_status
    lines = result.stdout.splitlines()

    # rc=0: host reachable
    # rc=1: host unreachable
    # other: an error (do not abbreviate)
    if rc in (0, 1):
        # Report the two stats lines, as a single line.
        # [-2]: packets transmitted, 1 received, 0% packet loss, time 0ms
        # [-1]: rtt min/avg/max/mdev = 0.497/0.497/0.497/0.000 ms
        stats = lines[-2:]
        while '' in stats:
            stats.remove('')

        if stats or len(lines) < 2:
            logging.debug('[rc=%s] %s', rc, '; '.join(stats))
        else:
            # The candidate stats lines were empty but other output exists;
            # dump all of it so the anomaly can be diagnosed from logs.
            logging.debug('[rc=%s] Ping output:\n%s',
                          rc, result.stdout)
    else:
        output = result.stdout.rstrip()
        if output:
            logging.debug('Unusual ping result (rc=%s):\n%s', rc, output)
        else:
            logging.debug('Unusual ping result (rc=%s).', rc)
    return rc
2082
2083
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    shortname = hostname.split('.')[0]
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    fqdn = '%s.%s' % (shortname, dns_zone)
    logging.debug('Checking if host %s is in lab zone.', fqdn)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
2099
2100
def host_is_in_power_lab(hostname):
    """Check if the hostname is in power lab.

    Example: chromeos1-power-host2.cros

    @param hostname: The hostname to check.
    @returns True if hostname match power lab hostname, otherwise False.
    """
    power_lab_re = re.compile(
            r'chromeos\d.*power.*(\.cros(\.corp(\.google\.com)?)?)?$')
    return power_lab_re.match(hostname) is not None
2111
2112
def get_power_lab_wlan_hostname(hostname):
    """Return wlan hostname for host in power lab.

    Example: chromeos1-power-host2.cros -> chromeos1-power-host2-wlan.cros

    @param hostname: The hostname in power lab.
    @returns wlan hostname.
    """
    # Append '-wlan' to the first hostname component, keeping the rest of
    # the domain untouched.
    head, dot, tail = hostname.partition('.')
    return head + '-wlan' + dot + tail
2124
2125
def in_moblab_ssp():
    """Detects if this execution is inside an SSP container on moblab."""
    moblab_flag = CONFIG.get_config_value('SSP', 'is_moblab', type=bool,
                                          default=False)
    return bool(is_in_container() and moblab_flag)
2131
2132
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """
    # Aborted jobs have no views.
    if not job_views:
        return None

    # Return the first view that carries the chrome-version attribute.
    for view in job_views:
        attributes = view.get('attributes')
        if attributes and constants.CHROME_VERSION in attributes:
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
2164
2165
def get_moblab_id():
    """Gets the moblab random id.

    The random id file is cached on disk. If it does not exist, a new file is
    created the first time.

    @returns the moblab random id.
    """
    moblab_id_filepath = '/home/moblab/.moblab_id'
    try:
        if os.path.exists(moblab_id_filepath):
            # Reuse the previously generated id.
            with open(moblab_id_filepath, 'r') as moblab_id_file:
                random_id = moblab_id_file.read()
        else:
            # First call on this machine: generate a fresh id and persist
            # it so later callers get the same value.
            random_id = uuid.uuid1().hex
            with open(moblab_id_filepath, 'w') as moblab_id_file:
                moblab_id_file.write('%s' % random_id)
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(moblab_id_filepath, 'r') as moblab_id_file:
            random_id = moblab_id_file.read()
    return random_id
2191
2192
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    For the normal use case this is the results_storage_server in the
    global_config.

    However partners using Moblab will be offloading their results to a
    subdirectory of their image storage buckets. The subdirectory is
    determined by the MAC Address of the Moblab device.

    @returns gsuri to offload test results to.
    """
    # Non-moblab setups offload straight to the configured results bucket.
    if not is_moblab():  # pylint: disable=undefined-variable
        return DEFAULT_OFFLOAD_GSURI

    # For moblab, use results_storage_server or image_storage_server as bucket
    # name and mac-address/moblab_id as path.
    base_uri = DEFAULT_OFFLOAD_GSURI
    if not base_uri:
        base_uri = "%sresults/" % CONFIG.get_config_value(
                'CROS', 'image_storage_server')

    return '%s%s/%s/' % (base_uri, get_moblab_serial_number(), get_moblab_id())
2217
2218
2219# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2220# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    CANNED_ACLS = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    _GSUTIL_BIN = 'gsutil'
    acl_cmd = None
    if acl in CANNED_ACLS:
        # A canned ACL can be applied as part of the copy itself.
        cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        # Upload as 'private' first; the ACL file is applied afterwards.
        cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)
    if not result_dir:
        run(cmd, timeout=transfer_timeout, verbose=True)
        if acl_cmd:
            run(acl_cmd, timeout=acl_timeout, verbose=True)
        return True
    # With a result_dir, tee all gsutil output into a 'tracing' file there.
    with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
        ftrace.write('Preamble\n')
        run(cmd, timeout=transfer_timeout, verbose=True,
                       stdout_tee=ftrace, stderr_tee=ftrace)
        if acl_cmd:
            ftrace.write('\nACL setting\n')
            # Apply the passed in ACL xml file to the uploaded object.
            run(acl_cmd, timeout=acl_timeout, verbose=True,
                           stdout_tee=ftrace, stderr_tee=ftrace)
        ftrace.write('Postamble\n')
        return True
2272
2273
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output(' '.join(['gsutil', 'ls', uri_pattern]))
    return [line.rstrip() for line in listing.splitlines() if line]
2287
2288
def nuke_pids(pid_list, signal_queue=None):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Queue of signals to send the PID's to terminate them.

    @return: A mapping of the signal name to the number of processes it
        was sent to.

    @raise error.AutoservRunError: If processes are still alive after the
        whole signal queue has been exhausted (and SIGKILL was not sent).
    """
    if signal_queue is None:
        signal_queue = [signal.SIGTERM, signal.SIGKILL]
    sig_count = {}
    # Though this is slightly hacky it beats hardcoding names anyday.
    # Maps signal value -> signal name, e.g. signal.SIGTERM -> 'SIGTERM'.
    # (Plain dict.items() works on both Python 2 and 3, no need for six.)
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        # SIGKILL cannot be caught or ignored, so there is nothing left to
        # escalate to -- report what was sent.
        if sig == signal.SIGKILL:
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # Bug fix: the format arguments were previously passed as a second
            # argument to append() instead of to the % operator, which raised
            # TypeError whenever a process survived all signals.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2331
2332
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    if host in _LOCAL_HOST_LIST:
        # Replace loopback aliases with the machine's real host name.
        return socket.gethostname()
    return host
2342
2343
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    This method will convert all socket timeouts to
    TimeoutExceptions, so we can use it in conjunction
    with the rpc retry decorator and continue to handle
    other URLErrors as we see fit.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    # The timeout is applied process-wide through the socket module;
    # remember the previous default so it can be restored afterwards.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.request.urlopen(url, data=data)
    except urllib.error.URLError as e:
        # A socket timeout surfaces as a URLError whose reason is
        # socket.timeout; translate only that case.
        if type(e.reason) is socket.timeout:
            raise error.TimeoutException(str(e))
        raise
    finally:
        socket.setdefaulttimeout(old_timeout)
2374
2375
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string: '\d' in a normal string literal is an invalid escape
    # sequence (DeprecationWarning on newer Python versions).
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    ver = match.group(0) if match else version_string
    milestone = match.group(1) if match else ''
    return ver, milestone
2393
2394
def parse_gs_uri_version(uri):
    """Pull out major.minor.sub from image URI

    @param uri: A GS URI for a bucket containing ChromeOS build artifacts
    @return: The build version as a string in the form 'major.minor.sub'

    """
    # Strip everything up to and including the 'R##-' / 'LATEST-' marker,
    # then drop any trailing slash.
    version = re.sub('.*(R[0-9]+|LATEST)-', '', uri)
    return version.strip('/')
2403
2404
def compare_gs_uri_build_versions(x, y):
    """Compares two bucket URIs by their version string

    @param x: A GS URI for a bucket containing ChromeOS build artifacts
    @param y: Another GS URI for a bucket containing ChromeOS build artifacts
    @return: 1 if x > y, -1 if x < y, and 0 if x == y

    """
    def _to_components(uri):
        # 'gs://.../R75-<major>.<minor>.<sub>' -> [major, minor, sub]
        return [int(part) for part in parse_gs_uri_version(uri).split('.')]

    for left, right in zip(_to_components(x), _to_components(y)):
        if left != right:
            return 1 if left > right else -1

    return 0
2428
2429
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost.

    @raise socket.gaierror: If server name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2447
2448
def get_function_arg_value(func, arg_name, args, kwargs):
    """Get the value of the given argument for the function.

    @param func: Function being called with given arguments.
    @param arg_name: Name of the argument to look for value.
    @param args: arguments for function to be called.
    @param kwargs: keyword arguments for function to be called.

    @return: The value of the given argument for the function.

    @raise ValueError: If the argument is not listed in function arguments.
    @raise KeyError: If no value is found for the given argument.
    """
    if arg_name in kwargs:
        return kwargs[arg_name]

    # inspect.getargspec was deprecated since Python 3.0 and removed in
    # Python 3.11; prefer getfullargspec when available. The fields used
    # here (args, defaults) are common to both, so behavior is unchanged.
    if hasattr(inspect, 'getfullargspec'):
        argspec = inspect.getfullargspec(func)
    else:
        argspec = inspect.getargspec(func)
    # list.index raises ValueError when arg_name is not declared.
    index = argspec.args.index(arg_name)
    try:
        return args[index]
    except IndexError:
        try:
            # The argument can use a default value. Reverse the default value
            # so argument with default value can be counted from the last to
            # the first.
            return argspec.defaults[::-1][len(argspec.args) - index - 1]
        except IndexError:
            raise KeyError('Argument %s is not given a value. argspec: %s, '
                           'args:%s, kwargs:%s' %
                           (arg_name, argspec, args, kwargs))
2479
2480
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    # PID 1's executable identifies the init system in use.
    init_binary = os.readlink('/proc/1/exe')
    return os.path.basename(init_binary) == 'systemd'
2487
2488
def get_real_user():
    """Get the real user that runs the script.

    The function check environment variable SUDO_USER for the user if the
    script is run with sudo. Otherwise, it returns the value of environment
    variable USER.

    @return: The user name that runs the script.

    """
    # SUDO_USER (when set and non-empty) wins over USER.
    return os.environ.get('SUDO_USER') or os.environ.get('USER')
2503
2504
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if not has_systemd():
        # Upstart: 'status <svc>' reports e.g. 'svc start/running, process N'.
        status_result = run('status %s' % service_name,
                            ignore_status=True)
        if 'start/running' in status_result.stdout:
            return int(status_result.stdout.split()[3])
        return 0
    # systemctl show prints 'MainPID=0' if the service is not running.
    show_result = run('systemctl show -p MainPID %s' %
                      service_name, ignore_status=True)
    return int(show_result.stdout.split('=')[1])
2523
2524
def control_service(service_name, action='start', ignore_status=True):
    """Start, stop or restart a service.

    @param service_name: string service to be controlled.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.

    @raise ValueError: If action is not start, stop or restart.
    """
    if action not in ('start', 'stop', 'restart'):
        raise ValueError('Unknown action supplied as parameter.')

    cmd = '%s %s' % (action, service_name)
    if has_systemd():
        cmd = 'systemctl %s' % cmd
    return system(cmd, ignore_status=ignore_status)
2544
2545
def restart_service(service_name, ignore_status=True):
    """Restarts a service via control_service().

    @param service_name: string service to be restarted.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, 'restart', ignore_status)
2557
2558
def start_service(service_name, ignore_status=True):
    """Starts a service via control_service().

    @param service_name: string service to be started.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, 'start', ignore_status)
2570
2571
def stop_service(service_name, ignore_status=True):
    """Stops a service via control_service().

    @param service_name: string service to be stopped.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, 'stop', ignore_status)
2583
2584
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        # 'sudo -n' fails instead of prompting when a password is required.
        run('sudo -n true')
    except error.CmdError:
        logging.warning('sudo command requires password.')
        return True
    return False
2597
2598
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    # lxc containers show init's cgroup under a '/lxc/' hierarchy.
    grep = run('grep -q "/lxc/" /proc/1/cgroup',
               verbose=False, ignore_status=True)
    if grep.exit_status == 0:
        return True

    # lxd/lxc containers export the "container" environment variable.
    return os.environ.get('container') == 'lxc'
2614
2615
def is_flash_installed():
    """Check whether the Adobe Flash binary and its info file are present.

    The Adobe Flash binary is only distributed with internal builds.
    """
    pepper_dir = '/opt/google/chrome/pepper'
    required = ('libpepflashplayer.so', 'pepper-flash.info')
    return all(os.path.exists(os.path.join(pepper_dir, name))
               for name in required)
2622
2623
def verify_flash_installed():
    """Raise TestNAError unless the Adobe Flash binary is installed.

    The Adobe Flash binary is only distributed with internal builds.
    Warn users of public builds of the extra dependency.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2631
2632
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IP addresses are in the same subnet with given mask bits.

    The two IP addresses are string of IPv4, e.g., '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Netmask with the top |mask_bits| bits set, e.g. 24 -> 0xffffff00.
    netmask = ((1 << mask_bits) - 1) << (32 - mask_bits)
    addr_1 = struct.unpack('!I', socket.inet_aton(ip_1))[0]
    addr_2 = struct.unpack('!I', socket.inet_aton(ip_2))[0]
    return (addr_1 & netmask) == (addr_2 & netmask)
2649
2650
def get_ip_address(hostname=None):
    """Get the IP address of given hostname or current machine.

    @param hostname: Hostname of a DUT, default value is None.

    @return: The IP address of given hostname, or None if resolution fails.
             If hostname is not given then we'll try to query the IP address
             of the current machine and return.
    """
    if not hostname:
        # Connecting a UDP socket does not send any packet, but it makes the
        # kernel pick the outgoing interface whose address we then read.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect(("8.8.8.8", 80))
        local_ip = sock.getsockname()[0]
        sock.close()
        return local_ip
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror as e:
        logging.error(
            'Failed to get IP address of %s, error: %s.', hostname, e)
2672
2673
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    @raise ValueError: If neither servers nor server_ip_map is given.
    """
    matched_servers = []
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = list(server_ip_map.keys())
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    for server in servers:
        # Only fall back to a (potentially slow) name lookup when the
        # pre-built map has no entry. The previous
        # `server_ip_map.get(server, get_ip_address(server))` evaluated the
        # default eagerly, resolving every server even when mapped.
        server_ip = server_ip_map.get(server)
        if server_ip is None:
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2702
2703
def get_restricted_subnet(hostname, restricted_subnets=None):
    """Get the restricted subnet of given hostname.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
             subnet, or None when the host IP can't be resolved or no subnet
             matches.
    """
    if restricted_subnets is None:
        restricted_subnets = RESTRICTED_SUBNETS
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for subnet_ip, mask_bits in restricted_subnets:
        if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
            return subnet_ip, mask_bits
    return None
2722
2723
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    The method tries to locate the wireless ssid in the same subnet of given
    hostname first. If none is found, it returns the default setting in
    CLIENT/wireless_ssid.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                           default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return default_ssid

    # Config keys matched by WIRELESS_SSID_PATTERN are formatted as
    # wireless_ssid_[subnet_ip]/[maskbit], e.g. wireless_ssid_192.168.0.1/24.
    ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)

    # Multiple subnets may match; prefer the most specific one, i.e. the
    # entry with the largest mask bit count.
    best_ssid, best_maskbit = default_ssid, -1
    for key, ssid in ssids.items():
        subnet_ip, maskbit = re.match(WIRELESS_SSID_PATTERN, key).groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
                is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid, best_maskbit = ssid, maskbit
    return best_ssid
2761
2762
def parse_launch_control_build(build_name):
    """Get branch, target, build_id from the given Launch Control build_name.

    @param build_name: Name of a Launch Control build, should be formated as
                       branch/target/build_id

    @return: Tuple of branch, target, build_id
    @raise ValueError: If the build_name is not correctly formated.
    """
    # Unpacking raises ValueError unless there are exactly three components.
    pieces = build_name.split('/')
    branch, target, build_id = pieces
    return branch, target, build_id
2774
2775
def parse_android_target(target):
    """Get board and build type from the given target.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of board, build_type
    @raise ValueError: If the target is not correctly formated.
    """
    # Unpacking raises ValueError unless there is exactly one '-'.
    pieces = target.split('-')
    board, build_type = pieces
    return board, build_type
2786
2787
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    The Launch Control target has the format of build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug. This method extracts the build target
    and type from the target name.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'userdebug'),
             or (None, None) when the target does not match the format.
    """
    # The build type is everything after the last '-'; the build target may
    # itself contain dashes.
    match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not match:
        return None, None
    return match.group('build_target'), match.group('build_type')
2804
2805
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        _, target, _ = parse_launch_control_build(build)
    except ValueError:
        # Not formatted as branch/target/build_id.
        return False
    build_target, _ = parse_launch_control_target(target)
    # parse_launch_control_target returns None when target isn't
    # build_target-build_type.
    return build_target is not None
2825
2826
def which(exec_file):
    """Finds an executable file.

    If the file name contains a path component, it is checked as-is.
    Otherwise, we check with each of the path components found in the system
    PATH prepended. This behavior is similar to the 'which' command-line tool.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    if os.path.dirname(exec_file):
        # An explicit path component was given: check that path directly.
        return exec_file if os.access(exec_file, os.X_OK) else None
    sys_path = os.environ.get('PATH')
    search_dirs = sys_path.split(os.pathsep) if sys_path else []
    for directory in search_dirs:
        candidate = os.path.join(directory, exec_file)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
2846
2847
class TimeoutError(error.TestError):
    """Error raised when poll_for_condition() failed to poll within time.

    It may embed a reason (either a string or an exception object) so that
    the caller of poll_for_condition() can handle failure better.
    """

    def __init__(self, message=None, reason=None):
        """Constructor.

        It supports three invocations:
        1) TimeoutError()
        2) TimeoutError(message): with customized message.
        3) TimeoutError(message, reason): with message and reason for timeout.
        """
        self.reason = reason
        if reason:
            # Append the reason to the message, or use it as the message.
            suffix = 'Reason: ' + repr(reason)
            message = message + '. ' + suffix if message else suffix

        if message:
            super(TimeoutError, self).__init__(message)
        else:
            super(TimeoutError, self).__init__()
2875
2876
class Timer(object):
    """A synchronous timer to evaluate if timout is reached.

    Usage:
      timer = Timer(timeout_sec)
      while timer.sleep(sleep_interval):
        # do something...
    """
    def __init__(self, timeout):
        """Constructor.

        Note that timer won't start until next() is called.

        @param timeout: timer timeout in seconds.
        """
        self.timeout = timeout
        # Zero means the timer has not been kicked off yet.
        self.deadline = 0

    def sleep(self, interval):
        """Checks if it has sufficient time to sleep; sleeps if so.

        It blocks for |interval| seconds if it has time to sleep.
        If timer is not ticked yet, kicks it off and returns True without
        sleep.

        @param interval: sleep interval in seconds.
        @return True if it has sleeped or just kicked off the timer. False
                otherwise.
        """
        now = time.time()
        # First call: start the clock and return without sleeping.
        if not self.deadline:
            self.deadline = now + self.timeout
            return True
        if now + interval >= self.deadline:
            return False
        time.sleep(interval)
        return True
2914
2915
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition is evaluated to true.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    deadline = time.time() + timeout
    while True:
        value = condition()
        if value:
            return value
        # Give up once even one more sleep would overshoot the deadline.
        if time.time() + sleep_interval > deadline:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            if desc:
                message = 'Timed out waiting for condition: ' + desc
            else:
                message = 'Timed out waiting for unnamed condition'
            logging.error(message)
            raise TimeoutError(message=message)

        time.sleep(sleep_interval)
2954
2955
def poll_for_condition_ex(condition, timeout=10, sleep_interval=0.1, desc=None):
    """Polls until a condition is evaluated to true or until timeout.

    Similiar to poll_for_condition, except that it handles exceptions
    condition() raises. If timeout is not reached, the exception is dropped and
    poll for condition after a sleep; otherwise, the exception is embedded into
    TimeoutError to raise.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of the condition

    @return The evaluated value that caused the poll loop to terminate.

    @raise TimeoutError. If condition() raised exception, it is embedded in
           raised TimeoutError.
    """
    timer = Timer(timeout)
    # Timer.sleep() always returns True on its first call, so the loop body
    # runs at least once and |reason| is guaranteed to be bound afterwards.
    while timer.sleep(sleep_interval):
        reason = None
        try:
            value = condition()
            if value:
                return value
        # NOTE: BaseException is deliberately broad so any failure surfaces
        # via TimeoutError, but it also swallows KeyboardInterrupt/SystemExit
        # raised between polls.
        except BaseException as e:
            reason = e

    if desc is None:
        desc = 'unnamed condition'
    if reason is None:
        reason = 'condition evaluated as false'
    to_raise = TimeoutError(message='Timed out waiting for ' + desc,
                            reason=reason)
    logging.error(str(to_raise))
    raise to_raise
2993
2994
def poll_till_condition_holds(condition,
                              exception=None,
                              timeout=10,
                              sleep_interval=0.1,
                              hold_interval=5,
                              desc=None):
    """Polls until a condition is evaluated to true for a period of time

    This function checks that a condition remains true for the 'hold_interval'
    seconds after it first becomes true. If the condition becomes false
    subsequently, the timer is reset. This function will not detect if
    condition becomes false for any period of time less than the sleep_interval.

    @param condition: function taking no args and returning anything that will
                      evaluate to True in a conditional check
    @param exception: exception to throw if condition doesn't evaluate to true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param hold_interval: time period for which the condition should hold true
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The evaluated value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    start_time = time.time()
    # Whether the condition is currently in a "holding" streak, and when
    # that streak started.
    cond_is_held = False
    cond_hold_start_time = None

    while True:
        value = condition()
        if value:
            if cond_is_held:
                # Succeed only once the streak has lasted hold_interval.
                if time.time() - cond_hold_start_time > hold_interval:
                    return value
            else:
                # Condition just became true; start a new streak.
                cond_is_held = True
                cond_hold_start_time = time.time()
        else:
            # Condition went false; discard any accumulated streak.
            cond_is_held = False

        time_remaining = timeout - (time.time() - start_time)
        # Bail out early once there isn't enough time left for a full
        # hold_interval streak to complete before the timeout.
        if time_remaining < hold_interval:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, value)
                raise exception # pylint: disable=raising-bad-type

            if desc:
                desc = 'Timed out waiting for condition: ' + desc
            else:
                desc = 'Timed out waiting for unnamed condition'
            logging.error(desc)
            raise TimeoutError(message=desc)

        time.sleep(sleep_interval)
3052
3053
def shadowroot_query(element, action):
    """Recursively queries shadowRoot.

    @param element: element to query for.
    @param action: action to be performed on the element.

    @return JS functions to execute.

    """
    # /deep/ CSS query has been removed from ShadowDOM. The only way to access
    # elements now is to recursively query in each shadowRoot.
    shadowroot_script = """
    function deepQuerySelectorAll(root, targetQuery) {
        const elems = Array.prototype.slice.call(
            root.querySelectorAll(targetQuery[0]));
        const remaining = targetQuery.slice(1);
        if (remaining.length === 0) {
            return elems;
        }

        let res = [];
        for (let i = 0; i < elems.length; i++) {
            if (elems[i].shadowRoot) {
                res = res.concat(
                    deepQuerySelectorAll(elems[i].shadowRoot, remaining));
            }
        }
        return res;
    };
    var testing_element = deepQuerySelectorAll(document, %s);
    testing_element[0].%s;
    """
    # Interpolate the element query and the action into the JS template.
    return shadowroot_script % (element, action)
3088
3089
def threaded_return(function):
    """
    Decorator to add to a function to get that function to return a thread
    object, but with the added benefit of storing its return value.

    @param function: function object to be run in the thread

    @return a threading.Thread object, that has already been started, is
            recording its result, and can be completed and its result
            fetched by calling .finish()
    """
    def _run_and_record(result_queue, *args, **kwargs):
        """Runs the decorated function, stashing its result in the queue."""
        result_queue.put(function(*args, **kwargs))

    def _finish(thread):
        """Fetches the thread's result while joining the thread."""
        result = thread.get()
        thread.join()
        return result

    def wrapper(*args, **kwargs):
        """Starts the worker thread and bolts result accessors onto it."""
        result_queue = six.moves.queue.Queue()
        thread = threading.Thread(target=_run_and_record,
                                  args=(result_queue,) + args,
                                  kwargs=kwargs)
        thread.start()
        thread.result_queue = result_queue
        thread.get = result_queue.get
        thread.finish = lambda: _finish(thread)
        return thread

    # for the decorator
    return wrapper
3133
3134
@threaded_return
def background_sample_until_condition(
        function,
        condition=lambda: True,
        timeout=10,
        sleep_interval=1):
    """
    Records the value of the function until the condition is False or the
    timeout is reached. Runs as a background thread, so it's nonblocking.
    Usage might look something like:

    def function():
        return get_value()
    def condition():
        return self._keep_sampling

    # main thread
    sample_thread = utils.background_sample_until_condition(
        function=function,condition=condition)
    # do other work
    # ...
    self._keep_sampling = False
    # blocking call to get result and join the thread
    result = sample_thread.finish()

    @param function: function object, 0 args, to be continually polled
    @param condition: function object, 0 args, to say when to stop polling
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: number of seconds to wait in between polls

    @return a thread object that has already been started and is running in
            the background, whose run must be stopped with .finish(), which
            also returns a list of the results from the sample function
    """
    log = []

    # Pad the deadline by one sleep_interval so the final sample taken just
    # before the timeout still gets recorded.
    end_time = datetime.datetime.now() + datetime.timedelta(
            seconds = timeout + sleep_interval)

    # Stop when either the caller's condition goes false or the deadline
    # passes, whichever comes first.
    while condition() and datetime.datetime.now() < end_time:
        log.append(function())
        time.sleep(sleep_interval)
    return log
3178
3179
class metrics_mock(metrics_mock_class.mock_class_base):
    """Mock class for metrics in case chromite is not installed.

    All behavior comes from metrics_mock_class.mock_class_base; this class
    only provides the expected name for callers that import `metrics`.
    """
    pass
3183
3184
# One parsed /proc/<pid>/mountinfo entry: the mount's root, its mount point,
# and the list of optional-field tags (e.g. shared/master markers); yielded
# by get_mount_info().
MountInfo = collections.namedtuple('MountInfo', ['root', 'mount_point', 'tags'])
3186
3187
def get_mount_info(process='self', mount_point=None):
    """Retrieves information about currently mounted file systems.

    @param mount_point: (optional) The mount point (a path).  If this is
                        provided, only information about the given mount point
                        is returned.  If this is omitted, info about all mount
                        points is returned.
    @param process: (optional) The process id (or the string 'self') of the
                    process whose mountinfo will be obtained.  If this is
                    omitted, info about the current process is returned.

    @return A generator yielding one MountInfo object for each relevant mount
            found in /proc/PID/mountinfo.
    """
    # TODO b:169251326 terms below are set outside of this codebase
    # and should be updated when possible. ("master" -> "main")
    # Each line follows the format described in the proc(5) manpage, e.g.:
    # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root \
    #     rw,errors=continue
    # Field 3 is the root of the mount, field 4 the mount point, field 5
    # the mount options, and fields 6+ hold optional tags (this is where
    # shared mounts are indicated) up to the '-' separator.
    with open('/proc/{}/mountinfo'.format(process)) as mountinfo_file:
        for line in mountinfo_file.readlines():
            fields = line.split()
            if mount_point is not None and fields[4] != mount_point:
                continue
            tags = []
            for tag_field in fields[6:]:
                if tag_field == '-':
                    break
                tags.append(tag_field.split(':')[0])
            yield MountInfo(root=fields[3],
                            mount_point=fields[4],
                            tags=tags)
3227
3228
# Appended suffix for chart tablet naming convention in test lab; used by
# get_lab_chart_address() to derive the tablet hostname from the DUT's.
CHART_ADDRESS_SUFFIX = '-tablet'
3231
3232
def get_lab_chart_address(hostname):
    """Convert lab DUT hostname to address of camera box chart tablet.

    @param hostname: Hostname of the DUT in the camera box.

    @return: The chart tablet hostname when running in the lab (i.e. inside
             a container), otherwise None.
    """
    if not is_in_container():
        return None
    return hostname + CHART_ADDRESS_SUFFIX
3236
3237
def cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Mock objects don't have __code__ in either py2 or 3, but may expose
    # the legacy py2 func_code attribute instead.
    if hasattr(func, "func_code"):
        flags = func.func_code.co_flags
    else:
        flags = func.__code__.co_flags

    # Cherry pick args: pass everything through only if func accepts *args.
    # inspect.CO_VARARGS/CO_VARKEYWORDS replace the former 0x04/0x08
    # magic numbers.
    if flags & inspect.CO_VARARGS:
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs: pass everything through only if func accepts **dargs.
    if flags & inspect.CO_VARKEYWORDS:
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {param: dargs[param]
                   for param in get_nonstar_args(func) if param in dargs}

    return p_args, p_dargs
3277
3278
def cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    picked_args, picked_dargs = cherry_pick_args(func, args, dargs)
    return func(*picked_args, **picked_dargs)
3284
3285
def get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to chose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    # co_varnames lists positional params first, then locals; co_argcount
    # bounds the slice to just the declared (non-star) parameters.
    code = func.__code__
    return code.co_varnames[:code.co_argcount]
3297
def crc8(buf):
    """Calculate CRC8 for a given int list.

    This is a simple table-driven version of CRC8.

    Args:
      buf: A list of byte integer
    Returns:
      A crc value in integer
    """

    # Precomputed lookup table mapping a byte to its CRC8 value.
    _table_crc8 = [ 0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
                    0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d,
                    0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65,
                    0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d,
                    0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5,
                    0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd,
                    0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85,
                    0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd,
                    0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2,
                    0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea,
                    0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2,
                    0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a,
                    0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32,
                    0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a,
                    0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42,
                    0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a,
                    0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c,
                    0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4,
                    0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec,
                    0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4,
                    0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c,
                    0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44,
                    0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c,
                    0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34,
                    0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b,
                    0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63,
                    0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b,
                    0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13,
                    0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb,
                    0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83,
                    0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb,
                    0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3,
                  ]
    if not isinstance(buf, list):
        raise error.TestError('buf should be an integer list.')
    for item in buf:
        if not isinstance(item, int):
            raise error.TestError('buf should contain integers only.')

    crc = 0
    for byte in buf:
        # Fold each byte into the running CRC via the lookup table.
        crc = _table_crc8[(crc ^ byte) & 0xff]
    return crc
3351
3352
def send_msg_to_terminal(job, msg):
    """Send from the client side to the terminal.

    ONLY to be used on non-scheduled tests (aka local runs).
    Do not send anything which could be confused for a status.
    See server/autotest.py client_logger for examples of status's NOT to use

    @param job: The client job obj. Can be accessed from anything built off
        test.test via self.job
    @param msg: the msg to send.
    """
    # fd 3 is the side channel the local runner reads; use a small (2-byte)
    # buffer so the message reaches the terminal promptly.
    terminal = os.fdopen(3, 'w', 2)
    try:
        terminal.write('%s\n' % msg)
    finally:
        terminal.flush()
3369