• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""
16Utility functions for atest.
17"""
18
19
20from __future__ import print_function
21
22import hashlib
23import itertools
24import json
25import logging
26import os
27import pickle
28import re
29import shutil
30import subprocess
31import sys
32
33import atest_decorator
34import atest_error
35import constants
36
37from metrics import metrics_base
38from metrics import metrics_utils
39
40
# ANSI code that resets any custom formatting, followed by a newline.
_BASH_RESET_CODE = '\033[0m\n'
# Arbitrary number to limit stdout for failed runs in _run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
# Prefix ninja/make prints on lines describing a failed build step.
_BUILD_FAILURE = 'FAILED: '
# File where previously-run test commands are recorded for dry-run
# verification (see handle_test_runner_cmd).
CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
                                              os.getcwd()),
                               'tools/tradefederation/core/atest/test_data',
                               'test_commands.json')
# md5 of the build-top path; keeps caches from different source trees apart.
BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
                             encode()).hexdigest()
# Root folder for per-tree test_info caches (8 hash chars are enough to
# distinguish trees).
TEST_INFO_CACHE_ROOT = os.path.join(os.path.expanduser('~'), '.atest',
                                    'info_cache', BUILD_TOP_HASH[:8])
# Fallback terminal dimensions used when the real size cannot be queried.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
# Soong UI wrapper script, relative to the Android build top.
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell snippet listing files committed locally but not yet merged upstream;
# formatted with the git repository path.
_FIND_MODIFIED_FILES_CMDS = (
    "cd {};"
    "local_branch=$(git rev-parse --abbrev-ref HEAD);"
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    "git diff HEAD~$ahead --name-only")
72
def get_build_cmd():
    """Compose build command with relative path and flag "--make-mode".

    Returns:
        A list of soong build command.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
    relative_top = os.path.relpath(build_top, os.getcwd())
    return ['%s/%s' % (relative_top, _BUILD_CMD), '--make-mode']
84
85
def _capture_fail_section(full_log):
    """Return the error message from the build output.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        List of strings that are build errors.
    """
    capturing = False
    errors = []
    for line in full_log:
        if capturing:
            # A fresh compile-status line marks the end of the failure dump.
            if _BUILD_COMPILE_STATUS.match(line):
                break
            errors.append(line)
        elif line.startswith(_BUILD_FAILURE):
            # First failure line: start collecting from here.
            capturing = True
            errors.append(line)
    return errors
105
106
def _run_limited_output(cmd, env_vars=None):
    """Runs a given command and streams the output on a single line in stdout.

    Args:
        cmd: A list of strings representing the command to run.
        env_vars: Optional arg. Dict of env vars to set during build.

    Raises:
        subprocess.CalledProcessError: When the command exits with a non-0
            exitcode.
    """
    # Send stderr to stdout so we only have to deal with a single pipe.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env_vars)
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    white_space = " " * int(term_width)
    full_output = []
    while proc.poll() is None:
        line = proc.stdout.readline()
        # Readline will often return empty strings.
        if not line:
            continue
        # Decode immediately: under Python 3 readline() returns bytes, and
        # writing bytes through '%s' below would print the b'...' repr
        # instead of the text.
        line = line.decode('utf-8')
        full_output.append(line)
        # Trim the line to the width of the terminal.
        # Note: Does not handle terminal resizing, which is probably not worth
        #       checking the width every loop.
        if len(line) >= term_width:
            line = line[:term_width - 1]
        # Clear the last line we outputted.
        sys.stdout.write('\r%s\r' % white_space)
        sys.stdout.write('%s' % line.strip())
        sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the returncode.
    proc.wait()
    if proc.returncode != 0:
        # Parse out the build error to output.
        output = _capture_fail_section(full_output)
        if not output:
            output = full_output
        if len(output) >= _FAILED_OUTPUT_LINE_LIMIT:
            output = output[-_FAILED_OUTPUT_LINE_LIMIT:]
        output = 'Output (may be trimmed):\n%s' % ''.join(output)
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)
154
155
def build(build_targets, verbose=False, env_vars=None):
    """Shell out and make build_targets.

    Args:
        build_targets: A set of strings of build targets to make.
        verbose: Optional arg. If True output is streamed to the console.
                 If False, only the last line of the build output is outputted.
        env_vars: Optional arg. Dict of env vars to set during build.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    if not build_targets:
        logging.debug('No build targets, skipping build.')
        return True
    # Layer the caller's env vars over the current environment.
    merged_env = os.environ.copy()
    if env_vars:
        merged_env.update(env_vars)
    print('\n%s\n%s' % (colorize("Building Dependencies...", constants.CYAN),
                        ', '.join(build_targets)))
    logging.debug('Building Dependencies: %s', ' '.join(build_targets))
    cmd = get_build_cmd() + list(build_targets)
    logging.debug('Executing command: %s', cmd)
    success = True
    try:
        if not verbose:
            # TODO: Save output to a log file.
            _run_limited_output(cmd, env_vars=merged_env)
        else:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT,
                                  env=merged_env)
        logging.info('Build successful')
    except subprocess.CalledProcessError as err:
        logging.error('Error building: %s', build_targets)
        if err.output:
            logging.error(err.output)
        success = False
    return success
194
195
def _can_upload_to_result_server():
    """Return True if we can talk to result server."""
    # TODO: Also check if we have a slow connection to result server.
    if not constants.RESULT_SERVER:
        return False
    try:
        try:
            # Python 2 location of urlopen.
            from urllib2 import urlopen
        except ImportError:
            metrics_utils.handle_exc_and_send_exit_event(
                constants.IMPORT_FAILURE)
            from urllib.request import urlopen
        connection = urlopen(constants.RESULT_SERVER,
                             timeout=constants.RESULT_SERVER_TIMEOUT)
        connection.close()
        return True
    # pylint: disable=broad-except
    except Exception as err:
        logging.debug('Talking to result server raised exception: %s', err)
    return False
215
216
def get_result_server_args(for_test_mapping=False):
    """Return list of args for communication with result server.

    Args:
        for_test_mapping: True if the test run is for Test Mapping to include
            additional reporting args. Default is False.
    """
    # TODO (b/147644460) Temporarily disable Sponge V1 since it will be turned
    # down.
    if not _can_upload_to_result_server():
        return []
    if not for_test_mapping:
        return constants.RESULT_SERVER_ARGS
    return (constants.RESULT_SERVER_ARGS +
            constants.TEST_MAPPING_RESULT_SERVER_ARGS)
232
233
def sort_and_group(iterable, key):
    """Sort the iterable by key, then group adjacent equal-key items."""
    ordered = sorted(iterable, key=key)
    return itertools.groupby(ordered, key=key)
237
238
def is_test_mapping(args):
    """Check if the atest command intends to run tests in test mapping.

    Atest runs in test-mapping mode when the user explicitly asked for it
    (--test-mapping / --include-subdirs), gave no tests at all, or gave a
    single test starting with ':' (a test group name in a TEST_MAPPING
    file, e.g. `:postsubmit`).

    Args:
        args: arg parsed object.

    Returns:
        True if the args indicates atest shall run tests in test mapping. False
        otherwise.
    """
    if args.test_mapping:
        return args.test_mapping
    if args.include_subdirs:
        return args.include_subdirs
    if not args.tests:
        # No tests given: default test-mapping run.
        return not args.tests
    return len(args.tests) == 1 and args.tests[0][0] == ':'
262
@atest_decorator.static_var("cached_has_colors", {})
def _has_colors(stream):
    """Check the output stream is colorful.

    Args:
        stream: The standard file stream.

    Returns:
        True if the file stream can interpret the ANSI color code.
    """
    # Per-stream result cache attached to the function by the decorator.
    cached_has_colors = _has_colors.cached_has_colors
    if stream in cached_has_colors:
        return cached_has_colors[stream]
    else:
        # Seed the entry optimistically with True; the checks below
        # overwrite it with False whenever color support cannot be confirmed.
        cached_has_colors[stream] = True
    # Following from Python cookbook, #475186
    if not hasattr(stream, "isatty"):
        cached_has_colors[stream] = False
        return False
    if not stream.isatty():
        # Auto color only on TTYs
        cached_has_colors[stream] = False
        return False
    try:
        # Ask the terminal (via terminfo) how many colors it supports.
        import curses
        curses.setupterm()
        cached_has_colors[stream] = curses.tigetnum("colors") > 2
    # pylint: disable=broad-except
    except Exception as err:
        logging.debug('Checking colorful raised exception: %s', err)
        cached_has_colors[stream] = False
    return cached_has_colors[stream]
295
296
def colorize(text, color, highlight=False):
    """ Convert to colorful string with ANSI escape code.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.

    Returns:
        Colorful string with ANSI escape code.
    """
    if not _has_colors(sys.stdout):
        return text
    # Background (highlight) colors start at ANSI 40, foreground at 30.
    ansi_shift = (40 if highlight else 30) + color
    return '\033[1;%dm%s\033[0m' % (ansi_shift, text)
321
322
def colorful_print(text, color, highlight=False, auto_wrap=True):
    """Print out the text with color.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.
        auto_wrap: If True, Text wraps while print.
    """
    colored = colorize(text, color, highlight)
    line_end = "\n" if auto_wrap else ""
    print(colored, end=line_end)
338
339
# pylint: disable=no-member
# TODO: remove the above disable when migrating to python3.
def get_terminal_size():
    """Get terminal size and return a tuple.

    Returns:
        2 integers: the size of X(columns) and Y(lines/rows).
    """
    # Determine the width of the terminal. We'll need to clear this many
    # characters when carriage returning. Set default value as 80.
    try:
        if sys.version_info[0] != 2:
            size = shutil.get_terminal_size()
            return size.columns, size.lines
        # Python 2 has no shutil.get_terminal_size; ask stty instead.
        rows, cols = subprocess.check_output(['stty', 'size']).decode().split()
        return int(cols), int(rows)
    # b/137521782 stty size could have changed for reasones.
    except subprocess.CalledProcessError:
        return _DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT
359
360
def is_external_run():
    # TODO(b/133905312): remove this function after aidegen calling
    #       metrics_base.get_user_type directly.
    """Check is external run or not.

    Determine the internal user by passing at least one check:
      - whose git mail domain is from google
      - whose hostname is from google
    Otherwise is external user.

    Returns:
        True if this is an external run, False otherwise.
    """
    user_type = metrics_base.get_user_type()
    return user_type == metrics_base.EXTERNAL_USER
375
376
def print_data_collection_notice():
    """Print the data collection notice."""
    if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
        anonymous, user_type = ' anonymous', 'EXTERNAL'
    else:
        anonymous, user_type = '', 'INTERNAL'
    notice = ('  We collect%s usage statistics in accordance with our Content '
              'Licenses (%s), Contributor License Agreement (%s), Privacy '
              'Policy (%s) and Terms of Service (%s).'
             ) % (anonymous,
                  constants.CONTENT_LICENSES_URL,
                  constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
                  constants.PRIVACY_POLICY_URL,
                  constants.TERMS_SERVICE_URL
                 )
    print('\n==================')
    colorful_print("Notice:", constants.RED)
    colorful_print("%s" % notice, constants.GREEN)
    print('==================\n')
397
398
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
                           result_path=CMD_RESULT_PATH):
    """Handle the runner command of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        test_cmds: A list of strings for running input tests.
        do_verification: A boolean to indicate the action of this method.
                         True: Do verification without updating result map and
                               raise DryRunVerificationError if verifying fails.
                         False: Update result map, if the former command is
                                different with current command, it will confirm
                                with user if they want to update or not.
        result_path: The file path for saving result.
    """
    full_result_content = {}
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    former_test_cmds = full_result_content.get(input_test, [])
    if _are_identical_cmds(test_cmds, former_test_cmds):
        # Current commands are the same as the formers; nothing to update.
        return
    if do_verification:
        raise atest_error.DryRunVerificationError('Dry run verification failed,'
                                                  ' former commands: %s' %
                                                  former_test_cmds)
    if former_test_cmds:
        # If former_test_cmds is different from test_cmds, ask users if they
        # are willing to update the result.
        print('Former cmds = %s' % former_test_cmds)
        print('Current cmds = %s' % test_cmds)
        try:
            # TODO(b/137156054):
            # Move the import statement into a method for that distutils is
            # not a built-in lib in older python3(b/137017806). Will move it
            # back when embedded_launcher fully supports Python3.
            from distutils.util import strtobool
            # raw_input only exists on Python 2; on Python 3 the equivalent
            # builtin is input. The previous direct raw_input call raised
            # NameError under Python 3.
            try:
                prompt = raw_input
            except NameError:
                prompt = input
            if not strtobool(prompt('Do you want to update former result '
                                    'with the latest one?(Y/n)')):
                print('SKIP updating result!!!')
                return
        except ValueError:
            # Default action is updating the command result of the input_test.
            # If the user input is unrecognizable telling yes or no,
            # "Y" is implicitly applied.
            pass
    full_result_content[input_test] = test_cmds
    with open(result_path, 'w') as outfile:
        json.dump(full_result_content, outfile, indent=0)
        print('Save result mapping to %s' % result_path)
452
453
def _are_identical_cmds(current_cmds, former_cmds):
    """Tell two commands are identical. Note that '--atest-log-file-path' is not
    considered a critical argument, therefore, it will be removed during
    the comparison. Also, atest can be ran in any place, so verifying relative
    path is regardless as well.

    Args:
        current_cmds: A list of strings for running input tests.
        former_cmds: A list of strings recorded from the previous run.

    Returns:
        True if both commands are identical, False otherwise.
    """
    def _normalize(cmd_list):
        """Method that normalize commands.

        Args:
            cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']

        Returns:
            A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
        """
        # Build a fresh list instead of removing items while iterating: the
        # old in-place removal skipped the token after each removed one, and
        # its .encode('utf-8') produced bytes whose startswith(str) raises
        # TypeError on Python 3.
        normalized = []
        for token in ''.join(cmd_list).split():
            # Log path differs per run and is not a critical argument.
            if token.startswith('--atest-log-file-path'):
                continue
            # Pin the build command to a fixed relative form so comparison
            # is independent of where atest was invoked.
            if _BUILD_CMD in token:
                normalized.append(os.path.join('./', _BUILD_CMD))
                continue
            normalized.append(token)
        return normalized

    _current_cmds = _normalize(current_cmds)
    _former_cmds = _normalize(former_cmds)
    # Always sort cmd list to make it comparable.
    _current_cmds.sort()
    _former_cmds.sort()
    return _current_cmds == _former_cmds
493
494def _get_hashed_file_name(main_file_name):
495    """Convert the input string to a md5-hashed string. If file_extension is
496       given, returns $(hashed_string).$(file_extension), otherwise
497       $(hashed_string).cache.
498
499    Args:
500        main_file_name: The input string need to be hashed.
501
502    Returns:
503        A string as hashed file name with .cache file extension.
504    """
505    hashed_fn = hashlib.md5(str(main_file_name).encode())
506    hashed_name = hashed_fn.hexdigest()
507    return hashed_name + '.cache'
508
def get_test_info_cache_path(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
    """Get the cache path of the desired test_infos.

    Args:
        test_reference: A string of the test.
        cache_root: Folder path where stores caches.

    Returns:
        A string of the path of test_info cache.
    """
    cache_file_name = _get_hashed_file_name(test_reference)
    return os.path.join(cache_root, cache_file_name)
521
def update_test_info_cache(test_reference, test_infos,
                           cache_root=TEST_INFO_CACHE_ROOT):
    """Update cache content which stores a set of test_info objects through
       pickle module, each test_reference will be saved as a cache file.

    Args:
        test_reference: A string referencing a test.
        test_infos: A set of TestInfos.
        cache_root: Folder path for saving caches.
    """
    if not os.path.isdir(cache_root):
        os.makedirs(cache_root)
    cache_path = get_test_info_cache_path(test_reference, cache_root)
    try:
        # Persist the test_infos; protocol 2 keeps Python 2 compatibility.
        with open(cache_path, 'wb') as cache_file:
            logging.debug('Saving cache %s.', cache_path)
            pickle.dump(test_infos, cache_file, protocol=2)
    except (pickle.PicklingError, TypeError, IOError) as err:
        # Cache failures are non-fatal; just log and report via metrics.
        logging.debug('Exception raised: %s', err)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
546
547
def load_test_info_cache(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
    """Load cache by test_reference to a set of test_infos object.

    Args:
        test_reference: A string referencing a test.
        cache_root: Folder path for finding caches.

    Returns:
        A list of TestInfo namedtuple if cache found, else None.
    """
    cache_file = get_test_info_cache_path(test_reference, cache_root)
    if not os.path.isfile(cache_file):
        return None
    logging.debug('Loading cache %s.', cache_file)
    try:
        with open(cache_file, 'rb') as cache:
            return pickle.load(cache)
    except (pickle.UnpicklingError, ValueError, EOFError, IOError) as err:
        # Unreadable cache: drop it, log, and report via metrics; never fatal.
        logging.debug('Exception raised: %s', err)
        os.remove(cache_file)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
    return None
572
def clean_test_info_caches(tests, cache_root=TEST_INFO_CACHE_ROOT):
    """Clean caches of input tests.

    Args:
        tests: A list of test references.
        cache_root: Folder path for finding caches.
    """
    for test in tests:
        cache_file = get_test_info_cache_path(test, cache_root)
        if not os.path.isfile(cache_file):
            continue
        logging.debug('Removing cache: %s', cache_file)
        try:
            os.remove(cache_file)
        except IOError as err:
            # Removal failure is non-fatal; log and report via metrics.
            logging.debug('Exception raised: %s', err)
            metrics_utils.handle_exc_and_send_exit_event(
                constants.ACCESS_CACHE_FAILURE)
590
def get_modified_files(root_dir):
    """Get the git modified files. The git path here is git top level of
    the root_dir. It's inevitable to utilise different commands to fulfill
    2 scenario:
        1. locate unstaged/staged files
        2. locate committed files but not yet merged.
    the 'git_status_cmd' fulfils the former while the 'find_modified_files'
    fulfils the latter.

    Args:
        root_dir: the root where it starts finding.

    Returns:
        A set of modified files altered since last commit.
    """
    modified_files = set()
    try:
        find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
        # Decode subprocess output: on Python 3 check_output returns bytes,
        # and formatting bytes into '{}/{}' would embed the b'...' repr in
        # the resulting paths instead of the actual text.
        git_paths = subprocess.check_output(
            find_git_cmd, shell=True).decode().splitlines()
        for git_path in git_paths:
            # Find modified files from git working tree status.
            git_status_cmd = ("repo forall {} -c git status --short | "
                              "awk '{{print $NF}}'").format(git_path)
            modified_wo_commit = subprocess.check_output(
                git_status_cmd, shell=True).decode().rstrip().splitlines()
            for change in modified_wo_commit:
                modified_files.add(
                    os.path.normpath('{}/{}'.format(git_path, change)))
            # Find modified files that are committed but not yet merged.
            find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
            commit_modified_files = subprocess.check_output(
                find_modified_files, shell=True).decode().splitlines()
            for line in commit_modified_files:
                modified_files.add(os.path.normpath('{}/{}'.format(
                    git_path, line)))
    except (OSError, subprocess.CalledProcessError) as err:
        # Best effort: a repo without git or a failing git command simply
        # yields whatever was collected so far.
        logging.debug('Exception raised: %s', err)
    return modified_files
630