• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""
16Utility functions for atest.
17"""
18
19
20# pylint: disable=import-outside-toplevel
21# pylint: disable=too-many-lines
22
23from __future__ import print_function
24
25import fnmatch
26import hashlib
27import importlib
28import itertools
29import json
30import logging
31import os
32import pickle
33import re
34import shutil
35import subprocess
36import sys
37import sysconfig
38import time
39import zipfile
40
41import xml.etree.ElementTree as ET
42
43from distutils.util import strtobool
44
45# This is a workaround of b/144743252, where the http.client failed to loaded
46# because the googleapiclient was found before the built-in libs; enabling
47# embedded launcher(b/135639220) has not been reliable and other issue will
48# raise.
49# The workaround is repositioning the built-in libs before other 3rd libs in
50# PYTHONPATH(sys.path) to eliminate the symptom of failed loading http.client.
51sys.path.insert(0, os.path.dirname(sysconfig.get_paths()['purelib']))
52sys.path.insert(0, os.path.dirname(sysconfig.get_paths()['stdlib']))
53
54#pylint: disable=wrong-import-position
55import atest_decorator
56import atest_error
57import constants
58
59# This proto related module will be auto generated in build time.
60# pylint: disable=no-name-in-module
61# pylint: disable=import-error
try:
    from tools.asuite.atest.tf_proto import test_record_pb2
except ImportError:
    # The proto module is generated at build time; tolerate its absence
    # when running from source.
    pass
# b/147562331 only occurs when running atest in source code. We don't
# encourage the users to manually "pip3 install protobuf", therefore when the
# exception occurs, we don't collect data and the tab completion for args is
# silenced.
try:
    from metrics import metrics
    from metrics import metrics_base
    from metrics import metrics_utils
except ImportError:
    # TODO(b/182854938): remove this ImportError after refactor metrics dir.
    try:
        from asuite.metrics import metrics
        from asuite.metrics import metrics_base
        from asuite.metrics import metrics_utils
    except ImportError as err:
        # This exception occurs only when invoking atest in source code.
        print("You shouldn't see this message unless you ran 'atest-src'. "
              "To resolve the issue, please run:\n\t{}\n"
              "and try again.".format('pip3 install protobuf'))
        # Interpolate with %; print() does not format comma-separated args.
        print('Import error, %s' % err)
        print('sys.path: %s' % sys.path)
        sys.exit(constants.IMPORT_FAILURE)
87
# ANSI sequence that resets text attributes; written after streamed output.
_BASH_RESET_CODE = '\033[0m\n'
# Arbitrary number to limit stdout for failed runs in _run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
_BUILD_FAILURE = 'FAILED: '
# JSON file mapping a test reference to the commands last used to run it;
# read and written by handle_test_runner_cmd().
CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
                                              os.getcwd()),
                               'tools/asuite/atest/test_data',
                               'test_commands.json')
# Hash of the source tree root path, used to keep caches from different
# checkouts separate.
BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
                             encode()).hexdigest()
TEST_INFO_CACHE_ROOT = os.path.join(os.path.expanduser('~'), '.atest',
                                    'info_cache', BUILD_TOP_HASH[:8])
# Fallback terminal geometry when the real size cannot be determined.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell snippet (formatted with a git path) that lists files committed
# locally but not yet merged into the remote tracking branch.
_FIND_MODIFIED_FILES_CMDS = (
    "cd {};"
    "local_branch=$(git rev-parse --abbrev-ref HEAD);"
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    "git diff HEAD~$ahead --name-only")
_ANDROID_BUILD_EXT = ('.bp', '.mk')

# Set of special chars for various purposes.
_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
_WILDCARD_CHARS = {'?', '*'}
123
def get_build_cmd():
    """Compose the soong build command with a relative path and "--make-mode".

    Returns:
        A list of soong build command.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
    relative_top = os.path.relpath(build_top, os.getcwd())
    return ['%s/%s' % (relative_top, _BUILD_CMD), '--make-mode']
135
136
def _capture_fail_section(full_log):
    """Extract the build-error section from the full build output.

    Capturing starts at the first line beginning with _BUILD_FAILURE and
    stops at the next ninja compile-status line.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        List of strings that are build errors.
    """
    capturing = False
    captured = []
    for line in full_log:
        if capturing and _BUILD_COMPILE_STATUS.match(line):
            break
        if capturing or line.startswith(_BUILD_FAILURE):
            capturing = True
            captured.append(line)
    return captured
156
157
def _capture_limited_output(full_log):
    """Return a size-limited error message built from the build output.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        A single string of build errors, trimmed to the last
        _FAILED_OUTPUT_LINE_LIMIT lines when the error section is too long.
    """
    # Prefer the parsed error section; fall back to the whole log.
    error_lines = _capture_fail_section(full_log) or full_log
    if len(error_lines) >= _FAILED_OUTPUT_LINE_LIMIT:
        error_lines = error_lines[-_FAILED_OUTPUT_LINE_LIMIT:]
    return 'Output (may be trimmed):\n%s' % ''.join(error_lines)
175
176
# TODO: b/187122993 refine subprocess with 'with-statement' in fixit week.
def _run_limited_output(cmd, env_vars=None):
    """Runs a given command and streams the output on a single line in stdout.

    Each new line of output overwrites the previous one via carriage
    returns, while the complete output is collected in memory for error
    reporting on failure.

    Args:
        cmd: A list of strings representing the command to run.
        env_vars: Optional arg. Dict of env vars to set during build.

    Raises:
        subprocess.CalledProcessError: When the command exits with a non-0
            exitcode.
    """
    # Send stderr to stdout so we only have to deal with a single pipe.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env_vars)
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    # Blank line used to erase the previously printed status line.
    white_space = " " * int(term_width)
    full_output = []
    # NOTE(review): output still buffered in the pipe when poll() first
    # reports exit may be skipped by this loop — confirm acceptable.
    while proc.poll() is None:
        line = proc.stdout.readline().decode('utf-8')
        # Readline will often return empty strings.
        if not line:
            continue
        full_output.append(line)
        # Trim the line to the width of the terminal.
        # Note: Does not handle terminal resizing, which is probably not worth
        #       checking the width every loop.
        if len(line) >= term_width:
            line = line[:term_width - 1]
        # Clear the last line we outputted.
        sys.stdout.write('\r%s\r' % white_space)
        sys.stdout.write('%s' % line.strip())
        sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the returncode.
    proc.wait()
    if proc.returncode != 0:
        # get error log from "OUT_DIR/error.log"; prefer it over the
        # captured stream when it exists and is non-empty.
        error_log_file = os.path.join(get_build_out_dir(), "error.log")
        output = []
        if os.path.isfile(error_log_file):
            if os.stat(error_log_file).st_size > 0:
                with open(error_log_file) as f:
                    output = f.read()
        if not output:
            output = _capture_limited_output(full_output)
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)
227
228
def get_build_out_dir():
    """Get the android build out directory.

    Honors the $OUT_DIR and $OUT_DIR_COMMON_BASE overrides; otherwise
    defaults to <build_top>/out.

    Returns:
        String of the out directory.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
    # Get the out folder if user specified $OUT_DIR
    custom_out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
    custom_out_dir_common_base = os.environ.get(
        constants.ANDROID_OUT_DIR_COMMON_BASE)
    if custom_out_dir:
        if os.path.isabs(custom_out_dir):
            return custom_out_dir
        return os.path.join(build_top, custom_out_dir)
    if custom_out_dir_common_base:
        # When OUT_DIR_COMMON_BASE is set, the output directory for each
        # separate source tree is named after the directory holding the
        # source tree.
        tree_name = os.path.basename(build_top)
        if os.path.isabs(custom_out_dir_common_base):
            return os.path.join(custom_out_dir_common_base, tree_name)
        return os.path.join(build_top, custom_out_dir_common_base, tree_name)
    return os.path.join(build_top, "out")
260
261
def build(build_targets, verbose=False, env_vars=None):
    """Shell out and make build_targets.

    Args:
        build_targets: A set of strings of build targets to make.
        verbose: Optional arg. If True output is streamed to the console.
                 If False, only the last line of the build output is outputted.
        env_vars: Optional arg. Dict of env vars to set during build.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    if not build_targets:
        logging.debug('No build targets, skipping build.')
        return True
    # Layer caller-supplied variables on top of the current environment.
    build_env = os.environ.copy()
    if env_vars:
        build_env.update(env_vars)
    print('\n%s\n%s' % (colorize("Building Dependencies...", constants.CYAN),
                        ', '.join(build_targets)))
    logging.debug('Building Dependencies: %s', ' '.join(build_targets))
    cmd = get_build_cmd() + list(build_targets)
    logging.debug('Executing command: %s', cmd)
    try:
        if verbose:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT,
                                  env=build_env)
        else:
            # TODO: Save output to a log file.
            _run_limited_output(cmd, env_vars=build_env)
    except subprocess.CalledProcessError as err:
        logging.error('Error building: %s', build_targets)
        print(constants.REBUILD_MODULE_INFO_MSG.format(
            colorize(constants.REBUILD_MODULE_INFO_FLAG,
                     constants.RED)))
        if err.output:
            logging.error(err.output)
        return False
    logging.info('Build successful')
    return True
303
304
def _can_upload_to_result_server():
    """Return True if we can talk to result server."""
    # TODO: Also check if we have a slow connection to result server.
    if not constants.RESULT_SERVER:
        return False
    try:
        from urllib.request import urlopen
        urlopen(constants.RESULT_SERVER,
                timeout=constants.RESULT_SERVER_TIMEOUT).close()
        return True
    # pylint: disable=broad-except
    except Exception as err:
        logging.debug('Talking to result server raised exception: %s', err)
    return False
318
319
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
    """Return the list of args for communication with result server.

    Args:
        for_test_mapping: True if the test run is for Test Mapping to include
            additional reporting args. Default is False.

    Returns:
        A list of result server arguments.
    """
    # Customize test mapping argument here if needed.
    return constants.RESULT_SERVER_ARGS
330
def sort_and_group(iterable, key):
    """Sort the iterable by key, then group adjacent equal-key items."""
    ordered = sorted(iterable, key=key)
    return itertools.groupby(ordered, key=key)
334
335
def is_test_mapping(args):
    """Check if the atest command intends to run tests in test mapping.

    When atest runs tests in test mapping, it must have at most one test
    specified. If a test is specified, it must be started with  `:`,
    which means the test value is a test group name in TEST_MAPPING file, e.g.,
    `:postsubmit`.

    If --host-unit-test-only be applied, it's not test mapping.
    If any test mapping options is specified, the atest command must also be
    set to run tests in test mapping files.

    Args:
        args: arg parsed object.

    Returns:
        True if the args indicates atest shall run tests in test mapping. False
        otherwise.
    """
    # Host-unit-test-only runs are never test-mapping runs.
    if args.host_unit_test_only:
        return False
    return (args.test_mapping or
            args.include_subdirs or
            not args.tests or
            (len(args.tests) == 1 and args.tests[0][0] == ':'))
361
362
@atest_decorator.static_var("cached_has_colors", {})
def _has_colors(stream):
    """Check whether the output stream supports ANSI colors.

    The result is memoized per stream on the function attribute
    `cached_has_colors`.

    Args:
        stream: The standard file stream.

    Returns:
        True if the file stream can interpret the ANSI color code.
    """
    cache = _has_colors.cached_has_colors
    if stream in cache:
        return cache[stream]
    # Following from Python cookbook, #475186: auto color only on TTYs
    # reporting more than 2 colors.
    supported = False
    if hasattr(stream, "isatty") and stream.isatty():
        try:
            import curses
            curses.setupterm()
            supported = curses.tigetnum("colors") > 2
        # pylint: disable=broad-except
        except Exception as err:
            logging.debug('Checking colorful raised exception: %s', err)
            supported = False
    cache[stream] = supported
    return supported
394
395
def colorize(text, color, highlight=False):
    """Convert a string into a colorful string with ANSI escape codes.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.

    Returns:
        The text wrapped in ANSI escape codes when the stream supports
        colors, otherwise the text unchanged.
    """
    if not _has_colors(sys.stdout):
        return text
    # Background (highlight) colors start at 40, foreground colors at 30.
    ansi_shift = (40 if highlight else 30) + color
    return '\033[1;%dm%s\033[0m' % (ansi_shift, text)
420
421
def colorful_print(text, color, highlight=False, auto_wrap=True):
    """Print out the text with color.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.
        auto_wrap: If True, text wraps (newline appended) while printing.
    """
    print(colorize(text, color, highlight), end="\n" if auto_wrap else "")
437
438
def get_terminal_size():
    """Get the terminal size and return a tuple.

    Returns:
        2 integers: the size of X(columns) and Y(lines/rows).
    """
    # Fall back to 80x25 when the size cannot be determined (e.g. stdout
    # is not attached to a terminal).
    size = shutil.get_terminal_size(
        fallback=(_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT))
    return size.columns, size.lines
451
452
def is_external_run():
    # TODO(b/133905312): remove this function after aidegen calling
    #       metrics_base.get_user_type directly.
    """Check whether this run comes from an external (non-internal) user.

    Determine the internal user by passing at least one check:
      - whose git mail domain is from google
      - whose hostname is from google
    Otherwise is external user.

    Returns:
        True if this is an external run, False otherwise.
    """
    return metrics_base.get_user_type() == metrics_base.EXTERNAL_USER
467
468
def print_data_collection_notice():
    """Print the data collection notice."""
    user_type = 'INTERNAL'
    anonymous = ''
    if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
        user_type = 'EXTERNAL'
        anonymous = ' anonymous'
    notice = ('  We collect%s usage statistics in accordance with our Content '
              'Licenses (%s), Contributor License Agreement (%s), Privacy '
              'Policy (%s) and Terms of Service (%s).'
             ) % (anonymous,
                  constants.CONTENT_LICENSES_URL,
                  constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
                  constants.PRIVACY_POLICY_URL,
                  constants.TERMS_SERVICE_URL
                 )
    print(delimiter('=', 18, prenl=1))
    colorful_print("Notice:", constants.RED)
    colorful_print("%s" % notice, constants.GREEN)
    print(delimiter('=', 18, postnl=1))
489
490
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
                           result_path=CMD_RESULT_PATH):
    """Handle the runner command of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        test_cmds: A list of strings for running input tests.
        do_verification: A boolean to indicate the action of this method.
                         True: Do verification without updating result map and
                               raise DryRunVerificationError if verifying fails.
                         False: Update result map, if the former command is
                                different with current command, it will confirm
                                with user if they want to update or not.
        result_path: The file path for saving result.
    """
    full_result_content = {}
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    former_test_cmds = full_result_content.get(input_test, [])
    # Normalize both sides so run-specific arguments (log paths, library
    # paths) do not cause false mismatches.
    test_cmds = _normalize(test_cmds)
    former_test_cmds = _normalize(former_test_cmds)
    if not _are_identical_cmds(test_cmds, former_test_cmds):
        if do_verification:
            raise atest_error.DryRunVerificationError(
                'Dry run verification failed, former commands: {}'.format(
                    former_test_cmds))
        if former_test_cmds:
            # If former_test_cmds is different from test_cmds, ask users if they
            # are willing to update the result.
            print('Former cmds = %s' % former_test_cmds)
            print('Current cmds = %s' % test_cmds)
            if not prompt_with_yn_result('Do you want to update former result '
                                         'to the latest one?', True):
                print('SKIP updating result!!!')
                return
    else:
        # If current commands are the same as the formers, no need to update
        # result.
        return
    # Record the normalized commands under the test reference and rewrite
    # the whole mapping file.
    full_result_content[input_test] = test_cmds
    with open(result_path, 'w') as outfile:
        json.dump(full_result_content, outfile, indent=0)
        print('Save result mapping to %s' % result_path)
535
def _normalize(cmd_list):
    """Normalize commands for comparison.

    '--atest-log-file-path', 'LD_LIBRARY_PATH=' and '--proto-output-file='
    are not considered critical arguments, therefore they are removed during
    the comparison. The build command is reduced to a fixed relative path so
    commands recorded from different working directories compare equal.

    Note: the previous implementation removed elements from the list while
    iterating over it, which skips the element following each removal; this
    version builds a new list instead.

    Args:
        cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']

    Returns:
        A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
    """
    # Prefixes of run-specific tokens that must not affect the comparison.
    ignored_prefixes = ('--atest-log-file-path',
                        'LD_LIBRARY_PATH=',
                        '--proto-output-file=')
    normalized = []
    build_cmds = []
    for token in ' '.join(cmd_list).split():
        if token.startswith(ignored_prefixes):
            continue
        if _BUILD_CMD in token:
            # Canonicalize the build command path; keep it at the end to
            # match the historical remove-then-append ordering.
            build_cmds.append(os.path.join('./', _BUILD_CMD))
            continue
        normalized.append(token)
    return normalized + build_cmds
564
565def _are_identical_cmds(current_cmds, former_cmds):
566    """Tell two commands are identical.
567
568    Args:
569        current_cmds: A list of strings for running input tests.
570        former_cmds: A list of strings recorded from the previous run.
571
572    Returns:
573        True if both commands are identical, False otherwise.
574    """
575    # Always sort cmd list to make it comparable.
576    current_cmds.sort()
577    former_cmds.sort()
578    return current_cmds == former_cmds
579
580def _get_hashed_file_name(main_file_name):
581    """Convert the input string to a md5-hashed string. If file_extension is
582       given, returns $(hashed_string).$(file_extension), otherwise
583       $(hashed_string).cache.
584
585    Args:
586        main_file_name: The input string need to be hashed.
587
588    Returns:
589        A string as hashed file name with .cache file extension.
590    """
591    hashed_fn = hashlib.md5(str(main_file_name).encode())
592    hashed_name = hashed_fn.hexdigest()
593    return hashed_name + '.cache'
594
def md5sum(filename):
    """Generate the MD5 checksum of a file.

    Args:
        filename: A string of a filename.

    Returns:
        A string of the hashed MD5 checksum, or "" when the file does not
        exist.
    """
    if not os.path.isfile(filename):
        return ""
    with open(filename, 'rb') as target:
        return hashlib.md5(target.read()).hexdigest()
609
def check_md5(check_file, missing_ok=False):
    """Method equivalent to 'md5sum --check /file/to/check'.

    Args:
        check_file: A string of filename that stores filename and its
                   md5 checksum.
        missing_ok: A boolean that considers OK even when the check_file does
                    not exist. Using missing_ok=True allows ignoring md5 check
                    especially for initial run that the check_file has not yet
                    generated. Using missing_ok=False ensures the consistency of
                    files, and guarantees the process is successfully completed.

    Returns:
        When missing_ok is True (soft check):
          - True if the checksum is consistent with the actual MD5, even the
            check_file is missing or not a valid JSON.
          - False when the checksum is inconsistent with the actual MD5.
        When missing_ok is False (ensure the process completed properly):
          - True if the checksum is consistent with the actual MD5.
          - False otherwise.
    """
    if not os.path.isfile(check_file):
        if not missing_ok:
            logging.warning(
                'Unable to verify: %s not found.', check_file)
        return missing_ok
    if not is_valid_json_file(check_file):
        logging.warning(
            'Unable to verify: %s invalid JSON format.', check_file)
        return missing_ok
    with open(check_file, 'r+') as _file:
        recorded_sums = json.load(_file)
    # Any single mismatch invalidates the whole check.
    for filename, checksum in recorded_sums.items():
        if md5sum(filename) != checksum:
            logging.debug('%s has altered.', filename)
            return False
    return True
647
def save_md5(filenames, save_file):
    """Method equivalent to 'md5sum file1 file2 > /file/to/check'.

    Args:
        filenames: A list of filenames.
        save_file: Filename for storing files and their md5 checksums.
    """
    if os.path.isfile(save_file):
        os.remove(save_file)
    checksums = {}
    for name in filenames:
        if not os.path.isfile(name):
            # Still recorded below; md5sum() yields "" for a missing file.
            logging.warning('%s is not a file.', name)
        checksums[name] = md5sum(name)
    with open(save_file, 'w+') as _file:
        json.dump(checksums, _file)
664
def get_cache_root():
    """Get the root path dir for cache.

    Use branch and target information as cache_root.
    The path will look like ~/.atest/info_cache/$hash(branch+target)

    Returns:
        A string of the path of the root dir of cache.
    """
    # Fall back to the source tree path when no manifest branch is found.
    manifest_branch = get_manifest_branch() or os.environ.get(
        constants.ANDROID_BUILD_TOP, constants.ANDROID_BUILD_TOP)
    build_target = os.path.basename(
        os.environ.get(constants.ANDROID_PRODUCT_OUT,
                       constants.ANDROID_PRODUCT_OUT))
    branch_target_hash = hashlib.md5(
        (constants.MODE + manifest_branch + build_target).encode()).hexdigest()
    return os.path.join(os.path.expanduser('~'), '.atest', 'info_cache',
                        branch_target_hash[:8])
686
def get_test_info_cache_path(test_reference, cache_root=None):
    """Get the cache path of the desired test_infos.

    Args:
        test_reference: A string of the test.
        cache_root: Folder path where stores caches.

    Returns:
        A string of the path of test_info cache.
    """
    root = cache_root or get_cache_root()
    return os.path.join(root, _get_hashed_file_name(test_reference))
700
def update_test_info_cache(test_reference, test_infos,
                           cache_root=None):
    """Pickle a set of test_info objects into a per-reference cache file.

    Args:
        test_reference: A string referencing a test.
        test_infos: A set of TestInfos.
        cache_root: Folder path for saving caches.
    """
    cache_root = cache_root or get_cache_root()
    if not os.path.isdir(cache_root):
        os.makedirs(cache_root)
    cache_path = get_test_info_cache_path(test_reference, cache_root)
    try:
        with open(cache_path, 'wb') as cache_file:
            logging.debug('Saving cache %s.', cache_path)
            pickle.dump(test_infos, cache_file, protocol=2)
    except (pickle.PicklingError, TypeError, IOError) as err:
        # Not fatal: log the error and report it through metrics.
        logging.debug('Exception raised: %s', err)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
727
728
def load_test_info_cache(test_reference, cache_root=None):
    """Load cached test_infos for a test reference.

    Args:
        test_reference: A string referencing a test.
        cache_root: Folder path for finding caches.

    Returns:
        A list of TestInfo namedtuple if cache found, else None.
    """
    cache_root = cache_root or get_cache_root()
    cache_file = get_test_info_cache_path(test_reference, cache_root)
    if not os.path.isfile(cache_file):
        return None
    logging.debug('Loading cache %s.', cache_file)
    try:
        with open(cache_file, 'rb') as cached:
            return pickle.load(cached, encoding='utf-8')
    except (pickle.UnpicklingError,
            ValueError,
            TypeError,
            EOFError,
            IOError) as err:
        # Corrupt cache: drop it, log the error, and report through metrics.
        logging.debug('Exception raised: %s', err)
        os.remove(cache_file)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
    return None
759
def clean_test_info_caches(tests, cache_root=None):
    """Remove the cache files of the given tests.

    Args:
        tests: A list of test references.
        cache_root: Folder path for finding caches.
    """
    cache_root = cache_root or get_cache_root()
    for test in tests:
        cache_file = get_test_info_cache_path(test, cache_root)
        if not os.path.isfile(cache_file):
            continue
        logging.debug('Removing cache: %s', cache_file)
        try:
            os.remove(cache_file)
        except IOError as err:
            # Log the failure and report it through metrics.
            logging.debug('Exception raised: %s', err)
            metrics_utils.handle_exc_and_send_exit_event(
                constants.ACCESS_CACHE_FAILURE)
779
def get_modified_files(root_dir):
    """Get the git modified files. The git path here is git top level of
    the root_dir. It's inevitable to utilise different commands to fulfill
    2 scenario:
        1. locate unstaged/staged files
        2. locate committed files but not yet merged.
    the 'git_status_cmd' fulfils the former while the 'find_modified_files'
    fulfils the latter.

    Args:
        root_dir: the root where it starts finding.

    Returns:
        A set of modified files altered since last commit.
    """
    modified_files = set()
    try:
        # Resolve the top-level git directory of root_dir.
        find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
        git_paths = subprocess.check_output(
            find_git_cmd, shell=True).decode().splitlines()
        for git_path in git_paths:
            # Find modified files from git working tree status.
            # NOTE(review): depends on the 'repo' tool being on PATH —
            # confirm behavior for non-repo checkouts.
            git_status_cmd = ("repo forall {} -c git status --short | "
                              "awk '{{print $NF}}'").format(git_path)
            modified_wo_commit = subprocess.check_output(
                git_status_cmd, shell=True).decode().rstrip().splitlines()
            for change in modified_wo_commit:
                modified_files.add(
                    os.path.normpath('{}/{}'.format(git_path, change)))
            # Find modified files that are committed but not yet merged.
            find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
            commit_modified_files = subprocess.check_output(
                find_modified_files, shell=True).decode().splitlines()
            for line in commit_modified_files:
                modified_files.add(os.path.normpath('{}/{}'.format(
                    git_path, line)))
    except (OSError, subprocess.CalledProcessError) as err:
        # Best effort: a git/repo failure yields a partial (possibly empty)
        # result instead of aborting the caller.
        logging.debug('Exception raised: %s', err)
    return modified_files
819
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
    r"""A handy delimiter printer.

    Args:
        char: A string used for delimiter.
        length: An integer for the replication.
        prenl: An integer that insert '\n' before delimiter.
        postnl: An integer that insert '\n' after delimiter.

    Returns:
        A string of delimiter.
    """
    return '{}{}{}'.format('\n' * prenl, char * length, '\n' * postnl)
833
def find_files(path, file_name=constants.TEST_MAPPING):
    """Find all files with given name under the given path.

    Args:
        path: A string of path in source.
        file_name: The file name pattern for finding matched files.

    Returns:
        A list of paths of the files with the matching name under the given
        path.
    """
    return [os.path.join(root, name)
            for root, _, names in os.walk(path)
            for name in fnmatch.filter(names, file_name)]
850
def extract_zip_text(zip_path):
    """Extract the text files content for input zip file.

    Only lines matching the tradefed error/warning log pattern are kept.

    Args:
        zip_path: The file path of zip.

    Returns:
        The string in input zip file.
    """
    collected = ''
    try:
        with zipfile.ZipFile(zip_path) as archive:
            for entry in archive.namelist():
                if os.path.isdir(entry):
                    continue
                # Force change line if multiple text files in zip.
                collected += '\n'
                with archive.open(entry) as member:
                    for raw_line in member:
                        decoded = raw_line.decode()
                        if matched_tf_error_log(decoded):
                            collected += decoded
    except zipfile.BadZipfile as err:
        logging.debug('Exception raised: %s', err)
    return collected
876
def matched_tf_error_log(content):
    """Check if the input content matched tradefed log pattern.
    The format will look like this.
    05-25 17:37:04 W/XXXXXX
    05-25 17:37:04 E/XXXXXX

    Args:
        content: Log string.

    Returns:
        True if the content matches the regular expression for tradefed error or
        warning log.
    """
    # The severity group used to be '(E|W/)', which accepted a bare 'E'
    # without the '/' separator and therefore matched non-log lines that
    # merely start with a timestamp and an 'E'. '(E|W)/' enforces the
    # 'E/' or 'W/' tag format shown in the docstring.
    reg = ('^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
           '(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) (E|W)/')
    return bool(re.search(reg, content))
895
def has_valid_cert():
    """Check whether the certificate is valid.

    Returns: True if the cert is valid.
    """
    if not constants.CERT_STATUS_CMD:
        return False
    try:
        # check_call returns 0 on success; a non-zero exit raises instead.
        exit_code = subprocess.check_call(constants.CERT_STATUS_CMD,
                                          stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
        return not exit_code
    except subprocess.CalledProcessError:
        return False
909
# pylint: disable=too-many-locals
def get_flakes(branch='',
               target='',
               test_name='',
               test_module='',
               test_method=''):
    """Get flake information.

    Copies the flake service binary to a temp path, executes it, and parses
    its '<key>:<value>' output lines into a dictionary. Sends a local-detect
    metrics event describing how the lookup went.

    Args:
        branch: A string of branch name.
        target: A string of target.
        test_name: A string of test suite name.
        test_module: A string of test module.
        test_method: A string of test method.

    Returns:
        A dictionary of flake info. None if no flakes service exists.
    """
    # Fall back to constants-defined defaults for unset query fields.
    if not branch:
        branch = constants.FLAKE_BRANCH
    if not target:
        target = constants.FLAKE_TARGET
    if not test_name:
        test_name = constants.FLAKE_TEST_NAME
    # Currently lock the flake information from test-mapping test
    # which only runs on cuttlefish(x86) devices.
    # TODO: extend supporting other devices
    if test_module:
        test_module = 'x86 {}'.format(test_module)
    flake_service = os.path.join(constants.FLAKE_SERVICE_PATH,
                                 constants.FLAKE_FILE)
    if not os.path.exists(flake_service):
        logging.debug('Get flakes: Flake service path not exist.')
        # Send (3, 0) to present no flakes info because service does not exist.
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_NO_FLAKE,
            result=0)
        return None
    if not has_valid_cert():
        logging.debug('Get flakes: No valid cert.')
        # Send (3, 1) to present no flakes info because no valid cert.
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_NO_FLAKE,
            result=1)
        return None
    flake_info = {}
    start = time.time()
    try:
        # Copy the service binary to a writable tmp dir so it can be marked
        # executable before invocation.
        shutil.copy2(flake_service, constants.FLAKE_TMP_PATH)
        tmp_service = os.path.join(constants.FLAKE_TMP_PATH,
                                   constants.FLAKE_FILE)
        os.chmod(tmp_service, 0o0755)
        cmd = [tmp_service, branch, target, test_name, test_module, test_method]
        logging.debug('Executing: %s', ' '.join(cmd))
        output = subprocess.check_output(cmd).decode()
        # Output lines look like '<key>:<value>'; strip the key prefix and
        # keep only the value.
        percent_template = "{}:".format(constants.FLAKE_PERCENT)
        postsubmit_template = "{}:".format(constants.FLAKE_POSTSUBMIT)
        for line in output.splitlines():
            if line.startswith(percent_template):
                flake_info[constants.FLAKE_PERCENT] = line.replace(
                    percent_template, '')
            if line.startswith(postsubmit_template):
                flake_info[constants.FLAKE_POSTSUBMIT] = line.replace(
                    postsubmit_template, '')
    # pylint: disable=broad-except
    except Exception as e:
        # Best-effort service call: any failure means "no flake info".
        logging.debug('Exception:%s', e)
        return None
    # Send (4, time) to present having flakes info and it spent time.
    duration = round(time.time()-start)
    logging.debug('Took %ss to get flakes info', duration)
    metrics.LocalDetectEvent(
        detect_type=constants.DETECT_TYPE_HAS_FLAKE,
        result=duration)
    return flake_info
985
def read_test_record(path):
    """A Helper to read test record proto.

    Args:
        path: The proto file path.

    Returns:
        The test_record proto instance.
    """
    record = test_record_pb2.TestRecord()
    with open(path, 'rb') as proto_file:
        record.ParseFromString(proto_file.read())
    return record
999
def has_python_module(module_name):
    """Detect if the module can be loaded without importing it in real.

    Args:
        module_name: A string of the tested module name.

    Returns:
        True if found, False otherwise.
    """
    # 'import importlib' at file top does not guarantee the 'util'
    # submodule attribute is populated; import it explicitly so
    # find_spec is always available.
    import importlib.util
    return bool(importlib.util.find_spec(module_name))
1010
def is_valid_json_file(path):
    """Detect if input path exist and content is valid.

    Args:
        path: The json file path.

    Returns:
        True if file exist and content is valid, False otherwise.
    """
    # Path may arrive as bytes (e.g. from os APIs); normalize to str.
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not os.path.isfile(path):
        logging.warning('%s: File not found.', path)
        return False
    try:
        with open(path) as json_file:
            json.load(json_file)
    except json.JSONDecodeError:
        logging.warning('Exception happened while loading %s.', path)
        return False
    return True
1031
def get_manifest_branch():
    """Get the manifest branch via repo info command.

    Returns:
        None if no system environment parameter ANDROID_BUILD_TOP or
        running 'repo info' command error, otherwise the manifest branch
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, None)
    if not build_top:
        return None
    try:
        # Command repo need use default lib "http", add non-default lib
        # might cause repo command execution error.
        splitter = ':'
        env_vars = os.environ.copy()
        # PYTHONPATH may be unset in the environment; default to '' so a
        # missing key does not raise an uncaught KeyError.
        org_python_path = env_vars.get('PYTHONPATH', '').split(splitter)
        default_python_path = [p for p in org_python_path
                               if not p.startswith('/tmp/Soong.python_')]
        env_vars['PYTHONPATH'] = splitter.join(default_python_path)
        output = subprocess.check_output(
            ['repo', 'info', '-o', constants.ASUITE_REPO_PROJECT_NAME],
            env=env_vars,
            cwd=build_top,
            universal_newlines=True)
        branch_re = re.compile(r'Manifest branch:\s*(?P<branch>.*)')
        match = branch_re.match(output)
        if match:
            return match.group('branch')
        logging.warning('Unable to detect branch name through:\n %s', output)
    # OSError covers a missing 'repo' executable (FileNotFoundError),
    # which CalledProcessError alone does not.
    except (subprocess.CalledProcessError, OSError):
        logging.warning('Exception happened while getting branch')
    return None
1064
def get_build_target():
    """Get the build target from system environment TARGET_PRODUCT."""
    return os.environ.get(constants.ANDROID_TARGET_PRODUCT)
1068
def parse_mainline_modules(test):
    """Parse test reference into test and mainline modules.

    Args:
        test: A string of test reference.

    Returns:
        A string of test without mainline modules,
        A string of mainline modules.
    """
    match = constants.TEST_WITH_MAINLINE_MODULES_RE.match(test)
    if match:
        return match.group('test'), match.group('mainline_modules')
    # No mainline-module suffix: hand back the reference untouched.
    return test, ""
1085
def has_wildcard(test_name):
    """ Tell whether the test_name(either a list or string) contains wildcard
    symbols.

    Args:
        test_name: A list or a str.

    Return:
        True if test_name contains wildcard, False otherwise.
    """
    if isinstance(test_name, str):
        return any(symbol in test_name for symbol in _WILDCARD_CHARS)
    if isinstance(test_name, list):
        # Recurse so nested strings inside the list are inspected too.
        return any(has_wildcard(entry) for entry in test_name)
    return False
1103
def is_build_file(path):
    """ If input file is one of an android build file.

    Args:
        path: A string of file path.

    Return:
        True if path is android build file, False otherwise.
    """
    _, extension = os.path.splitext(path)
    return extension in _ANDROID_BUILD_EXT
1114
def quote(input_str):
    """ If the input string -- especially in custom args -- contains shell-aware
    characters, wrap the input string in a pair of single quotes.

    e.g. unit(test|testing|testing) -> 'unit(test|testing|testing)'

    Args:
        input_str: A string from user input.

    Returns: A string with single quotes if regex chars were detected.
    """
    if has_chars(input_str, _REGEX_CHARS):
        return "'{}'".format(input_str)
    return input_str
1129
def has_chars(input_str, chars):
    """ Check if the input string contains one of the designated characters.

    Args:
        input_str: A string from user input.
        chars: An iterable object.

    Returns:
        True if the input string contains one of the special chars.
    """
    return any(candidate in input_str for candidate in chars)
1144
def prompt_with_yn_result(msg, default=True):
    """Prompt message and get yes or no result.

    Args:
        msg: The question you want asking.
        default: boolean to True/Yes or False/No
    Returns:
        default value if get KeyboardInterrupt or ValueError exception.
    """
    # Capitalize the default choice in the prompt hint.
    hint = '[Y/n]: ' if default else '[y/N]: '
    try:
        return strtobool(input(msg+hint))
    except (ValueError, KeyboardInterrupt):
        return default
1159
def get_android_junit_config_filters(test_config):
    """Get the dictionary of a input config for junit config's filters

    Args:
        test_config: The path of the test config.
    Returns:
        A dictionary include all the filters in the input config.
    """
    filter_dict = {}
    config_root = ET.parse(test_config).getroot()
    for option in config_root.findall('.//option'):
        option_name = option.attrib['name'].strip()
        if option_name not in constants.SUPPORTED_FILTERS:
            continue
        # Accumulate every value seen for the same filter name.
        filter_dict.setdefault(option_name, []).append(
            option.attrib['value'].strip())
    return filter_dict
1179
def get_config_parameter(test_config):
    """Get all the parameter values for the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the parameters of the input config.
    """
    parameters = set()
    config_root = ET.parse(test_config).getroot()
    for option in config_root.findall('.//option'):
        if option.attrib['name'].strip() != constants.CONFIG_DESCRIPTOR:
            continue
        if option.attrib['key'].strip() != constants.PARAMETER_KEY:
            continue
        parameters.add(option.attrib['value'].strip())
    return parameters
1199