• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""
16Utility functions for atest.
17"""
18
19
20# pylint: disable=import-outside-toplevel
21# pylint: disable=too-many-lines
22
23from __future__ import print_function
24
25import fnmatch
26import hashlib
27import importlib
28import itertools
29import json
30import logging
31import os
32import pickle
33import platform
34import re
35import shutil
36import subprocess
37import sys
38import sysconfig
39import time
40import zipfile
41
42from multiprocessing import Process
43from pathlib import Path
44
45import xml.etree.ElementTree as ET
46
47from atest_enum import DetectType, FilterType
48
49# This is a workaround of b/144743252, where the http.client failed to loaded
50# because the googleapiclient was found before the built-in libs; enabling
51# embedded launcher(b/135639220) has not been reliable and other issue will
52# raise.
53# The workaround is repositioning the built-in libs before other 3rd libs in
54# PYTHONPATH(sys.path) to eliminate the symptom of failed loading http.client.
55for lib in (sysconfig.get_paths()['stdlib'], sysconfig.get_paths()['purelib']):
56    if lib in sys.path:
57        sys.path.remove(lib)
58    sys.path.insert(0, lib)
59# (b/219847353) Move googleapiclient to the last position of sys.path when
60#  existed.
61for lib in sys.path:
62    if 'googleapiclient' in lib:
63        sys.path.remove(lib)
64        sys.path.append(lib)
65        break
66#pylint: disable=wrong-import-position
67import atest_decorator
68import atest_error
69import constants
70
71# This proto related module will be auto generated in build time.
72# pylint: disable=no-name-in-module
73# pylint: disable=import-error
74try:
75    from tools.asuite.atest.tf_proto import test_record_pb2
76except ImportError as err:
77    pass
78# b/147562331 only occurs when running atest in source code. We don't encourge
79# the users to manually "pip3 install protobuf", therefore when the exception
80# occurs, we don't collect data and the tab completion is for args is silence.
81try:
82    from metrics import metrics
83    from metrics import metrics_base
84    from metrics import metrics_utils
85except ImportError as err:
86    # TODO(b/182854938): remove this ImportError after refactor metrics dir.
87    try:
88        from asuite.metrics import metrics
89        from asuite.metrics import metrics_base
90        from asuite.metrics import metrics_utils
91    except ImportError as err:
92        # This exception occurs only when invoking atest in source code.
93        print("You shouldn't see this message unless you ran 'atest-src'. "
94              "To resolve the issue, please run:\n\t{}\n"
95              "and try again.".format('pip3 install protobuf'))
96        print('Import error: ', err)
97        print('sys.path:\n', '\n'.join(sys.path))
98        sys.exit(constants.IMPORT_FAILURE)
99
100_BASH_RESET_CODE = '\033[0m\n'
101# Arbitrary number to limit stdout for failed runs in _run_limited_output.
102# Reason for its use is that the make command itself has its own carriage
103# return output mechanism that when collected line by line causes the streaming
104# full_output list to be extremely large.
105_FAILED_OUTPUT_LINE_LIMIT = 100
106# Regular expression to match the start of a ninja compile:
107# ex: [ 99% 39710/39711]
108_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
109_BUILD_FAILURE = 'FAILED: '
110CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
111                                              os.getcwd()),
112                               'tools/asuite/atest/test_data',
113                               'test_commands.json')
114BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
115                             encode()).hexdigest()
116_DEFAULT_TERMINAL_WIDTH = 80
117_DEFAULT_TERMINAL_HEIGHT = 25
118_BUILD_CMD = 'build/soong/soong_ui.bash'
119_FIND_MODIFIED_FILES_CMDS = (
120    "cd {};"
121    "local_branch=$(git rev-parse --abbrev-ref HEAD);"
122    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
123    # Get the number of commits from local branch to remote branch.
124    "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
125    "| awk '{{print $1}}');"
126    # Get the list of modified files from HEAD to previous $ahead generation.
127    "git diff HEAD~$ahead --name-only")
128_ANDROID_BUILD_EXT = ('.bp', '.mk')
129
130# Set of special chars for various purposes.
131_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
132_WILDCARD_CHARS = {'?', '*'}
133
134# TODO: (b/180394948) remove this after the universal build script lands.
135# Variables for building mainline modules:
136_VARS_FOR_MAINLINE = {
137    "TARGET_BUILD_DENSITY": "alldpi",
138    "TARGET_BUILD_TYPE": "release",
139    "OVERRIDE_PRODUCT_COMPRESSED_APEX": "false",
140    "UNBUNDLED_BUILD_SDKS_FROM_SOURCE": "true",
141    "ALWAYS_EMBED_NOTICES": "true",
142}
143
144_ROOT_PREPARER = "com.android.tradefed.targetprep.RootTargetPreparer"
145
146_WILDCARD_FILTER_RE = re.compile(r'.*[?|*]$')
147_REGULAR_FILTER_RE = re.compile(r'.*\w$')
148
def get_build_cmd(dump=False):
    """Compose the soong build command using a relative path.

    Args:
        dump: If True, return a command that dumps build variables
              (equivalent to printconfig), e.g.
              build/soong/soong_ui.bash --dumpvar-mode <VAR_NAME>.
              If False (default), return a command that builds targets
              in make mode, e.g.
              build/soong/soong_ui.bash --make-mode <MOD_NAME>.

    Returns:
        A list of strings forming the soong build command.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
    make_cmd = '{}/{}'.format(os.path.relpath(build_top, os.getcwd()),
                              _BUILD_CMD)
    if dump:
        return [make_cmd, '--dumpvar-mode', 'report_config']
    return [make_cmd, '--make-mode']
169
def _capture_fail_section(full_log):
    """Extract the error section from a build log.

    Collection starts at the first line beginning with the build-failure
    marker and stops at the next ninja compile-status line.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        List of strings that are build errors.
    """
    captured = []
    capturing = False
    for line in full_log:
        if capturing:
            if _BUILD_COMPILE_STATUS.match(line):
                break
            captured.append(line)
        elif line.startswith(_BUILD_FAILURE):
            captured.append(line)
            capturing = True
    return captured
189
190
def _capture_limited_output(full_log):
    """Trim the build log down to a displayable error message.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        A single string containing the (possibly trimmed) build errors.
    """
    # Prefer the parsed-out failure section; fall back to the whole log.
    lines = _capture_fail_section(full_log) or full_log
    # Keep only the tail so extremely long logs stay readable.
    if len(lines) >= _FAILED_OUTPUT_LINE_LIMIT:
        lines = lines[-_FAILED_OUTPUT_LINE_LIMIT:]
    return 'Output (may be trimmed):\n%s' % ''.join(lines)
208
209
# TODO: b/187122993 refine subprocess with 'with-statement' in fixit week.
def _run_limited_output(cmd, env_vars=None):
    """Runs a given command and streams the output on a single line in stdout.

    Each output line overwrites the previous one (carriage-return style) so
    long builds do not flood the terminal; the complete output is retained
    in memory for error reporting on failure.

    Args:
        cmd: A list of strings representing the command to run.
        env_vars: Optional arg. Dict of env vars to set during build.

    Raises:
        subprocess.CalledProcessError: When the command exits with a non-0
            exitcode.
    """
    # Send stderr to stdout so we only have to deal with a single pipe.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env_vars)
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    # A blank line used to erase the previously printed line.
    white_space = " " * int(term_width)
    full_output = []
    while proc.poll() is None:
        line = proc.stdout.readline().decode('utf-8')
        # Readline will often return empty strings.
        if not line:
            continue
        full_output.append(line)
        # Trim the line to the width of the terminal.
        # Note: Does not handle terminal resizing, which is probably not worth
        #       checking the width every loop.
        if len(line) >= term_width:
            line = line[:term_width - 1]
        # Clear the last line we outputted.
        sys.stdout.write('\r%s\r' % white_space)
        sys.stdout.write('%s' % line.strip())
        sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the returncode.
    proc.wait()
    if proc.returncode != 0:
        # Prefer the build system's own error log over the captured stream:
        # get error log from "OUT_DIR/error.log".
        error_log_file = os.path.join(get_build_out_dir(), "error.log")
        output = []
        if os.path.isfile(error_log_file):
            if os.stat(error_log_file).st_size > 0:
                with open(error_log_file) as f:
                    # NOTE(review): output becomes a str here; the [] initial
                    # value is only a falsy sentinel for the fallback below.
                    output = f.read()
        if not output:
            output = _capture_limited_output(full_output)
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)
260
261
def get_build_out_dir():
    """Get android build out directory.

    The precedence of the rules is:
    1. OUT_DIR
    2. OUT_DIR_COMMON_BASE
    3. ANDROID_BUILD_TOP/out

    Returns:
        String of the out directory.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, '/')
    out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
    out_dir_common_base = os.environ.get(
        constants.ANDROID_OUT_DIR_COMMON_BASE)
    # If OUT_DIR == /output, the output dir will always be /output
    # regardless of branch names. (Not recommended.)
    if out_dir:
        if os.path.isabs(out_dir):
            return out_dir
        return os.path.join(build_top, out_dir)
    # https://source.android.com/setup/build/initializing#using-a-separate-output-directory
    # If OUT_DIR_COMMON_BASE is /output and the source tree is /src/master1,
    # the output dir will be /output/master1.
    if out_dir_common_base:
        tree_name = os.path.basename(build_top)
        if os.path.isabs(out_dir_common_base):
            return os.path.join(out_dir_common_base, tree_name)
        return os.path.join(build_top, out_dir_common_base, tree_name)
    return os.path.join(build_top, "out")
300
301
def get_mainline_build_cmd(build_targets):
    """Assemble the command for building mainline modules.

    Args:
        build_targets: A set of strings of build targets to make.

    Returns:
        A list of build command.
    """
    print('%s\n%s' % (
        colorize("Building Mainline Modules...", constants.CYAN),
        ', '.join(build_targets)))
    logging.debug('Building Mainline Modules: %s', ' '.join(build_targets))
    # TODO: (b/180394948) use the consolidated build script when it lands.
    config = get_android_config()
    branch = config.get('BUILD_ID')
    arch = config.get('TARGET_ARCH')
    # TARGET_PRODUCT differs between AOSP and internal branches.
    if 'AOSP' in branch:
        product = 'module_{}'.format(arch)
    else:
        product = 'mainline_modules_{}'.format(arch)
    cmd = get_build_cmd()
    cmd.append('TARGET_BUILD_APPS={}'.format(' '.join(build_targets)))
    cmd.append('TARGET_PRODUCT={}'.format(product))
    cmd.append('DIST_DIR={}'.format(
        os.path.join('out', 'dist', 'mainline_modules_{}'.format(arch))))
    # Static dist targets required by mainline module builds.
    cmd.extend(['dist', 'apps_only', 'merge_zips', 'aapt2'])
    return cmd
340
341
def build(build_targets, verbose=False, env_vars=None, mm_build_targets=None):
    """Shell out and invoke run_build_cmd to make build_targets.

    Args:
        build_targets: A set of strings of build targets to make.
        verbose: Optional arg. If True output is streamed to the console.
                 If False, only the last line of the build output is outputted.
        env_vars: Optional arg. Dict of env vars to set during build.
        mm_build_targets: A set of string like build_targets, but will build
                          in unbundled(mainline) module mode.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    if not build_targets:
        logging.debug('No build targets, skipping build.')
        return True
    full_env_vars = os.environ.copy()
    full_env_vars.update(env_vars or {})
    if mm_build_targets:
        # Mainline module builds need extra unbundled-build variables.
        full_env_vars.update(_VARS_FOR_MAINLINE)
        if not os.getenv('TARGET_BUILD_VARIANT'):
            full_env_vars['TARGET_BUILD_VARIANT'] = 'user'
        # Inject APEX_BUILD_FOR_PRE_S_DEVICES=true for all products.
        # TODO: support _bundled(S+) artifacts that link shared libs.
        colorful_print(
            '\nWARNING: Only support building pre-S products for now.',
            constants.YELLOW)
        full_env_vars['APEX_BUILD_FOR_PRE_S_DEVICES'] = 'true'
        if not run_build_cmd(get_mainline_build_cmd(mm_build_targets),
                             verbose, full_env_vars):
            return False
    print('\n%s\n%s' % (
        colorize("Building Dependencies...", constants.CYAN),
        ', '.join(build_targets)))
    logging.debug('Building Dependencies: %s', ' '.join(build_targets))
    return run_build_cmd(get_build_cmd() + list(build_targets),
                         verbose, full_env_vars)
384
def run_build_cmd(cmd, verbose=False, env_vars=None):
    """The main process of building targets.

    Args:
        cmd: A list of soong command.
        verbose: Optional arg. If True output is streamed to the console.
                 If False, only the last line of the build output is outputted.
        env_vars: Optional arg. Dict of env vars to set during build.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    logging.debug('Executing command: %s', cmd)
    try:
        if not verbose:
            # TODO: Save output to a log file.
            _run_limited_output(cmd, env_vars=env_vars)
        else:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
        logging.info('Build successful')
        return True
    except subprocess.CalledProcessError as cpe:
        logging.error('Build failure when running: %s', ' '.join(cmd))
        if cpe.output:
            logging.error(cpe.output)
        return False
412
413
def _can_upload_to_result_server():
    """Return True if we can talk to result server."""
    # TODO: Also check if we have a slow connection to result server.
    if not constants.RESULT_SERVER:
        return False
    try:
        from urllib.request import urlopen
        urlopen(constants.RESULT_SERVER,
                timeout=constants.RESULT_SERVER_TIMEOUT).close()
        return True
    # pylint: disable=broad-except
    except Exception as err:
        logging.debug('Talking to result server raised exception: %s', err)
    return False
427
428
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
    """Return the list of args used to communicate with the result server.

    Args:
        for_test_mapping: True when the run is for Test Mapping; reserved
            for appending additional reporting args. Default is False.

    Returns:
        The result-server args defined in constants.
    """
    # Customize test mapping argument here if needed.
    return constants.RESULT_SERVER_ARGS
439
def sort_and_group(iterable, key):
    """Sort *iterable* by *key*, then group adjacent equal-key items.

    Args:
        iterable: Any iterable of items to group.
        key: Callable extracting the grouping key from each item.

    Returns:
        An itertools.groupby iterator of (key, group) pairs.
    """
    ordered = sorted(iterable, key=key)
    return itertools.groupby(ordered, key=key)
443
444
def is_test_mapping(args):
    """Check if the atest command intends to run tests in test mapping.

    When atest runs tests in test mapping, it must have at most one test
    specified. If a test is specified, it must start with `:`, which means
    the test value is a test group name in a TEST_MAPPING file, e.g.,
    `:postsubmit`.

    If --host-unit-test-only is applied, it's not test mapping.
    If any test mapping option is specified, the atest command must also be
    set to run tests in test mapping files.

    Args:
        args: arg parsed object.

    Returns:
        True if the args indicate atest shall run tests in test mapping,
        False otherwise.
    """
    if args.host_unit_test_only:
        return False
    if args.test_mapping or args.include_subdirs or not args.tests:
        return True
    return len(args.tests) == 1 and args.tests[0][0] == ':'
470
471
@atest_decorator.static_var("cached_has_colors", {})
def _has_colors(stream):
    """Return True if *stream* can interpret ANSI color codes.

    Results are memoized per stream object in the dict supplied by the
    static_var decorator.

    Args:
        stream: The standard file stream.

    Returns:
        True if the file stream can interpret the ANSI color code.
    """
    cache = _has_colors.cached_has_colors
    if stream not in cache:
        # Following from Python cookbook, #475186: auto color only on TTYs.
        # curses.tigetnum() cannot be used for telling supported color
        # numbers because it does not come with the prebuilt py3-cmd.
        cache[stream] = bool(hasattr(stream, "isatty") and stream.isatty())
    return cache[stream]
497
498
def colorize(text, color, highlight=False):
    """Convert to a colorful string with ANSI escape codes.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.

    Returns:
        The text wrapped in ANSI escape codes when stdout supports color,
        otherwise the original text.
    """
    if not _has_colors(sys.stdout):
        return text
    # Background (highlight) colors start at 40, foreground at 30.
    shift = (40 if highlight else 30) + color
    return '\033[1;%dm%s\033[0m' % (shift, text)
523
524
def colorful_print(text, color, highlight=False, auto_wrap=True):
    """Print out the text with color.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.
        auto_wrap: If True, a trailing newline is printed (normal print
                   behavior); otherwise output ends without a newline.
    """
    terminator = "\n" if auto_wrap else ""
    print(colorize(text, color, highlight), end=terminator)
540
541
def get_terminal_size():
    """Get terminal size and return a tuple.

    Returns:
        2 integers: the size of X(columns) and Y(lines/rows).
    """
    # Fall back to 80x25 when the size cannot be determined, e.g. when
    # stdout is not attached to a terminal. The width is later used to
    # decide how many characters to clear when carriage-returning.
    size = shutil.get_terminal_size(
        fallback=(_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT))
    return size.columns, size.lines
554
555
def is_external_run():
    # TODO(b/133905312): remove this function after aidegen calling
    #       metrics_base.get_user_type directly.
    """Check whether this invocation comes from an external user.

    A user is considered internal by passing at least one check:
      - whose git mail domain is from google
      - whose hostname is from google
    Otherwise the user is external.

    Returns:
        True if this is an external run, False otherwise.
    """
    user_type = metrics_base.get_user_type()
    return user_type == metrics_base.EXTERNAL_USER
570
571
def print_data_collection_notice():
    """Print the data collection notice."""
    # External users get the anonymous wording and the external agreement URL.
    if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
        anonymous, user_type = ' anonymous', 'EXTERNAL'
    else:
        anonymous, user_type = '', 'INTERNAL'
    notice = ('  We collect%s usage statistics in accordance with our Content '
              'Licenses (%s), Contributor License Agreement (%s), Privacy '
              'Policy (%s) and Terms of Service (%s).'
             ) % (anonymous,
                  constants.CONTENT_LICENSES_URL,
                  constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
                  constants.PRIVACY_POLICY_URL,
                  constants.TERMS_SERVICE_URL)
    print(delimiter('=', 18, prenl=1))
    colorful_print("Notice:", constants.RED)
    colorful_print("%s" % notice, constants.GREEN)
    print(delimiter('=', 18, postnl=1))
592
593
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
                           result_path=constants.VERIFY_DATA_PATH):
    """Handle the runner command of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        test_cmds: A list of strings for running input tests.
        do_verification: A boolean to indicate the action of this method.
                         True: Do verification without updating result map and
                               raise DryRunVerificationError if verifying fails.
                         False: Update result map, if the former command is
                                different with current command, it will confirm
                                with user if they want to update or not.
        result_path: The file path for saving result.
    """
    full_result_content = {}
    # Load the previously saved test->commands mapping, if any.
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    former_test_cmds = full_result_content.get(input_test, [])
    # Normalize both sides so non-critical args (log paths, etc.) do not
    # produce false mismatches.
    test_cmds = _normalize(test_cmds)
    former_test_cmds = _normalize(former_test_cmds)
    if not _are_identical_cmds(test_cmds, former_test_cmds):
        if do_verification:
            raise atest_error.DryRunVerificationError(
                'Dry run verification failed, former commands: {}'.format(
                    former_test_cmds))
        if former_test_cmds:
            # If former_test_cmds is different from test_cmds, ask users if they
            # are willing to update the result.
            print('Former cmds = %s' % former_test_cmds)
            print('Current cmds = %s' % test_cmds)
            if not prompt_with_yn_result('Do you want to update former result '
                                         'to the latest one?', True):
                print('SKIP updating result!!!')
                return
    else:
        # If current commands are the same as the formers, no need to update
        # result.
        return
    # Persist the normalized current commands for this test.
    full_result_content[input_test] = test_cmds
    with open(result_path, 'w') as outfile:
        json.dump(full_result_content, outfile, indent=0)
        print('Save result mapping to %s' % result_path)
638
def _normalize(cmd_list):
    """Normalize commands for comparison.

    '--atest-log-file-path' is not considered a critical argument, therefore
    it is removed before the comparison. Also, atest can be run from any
    place, so relative paths, LD_LIBRARY_PATH, --proto-output-file and
    --log-root-path are disregarded as well, and the build command is
    rewritten to a fixed './'-relative form.

    Bug fix: the previous implementation removed elements from the list
    while iterating over it, which silently skipped the element following
    each removed one (e.g. two consecutive removable args left the second
    one in place). This version filters into a new list instead.

    Args:
        cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']

    Returns:
        A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
    """
    ignored_prefixes = ('--atest-log-file-path', 'LD_LIBRARY_PATH=',
                        '--proto-output-file=', '--log-root-path')
    normalized = []
    # Build-command tokens are appended at the end, mirroring the original
    # remove-then-append behavior.
    build_cmd_tail = []
    for token in ' '.join(cmd_list).split():
        if token.startswith(ignored_prefixes):
            continue
        if _BUILD_CMD in token:
            build_cmd_tail.append(os.path.join('./', _BUILD_CMD))
            continue
        normalized.append(token)
    return normalized + build_cmd_tail
670
671def _are_identical_cmds(current_cmds, former_cmds):
672    """Tell two commands are identical.
673
674    Args:
675        current_cmds: A list of strings for running input tests.
676        former_cmds: A list of strings recorded from the previous run.
677
678    Returns:
679        True if both commands are identical, False otherwise.
680    """
681    # Always sort cmd list to make it comparable.
682    current_cmds.sort()
683    former_cmds.sort()
684    return current_cmds == former_cmds
685
686def _get_hashed_file_name(main_file_name):
687    """Convert the input string to a md5-hashed string. If file_extension is
688       given, returns $(hashed_string).$(file_extension), otherwise
689       $(hashed_string).cache.
690
691    Args:
692        main_file_name: The input string need to be hashed.
693
694    Returns:
695        A string as hashed file name with .cache file extension.
696    """
697    hashed_fn = hashlib.md5(str(main_file_name).encode())
698    hashed_name = hashed_fn.hexdigest()
699    return hashed_name + '.cache'
700
def md5sum(filename):
    """Generate the MD5 checksum of a file.

    Args:
        filename: A string or Path of a filename.

    Returns:
        A string of the hex MD5 checksum, or "" when filename is not a file.
    """
    filename = Path(filename)
    if not filename.is_file():
        return ""
    md5 = hashlib.md5()
    with open(filename, 'rb') as target:
        # Hash in chunks so huge files need not fit in memory. (The old
        # whole-file read plus the isinstance(content, bytes) re-encode
        # branch was dead code: binary-mode reads always return bytes.)
        for chunk in iter(lambda: target.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest()
718
def check_md5(check_file, missing_ok=False):
    """Method equivalent to 'md5sum --check /file/to/check'.

    Args:
        check_file: A string of filename that stores filename and its
                   md5 checksum.
        missing_ok: A boolean that considers OK even when the check_file does
                    not exist. Using missing_ok=True allows ignoring md5 check
                    especially for initial run that the check_file has not yet
                    generated. Using missing_ok=False ensures the consistency of
                    files, and guarantees the process is successfully completed.

    Returns:
        When missing_ok is True (soft check):
          - True if the checksum is consistent with the actual MD5, even the
            check_file is missing or not a valid JSON.
          - False when the checksum is inconsistent with the actual MD5.
        When missing_ok is False (ensure the process completed properly):
          - True if the checksum is consistent with the actual MD5.
          - False otherwise.
    """
    # A missing or malformed check_file is only acceptable in soft-check
    # mode; in both cases the return value is missing_ok itself.
    if not os.path.isfile(check_file):
        if not missing_ok:
            logging.debug(
                'Unable to verify: %s not found.', check_file)
        return missing_ok
    if not is_valid_json_file(check_file):
        logging.debug(
            'Unable to verify: %s invalid JSON format.', check_file)
        return missing_ok
    # NOTE(review): opened with 'r+' although only read here — confirm
    # whether write access is actually needed.
    with open(check_file, 'r+') as _file:
        content = json.load(_file)
        # Fail fast on the first file whose current MD5 differs from the
        # recorded one.
        for filename, md5 in content.items():
            if md5sum(filename) != md5:
                logging.debug('%s has altered.', filename)
                return False
    return True
756
def save_md5(filenames, save_file):
    """Method equivalent to 'md5sum file1 file2 > /file/to/check'.

    Non-file entries are skipped with a warning instead of being recorded.

    Args:
        filenames: A list of filenames.
        save_file: Filename for storing files and their md5 checksums.
    """
    data = {}
    for f in filenames:
        name = Path(f)
        if not name.is_file():
            logging.warning(' ignore %s: not a file.', name)
            # Bug fix: previously the missing file was still recorded with
            # an empty checksum despite the warning; skip it instead.
            continue
        data[str(name)] = md5sum(name)
    with open(save_file, 'w+') as _file:
        json.dump(data, _file)
772
def get_cache_root():
    """Get the root path dir for cache.

    Branch and target information form the cache root; the path looks like
    ~/.atest/info_cache/$hash(branch+target).

    Returns:
        A string of the path of the root dir of cache.
    """
    # Fall back to the build-top env value when no manifest branch exists.
    branch = get_manifest_branch() or os.environ.get(
        constants.ANDROID_BUILD_TOP, constants.ANDROID_BUILD_TOP)
    target = os.path.basename(
        os.environ.get(constants.ANDROID_PRODUCT_OUT,
                       constants.ANDROID_PRODUCT_OUT))
    digest = hashlib.md5(
        (constants.MODE + branch + target).encode()).hexdigest()
    return os.path.join(get_misc_dir(), '.atest', 'info_cache', digest[:8])
794
def get_test_info_cache_path(test_reference, cache_root=None):
    """Get the cache path of the desired test_infos.

    Args:
        test_reference: A string of the test.
        cache_root: Folder path where caches are stored; defaults to
            get_cache_root() when not given.

    Returns:
        A string of the path of test_info cache.
    """
    root = cache_root if cache_root else get_cache_root()
    return os.path.join(root, _get_hashed_file_name(test_reference))
808
def update_test_info_cache(test_reference, test_infos,
                           cache_root=None):
    """Update cache content which stores a set of test_info objects through
       pickle module, each test_reference will be saved as a cache file.

    Args:
        test_reference: A string referencing a test.
        test_infos: A set of TestInfos.
        cache_root: Folder path for saving caches.
    """
    cache_root = cache_root or get_cache_root()
    if not os.path.isdir(cache_root):
        os.makedirs(cache_root)
    cache_path = get_test_info_cache_path(test_reference, cache_root)
    # Serialize the test_infos into a per-reference cache file.
    try:
        with open(cache_path, 'wb') as test_info_cache_file:
            logging.debug('Saving cache %s.', cache_path)
            pickle.dump(test_infos, test_info_cache_file, protocol=2)
    except (pickle.PicklingError, TypeError, IOError) as err:
        # Cache write failures are non-fatal: log the error and report it
        # through metrics only.
        logging.debug('Exception raised: %s', err)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
835
836
def load_test_info_cache(test_reference, cache_root=None):
    """Load cache by test_reference to a set of test_infos object.

    Args:
        test_reference: A string referencing a test.
        cache_root: Folder path for finding caches.

    Returns:
        A list of TestInfo namedtuple if cache found, else None.
    """
    cache_file = get_test_info_cache_path(
        test_reference, cache_root or get_cache_root())
    if not os.path.isfile(cache_file):
        return None
    logging.debug('Loading cache %s.', cache_file)
    try:
        with open(cache_file, 'rb') as config_dictionary_file:
            return pickle.load(config_dictionary_file, encoding='utf-8')
    except (pickle.UnpicklingError,
            ValueError,
            TypeError,
            EOFError,
            IOError) as err:
        # A stale or corrupted cache is not fatal: remove it, log the
        # error, and report the failure through metrics.
        logging.debug('Exception raised: %s', err)
        os.remove(cache_file)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
    return None
867
def clean_test_info_caches(tests, cache_root=None):
    """Clean caches of input tests.

    Args:
        tests: A list of test references.
        cache_root: Folder path for finding caches.
    """
    root = cache_root or get_cache_root()
    for test in tests:
        cache_file = get_test_info_cache_path(test, root)
        if not os.path.isfile(cache_file):
            continue
        logging.debug('Removing cache: %s', cache_file)
        try:
            os.remove(cache_file)
        except IOError as err:
            # Removal failure is non-fatal; log and report via metrics.
            logging.debug('Exception raised: %s', err)
            metrics_utils.handle_exc_and_send_exit_event(
                constants.ACCESS_CACHE_FAILURE)
887
def get_modified_files(root_dir):
    """Get the git modified files. The git path here is git top level of
    the root_dir. It's inevitable to utilise different commands to fulfill
    2 scenario:
        1. locate unstaged/staged files
        2. locate committed files but not yet merged.
    the 'git_status_cmd' fulfils the former while the 'find_modified_files'
    fulfils the latter.

    Args:
        root_dir: the root where it starts finding.

    Returns:
        A set of modified files altered since last commit.
    """
    modified_files = set()
    try:
        # Resolve the git top-level dir(s) containing root_dir.
        find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
        git_paths = subprocess.check_output(
            find_git_cmd, shell=True).decode().splitlines()
        for git_path in git_paths:
            # Find modified files from git working tree status.
            # awk '{print $NF}' keeps only the path column of the
            # short-format status output.
            git_status_cmd = ("repo forall {} -c git status --short | "
                              "awk '{{print $NF}}'").format(git_path)
            modified_wo_commit = subprocess.check_output(
                git_status_cmd, shell=True).decode().rstrip().splitlines()
            for change in modified_wo_commit:
                modified_files.add(
                    os.path.normpath('{}/{}'.format(git_path, change)))
            # Find modified files that are committed but not yet merged.
            # _FIND_MODIFIED_FILES_CMDS is a module-level command template.
            find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
            commit_modified_files = subprocess.check_output(
                find_modified_files, shell=True).decode().splitlines()
            for line in commit_modified_files:
                modified_files.add(os.path.normpath('{}/{}'.format(
                    git_path, line)))
    except (OSError, subprocess.CalledProcessError) as err:
        # Best-effort: any git/repo failure returns whatever was collected.
        logging.debug('Exception raised: %s', err)
    return modified_files
927
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
    r"""A handy delimiter printer.

    Args:
        char: A string used for delimiter.
        length: An integer for the replication.
        prenl: An integer that insert '\n' before delimiter.
        postnl: An integer that insert '\n' after delimiter.

    Returns:
        A string of delimiter.
    """
    return '{}{}{}'.format('\n' * prenl, char * length, '\n' * postnl)
941
def find_files(path, file_name=constants.TEST_MAPPING):
    """Find all files with given name under the given path.

    Args:
        path: A string of path in source.
        file_name: The file name pattern for finding matched files.

    Returns:
        A list of paths of the files with the matching name under the given
        path.
    """
    matches = []
    # Walk the tree and collect every entry whose name matches the
    # (glob-style) file_name pattern.
    for root, _, filenames in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in fnmatch.filter(filenames, file_name))
    return matches
958
def extract_zip_text(zip_path):
    """Extract the text files content for input zip file.

    Args:
        zip_path: The file path of zip.

    Returns:
        The string in input zip file.
    """
    content = ''
    try:
        with zipfile.ZipFile(zip_path) as zip_file:
            for entry in zip_file.infolist():
                # Bug fix: skip directory entries of the archive itself.
                # The previous os.path.isdir(filename) checked the *local*
                # filesystem, not the zip member.
                if entry.is_dir():
                    continue
                # Force change line if multiple text files in zip
                content = content + '\n'
                # read the file, keeping only tradefed error/warning lines
                with zip_file.open(entry) as extract_file:
                    for line in extract_file:
                        text = line.decode()
                        if matched_tf_error_log(text):
                            content = content + text
    except zipfile.BadZipfile as err:
        logging.debug('Exception raised: %s', err)
    return content
984
def matched_tf_error_log(content):
    """Check if the input content matched tradefed log pattern.
    The format will look like this.
    05-25 17:37:04 W/XXXXXX
    05-25 17:37:04 E/XXXXXX

    Args:
        content: Log string.

    Returns:
        True if the content matches the regular expression for tradefed error or
        warning log.
    """
    # MM-DD hh:mm:ss followed by the E/ (error) or W/ (warning) tag.
    # Bug fix: the previous alternation (E|W/) also matched a bare 'E'
    # without the slash; [EW]/ requires the slash for both levels, matching
    # the documented format above.
    reg = ('^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
           '(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) [EW]/')
    return bool(re.search(reg, content))
1003
def has_valid_cert():
    """Check whether the certificate is valid.

    Returns: True if the cert is valid.
    """
    cert_cmd = constants.CERT_STATUS_CMD
    if not cert_cmd:
        return False
    try:
        # check_call returns 0 on success; invert it into a boolean.
        return not subprocess.check_call(cert_cmd,
                                         stdout=subprocess.DEVNULL,
                                         stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        return False
1017
# pylint: disable=too-many-locals
def get_flakes(branch='',
               target='',
               test_name='',
               test_module='',
               test_method=''):
    """Get flake information.

    Args:
        branch: A string of branch name.
        target: A string of target.
        test_name: A string of test suite name.
        test_module: A string of test module.
        test_method: A string of test method.

    Returns:
        A dictionary of flake info. None if no flakes service exists.
    """
    # Fall back to default branch/target/suite when not specified.
    if not branch:
        branch = constants.FLAKE_BRANCH
    if not target:
        target = constants.FLAKE_TARGET
    if not test_name:
        test_name = constants.FLAKE_TEST_NAME
    # Currently lock the flake information from test-mapping test
    # which only runs on cuttlefish(x86) devices.
    # TODO: extend supporting other devices
    if test_module:
        test_module = 'x86 {}'.format(test_module)
    flake_service = os.path.join(constants.FLAKE_SERVICE_PATH,
                                 constants.FLAKE_FILE)
    if not os.path.exists(flake_service):
        logging.debug('Get flakes: Flake service path not exist.')
        # Send (3, 0) to present no flakes info because service does not exist.
        metrics.LocalDetectEvent(
            detect_type=DetectType.NO_FLAKE, result=0)
        return None
    if not has_valid_cert():
        logging.debug('Get flakes: No valid cert.')
        # Send (3, 1) to present no flakes info because no valid cert.
        metrics.LocalDetectEvent(
            detect_type=DetectType.NO_FLAKE, result=1)
        return None
    flake_info = {}
    start = time.time()
    try:
        # Run the service binary from a temp copy (with exec permission)
        # and parse its line-based "key: value" output.
        shutil.copy2(flake_service, constants.FLAKE_TMP_PATH)
        tmp_service = os.path.join(constants.FLAKE_TMP_PATH,
                                   constants.FLAKE_FILE)
        os.chmod(tmp_service, 0o0755)
        cmd = [tmp_service, branch, target, test_name, test_module, test_method]
        logging.debug('Executing: %s', ' '.join(cmd))
        output = subprocess.check_output(cmd).decode()
        percent_template = "{}:".format(constants.FLAKE_PERCENT)
        postsubmit_template = "{}:".format(constants.FLAKE_POSTSUBMIT)
        for line in output.splitlines():
            if line.startswith(percent_template):
                flake_info[constants.FLAKE_PERCENT] = line.replace(
                    percent_template, '')
            if line.startswith(postsubmit_template):
                flake_info[constants.FLAKE_POSTSUBMIT] = line.replace(
                    postsubmit_template, '')
    # pylint: disable=broad-except
    except Exception as e:
        # Deliberately best-effort: any failure means "no flake info".
        logging.debug('Exception:%s', e)
        return None
    # Send (4, time) to present having flakes info and it spent time.
    duration = round(time.time()-start)
    logging.debug('Took %ss to get flakes info', duration)
    metrics.LocalDetectEvent(
        detect_type=DetectType.HAS_FLAKE,
        result=duration)
    return flake_info
1091
def read_test_record(path):
    """A Helper to read test record proto.

    Args:
        path: The proto file path.

    Returns:
        The test_record proto instance.
    """
    msg = test_record_pb2.TestRecord()
    with open(path, 'rb') as proto_file:
        msg.ParseFromString(proto_file.read())
    return msg
1105
def has_python_module(module_name):
    """Detect if the module can be loaded without importing it in real.

    Args:
        module_name: A string of the tested module name.

    Returns:
        True if found, False otherwise.
    """
    # Ensure the 'util' submodule is loaded: a bare 'import importlib' does
    # not guarantee importlib.util is available as an attribute.
    import importlib.util
    return bool(importlib.util.find_spec(module_name))
1116
def is_valid_json_file(path):
    """Detect if input path exist and content is valid.

    Args:
        path: The json file path.

    Returns:
        True if file exist and content is valid, False otherwise.
    """
    # Accept both str and bytes paths.
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not os.path.isfile(path):
        logging.debug('%s: File not found.', path)
        return False
    try:
        with open(path) as json_file:
            json.load(json_file)
        return True
    except json.JSONDecodeError:
        logging.debug('Exception happened while loading %s.', path)
    return False
1137
def get_manifest_branch():
    """Get the manifest branch via repo info command.

    Returns:
        None if no system environment parameter ANDROID_BUILD_TOP or
        running 'repo info' command error, otherwise the manifest branch
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, None)
    if not build_top:
        return None
    splitter = ':'
    env_vars = os.environ.copy()
    # Bug fix: PYTHONPATH may be unset in the environment; use .get() to
    # avoid a KeyError in that case.
    orig_pythonpath = env_vars.get('PYTHONPATH', '').split(splitter)
    # Command repo imports stdlib "http.client", so adding non-default lib
    # e.g. googleapiclient, may cause repo command execution error.
    # The temporary dir is not presumably always /tmp, especially in MacOS.
    # b/169936306, b/190647636 are the cases we should never ignore.
    soong_path_re = re.compile(r'.*/Soong.python_.*/')
    default_python_path = [p for p in orig_pythonpath
                            if not soong_path_re.match(p)]
    env_vars['PYTHONPATH'] = splitter.join(default_python_path)
    proc = subprocess.Popen(f'repo info '
                            f'-o {constants.ASUITE_REPO_PROJECT_NAME}',
                            shell=True,
                            env=env_vars,
                            cwd=build_top,
                            universal_newlines=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        cmd_out, err_out = proc.communicate()
        branch_re = re.compile(r'Manifest branch:\s*(?P<branch>.*)')
        match = branch_re.match(cmd_out)
        if match:
            return match.group('branch')
        logging.warning('Unable to detect branch name through:\n %s, %s',
                        cmd_out, err_out)
    except subprocess.TimeoutExpired:
        # NOTE(review): communicate() only raises TimeoutExpired when a
        # timeout argument is passed; this handler is currently inert —
        # consider adding a timeout, or confirm this is intentional.
        logging.warning('Exception happened while getting branch')
        proc.kill()
    return None
1179
def get_build_target():
    """Get the build target form system environment TARGET_PRODUCT."""
    product = os.getenv(constants.ANDROID_TARGET_PRODUCT, None)
    variant = os.getenv(constants.TARGET_BUILD_VARIANT, None)
    # e.g. 'aosp_cf_x86_64_phone-userdebug'
    return '%s-%s' % (product, variant)
1186
def parse_mainline_modules(test):
    """Parse test reference into test and mainline modules.

    Args:
        test: An String of test reference.

    Returns:
        A string of test without mainline modules,
        A string of mainline modules.
    """
    match = constants.TEST_WITH_MAINLINE_MODULES_RE.match(test)
    # Not a mainline-style reference: return the input untouched.
    if not match:
        return test, ""
    return match.group('test'), match.group('mainline_modules')
1203
def has_wildcard(test_name):
    """ Tell whether the test_name(either a list or string) contains wildcard
    symbols.

    Args:
        test_name: A list or a str.

    Return:
        True if test_name contains wildcard, False otherwise.
    """
    if isinstance(test_name, str):
        return any(wildcard in test_name for wildcard in _WILDCARD_CHARS)
    if isinstance(test_name, list):
        # Recurse over list elements; True as soon as one matches.
        return any(has_wildcard(name) for name in test_name)
    return False
1221
def is_build_file(path):
    """ If input file is one of an android build file.

    Args:
        path: A string of file path.

    Return:
        True if path is android build file, False otherwise.
    """
    _, extension = os.path.splitext(path)
    return extension in _ANDROID_BUILD_EXT
1232
def quote(input_str):
    """ Wrap the input string in single quotes when it contains shell-aware
    (regex) characters, so it survives shell interpretation.

    e.g. unit(test|testing|testing) -> 'unit(test|testing|testing)'

    Args:
        input_str: A string from user input.

    Returns: A string with single quotes if regex chars were detected.
    """
    if not has_chars(input_str, _REGEX_CHARS):
        return input_str
    return "'{}'".format(input_str)
1247
def has_chars(input_str, chars):
    """ Check if the input string contains one of the designated characters.

    Args:
        input_str: A string from user input.
        chars: An iterable object.

    Returns:
        True if the input string contains one of the special chars.
    """
    return any(char in input_str for char in chars)
1262
def prompt_with_yn_result(msg, default=True):
    """Prompt message and get yes or no result.

    Args:
        msg: The question you want asking.
        default: boolean to True/Yes or False/No
    Returns:
        default value if get KeyboardInterrupt or ValueError exception.
    """
    # Capitalize the default choice in the prompt suffix.
    suffix = '[Y/n]: ' if default else '[y/N]: '
    try:
        answer = input(msg + suffix)
        return strtobool(answer)
    except (ValueError, KeyboardInterrupt):
        return default
1277
def strtobool(val):
    """Convert a string representation of truth to True or False.

    Args:
        val: a string of input value.

    Returns:
        True when values are 'y', 'yes', 't', 'true', 'on', and '1';
        False when 'n', 'no', 'f', 'false', 'off', and '0'.
        Raises ValueError if 'val' is anything else.
    """
    truthy = ('y', 'yes', 't', 'true', 'on', '1')
    falsy = ('n', 'no', 'f', 'false', 'off', '0')
    lowered = val.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise ValueError("invalid truth value %r" % (val,))
1294
def get_android_junit_config_filters(test_config):
    """Get the dictionary of a input config for junit config's filters

    Args:
        test_config: The path of the test config.
    Returns:
        A dictionary include all the filters in the input config.
    """
    filter_dict = {}
    for tag in ET.parse(test_config).getroot().findall('.//option'):
        name = tag.attrib['name'].strip()
        if name not in constants.SUPPORTED_FILTERS:
            continue
        # Accumulate every value declared for the same filter name.
        filter_dict.setdefault(name, []).append(tag.attrib['value'].strip())
    return filter_dict
1314
def get_config_parameter(test_config):
    """Get all the parameter values for the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the parameters of the input config.
    """
    parameters = set()
    for tag in ET.parse(test_config).getroot().findall('.//option'):
        if tag.attrib['name'].strip() != constants.CONFIG_DESCRIPTOR:
            continue
        # Only config-descriptor options carry a 'key' attribute.
        if tag.attrib['key'].strip() == constants.PARAMETER_KEY:
            parameters.add(tag.attrib['value'].strip())
    return parameters
1334
def get_config_device(test_config):
    """Get all the device names from the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the device name of the input config.
    """
    xml_root = ET.parse(test_config).getroot()
    # Collect the stripped 'name' attribute of every <device> element.
    return {tag.attrib['name'].strip()
            for tag in xml_root.findall('.//device')}
1350
def get_mainline_param(test_config):
    """Get all the mainline-param values for the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the parameters of the input config.
    """
    mainline_param = set()
    for tag in ET.parse(test_config).getroot().findall('.//option'):
        if tag.attrib['name'].strip() != constants.CONFIG_DESCRIPTOR:
            continue
        # Only config-descriptor options carry a 'key' attribute.
        if tag.attrib['key'].strip() == constants.MAINLINE_PARAM_KEY:
            mainline_param.add(tag.attrib['value'].strip())
    return mainline_param
1370
def get_adb_devices():
    """Run `adb devices` and return a list of devices.

    Returns:
        A list of devices. e.g.
        ['127.0.0.1:40623', '127.0.0.1:40625']
    """
    # egrep strips the header and blank lines; '||true' keeps exit code 0.
    probe_cmd = "adb devices | egrep -v \"^List|^$\"||true"
    raw = subprocess.check_output(probe_cmd, shell=True).decode()
    # Keep only the serial column (first tab-separated field).
    return [line.split('\t')[0] for line in raw.splitlines()]
1381
def get_android_config():
    """Get Android config as "printconfig" shows.

    Returns:
        A dict of Android configurations.
    """
    dump_cmd = get_build_cmd(dump=True)
    raw_config = subprocess.check_output(dump_cmd).decode('utf-8')
    android_config = {}
    for line in raw_config.splitlines():
        # Skip separator lines that start with '='.
        if line.startswith('='):
            continue
        key, value = line.split('=', 1)
        # First occurrence of a key wins.
        android_config.setdefault(key, value)
    return android_config
1396
1397def get_config_gtest_args(test_config):
1398    """Get gtest's module-name and device-path option from the input config
1399
1400    Args:
1401        test_config: The path of the test config.
1402    Returns:
1403        A string of gtest's module name.
1404        A string of gtest's device path.
1405    """
1406    module_name = ''
1407    device_path = ''
1408    xml_root = ET.parse(test_config).getroot()
1409    option_tags = xml_root.findall('.//option')
1410    for tag in option_tags:
1411        name = tag.attrib['name'].strip()
1412        value = tag.attrib['value'].strip()
1413        if name == 'native-test-device-path':
1414            device_path = value
1415        elif name == 'module-name':
1416            module_name = value
1417    return module_name, device_path
1418
def get_arch_name(module_name, is_64=False):
    """Get the arch folder name for the input module.

    Scan the test case folders to get the matched arch folder name.

    Args:
        module_name: The module_name of test
        is_64: If need 64 bit arch name, False otherwise.
    Returns:
        A string of the arch name, or '' when none is found.
    """
    arch_list = ['arm64', 'x86_64'] if is_64 else ['arm', 'x86']
    test_case_root = os.path.join(
        os.environ.get(constants.ANDROID_TARGET_OUT_TESTCASES, ''),
        module_name
    )
    if not os.path.isdir(test_case_root):
        logging.debug('%s does not exist.', test_case_root)
        return ''
    # The first directory entry matching a known arch name wins.
    for entry in os.listdir(test_case_root):
        if entry in arch_list:
            return entry
    return ''
1446
def copy_single_arch_native_symbols(
    symbol_root, module_name, device_path, is_64=False):
    """Copy symbol files for native tests which belong to input arch.

    Args:
        symbol_root: Root dir of the unstripped symbols
            ($PRODUCT_OUT/symbols).
        module_name: The module_name of test
        device_path: The device path define in test config.
        is_64: True if need to copy 64bit symbols, False otherwise.
    """
    native_dir = 'nativetest64' if is_64 else 'nativetest'
    src_symbol = os.path.join(symbol_root, 'data', native_dir, module_name)
    # Destination mirrors the on-device layout (device_path minus the
    # leading '/') plus the detected arch folder.
    dst_symbol = os.path.join(
        symbol_root, device_path[1:], module_name,
        get_arch_name(module_name, is_64))
    if not os.path.isdir(src_symbol):
        return
    # TODO: Use shutil.copytree(src, dst, dirs_exist_ok=True) after
    #  python3.8
    if os.path.isdir(dst_symbol):
        shutil.rmtree(dst_symbol)
    shutil.copytree(src_symbol, dst_symbol)
1469
def copy_native_symbols(module_name, device_path):
    """Copy symbol files for native tests to match with tradefed file structure.

    The original symbols will locate at
    $(PRODUCT_OUT)/symbols/data/nativetest(64)/$(module)/$(stem).
    From TF, the test binary will locate at
    /data/local/tmp/$(module)/$(arch)/$(stem).
    In order to make trace work need to copy the original symbol to
    $(PRODUCT_OUT)/symbols/data/local/tmp/$(module)/$(arch)/$(stem)

    Args:
        module_name: The module_name of test
        device_path: The device path define in test config.
    """
    symbol_root = os.path.join(
        os.environ.get(constants.ANDROID_PRODUCT_OUT, ''),
        'symbols')
    if not os.path.isdir(symbol_root):
        logging.debug('Symbol dir:%s not exist, skip copy symbols.',
                      symbol_root)
        return
    # Copy symbols for each bitness that has a matching arch folder.
    for is_64 in (False, True):
        if get_arch_name(module_name, is_64=is_64):
            copy_single_arch_native_symbols(
                symbol_root, module_name, device_path, is_64=is_64)
1499
def get_config_preparer_options(test_config, class_name):
    """Get all the options of the given target_preparer in the input config.

    Args:
        test_config: The path of the test config.
        class_name: A string of target_preparer
    Returns:
        A dict of option name to value for that target_preparer.
    """
    xpath = './/target_preparer[@class="%s"]/option' % class_name
    # On duplicate option names the last occurrence wins, matching the
    # behavior of assigning in document order.
    return {tag.attrib['name'].strip(): tag.attrib['value'].strip()
            for tag in ET.parse(test_config).getroot().findall(xpath)}
1518
def is_adb_root(args):
    """Check whether device has root permission.

    Args:
        args: An argspace.Namespace class instance holding parsed args.
    Returns:
        True if adb has root permission.
    """
    try:
        # ANDROID_SERIAL wins over the --serial argument.
        serial = os.environ.get(constants.ANDROID_SERIAL, '') or args.serial
        serial_options = ('-s ' + serial) if serial else ''
        output = subprocess.check_output("adb %s shell id" % serial_options,
                                         shell=True,
                                         stderr=subprocess.STDOUT).decode()
        return "uid=0(root)" in output
    except subprocess.CalledProcessError as err:
        logging.debug('Exception raised(): %s, Output: %s', err, err.output)
        raise err
1539
def perm_metrics(config_path, adb_root):
    """Compare adb root permission with RootTargetPreparer in config.

    Args:
        config_path: A string of AndroidTest.xml file path.
        adb_root: A boolean of whether device is root or not.
    """
    # RootTargetPreparer's force-root set in config
    options = get_config_preparer_options(config_path, _ROOT_PREPARER)
    if not options:
        return
    logging.debug('preparer_options: %s', options)
    # force-root defaults to True unless explicitly set to "false".
    preparer_force_root = options.get('force-root', '').upper() != "FALSE"
    logging.debug(' preparer_force_root: %s', preparer_force_root)
    if preparer_force_root and not adb_root:
        logging.debug('DETECT_TYPE_PERMISSION_INCONSISTENT:0')
        metrics.LocalDetectEvent(
            detect_type=DetectType.PERMISSION_INCONSISTENT,
            result=0)
    elif not preparer_force_root and adb_root:
        logging.debug('DETECT_TYPE_PERMISSION_INCONSISTENT:1')
        metrics.LocalDetectEvent(
            detect_type=DetectType.PERMISSION_INCONSISTENT,
            result=1)
1566
def get_verify_key(tests, extra_args):
    """Compose test command key.

    Args:
        tests: A list of input tests.
        extra_args: Dict of extra args to add to test run.
    Returns:
        A composed test commands.
    """
    # test_commands is a concatenated string of sorted test_ref+extra_args.
    # For example, "ITERATIONS=5 hello_world_test"
    # Bug fix: copy the input so the caller's 'tests' list is not mutated
    # by the append/sort below.
    test_commands = list(tests)
    for key, value in extra_args.items():
        if key not in constants.SKIP_VARS:
            test_commands.append('%s=%s' % (key, str(value)))
    test_commands.sort()
    return ' '.join(test_commands)
1584
def handle_test_env_var(input_test, result_path=constants.VERIFY_ENV_PATH,
                        pre_verify=False):
    """Handle the environment variable of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        result_path: The file path for saving result.
        pre_verify: A boolean to separate into pre-verify and actually verify.
    Returns:
        0 is no variable needs to verify, 1 has some variables to next verify.

    Raises:
        atest_error.DryRunVerificationError: when input_test has no recorded
            verify key, or any recorded environment variable mismatches.
    """
    full_result_content = {}
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    demand_env_vars = []
    demand_env_vars = full_result_content.get(input_test)
    # None means the test was never recorded; distinct from an empty list.
    if demand_env_vars is None:
        raise atest_error.DryRunVerificationError(
            '{}: No verify key.'.format(input_test))
    # No mapping variables.
    if demand_env_vars == []:
        return 0
    if pre_verify:
        return 1
    verify_error = []
    for env in demand_env_vars:
        if '=' in env:
            # 'KEY=VALUE' entries must match the current environment exactly.
            key, value = env.split('=', 1)
            env_value = os.environ.get(key, None)
            if env_value is None or env_value != value:
                verify_error.append('Environ verification failed, ({0},{1})!='
                    '({0},{2})'.format(key, value, env_value))
        else:
            # Bare names only need to be present (and non-empty).
            if not os.environ.get(env, None):
                verify_error.append('Missing environ:{}'.format(env))
    if verify_error:
        raise atest_error.DryRunVerificationError('\n'.join(verify_error))
    return 1
1624
def generate_buildfiles_checksum():
    """ Method that generate md5 checksum of Android.{bp,mk} files.

    The checksum of build files are stores in
        $ANDROID_HOST_OUT/indexes/buildfiles.md5
    """
    # Without a locate database there is nothing to index.
    if not os.path.isfile(constants.LOCATE_CACHE):
        return
    locate_cmd = (f'locate -d{constants.LOCATE_CACHE} --existing '
                  r'--regex "/Android.(bp|mk)$"')
    try:
        output = subprocess.check_output(locate_cmd, shell=True).decode('utf-8')
        save_md5(output.split(), constants.BUILDFILES_MD5)
    except subprocess.CalledProcessError:
        logging.error('Failed to generate %s', constants.BUILDFILES_MD5)
1640
def run_multi_proc(func, *args, **kwargs):
    """Start a process with multiprocessing and return Process object.

    Args:
        func: The callable that the child process will run as its target.
        args/kwargs: check doc page:
        https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

    Returns:
        multiprocessing.Process object.
    """
    process = Process(target=func, *args, **kwargs)
    process.start()
    return process
1655
def get_prebuilt_sdk_tools_dir():
    """Get the path for the prebuilt sdk tools root dir.

    Returns: The absolute path of prebuilt sdk tools directory.
    """
    os_name = str(platform.system()).lower()
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, '')
    return Path(build_top, 'prebuilts/sdk/tools/', os_name, 'bin')
1664
1665
def is_writable(path):
    """Check if the given path is writable.

    When path does not exist, the closest existing ancestor directory is
    checked instead.

    Args:
        path: A string of the path to check.

    Returns: True if input path is writable, False otherwise.
    """
    if not os.path.exists(path):
        parent = os.path.dirname(path)
        # Guard against infinite recursion: os.path.dirname('') == '' and
        # os.path.dirname('/') == '/', so stop once the path stops shrinking.
        if parent == path:
            return False
        # A bare relative name has an empty dirname; check cwd instead.
        return is_writable(parent or '.')
    return os.access(path, os.W_OK)
1674
1675
def get_misc_dir():
    """Get the path for the ATest data root dir.

    Returns: The absolute path of the ATest data root dir.
    """
    user_home = os.path.expanduser('~')
    # Fall back to the build output dir when $HOME is not writable.
    if not is_writable(user_home):
        return get_build_out_dir()
    return user_home
1685
def get_full_annotation_class_name(module_info, class_name):
    """ Get fully qualified class name from a class name.

    If the given keyword(class_name) is "smalltest", this method can search
    among source codes and grep the accurate annotation class name:

        android.test.suitebuilder.annotation.SmallTest

    Args:
        module_info: A dict of module_info.
        class_name: A string of class name.

    Returns:
        A string of fully qualified class name, empty string otherwise.
    """
    # NOTE(review): class_name is embedded in the pattern unescaped, so a
    # keyword containing regex metacharacters is treated as a pattern.
    fullname_re = re.compile(
        r'import\s+(?P<fqcn>{})(|;)$'.format(class_name), re.I)
    keyword_re = re.compile(
        r'import\s+(?P<fqcn>.*\.{})(|;)$'.format(class_name), re.I)
    build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
    # Default to an empty list so modules without 'srcs' do not crash.
    for f in module_info.get('srcs', []):
        full_path = build_top.joinpath(f)
        with open(full_path, 'r') as cache:
            # Iterate lazily instead of materializing the whole file.
            for line in cache:
                # Accept full class name.
                match = fullname_re.match(line)
                if match:
                    return match.group('fqcn')
                # Search annotation class from keyword.
                match = keyword_re.match(line)
                if match:
                    return match.group('fqcn')
    return ""
1719
def has_mixed_type_filters(test_infos):
    """ There are different types in a test module.

    Dict test_to_types is mapping module name and the set of types.
    For example,
    {
        'module_1': {'wildcard class_method'},
        'module_2': {'wildcard class_method', 'regular class_method'},
        'module_3': set()
        }

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if more than one filter type in a test module, False otherwise.
    """
    test_to_types = {}
    for test_info in test_infos:
        filters = test_info.data.get(constants.TI_FILTER, [])
        filter_types = set()
        for flt in filters:
            filter_types |= get_filter_types(flt.to_set_of_tf_strings())
        # Merge with types already accumulated for the same module name.
        filter_types |= test_to_types.get(test_info.test_name, set())
        test_to_types[test_info.test_name] = filter_types
    # A module mixes types when it accumulated more than one filter type.
    return any(len(types) > 1 for types in test_to_types.values())
1749
def get_filter_types(tf_filter_set):
    """ Get filter types.

    Args:
        tf_filter_set: A set of tf filter strings.

    Returns:
        A set of FilterType.
    """
    found_types = set()
    for flt in tf_filter_set:
        if _WILDCARD_FILTER_RE.match(flt):
            logging.debug('Filter and type: (%s, %s)',
                          flt, FilterType.WILDCARD_FILTER.value)
            found_types.add(FilterType.WILDCARD_FILTER.value)
        if _REGULAR_FILTER_RE.match(flt):
            logging.debug('Filter and type: (%s, %s)',
                          flt, FilterType.REGULAR_FILTER.value)
            found_types.add(FilterType.REGULAR_FILTER.value)
    return found_types
1770