• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""
16Utility functions for atest.
17"""
18
19
20# pylint: disable=import-outside-toplevel
21# pylint: disable=too-many-lines
22
23from __future__ import print_function
24
25import enum
26import datetime
27import fnmatch
28import hashlib
29import html
30import importlib
31import itertools
32import json
33import logging
34import os
35import pickle
36import platform
37import re
38import shutil
39import subprocess
40import sys
41import time
42import urllib
43import zipfile
44
45from dataclasses import dataclass
46from multiprocessing import Process
47from pathlib import Path
48from typing import Any, Dict, List, Set
49
50import xml.etree.ElementTree as ET
51
52from atest.atest_enum import DetectType, ExitCode, FilterType
53
54#pylint: disable=wrong-import-position
55from atest import atest_decorator
56from atest import atest_error
57from atest import constants
58
59from atest.metrics import metrics
60from atest.metrics import metrics_utils
61from atest.tf_proto import test_record_pb2
62
# ANSI sequence that resets bash color/formatting, plus a newline.
_BASH_RESET_CODE = '\033[0m\n'
# Location of dist artifacts under the build top (mainline modules land here).
DIST_OUT_DIR = Path(os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
                    + '/out/dist/')
# Matches mainline-module artifact extensions.
MAINLINE_MODULES_EXT_RE = re.compile(r'(.apex|.apks|.apk)$')

# Arbitrary number to limit stdout for failed runs in _run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
# Prefix emitted by the build on a failed action; used to locate error text.
_BUILD_FAILURE = 'FAILED: '
# File where dry-run verification command mappings are stored.
CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
                                              os.getcwd()),
                               'tools/asuite/atest/test_data',
                               'test_commands.json')
# md5 of the source tree root path, useful for keying per-tree data.
BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
                             encode()).hexdigest()
# Fallback terminal geometry when the real size cannot be determined.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
# Soong wrapper script, relative to the build top.
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell snippet (formatted with a git dir) listing files committed locally but
# not yet merged; consumed by get_modified_files().
_FIND_MODIFIED_FILES_CMDS = (
    "cd {};"
    "local_branch=$(git rev-parse --abbrev-ref HEAD);"
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    "git diff HEAD~$ahead --name-only")
# File extensions of Android build files.
_ANDROID_BUILD_EXT = ('.bp', '.mk')

# Set of special chars for various purposes.
_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
_WILDCARD_CHARS = {'?', '*'}

# TradeFed target preparer class name that roots the device.
_ROOT_PREPARER = "com.android.tradefed.targetprep.RootTargetPreparer"

# Classifies test filters: ending with a wildcard vs. a regular word char.
_WILDCARD_FILTER_RE = re.compile(r'.*[?|*]$')
_REGULAR_FILTER_RE = re.compile(r'.*\w$')

# Maps known failure messages to user-facing remediation hints.
SUGGESTIONS = {
    # (b/198581508) Do not run "adb sync" for the users.
    'CANNOT LINK EXECUTABLE': 'Please run "adb sync" or reflash the device(s).',
    # (b/177626045) If Atest does not install target application properly.
    'Runner reported an invalid method': 'Please reflash the device(s).'
}

# Environment variables applied to build commands; populated through
# update_build_env() and consumed by build()/_run_build_cmd().
_BUILD_ENV = {}
115
@dataclass
class BuildEnvProfiler:
    """Represents the condition before and after triggering build.

    Produced by _build_env_profiling() before a build and consumed by
    _send_build_condition_metrics() afterwards (see _run_build_cmd).
    """
    # Soong ninja file and its mtime snapshot taken before the build.
    ninja_file: Path
    ninja_file_mtime: float
    # Soong variables file and its md5 snapshot taken before the build.
    variable_file: Path
    variable_file_md5: str
    # Whether the out dir was clean before the build — TODO confirm in
    # _build_env_profiling (not visible here).
    clean_out: bool
    # Whether Android build files' checksums were consistent — TODO confirm
    # in _build_env_profiling (not visible here).
    build_files_integrity: bool
125
126
@enum.unique
class BuildOutputMode(enum.Enum):
    """Represents the different ways to display build output."""
    STREAMED = 'streamed'
    LOGGED = 'logged'

    def __init__(self, arg_name: str):
        # Enum member __init__ receives the member's value; keep it as the
        # human-readable description.
        self._description = arg_name

    def description(self):
        """Return the human-readable description of this output mode."""
        return self._description
139
140
def get_build_cmd(dump=False):
    """Compose the soong build command with a relative path and mode flag.

    Args:
        dump: boolean that determines the option of build/soong/soong_iu.bash.
              True: used to dump build variables, equivalent to printconfig.
                    e.g. build/soong/soong_iu.bash --dumpvar-mode <VAR_NAME>
              False: (default) used to build targets in make mode.
                    e.g. build/soong/soong_iu.bash --make-mode <MOD_NAME>

    Returns:
        A list of soong build command.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
    make_cmd = '{}/{}'.format(os.path.relpath(build_top, os.getcwd()),
                              _BUILD_CMD)
    if dump:
        return [make_cmd, '--dumpvar-mode', 'report_config']
    return [make_cmd, '--make-mode']
161
def _capture_fail_section(full_log):
    """Extract the error section from a full build log.

    Capturing begins at the first line prefixed with _BUILD_FAILURE and ends
    just before the next ninja compile-status line.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        List of strings that are build errors.
    """
    captured = []
    capturing = False
    for line in full_log:
        if capturing:
            if _BUILD_COMPILE_STATUS.match(line):
                break
            captured.append(line)
        elif line.startswith(_BUILD_FAILURE):
            capturing = True
            captured.append(line)
    return captured
181
182
def _capture_limited_output(full_log):
    """Return a trimmed error message extracted from the build log.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        A string of build errors, limited to the last
        _FAILED_OUTPUT_LINE_LIMIT lines.
    """
    # Fall back to the whole log when no explicit failure section is found.
    error_lines = _capture_fail_section(full_log) or full_log
    if len(error_lines) >= _FAILED_OUTPUT_LINE_LIMIT:
        error_lines = error_lines[-_FAILED_OUTPUT_LINE_LIMIT:]
    return 'Output (may be trimmed):\n%s' % ''.join(error_lines)
200
201
# TODO: b/187122993 refine subprocess with 'with-statement' in fixit week.
def _run_limited_output(cmd, env_vars=None):
    """Runs a given command and streams the output on a single line in stdout.

    Args:
        cmd: A list of strings representing the command to run.
        env_vars: Optional arg. Dict of env vars to set during build.

    Raises:
        subprocess.CalledProcessError: When the command exits with a non-0
            exitcode.
    """
    # Send stderr to stdout so we only have to deal with a single pipe.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env_vars)
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    white_space = " " * int(term_width)
    full_output = []
    # Read until EOF rather than until poll() notices exit; the old
    # 'while proc.poll() is None' loop could drop output emitted between the
    # final readline and exit detection.
    for raw_line in iter(proc.stdout.readline, b''):
        line = raw_line.decode('utf-8')
        if not line:
            continue
        full_output.append(line)
        # Trim the line to the width of the terminal.
        # Note: Does not handle terminal resizing, which is probably not worth
        #       checking the width every loop.
        if len(line) >= term_width:
            line = line[:term_width - 1]
        # Clear the last line we outputted.
        sys.stdout.write('\r%s\r' % white_space)
        sys.stdout.write('%s' % line.strip())
        sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the returncode.
    proc.wait()
    if proc.returncode != 0:
        # Prefer the error log from "OUT_DIR/error.log" over streamed output.
        error_log_file = os.path.join(get_build_out_dir(), "error.log")
        output = []
        if os.path.isfile(error_log_file):
            if os.stat(error_log_file).st_size > 0:
                with open(error_log_file, encoding='utf-8') as error_log:
                    output = error_log.read()
        if not output:
            output = _capture_limited_output(full_output)
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)
252
253
def get_build_out_dir() -> str:
    """Get android build out directory.

    The order of the rules are:
    1. OUT_DIR
    2. OUT_DIR_COMMON_BASE
    3. ANDROID_BUILD_TOP/out

    Returns:
        String of the out directory.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, '/')
    out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
    out_dir_common_base = os.environ.get(
        constants.ANDROID_OUT_DIR_COMMON_BASE)
    # If OUT_DIR == /output, the output dir will always be /output
    # regardless of branch names. (Not recommended.)
    if out_dir:
        if os.path.isabs(out_dir):
            return out_dir
        return os.path.join(build_top, out_dir)
    # https://source.android.com/setup/build/initializing#using-a-separate-output-directory
    # If OUT_DIR_COMMON_BASE is /output and the source tree is /src/master1,
    # the output dir will be /output/master1.
    if out_dir_common_base:
        build_top_basename = os.path.basename(build_top)
        if os.path.isabs(out_dir_common_base):
            return os.path.join(out_dir_common_base, build_top_basename)
        return os.path.join(build_top, out_dir_common_base,
                            build_top_basename)
    return os.path.join(build_top, "out")
292
def update_build_env(env: Dict[str, str]):
    """Merge the given mapping into the module-wide build environment."""
    # pylint: disable=global-statement
    global _BUILD_ENV
    for key, value in env.items():
        _BUILD_ENV[key] = value
298
299
def build(build_targets: Set[str]):
    """Shell out and invoke run_build_cmd to make build_targets.

    Args:
        build_targets: A set of strings of build targets to make.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    if not build_targets:
        logging.debug('No build targets, skipping build.')
        return True

    # pylint: disable=global-statement
    global _BUILD_ENV
    # Seed the build environment with the current process environment.
    update_build_env(os.environ.copy())
    print('\n%s\n%s' % (colorize("Building Dependencies...", constants.CYAN),
                        ', '.join(build_targets)))
    logging.debug('Building Dependencies: %s', ' '.join(build_targets))
    return _run_build_cmd(get_build_cmd() + list(build_targets), _BUILD_ENV)
324
325
def _run_build_cmd(cmd: List[str], env_vars: Dict[str, str]):
    """The main process of building targets.

    Args:
        cmd: A list of soong command.
        env_vars: Dict of environment variables used for build.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    logging.debug('Executing command: %s', cmd)
    build_profiler = _build_env_profiling()
    try:
        if env_vars.get('BUILD_OUTPUT_MODE') == BuildOutputMode.STREAMED.value:
            print()
            subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
        else:
            # Note that piping stdout forces Soong to switch to 'dumb terminal
            # mode' which only prints completed actions. This gives users the
            # impression that actions are taking longer than they really are.
            # See b/233044822 for more details.
            log_path = Path(get_build_out_dir()).joinpath('verbose.log.gz')
            # Fix: the previous message concatenation was missing a space and
            # rendered as "simple outputmode".
            print('\n(Build log may not reflect actual status in simple '
                  'output mode; check {} for detail after build finishes.)'
                  .format(colorize(f'{log_path}', constants.CYAN)), end='')
            _run_limited_output(cmd, env_vars=env_vars)
        _send_build_condition_metrics(build_profiler, cmd)
        logging.info('Build successful')
        return True
    except subprocess.CalledProcessError as err:
        logging.error('Build failure when running: %s', ' '.join(cmd))
        if err.output:
            logging.error(err.output)
        return False
361
362
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
    """Return list of args for communication with result server.

    Args:
        for_test_mapping: True if the test run is for Test Mapping to include
            additional reporting args. Default is False. (Currently unused;
            reserved for customization below.)

    Returns:
        The list constants.RESULT_SERVER_ARGS.
    """
    # Customize test mapping argument here if needed.
    return constants.RESULT_SERVER_ARGS
373
def sort_and_group(iterable, key):
    """Sort *iterable* by *key*, then group adjacent items sharing that key.

    Args:
        iterable: Any iterable to be grouped.
        key: A callable extracting the grouping key from each item.

    Returns:
        An itertools.groupby iterator of (key, group-iterator) pairs.
    """
    ordered = sorted(iterable, key=key)
    return itertools.groupby(ordered, key=key)
377
378
def is_test_mapping(args):
    """Check if the atest command intends to run tests in test mapping.

    When atest runs tests in test mapping, it must have at most one test
    specified. If a test is specified, it must start with `:`, which means
    the test value is a test group name in a TEST_MAPPING file, e.g.
    `:postsubmit`.

    If --host-unit-test-only or --smart-testing-local was applied, it doesn't
    intend to be a test_mapping test.
    If any test mapping options is specified, the atest command must also be
    set to run tests in test mapping files.

    Args:
        args: arg parsed object.

    Returns:
        True if the args indicates atest shall run tests in test mapping. False
        otherwise.
    """
    if args.host_unit_test_only or args.smart_testing_local:
        return False
    if args.test_mapping or args.include_subdirs or not args.tests:
        return True
    # ':postsubmit' implicitly indicates running in test-mapping mode.
    # Both operands are computed eagerly, mirroring the original all((...)).
    is_single = len(args.tests) == 1
    starts_with_colon = args.tests[0][0] == ':'
    return is_single and starts_with_colon
405
406
@atest_decorator.static_var("cached_has_colors", {})
def _has_colors(stream):
    """Tell whether the given stream supports ANSI color output.

    Args:
        stream: The standard file stream.

    Returns:
        True if the file stream can interpret the ANSI color code.
    """
    cache = _has_colors.cached_has_colors
    if stream not in cache:
        # Auto color only on TTYs (following Python cookbook #475186).
        # curses.tigetnum() cannot be used for telling supported color
        # numbers because it does not come with the prebuilt py3-cmd.
        cache[stream] = hasattr(stream, "isatty") and stream.isatty()
    return cache[stream]
432
433
def colorize(text, color, bp_color=None):
    """Wrap *text* with ANSI escape codes for colorful terminal output.

    Args:
        text: A string to print.
        color: Forground(Text) color which is an ANSI code shift for colorful
               print. They are defined in constants_default.py.
        bp_color: Backgroud color which is an ANSI code shift for colorful
                   print.

    Returns:
        The ANSI-wrapped string, or the plain text when the stream does not
        support colors.
    """
    if not _has_colors(sys.stdout):
        return text
    # Foreground (text) colors occupy 30-37; backgrounds occupy 40-47.
    foreground = 30 + color
    background = ';%d' % (40 + bp_color) if bp_color else ''
    return '\033[1;%d%sm%s\033[0m' % (foreground, background, text)
464
465
def colorful_print(text, color, bp_color=None, auto_wrap=True):
    """Print the text with color, optionally without a trailing newline.

    Args:
        text: A string to print.
        color: Forground(Text) color which is an ANSI code shift for colorful
               print. They are defined in constants_default.py.
        bp_color: Backgroud color which is an ANSI code shift for colorful
                   print.
        auto_wrap: If True, Text wraps while print.
    """
    line_end = "\n" if auto_wrap else ""
    print(colorize(text, color, bp_color), end=line_end)
482
483
def get_terminal_size():
    """Get terminal size and return a tuple.

    Falls back to 80x25 when the real size cannot be determined, so callers
    always have a sane width to clear when carriage returning.

    Returns:
        2 integers: the size of X(columns) and Y(lines/rows).
    """
    fallback_size = (_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT)
    size = shutil.get_terminal_size(fallback=fallback_size)
    return size.columns, size.lines
496
497
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
                           result_path=constants.VERIFY_DATA_PATH):
    """Handle the runner command of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        test_cmds: A list of strings for running input tests.
        do_verification: A boolean to indicate the action of this method.
                         True: Do verification without updating result map and
                               raise DryRunVerificationError if verifying fails.
                         False: Update result map, if the former command is
                                different with current command, it will confirm
                                with user if they want to update or not.
        result_path: The file path for saving result.
    """
    full_result_content = {}
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    former_test_cmds = full_result_content.get(input_test, [])
    # Normalize both sides so location-dependent args do not cause spurious
    # mismatches (see _normalize).
    test_cmds = _normalize(test_cmds)
    former_test_cmds = _normalize(former_test_cmds)
    if not _are_identical_cmds(test_cmds, former_test_cmds):
        if do_verification:
            raise atest_error.DryRunVerificationError(
                'Dry run verification failed, former commands: {}'.format(
                    former_test_cmds))
        if former_test_cmds:
            # If former_test_cmds is different from test_cmds, ask users if they
            # are willing to update the result.
            print('Former cmds = %s' % former_test_cmds)
            print('Current cmds = %s' % test_cmds)
            if not prompt_with_yn_result('Do you want to update former result '
                                         'to the latest one?', True):
                print('SKIP updating result!!!')
                return
    else:
        # If current commands are the same as the formers, no need to update
        # result.
        return
    # Reaching here means either no former record exists or the user agreed
    # to overwrite it.
    full_result_content[input_test] = test_cmds
    with open(result_path, 'w') as outfile:
        json.dump(full_result_content, outfile, indent=0)
        print('Save result mapping to %s' % result_path)
542
def _normalize(cmd_list):
    """Method that normalize commands. Note that '--atest-log-file-path' is not
    considered a critical argument, therefore, it will be removed during
    the comparison. Also, atest can be ran in any place, so verifying relative
    path, LD_LIBRARY_PATH, and --proto-output-file is regardless as well.

    Args:
        cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']

    Returns:
        A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
    """
    # Prefixes of non-critical arguments that must not affect comparison.
    ignored_prefixes = ('--skip-all-system-status-check',
                        '--atest-log-file-path',
                        'LD_LIBRARY_PATH=',
                        '--proto-output-file=',
                        '--log-root-path')
    normalized = []
    build_cmd_count = 0
    # Build a new list rather than calling remove() on the list being
    # iterated; the old in-place removal skipped the element following each
    # removed one, so back-to-back removable args could survive.
    for token in ' '.join(cmd_list).split():
        if token.startswith(ignored_prefixes):
            continue
        if _BUILD_CMD in token:
            build_cmd_count += 1
            continue
        normalized.append(token)
    # Match the original ordering: normalized build commands are appended.
    normalized.extend([os.path.join('./', _BUILD_CMD)] * build_cmd_count)
    return normalized
577
578def _are_identical_cmds(current_cmds, former_cmds):
579    """Tell two commands are identical.
580
581    Args:
582        current_cmds: A list of strings for running input tests.
583        former_cmds: A list of strings recorded from the previous run.
584
585    Returns:
586        True if both commands are identical, False otherwise.
587    """
588    # Always sort cmd list to make it comparable.
589    current_cmds.sort()
590    former_cmds.sort()
591    return current_cmds == former_cmds
592
593def _get_hashed_file_name(main_file_name):
594    """Convert the input string to a md5-hashed string. If file_extension is
595       given, returns $(hashed_string).$(file_extension), otherwise
596       $(hashed_string).cache.
597
598    Args:
599        main_file_name: The input string need to be hashed.
600
601    Returns:
602        A string as hashed file name with .cache file extension.
603    """
604    hashed_fn = hashlib.md5(str(main_file_name).encode())
605    hashed_name = hashed_fn.hexdigest()
606    return hashed_name + '.cache'
607
def md5sum(filename):
    """Generate MD5 checksum of a file.

    Args:
        filename: A string or Path of a filename.

    Returns:
        A string of hashed MD5 checksum, or an empty string when the path is
        not an existing file.
    """
    file_path = Path(filename)
    if not file_path.is_file():
        return ""
    # read_bytes() always yields bytes, so the old isinstance/encode fallback
    # was dead code and has been removed.
    return hashlib.md5(file_path.read_bytes()).hexdigest()
625
def check_md5(check_file, missing_ok=False):
    """Method equivalent to 'md5sum --check /file/to/check'.

    Args:
        check_file: A string of filename that stores filename and its
                   md5 checksum.
        missing_ok: A boolean that considers OK even when the check_file does
                    not exist. Using missing_ok=True allows ignoring md5 check
                    especially for initial run that the check_file has not yet
                    generated. Using missing_ok=False ensures the consistency of
                    files, and guarantees the process is successfully completed.

    Returns:
        When missing_ok is True (soft check):
          - True if the checksum is consistent with the actual MD5, even the
            check_file is missing or not a valid JSON.
          - False when the checksum is inconsistent with the actual MD5.
        When missing_ok is False (ensure the process completed properly):
          - True if the checksum is consistent with the actual MD5.
          - False otherwise.
    """
    if not Path(check_file).is_file():
        if not missing_ok:
            logging.debug('Unable to verify: %s not found.', check_file)
        return missing_ok
    recorded = load_json_safely(check_file)
    if not recorded:
        return False
    for filename, expected_md5 in recorded.items():
        if md5sum(filename) != expected_md5:
            logging.debug('%s has altered.', filename)
            return False
    return True
660
def save_md5(filenames, save_file):
    """Method equivalent to 'md5sum file1 file2 > /file/to/check'

    Args:
        filenames: A list of filenames.
        save_file: Filename for storing files and their md5 checksums.
    """
    data = {}
    for f in filenames:
        name = Path(f)
        if not name.is_file():
            logging.warning(' ignore %s: not a file.', name)
            # Fix: actually skip non-files; previously the entry was still
            # recorded (with an empty checksum) despite the 'ignore' warning.
            continue
        data.update({str(name): md5sum(name)})
    with open(save_file, 'w+') as _file:
        json.dump(data, _file)
676
def get_cache_root():
    """Get the root path dir for cache.

    Use branch and target information as cache_root.
    The path will look like ~/.atest/info_cache/$hash(branch+target)

    Returns:
        A string of the path of the root dir of cache.
    """
    # Fall back to the build-top path when no manifest branch is available.
    branch = get_manifest_branch() or os.environ.get(
        constants.ANDROID_BUILD_TOP, constants.ANDROID_BUILD_TOP)
    target = os.path.basename(
        os.environ.get(constants.ANDROID_PRODUCT_OUT,
                       constants.ANDROID_PRODUCT_OUT))
    fingerprint = hashlib.md5(
        (constants.MODE + branch + target).encode()).hexdigest()
    return os.path.join(get_misc_dir(), '.atest', 'info_cache',
                        fingerprint[:8])
698
def get_test_info_cache_path(test_reference, cache_root=None):
    """Get the cache path of the desired test_infos.

    Args:
        test_reference: A string of the test.
        cache_root: Folder path where stores caches.

    Returns:
        A string of the path of test_info cache.
    """
    root = cache_root if cache_root else get_cache_root()
    return os.path.join(root, _get_hashed_file_name(test_reference))
712
def update_test_info_cache(test_reference, test_infos,
                           cache_root=None):
    """Pickle a set of test_info objects into a per-reference cache file.

    Args:
        test_reference: A string referencing a test.
        test_infos: A set of TestInfos.
        cache_root: Folder path for saving caches.
    """
    if not cache_root:
        cache_root = get_cache_root()
    if not os.path.isdir(cache_root):
        os.makedirs(cache_root)
    cache_path = get_test_info_cache_path(test_reference, cache_root)
    try:
        logging.debug('Saving cache %s.', cache_path)
        with open(cache_path, 'wb') as cache_file:
            pickle.dump(test_infos, cache_file, protocol=2)
    except (pickle.PicklingError, TypeError, IOError) as err:
        # A failed cache write is harmless; log it and report via metrics.
        logging.debug('Exception raised: %s', err)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
739
740
def load_test_info_cache(test_reference, cache_root=None):
    """Load cache by test_reference to a set of test_infos object.

    Args:
        test_reference: A string referencing a test.
        cache_root: Folder path for finding caches.

    Returns:
        A list of TestInfo namedtuple if cache found, else None.
    """
    if not cache_root:
        cache_root = get_cache_root()
    cache_file = get_test_info_cache_path(test_reference, cache_root)
    if not os.path.isfile(cache_file):
        return None
    logging.debug('Loading cache %s.', cache_file)
    try:
        # NOTE: pickle is only acceptable here because the cache is written
        # locally by atest itself, never from untrusted input.
        with open(cache_file, 'rb') as cached:
            return pickle.load(cached, encoding='utf-8')
    except (pickle.UnpicklingError, ValueError, TypeError, EOFError,
            IOError, ImportError) as err:
        # Corrupt/stale cache: remove it, log the error, and report via
        # metrics; callers fall back to a fresh lookup.
        logging.debug('Exception raised: %s', err)
        os.remove(cache_file)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
    return None
772
def clean_test_info_caches(tests, cache_root=None):
    """Remove the cache files of the given test references.

    Args:
        tests: A list of test references.
        cache_root: Folder path for finding caches.
    """
    if not cache_root:
        cache_root = get_cache_root()
    for test in tests:
        cache_file = get_test_info_cache_path(test, cache_root)
        if not os.path.isfile(cache_file):
            continue
        logging.debug('Removing cache: %s', cache_file)
        try:
            os.remove(cache_file)
        except IOError as err:
            logging.debug('Exception raised: %s', err)
            metrics_utils.handle_exc_and_send_exit_event(
                constants.ACCESS_CACHE_FAILURE)
792
def get_modified_files(root_dir):
    """Get the git modified files. The git path here is git top level of
    the root_dir. It's inevitable to utilise different commands to fulfill
    2 scenario:
        1. locate unstaged/staged files
        2. locate committed files but not yet merged.
    the 'git_status_cmd' fulfils the former while the 'find_modified_files'
    fulfils the latter.

    Args:
        root_dir: the root where it starts finding.

    Returns:
        A set of modified files altered since last commit.
    """
    modified_files = set()
    try:
        # NOTE(review): these pipelines run with shell=True and interpolate
        # root_dir/git_path into the command string; acceptable only because
        # the paths come from the local tree, not untrusted input.
        find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
        git_paths = subprocess.check_output(
            find_git_cmd, shell=True).decode().splitlines()
        for git_path in git_paths:
            # Find modified files from git working tree status.
            git_status_cmd = ("repo forall {} -c git status --short | "
                              "awk '{{print $NF}}'").format(git_path)
            modified_wo_commit = subprocess.check_output(
                git_status_cmd, shell=True).decode().rstrip().splitlines()
            for change in modified_wo_commit:
                modified_files.add(
                    os.path.normpath('{}/{}'.format(git_path, change)))
            # Find modified files that are committed but not yet merged.
            find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
            commit_modified_files = subprocess.check_output(
                find_modified_files, shell=True).decode().splitlines()
            for line in commit_modified_files:
                modified_files.add(os.path.normpath('{}/{}'.format(
                    git_path, line)))
    except (OSError, subprocess.CalledProcessError) as err:
        # Best-effort: any git/repo failure degrades to an empty/partial set.
        logging.debug('Exception raised: %s', err)
    return modified_files
832
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
    r"""A handy delimiter printer.

    Args:
        char: A string used for delimiter.
        length: An integer for the replication.
        prenl: An integer that insert '\n' before delimiter.
        postnl: An integer that insert '\n' after delimiter.

    Returns:
        A string of delimiter.
    """
    return '\n' * prenl + char * length + '\n' * postnl
846
def find_files(path, file_name=constants.TEST_MAPPING):
    """Recursively collect files whose name matches a pattern under a path.

    Args:
        path: A string of path in source.
        file_name: The file name pattern for finding matched files.

    Returns:
        A list of paths of the files with the matching name under the given
        path.
    """
    matches = []
    for root, _, filenames in os.walk(path):
        try:
            matches.extend(
                os.path.join(root, name)
                for name in fnmatch.filter(filenames, file_name))
        except re.error as err:
            # A malformed pattern makes fnmatch's regex translation fail.
            msg = "Unable to locate %s among %s" % (file_name, filenames)
            logging.debug(msg)
            logging.debug("Exception: %s", err)
            metrics.AtestExitEvent(
                duration=metrics_utils.convert_duration(0),
                exit_code=ExitCode.COLLECT_ONLY_FILE_NOT_FOUND,
                stacktrace=msg,
                logs=str(err))
    return matches
873
def extract_zip_text(zip_path):
    """Concatenate tradefed error/warning lines from text files in a zip.

    Args:
        zip_path: The file path of zip.

    Returns:
        The string in input zip file.
    """
    content = ''
    try:
        with zipfile.ZipFile(zip_path) as zip_file:
            for entry in zip_file.namelist():
                # Skip directory entries.
                if os.path.isdir(entry):
                    continue
                # Force change line if multiple text files in zip.
                content += '\n'
                with zip_file.open(entry) as extracted:
                    for raw_line in extracted:
                        decoded = raw_line.decode()
                        if matched_tf_error_log(decoded):
                            content += decoded
    except zipfile.BadZipfile as err:
        logging.debug('Exception raised: %s', err)
    return content
899
def matched_tf_error_log(content):
    """Check if the input content matched tradefed log pattern.

    The format will look like this.
    05-25 17:37:04 W/XXXXXX
    05-25 17:37:04 E/XXXXXX

    Args:
        content: Log string.

    Returns:
        True if the content matches the regular expression for tradefed error
        or warning log.
    """
    # MM-DD HH:MM:SS followed by an 'E/' (error) or 'W/' (warning) tag.
    # Fix: the previous pattern used '(E|W/)', which matched a bare 'E'
    # without the slash (e.g. '... Error ...'); '(E|W)/' requires the
    # slash for both levels, matching the documented E/ and W/ format.
    reg = (r'^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
           r'(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) (E|W)/')
    return bool(re.search(reg, content))
918
def has_valid_cert():
    """Check whether the certificate is valid.

    Returns: True if the cert is valid.
    """
    if not constants.CERT_STATUS_CMD:
        return False
    try:
        # A zero exit status means the certificate is still valid.
        exit_code = subprocess.check_call(
            constants.CERT_STATUS_CMD,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL)
        return exit_code == 0
    except subprocess.CalledProcessError:
        return False
932
# pylint: disable=too-many-locals
def get_flakes(branch='',
               target='',
               test_name='',
               test_module='',
               test_method=''):
    """Get flake information by running the flake service binary.

    Empty arguments fall back to the constants.FLAKE_* defaults. The
    service binary is copied to a temp location (so it can be made
    executable) and its stdout is scanned for the flake percent and
    postsubmit values. LocalDetectEvent metrics record either why no
    info was available or how long fetching it took.

    Args:
        branch: A string of branch name.
        target: A string of target.
        test_name: A string of test suite name.
        test_module: A string of test module.
        test_method: A string of test method.

    Returns:
        A dictionary of flake info. None if no flakes service exists,
        the certificate is invalid, or running the service fails.
    """
    if not branch:
        branch = constants.FLAKE_BRANCH
    if not target:
        target = constants.FLAKE_TARGET
    if not test_name:
        test_name = constants.FLAKE_TEST_NAME
    # Currently lock the flake information from test-mapping test
    # which only runs on cuttlefish(x86) devices.
    # TODO: extend supporting other devices
    if test_module:
        test_module = 'x86 {}'.format(test_module)
    flake_service = os.path.join(constants.FLAKE_SERVICE_PATH,
                                 constants.FLAKE_FILE)
    if not os.path.exists(flake_service):
        logging.debug('Get flakes: Flake service path not exist.')
        # Send (3, 0) to present no flakes info because service does not exist.
        metrics.LocalDetectEvent(
            detect_type=DetectType.NO_FLAKE, result=0)
        return None
    if not has_valid_cert():
        logging.debug('Get flakes: No valid cert.')
        # Send (3, 1) to present no flakes info because no valid cert.
        metrics.LocalDetectEvent(
            detect_type=DetectType.NO_FLAKE, result=1)
        return None
    flake_info = {}
    start = time.time()
    try:
        # Copy to a writable temp path so the binary can be chmod'ed 0755.
        shutil.copy2(flake_service, constants.FLAKE_TMP_PATH)
        tmp_service = os.path.join(constants.FLAKE_TMP_PATH,
                                   constants.FLAKE_FILE)
        os.chmod(tmp_service, 0o0755)
        cmd = [tmp_service, branch, target, test_name, test_module, test_method]
        logging.debug('Executing: %s', ' '.join(cmd))
        output = subprocess.check_output(cmd).decode()
        # Matching lines start with '<name>:'; strip the template prefix
        # so only the value is stored.
        percent_template = "{}:".format(constants.FLAKE_PERCENT)
        postsubmit_template = "{}:".format(constants.FLAKE_POSTSUBMIT)
        for line in output.splitlines():
            if line.startswith(percent_template):
                flake_info[constants.FLAKE_PERCENT] = line.replace(
                    percent_template, '')
            if line.startswith(postsubmit_template):
                flake_info[constants.FLAKE_POSTSUBMIT] = line.replace(
                    postsubmit_template, '')
    # pylint: disable=broad-except
    except Exception as e:
        # Best-effort: any failure while running the service means no info.
        logging.debug('Exception:%s', e)
        return None
    # Send (4, time) to present having flakes info and it spent time.
    duration = round(time.time()-start)
    logging.debug('Took %ss to get flakes info', duration)
    metrics.LocalDetectEvent(
        detect_type=DetectType.HAS_FLAKE,
        result=duration)
    return flake_info
1006
def read_test_record(path):
    """Parse a serialized TestRecord proto from disk.

    Args:
        path: The proto file path.

    Returns:
        The test_record proto instance.
    """
    record = test_record_pb2.TestRecord()
    with open(path, 'rb') as proto_file:
        record.ParseFromString(proto_file.read())
    return record
1020
def has_python_module(module_name):
    """Detect if the module can be loaded without importing it in real.

    Args:
        module_name: A string of the tested module name.

    Returns:
        True if found, False otherwise.
    """
    # 'import importlib' alone does not guarantee the 'util' submodule is
    # loaded; import it explicitly so find_spec is always available.
    import importlib.util
    return bool(importlib.util.find_spec(module_name))
1031
def load_json_safely(jsonfile):
    """Load the given json file as an object.

    Args:
        jsonfile: The json file path.

    Returns:
        The content of the give json file. Null dict when:
        1. the given path doesn't exist.
        2. the given path is not a json or invalid format.
    """
    if isinstance(jsonfile, bytes):
        jsonfile = jsonfile.decode('utf-8')
    if not Path(jsonfile).is_file():
        logging.debug('%s: File not found.', jsonfile)
        return {}
    try:
        with open(jsonfile, 'r') as cache:
            return json.load(cache)
    except json.JSONDecodeError:
        logging.debug('Exception happened while loading %s.', jsonfile)
    return {}
1054
def get_atest_version():
    """Get atest version.

    Returns:
        Version string from the VERSION file, e.g. prebuilt
            2022-11-24_9314547  (<release_date>_<build_id>)

        If VERSION does not exist (src or local built):
            2022-11-24_5d448c50 (<commit_date>_<commit_id>)

        If the git command fails for unexpected reason:
            2022-11-24_unknown  (<today_date>_unknown)
    """
    atest_dir = Path(__file__).resolve().parent
    version_file = atest_dir.joinpath('VERSION')
    if version_file.is_file():
        # read_text() closes the handle; a bare open().read() leaked it.
        return version_file.read_text()

    # Try fetching commit date (%ci) and commit hash (%h).
    git_cmd = 'git log -1 --pretty=format:"%ci;%h"'
    try:
        # commit date/hash are only available when running from the source
        # and the local built. Default the env lookup to '' so a missing
        # ANDROID_BUILD_TOP cannot make Path(None, ...) raise TypeError.
        result = subprocess.run(
            git_cmd, shell=True, check=False, capture_output=True,
            cwd=Path(
                os.getenv(constants.ANDROID_BUILD_TOP, ''), '').joinpath(
                    'tools/asuite/atest'))
        if result.stderr:
            raise subprocess.CalledProcessError(
                returncode=0, cmd=git_cmd)
        # ValueError covers empty/malformed stdout failing the unpack.
        raw_date, commit = result.stdout.decode().split(';')
        date = datetime.datetime.strptime(raw_date,
                                          '%Y-%m-%d %H:%M:%S %z').date()
    # atest_dir doesn't exist will throw FileNotFoundError.
    except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
        # Use today as the commit date for unexpected conditions.
        date = datetime.datetime.today().date()
        commit = 'unknown'
    return f'{date}_{commit}'
1095
def get_manifest_branch(show_aosp=False):
    """Get the manifest branch.

         (portal xml)                            (default xml)
    +--------------------+ _get_include() +-----------------------------+
    | .repo/manifest.xml |--------------->| .repo/manifests/default.xml |
    +--------------------+                +---------------+-------------+
                             <default revision="master" |
                                      remote="aosp"     | _get_revision()
                                      sync-j="4"/>      V
                                                    +--------+
                                                    | master |
                                                    +--------+

    Args:
        show_aosp: A boolean that shows 'aosp' prefix by checking the 'remote'
                   attribute.

    Returns:
        The value of 'revision' of the included xml or default.xml.
        ('' is returned when the xml exists but cannot be read or has no
        <default> tag.)

        None when no ANDROID_BUILD_TOP or unable to access default.xml.
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP)
    if not build_top:
        return None
    portal_xml = Path(build_top).joinpath('.repo', 'manifest.xml')
    default_xml = Path(build_top).joinpath('.repo/manifests', 'default.xml')
    def _get_revision(xml):
        """Return the 'revision' attr of the first <default> tag in xml.

        Returns '' when xml is unreadable or has no <default> tag.
        """
        try:
            xml_root = ET.parse(xml).getroot()
        except (IOError, OSError, ET.ParseError):
            # TODO(b/274989179) Change back to warning once warning if not going
            # to be treat as test failure. Or test_get_manifest_branch unit test
            # could be fix if return None if portal_xml or default_xml not
            # exist.
            logging.info('%s could not be read.', xml)
            return ''
        default_tags = xml_root.findall('./default')
        if default_tags:
            prefix = ''
            for tag in default_tags:
                branch = tag.attrib.get('revision')
                # Prepend 'aosp-' only when asked and the remote is aosp.
                if show_aosp and tag.attrib.get('remote') == 'aosp':
                    prefix = 'aosp-'
                # Only the first <default> tag is ever used.
                return f'{prefix}{branch}'
        return ''
    def _get_include(xml):
        """Return the Path named by the first <include> tag in xml.

        Falls back to default_xml when there is no usable <include>;
        returns an empty Path() when xml is unreadable.
        """
        try:
            xml_root = ET.parse(xml).getroot()
        except (IOError, OSError, ET.ParseError):
            # TODO(b/274989179) Change back to warning once warning if not going
            # to be treat as test failure. Or test_get_manifest_branch unit test
            # could be fix if return None if portal_xml or default_xml not
            # exist.
            logging.info('%s could not be read.', xml)
            return Path()
        include_tags = xml_root.findall('./include')
        if include_tags:
            for tag in include_tags:
                name = tag.attrib.get('name')
                if name:
                    return Path(build_top).joinpath('.repo/manifests', name)
        return default_xml

    # 1. Try getting revision from .repo/manifests/default.xml
    if default_xml.is_file():
        return _get_revision(default_xml)
    # 2. Try getting revision from the included xml of .repo/manifest.xml
    include_xml = _get_include(portal_xml)
    if include_xml.is_file():
        return _get_revision(include_xml)
    # 3. Try getting revision directly from manifest.xml (unlikely to happen)
    return _get_revision(portal_xml)
1170
def get_build_target():
    """Get the build target form system environment TARGET_PRODUCT."""
    product = os.getenv(constants.ANDROID_TARGET_PRODUCT, None)
    variant = os.getenv(constants.TARGET_BUILD_VARIANT, None)
    return '%s-%s' % (product, variant)
1177
def build_module_info_target(module_info_target):
    """Build module-info.json after deleting the original one.

    Exits the process with ExitCode.BUILD_FAILURE when the build fails;
    otherwise records how long the build took via a LocalDetectEvent.

    Args:
        module_info_target: the target name that soong is going to build.
    """
    module_file = 'module-info.json'
    logging.debug('Generating %s - this is required for '
                  'initial runs or forced rebuilds.', module_file)
    build_start = time.time()
    # Default to '' so a missing ANDROID_PRODUCT_OUT cannot make
    # Path(None) raise TypeError.
    product_out = os.getenv(constants.ANDROID_PRODUCT_OUT, '')
    module_info_path = Path(product_out).joinpath(module_file)
    if module_info_path.is_file():
        os.remove(module_info_path)
    if not build([module_info_target]):
        sys.exit(ExitCode.BUILD_FAILURE)
    build_duration = time.time() - build_start
    metrics.LocalDetectEvent(
        detect_type=DetectType.ONLY_BUILD_MODULE_INFO,
        result=int(build_duration))
1198
def has_wildcard(test_name):
    """Tell whether the test_name(either a list or string) contains wildcard
    symbols.

    Args:
        test_name: A list or a str.

    Return:
        True if test_name contains wildcard, False otherwise.
    """
    if isinstance(test_name, str):
        return any(wildcard in test_name for wildcard in _WILDCARD_CHARS)
    if isinstance(test_name, list):
        return any(has_wildcard(name) for name in test_name)
    return False
1216
def is_build_file(path):
    """If input file is one of an android build file.

    Args:
        path: A string of file path.

    Return:
        True if path is android build file, False otherwise.
    """
    _, extension = os.path.splitext(path)
    return extension in _ANDROID_BUILD_EXT
1227
def quote(input_str):
    """Wrap the input in single quotes when it has shell-aware characters.

    e.g. unit(test|testing|testing) -> 'unit(test|testing|testing)'

    Args:
        input_str: A string from user input.

    Returns: A string with single quotes if regex chars were detected.
    """
    if not has_chars(input_str, _REGEX_CHARS):
        return input_str
    return "'" + input_str + "'"
1242
def has_chars(input_str, chars):
    """Check if the input string contains one of the designated characters.

    Args:
        input_str: A string from user input.
        chars: An iterable object.

    Returns:
        True if the input string contains one of the special chars.
    """
    # any() short-circuits exactly like the original manual loop.
    return any(char in input_str for char in chars)
1257
def prompt_with_yn_result(msg, default=True):
    """Prompt message and get yes or no result.

    Args:
        msg: The question you want asking.
        default: boolean to True/Yes or False/No
    Returns:
        default value if get KeyboardInterrupt or ValueError exception.
    """
    suffix = '[Y/n]: ' if default else '[y/N]: '
    try:
        answer = input(msg + suffix)
        return strtobool(answer)
    except (ValueError, KeyboardInterrupt):
        return default
1272
def strtobool(val):
    """Convert a string representation of truth to True or False.

    Args:
        val: a string of input value.

    Returns:
        True when values are 'y', 'yes', 't', 'true', 'on', and '1';
        False when 'n', 'no', 'f', 'false', 'off', and '0'.
        Raises ValueError if 'val' is anything else.
    """
    truthy = ('y', 'yes', 't', 'true', 'on', '1')
    falsy = ('n', 'no', 'f', 'false', 'off', '0')
    normalized = val.lower()
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise ValueError("invalid truth value %r" % (val,))
1289
def get_android_junit_config_filters(test_config):
    """Get the dictionary of a input config for junit config's filters

    Args:
        test_config: The path of the test config.
    Returns:
        A dictionary include all the filters in the input config.
    """
    filter_dict = {}
    xml_root = ET.parse(test_config).getroot()
    for option in xml_root.findall('.//option'):
        option_name = option.attrib['name'].strip()
        if option_name not in constants.SUPPORTED_FILTERS:
            continue
        # Accumulate every value seen for the same filter name.
        filter_dict.setdefault(option_name, []).append(
            option.attrib['value'].strip())
    return filter_dict
1309
def get_config_parameter(test_config):
    """Get all the parameter values for the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the parameters of the input config.
    """
    xml_root = ET.parse(test_config).getroot()
    # Collect values of config-descriptor options whose key is the
    # parameter key.
    return {
        tag.attrib['value'].strip()
        for tag in xml_root.findall('.//option')
        if tag.attrib['name'].strip() == constants.CONFIG_DESCRIPTOR
        and tag.attrib['key'].strip() == constants.PARAMETER_KEY
    }
1329
def get_config_device(test_config):
    """Get all the device names from the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the device name of the input config.
    """
    try:
        xml_root = ET.parse(test_config).getroot()
        return {tag.attrib['name'].strip()
                for tag in xml_root.findall('.//device')}
    except ET.ParseError as e:
        # An unparsable config is fatal: report and exit.
        colorful_print('Config has invalid format.', constants.RED)
        colorful_print('File %s : %s' % (test_config, str(e)), constants.YELLOW)
        sys.exit(ExitCode.CONFIG_INVALID_FORMAT)
1350
def get_mainline_param(test_config):
    """Get all the mainline-param values for the input config

    Args:
        test_config: The path of the test config.
    Returns:
        A set include all the parameters of the input config.
    """
    xml_root = ET.parse(test_config).getroot()
    # Collect values of config-descriptor options whose key is the
    # mainline-param key.
    return {
        tag.attrib['value'].strip()
        for tag in xml_root.findall('.//option')
        if tag.attrib['name'].strip() == constants.CONFIG_DESCRIPTOR
        and tag.attrib['key'].strip() == constants.MAINLINE_PARAM_KEY
    }
1370
def get_adb_devices():
    """Run `adb devices` and return a list of devices.

    Returns:
        A list of devices. e.g.
        ['127.0.0.1:40623', '127.0.0.1:40625']
    """
    probe_cmd = "adb devices | egrep -v \"^List|^$\"||true"
    output = subprocess.check_output(probe_cmd, shell=True).decode()
    # The serial is the first tab-separated field of each line.
    return [line.split('\t')[0] for line in output.splitlines()]
1381
def get_android_config():
    """Get Android config as "printconfig" shows.

    Returns:
        A dict of Android configurations.
    """
    dump_cmd = get_build_cmd(dump=True)
    raw_config = subprocess.check_output(dump_cmd).decode('utf-8')
    android_config = {}
    for line in raw_config.splitlines():
        # Skip separator lines that begin with '='.
        if line.startswith('='):
            continue
        key, value = line.split('=', 1)
        android_config.setdefault(key, value)
    return android_config
1396
1397def get_config_gtest_args(test_config):
1398    """Get gtest's module-name and device-path option from the input config
1399
1400    Args:
1401        test_config: The path of the test config.
1402    Returns:
1403        A string of gtest's module name.
1404        A string of gtest's device path.
1405    """
1406    module_name = ''
1407    device_path = ''
1408    xml_root = ET.parse(test_config).getroot()
1409    option_tags = xml_root.findall('.//option')
1410    for tag in option_tags:
1411        name = tag.attrib['name'].strip()
1412        value = tag.attrib['value'].strip()
1413        if name == 'native-test-device-path':
1414            device_path = value
1415        elif name == 'module-name':
1416            module_name = value
1417    return module_name, device_path
1418
def get_arch_name(module_name, is_64=False):
    """Get the arch folder name for the input module.

    Scan the test case folders to get the matched arch folder name.

    Args:
        module_name: The module_name of test
        is_64: If need 64 bit arch name, False otherwise.
    Returns:
        A string of the arch name.
    """
    arch_list = ['arm64', 'x86_64'] if is_64 else ['arm', 'x86']
    test_case_root = os.path.join(
        os.environ.get(constants.ANDROID_TARGET_OUT_TESTCASES, ''),
        module_name)
    if not os.path.isdir(test_case_root):
        logging.debug('%s does not exist.', test_case_root)
        return ''
    # Return the first directory entry whose name is a known arch.
    for entry in os.listdir(test_case_root):
        if entry in arch_list:
            return entry
    return ''
1446
def copy_single_arch_native_symbols(
    symbol_root, module_name, device_path, is_64=False):
    """Copy symbol files for native tests which belong to input arch.

    Args:
        symbol_root: The root folder of the symbols.
        module_name: The module_name of test
        device_path: The device path define in test config.
        is_64: True if need to copy 64bit symbols, False otherwise.
    """
    native_dir = 'nativetest64' if is_64 else 'nativetest'
    src_symbol = os.path.join(symbol_root, 'data', native_dir, module_name)
    # device_path[1:] strips the leading '/' so it joins as a relative path.
    dst_symbol = os.path.join(
        symbol_root, device_path[1:], module_name,
        get_arch_name(module_name, is_64))
    if os.path.isdir(src_symbol):
        # TODO: Use shutil.copytree(src, dst, dirs_exist_ok=True) after
        #  python3.8
        if os.path.isdir(dst_symbol):
            shutil.rmtree(dst_symbol)
        shutil.copytree(src_symbol, dst_symbol)
1469
def copy_native_symbols(module_name, device_path):
    """Copy symbol files for native tests to match with tradefed file structure.

    The original symbols will locate at
    $(PRODUCT_OUT)/symbols/data/nativetest(64)/$(module)/$(stem).
    From TF, the test binary will locate at
    /data/local/tmp/$(module)/$(arch)/$(stem).
    In order to make trace work need to copy the original symbol to
    $(PRODUCT_OUT)/symbols/data/local/tmp/$(module)/$(arch)/$(stem)

    Args:
        module_name: The module_name of test
        device_path: The device path define in test config.
    """
    symbol_root = os.path.join(
        os.environ.get(constants.ANDROID_PRODUCT_OUT, ''),
        'symbols')
    if not os.path.isdir(symbol_root):
        logging.debug('Symbol dir:%s not exist, skip copy symbols.',
                      symbol_root)
        return
    # Copy whichever arch variants (32 bit then 64 bit) actually exist.
    for is_64 in (False, True):
        if get_arch_name(module_name, is_64=is_64):
            copy_single_arch_native_symbols(
                symbol_root, module_name, device_path, is_64=is_64)
1499
def get_config_preparer_options(test_config, class_name):
    """Get the option name/value pairs of a target_preparer in the config.

    Args:
        test_config: The path of the test config.
        class_name: A string of target_preparer
    Returns:
        A dict mapping each option name to its value for the given
        target_preparer class.
    """
    xml_root = ET.parse(test_config).getroot()
    xpath = './/target_preparer[@class="%s"]/option' % class_name
    return {tag.attrib['name'].strip(): tag.attrib['value'].strip()
            for tag in xml_root.findall(xpath)}
1518
def is_adb_root(args):
    """Check whether device has root permission.

    Args:
        args: An argspace.Namespace class instance holding parsed args.
    Returns:
        True if adb has root permission.
    """
    try:
        # ANDROID_SERIAL wins; fall back to the --serial argument.
        serial = os.environ.get(constants.ANDROID_SERIAL, '') or args.serial
        serial_options = ('-s ' + serial) if serial else ''
        output = subprocess.check_output("adb %s shell id" % serial_options,
                                         shell=True,
                                         stderr=subprocess.STDOUT).decode()
        return "uid=0(root)" in output
    except subprocess.CalledProcessError as err:
        logging.debug('Exception raised(): %s, Output: %s', err, err.output)
        raise err
1539
def perm_metrics(config_path, adb_root):
    """Compare adb root permission with RootTargetPreparer in config.

    Args:
        config_path: A string of AndroidTest.xml file path.
        adb_root: A boolean of whether device is root or not.
    """
    # RootTargetPreparer's force-root set in config
    options = get_config_preparer_options(config_path, _ROOT_PREPARER)
    if not options:
        return
    logging.debug('preparer_options: %s', options)
    # force-root defaults to True unless the config explicitly says FALSE.
    preparer_force_root = options.get('force-root', '').upper() != "FALSE"
    logging.debug(' preparer_force_root: %s', preparer_force_root)
    if preparer_force_root == adb_root:
        # Config expectation and actual permission agree; nothing to report.
        return
    if preparer_force_root:
        logging.debug('DETECT_TYPE_PERMISSION_INCONSISTENT:0')
        metrics.LocalDetectEvent(
            detect_type=DetectType.PERMISSION_INCONSISTENT,
            result=0)
    else:
        logging.debug('DETECT_TYPE_PERMISSION_INCONSISTENT:1')
        metrics.LocalDetectEvent(
            detect_type=DetectType.PERMISSION_INCONSISTENT,
            result=1)
1566
def get_verify_key(tests, extra_args):
    """Compose test command key.

    Args:
        tests: A list of input tests.
        extra_args: Dict of extra args to add to test run.
    Returns:
        A composed test commands.
    """
    # test_commands is a concatenated string of sorted test_ref+extra_args.
    # For example, "ITERATIONS=5 hello_world_test"
    # Work on a copy: the previous code appended to (and sorted) the
    # caller's list in place, mutating it as a side effect.
    test_commands = list(tests)
    for key, value in extra_args.items():
        if key not in constants.SKIP_VARS:
            test_commands.append('%s=%s' % (key, str(value)))
    test_commands.sort()
    return ' '.join(test_commands)
1584
def gen_runner_cmd_to_file(tests, dry_run_cmd,
                           result_path=constants.RUNNER_COMMAND_PATH):
    """Generate test command and save to file.

    The ANDROID_BUILD_TOP prefix in the command is replaced with the
    literal '$ANDROID_BUILD_TOP' so the stored command is portable.

    Args:
        tests: A String of input tests.
        dry_run_cmd: A String of dry run command.
        result_path: A file path for saving result.
    Returns:
        A composed run commands.
    """
    normalized_cmd = dry_run_cmd
    root_path = os.environ.get(constants.ANDROID_BUILD_TOP)
    # Guard against an unset ANDROID_BUILD_TOP: 'None in <str>' would
    # raise TypeError.
    if root_path and root_path in dry_run_cmd:
        normalized_cmd = dry_run_cmd.replace(root_path,
                                             f"${constants.ANDROID_BUILD_TOP}")
    results = {}
    if not os.path.isfile(result_path):
        results[tests] = normalized_cmd
    else:
        with open(result_path) as json_file:
            results = json.load(json_file)
            if results.get(tests) != normalized_cmd:
                results[tests] = normalized_cmd
    with open(result_path, 'w+') as _file:
        json.dump(results, _file, indent=0)
    return results.get(tests, '')
1612
1613
def handle_test_env_var(input_test, result_path=constants.VERIFY_ENV_PATH,
                        pre_verify=False):
    """Handle the environment variable of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        result_path: The file path for saving result.
        pre_verify: A boolean to separate into pre-verify and actually verify.

    Returns:
        0 is no variable needs to verify, 1 has some variables to next verify.

    Raises:
        atest_error.DryRunVerificationError: when input_test has no recorded
            verify key, or a recorded environment variable does not match
            the current environment.
    """
    full_result_content = {}
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    demand_env_vars = full_result_content.get(input_test)
    if demand_env_vars is None:
        raise atest_error.DryRunVerificationError(
            '{}: No verify key.'.format(input_test))
    # No mapping variables.
    if demand_env_vars == []:
        return 0
    if pre_verify:
        return 1
    verify_error = []
    for env in demand_env_vars:
        if '=' in env:
            # 'KEY=VALUE' entries must match the current environment exactly.
            key, value = env.split('=', 1)
            env_value = os.environ.get(key, None)
            if env_value is None or env_value != value:
                verify_error.append('Environ verification failed, ({0},{1})!='
                    '({0},{2})'.format(key, value, env_value))
        else:
            # Bare names only need to be present (and non-empty).
            if not os.environ.get(env, None):
                verify_error.append('Missing environ:{}'.format(env))
    if verify_error:
        raise atest_error.DryRunVerificationError('\n'.join(verify_error))
    return 1
1653
def generate_buildfiles_checksum(target_dir: Path):
    """ Method that generate md5 checksum of Android.{bp,mk} files.

    The checksum of build files are stores in
        $ANDROID_HOST_OUT/indexes/buildfiles.md5
    """
    locate_db = Path(target_dir).joinpath(constants.LOCATE_CACHE)
    md5_file = Path(target_dir).joinpath(constants.BUILDFILES_MD5)
    # Without a plocate database there is nothing to enumerate.
    if not locate_db.is_file():
        return
    locate_cmd = (f'locate -d{locate_db} --existing '
                  r'--regex "/Android\.(bp|mk)$"')
    try:
        listing = subprocess.check_output(
            locate_cmd, shell=True).decode('utf-8')
        save_md5(listing.split(), md5_file)
    except subprocess.CalledProcessError:
        logging.error('Failed to generate %s', md5_file)
1670
def run_multi_proc(func, *args, **kwargs):
    """Start a child process running the given function.

    Args:
        func: The callable to use as the process target.
        args/kwargs: check doc page:
        https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

    Returns:
        multiprocessing.Process object.
    """
    worker = Process(target=func, *args, **kwargs)
    worker.start()
    return worker
1685
def get_prebuilt_sdk_tools_dir():
    """Get the path for the prebuilt sdk tools root dir.

    Returns: The absolute path of prebuilt sdk tools directory.
    """
    repo_root = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
    # platform.system() already returns a str, e.g. 'Linux' -> 'linux'.
    host_os = platform.system().lower()
    return repo_root.joinpath('prebuilts/sdk/tools/', host_os, 'bin')
1694
1695
def is_writable(path):
    """Check if the given path is writable.

    If the path does not exist yet, the nearest existing ancestor is
    checked instead.

    Returns: True if input path is writable, False otherwise.
    """
    if not os.path.exists(path):
        # os.path.dirname('') is '' again, which would recurse forever for
        # a relative, non-existing path; fall back to the current directory.
        return is_writable(os.path.dirname(path) or '.')
    return os.access(path, os.W_OK)
1704
1705
def get_misc_dir():
    """Get the path for the ATest data root dir.

    Returns: The absolute path of the ATest data root dir.
    """
    # Prefer the user's home directory; fall back to the build out dir
    # when home is not writable.
    home_dir = os.path.expanduser('~')
    return home_dir if is_writable(home_dir) else get_build_out_dir()
1715
def get_full_annotation_class_name(module_info, class_name):
    """ Get fully qualified class name from a class name.

    If the given keyword(class_name) is "smalltest", this method can search
    among source codes and grep the accurate annotation class name:

        android.test.suitebuilder.annotation.SmallTest

    Args:
        module_info: A dict of module_info.
        class_name: A string of class name.

    Returns:
        A string of fully qualified class name, empty string otherwise.
    """
    # Check patterns in order: an exact fully-qualified import first,
    # then an import whose trailing segment matches the keyword.
    patterns = (
        re.compile(r'import\s+(?P<fqcn>{})(|;)$'.format(class_name), re.I),
        re.compile(r'import\s+(?P<fqcn>.*\.{})(|;)$'.format(class_name), re.I),
    )
    build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
    for src in module_info.get(constants.MODULE_SRCS, []):
        with open(build_top.joinpath(src), 'r') as src_file:
            for line in src_file.readlines():
                for pattern in patterns:
                    match = pattern.match(line)
                    if match:
                        return match.group('fqcn')
    return ""
1749
def has_mixed_type_filters(test_infos):
    """ There are different types in a test module.

    Dict test_to_types is mapping module name and the set of types.
    For example,
    {
        'module_1': {'wildcard class_method'},
        'module_2': {'wildcard class_method', 'regular class_method'},
        'module_3': set()
        }

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if more than one filter type in a test module, False otherwise.
    """
    test_to_types = {}
    for test_info in test_infos:
        # Accumulate filter types per module name; |= mutates the set that
        # setdefault() stored in the dict.
        type_set = test_to_types.setdefault(test_info.test_name, set())
        for flt in test_info.data.get(constants.TI_FILTER, []):
            type_set |= get_filter_types(flt.to_set_of_tf_strings())
    return any(len(types) > 1 for types in test_to_types.values())
1779
def get_filter_types(tf_filter_set):
    """ Get filter types.

    Args:
        tf_filter_set: A set of tf filter strings.

    Returns:
        A set of FilterType.
    """
    type_set = set()
    # A single filter string may match both recognizers.
    recognizers = (
        (_WILDCARD_FILTER_RE, FilterType.WILDCARD_FILTER.value),
        (_REGULAR_FILTER_RE, FilterType.REGULAR_FILTER.value),
    )
    for tf_filter in tf_filter_set:
        for pattern, filter_type in recognizers:
            if pattern.match(tf_filter):
                logging.debug('Filter and type: (%s, %s)',
                              tf_filter, filter_type)
                type_set.add(filter_type)
    return type_set
1800
def has_index_files():
    """Determine whether the essential index files are done.

    (b/206886222) checksum may be different even the src is not changed; so
    the main process needs to wait when the essential index files do not exist.

    Returns:
        False if one of the index file does not exist; True otherwise.
    """
    essential_indexes = (
        constants.CLASS_INDEX,
        constants.CC_CLASS_INDEX,
        constants.QCLASS_INDEX,
        constants.PACKAGE_INDEX,
    )
    return all(Path(index).is_file() for index in essential_indexes)
1815
# pylint: disable=anomalous-backslash-in-string,too-many-branches
def get_bp_content(filename: Path, module_type: str) -> Dict:
    """Get essential content info from an Android.bp.
    By specifying module_type (e.g. 'android_test', 'android_app'), this method
    can parse the given starting point and grab 'name', 'instrumentation_for'
    and 'manifest'.

    Returns:
        A dict of mapping test module and target module; e.g.
        {
         'FooUnitTests':
             {'manifest': 'AndroidManifest.xml', 'target_module': 'Foo'},
         'Foo':
             {'manifest': 'AndroidManifest-common.xml', 'target_module': ''}
        }
        Null dict if there is no content of the given module_type.
    """
    build_file = Path(filename)
    # The path must BOTH carry a .bp suffix AND exist; the previous `any()`
    # let a non-existent *.bp path through and crashed in open() below.
    if not all((build_file.suffix == '.bp', build_file.is_file())):
        return {}
    start_from = re.compile(rf'^{module_type}\s*\{{')
    end_with = re.compile(r'^\}$')
    context_re = re.compile(
        r'\s*(?P<key>(name|manifest|instrumentation_for))\s*:'
        r'\s*\"(?P<value>.*)\"\s*,', re.M)
    with open(build_file, 'r') as cache:
        data = cache.readlines()
    content_dict = {}
    start_recording = False
    _dict = {}
    for _line in data:
        line = _line.strip()
        if start_from.match(line):
            # Found the opening of a module of the requested type.
            start_recording = True
            _dict = {}
            continue
        if start_recording:
            if not end_with.match(line):
                match = context_re.match(line)
                if match:
                    _dict.update(
                        {match.group('key'): match.group('value')})
            else:
                # Closing brace: flush the collected attributes.
                start_recording = False
                module_name = _dict.get('name')
                if module_name:
                    content_dict.update(
                        {module_name: {
                            'manifest': _dict.get(
                                'manifest', 'AndroidManifest.xml'),
                            'target_module': _dict.get(
                                'instrumentation_for', '')}
                        })
    return content_dict
1869
def get_manifest_info(manifest: Path) -> Dict[str, Any]:
    """Get the essential info from the given manifest file.
    This method cares only three attributes:
        * package
        * targetPackage (stored under the 'target_package' key)
        * persistent
    For an instrumentation test, the result will be like:
    {
        'package': 'com.android.foo.tests.unit',
        'target_package': 'com.android.foo',
        'persistent': False
    }
    For a target module of the instrumentation test:
    {
        'package': 'com.android.foo',
        'target_package': '',
        'persistent': True
    }
    """
    mdict = {'package': '', 'target_package': '', 'persistent': False}
    try:
        xml_root = ET.parse(manifest).getroot()
    except (ET.ParseError, FileNotFoundError):
        # Unparseable or missing manifest: return the empty defaults.
        return mdict
    package_name_re = re.compile(r'[a-z][\w]+(\.[\w]+)*')
    # 1. Must probe 'package' name from the top.
    for node in xml_root.findall('.'):
        pkg = node.attrib.get('package')
        if pkg and package_name_re.match(pkg):
            mdict['package'] = pkg
            break
    for node in xml_root.findall('*'):
        # 2. Probe 'targetPackage' in 'instrumentation' tag; the attribute
        # key carries a namespace prefix, hence the substring test.
        if node.tag == 'instrumentation':
            for attr, value in node.attrib.items():
                if 'targetPackage' in attr:
                    mdict['target_package'] = value
                    break
        # 3. Probe 'persistent' in any tags.
        for attr, value in node.attrib.items():
            if 'persistent' in attr:
                mdict['persistent'] = value.lower() == 'true'
                break
    return mdict
1916
# pylint: disable=broad-except
def generate_print_result_html(result_file: Path):
    """Generate a html that collects all log files."""
    result_file = Path(result_file)
    log_dir = result_file.parent.joinpath('log')
    result_html = Path(log_dir, 'test_logs.html')
    try:
        logs = sorted(find_files(str(log_dir), file_name='*'))
        with open(result_html, 'w') as html_cache:
            html_cache.write('<!DOCTYPE html><html><body>')
            result = load_json_safely(result_file)
            if result:
                # Headline with the original atest invocation and run time.
                html_cache.write(f'<h1>{"atest " + result.get("args")}</h1>')
                timestamp = datetime.datetime.fromtimestamp(
                    result_file.stat().st_ctime)
                html_cache.write(f'<h2>{timestamp}</h2>')
            for log in logs:
                html_cache.write(f'<p><a href="{urllib.parse.quote(log)}">'
                                 f'{html.escape(Path(log).name)}</a></p>')
            html_cache.write('</body></html>')
        print(f'\nTo access logs, press "ctrl" and click on\n'
              f'file://{result_html}\n')
    except Exception as e:
        # Best effort only: failure to render the html must not fail the run.
        logging.debug('Did not generate log html for reason: %s', e)
1941
# pylint: disable=broad-except
def prompt_suggestions(result_file: Path):
    """Generate suggestions when detecting keywords in logs.

    Args:
        result_file: Path of a test result file; log files are searched in
            the sibling 'log' directory.
    """
    result_file = Path(result_file)
    search_dir = Path(result_file).parent.joinpath('log')
    logs = sorted(find_files(str(search_dir), file_name='*'))
    for log in logs:
        # Read each log once instead of re-opening it for every keyword.
        try:
            with open(log, 'r') as cache:
                content = cache.read()
        # If the given is not a plain text, just ignore it.
        except Exception:
            continue
        for keyword, suggestion in SUGGESTIONS.items():
            if keyword in content:
                colorful_print(
                    '[Suggestion] ' + suggestion, color=constants.RED)
                # Print at most one suggestion per log file.
                break
1960
def build_files_integrity_is_ok() -> bool:
    """Return Whether the integrity of build files is OK."""
    # 0. Inexistence of the checksum file means a fresh repo sync.
    checksum_file = Path(constants.BUILDFILES_MD5)
    if not checksum_file.is_file():
        return False
    # 1. Ensure no build files were added/deleted.
    with open(constants.BUILDFILES_MD5, 'r') as cache:
        recorded_amount = len(json.load(cache).keys())
    count_cmd = (f'locate -d{constants.LOCATE_CACHE} --regex '
                 r'"/Android\.(bp|mk)$" | wc -l')
    if int(subprocess.getoutput(count_cmd)) != recorded_amount:
        return False
    # 2. Ensure the consistency of all build files.
    return check_md5(constants.BUILDFILES_MD5, missing_ok=False)
1975
1976
def _build_env_profiling() -> BuildEnvProfiler:
    """Determine the status profile before build.

    The BuildEnvProfiler object can help use determine whether a build is:
        1. clean build. (empty out/ dir)
        2. Build files Integrity (Android.bp/Android.mk changes).
        3. Environment variables consistency.
        4. New Ninja file generated. (mtime of soong/build.ninja)

    Returns:
        the BuildProfile object.
    """
    out_dir = Path(get_build_out_dir())
    ninja_file = out_dir.joinpath('soong/build.ninja')
    variables_file = out_dir.joinpath('soong/soong.environment.used.build')
    # A missing ninja file means a clean out/ dir; record mtime 0 then.
    ninja_mtime = ninja_file.stat().st_mtime if ninja_file.is_file() else 0

    return BuildEnvProfiler(
        ninja_file=ninja_file,
        ninja_file_mtime=ninja_mtime,
        variable_file=variables_file,
        variable_file_md5=md5sum(variables_file),
        clean_out=not ninja_file.exists(),
        build_files_integrity=build_files_integrity_is_ok(),
    )
2002
2003
def _send_build_condition_metrics(
        build_profile: BuildEnvProfiler, cmd: List[str]):
    """Send build conditions by comparing build env profilers.

    Args:
        build_profile: The BuildEnvProfiler snapshot taken before the build.
        cmd: The build command list. When building module-info.json only,
            'module-info.json' will be the last element.
    """
    # Peek at the last element instead of pop() so the caller's command
    # list is not mutated as a side effect (and empty lists are tolerated).
    m_mod_info_only = bool(cmd) and 'module-info.json' in cmd[-1]

    def ninja_file_is_changed(env_profiler: BuildEnvProfiler) -> bool:
        """Determine whether the ninja file had been renewal."""
        if not env_profiler.ninja_file.is_file():
            return True
        return (env_profiler.ninja_file.stat().st_mtime !=
                env_profiler.ninja_file_mtime)

    def env_var_is_changed(env_profiler: BuildEnvProfiler) -> bool:
        """Determine whether soong-related variables had changed."""
        return (md5sum(env_profiler.variable_file) !=
                env_profiler.variable_file_md5)

    def send_data(detect_type):
        """A simple wrapper of metrics.LocalDetectEvent."""
        metrics.LocalDetectEvent(detect_type=detect_type, result=1)

    def pick(mod_info_type, build_type):
        """Select the DetectType matching the build target kind."""
        return mod_info_type if m_mod_info_only else build_type

    # Determine the correct detect type before profiling.
    # (build module-info.json or build dependencies.)
    clean_out = pick(DetectType.MODULE_INFO_CLEAN_OUT,
                     DetectType.BUILD_CLEAN_OUT)
    ninja_generation = pick(DetectType.MODULE_INFO_GEN_NINJA,
                            DetectType.BUILD_GEN_NINJA)
    bpmk_change = pick(DetectType.MODULE_INFO_BPMK_CHANGE,
                       DetectType.BUILD_BPMK_CHANGE)
    env_change = pick(DetectType.MODULE_INFO_ENV_CHANGE,
                      DetectType.BUILD_ENV_CHANGE)
    src_change = pick(DetectType.MODULE_INFO_SRC_CHANGE,
                      DetectType.BUILD_SRC_CHANGE)
    other = pick(DetectType.MODULE_INFO_OTHER,
                 DetectType.BUILD_OTHER)
    incremental = pick(DetectType.MODULE_INFO_INCREMENTAL,
                       DetectType.BUILD_INCREMENTAL)

    if build_profile.clean_out:
        send_data(clean_out)
    else:
        send_data(incremental)

    if ninja_file_is_changed(build_profile):
        send_data(ninja_generation)

    # 'other' is sent only when none of the known change causes fired.
    other_condition = True
    if not build_profile.build_files_integrity:
        send_data(bpmk_change)
        other_condition = False
    if env_var_is_changed(build_profile):
        send_data(env_change)
        other_condition = False
    if bool(get_modified_files(os.getcwd())):
        send_data(src_change)
        other_condition = False
    if other_condition:
        send_data(other)
2065
2066
def get_local_auto_shardable_tests():
    """Get the auto shardable test names in shardable file.

    The path will be ~/.atest/auto_shard/local_auto_shardable_tests

    Returns:
        A list of auto shardable test names.
    """
    shardable_tests_file = Path(get_misc_dir()).joinpath(
        '.atest/auto_shard/local_auto_shardable_tests')
    if not shardable_tests_file.exists():
        return []
    # read_text() closes the handle; the previous bare open().read() leaked it.
    return shardable_tests_file.read_text().split()
2080
def update_shardable_tests(test_name: str, run_time_in_sec: int):
    """Update local_auto_shardable_test file.

    Strategy:
        - Determine to add the module by the run time > 10 mins.
        - local_auto_shardable_test file path :
            ~/.atest/auto_shard/local_auto_shardable_tests
        - The file content template is module name per line:
            <module1>
            <module2>
            ...
    """
    # Only tests slower than 10 minutes are worth sharding.
    if run_time_in_sec < 600:
        return
    shardable_tests = get_local_auto_shardable_tests()
    if test_name not in shardable_tests:
        shardable_tests.append(test_name)
        logging.info('%s takes %ss (> 600s) to finish. Adding to shardable '
                    'test list.', test_name, run_time_in_sec)

    if not shardable_tests:
        logging.info('No shardable tests to run.')
        return
    shard_dir = Path(get_misc_dir()).joinpath('.atest/auto_shard')
    shard_dir.mkdir(parents=True, exist_ok=True)
    with open(shard_dir.joinpath('local_auto_shardable_tests'), 'w') as cache:
        cache.write('\n'.join(shardable_tests))
2109