#!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test types that can be run by TradeFederation.
"""

# pylint: disable=line-too-long
# pylint: disable=no-member
# pylint: disable=too-many-lines
# pylint: disable=wrong-import-position

from __future__ import print_function

import collections
import logging
import os
import sys
import tempfile
import time
import platform

from typing import Dict, List

from dataclasses import dataclass
from pathlib import Path

from atest import atest_arg_parser
from atest import atest_configs
from atest import atest_error
from atest import atest_execution_info
from atest import atest_utils
from atest import bazel_mode
from atest import bug_detector
from atest import cli_translator
from atest import constants
from atest import module_info
from atest import result_reporter
from atest import test_runner_handler

from atest.atest_enum import DetectType, ExitCode
from atest.coverage import coverage
from atest.metrics import metrics
from atest.metrics import metrics_base
from atest.metrics import metrics_utils
from atest.test_finders import test_finder_utils
from atest.test_runners import regression_test_runner
from atest.test_runners import roboleaf_test_runner
from atest.test_finders.test_info import TestInfo
from atest.tools import atest_tools as at

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option "{}" does not work for running tests in TEST_MAPPING files')

DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
END_OF_OPTION = '--'
HAS_IGNORED_ARGS = False
# Conditions that atest should exit without sending result to metrics.
EXIT_CODES_BEFORE_TEST = [ExitCode.ENV_NOT_SETUP,
                          ExitCode.TEST_NOT_FOUND,
                          ExitCode.OUTSIDE_ROOT,
                          ExitCode.AVD_CREATE_FAILURE,
                          ExitCode.AVD_INVALID_ARGS]

@dataclass
class Steps:
    """A Dataclass that stores steps and shows step assignments."""
    _build: bool
    _install: bool
    _test: bool

    def has_build(self):
        """Return whether build is in steps."""
        return self._build

    def is_build_only(self):
        """Return whether build is the only one in steps."""
        return self._build and not any((self._test, self._install))

    def has_install(self):
        """Return whether install is in steps."""
        return self._install

    def has_test(self):
        """Return whether test is in steps."""
        return self._test

    def is_test_only(self):
        """Return whether test is the only one in steps."""
        return self._test and not any((self._build, self._install))


def parse_steps(args: atest_arg_parser.AtestArgParser) -> Steps:
    """Return a Steps object.

    Args:
        args: an AtestArgParser object.

    Returns:
        A Steps object that stores the booleans of build, install and test.
    """
    # Implicitly run 'build', 'install' and 'test' when args.steps is None.
    if not args.steps:
        return Steps(True, True, True)
    build = constants.BUILD_STEP in args.steps
    test = constants.TEST_STEP in args.steps
    install = constants.INSTALL_STEP in args.steps
    if install and not test:
        logging.warning('Installing without the test step is currently not '
                        'supported; Atest will proceed with testing!')
        test = True
    return Steps(build, install, test)


def _get_args_from_config():
    """Get customized atest arguments from the config file.

    If the config file does not exist yet, atest will initialize an example
    config file for it without any effective options.

    Returns:
        A list of arguments read from the config file.
    """
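    # Illustrative example (hypothetical option values): a config file such as
    #     --verbose              # always show DEBUG logs
    #     --serial my_device_serial
    # yields ['--verbose', '--serial', 'my_device_serial']; lines that do not
    # start with '-' are ignored.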
    _config = Path(atest_utils.get_misc_dir()).joinpath('.atest', 'config')
    if not _config.parent.is_dir():
        _config.parent.mkdir(parents=True)
    args = []
    if not _config.is_file():
        with open(_config, 'w+', encoding='utf8') as cache:
            cache.write(constants.ATEST_EXAMPLE_ARGS)
        return args
    warning = 'Line {} contains {} and will be ignored.'
    print('\n{} {}'.format(
        atest_utils.colorize('Reading config:', constants.CYAN),
        atest_utils.colorize(_config, constants.YELLOW)))
    # pylint: disable=global-statement
    global HAS_IGNORED_ARGS
    with open(_config, 'r', encoding='utf8') as cache:
        for entry in cache.readlines():
            # Strip comments.
            arg_in_line = entry.partition('#')[0].strip()
            # Strip test name/path.
            if arg_in_line.startswith('-'):
                # Process arguments that contain whitespace.
                # e.g. ["--serial foo"] -> ["--serial", "foo"]
                if len(arg_in_line.split()) > 1:
                    # Warn about "--" to avoid messing up atest/tradefed commands.
                    if END_OF_OPTION in arg_in_line.split():
                        HAS_IGNORED_ARGS = True
                        print(warning.format(
                            atest_utils.colorize(arg_in_line, constants.YELLOW),
                            END_OF_OPTION))
                    args.extend(arg_in_line.split())
                else:
                    if END_OF_OPTION == arg_in_line:
                        HAS_IGNORED_ARGS = True
                        print(warning.format(
                            atest_utils.colorize(arg_in_line, constants.YELLOW),
                            END_OF_OPTION))
                    args.append(arg_in_line)
    return args

def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace class instance holding parsed args.
    """
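    # Illustrative example (hypothetical test name): for
    # ['MyModuleTest', '--', '--some-tf-arg', 'value'], everything after '--'
    # ends up (quoted) in args.custom_args and is passed through to the
    # test runner.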
    # Store everything after '--' in custom_args.
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        for arg in argv[custom_args_index+1:]:
            logging.debug('Quoting regex argument %s', arg)
            args.custom_args.append(atest_utils.quote(arg))
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If true display DEBUG level logs.
    """
    # Clear the handlers to prevent logging.basicConfig from being called twice.
    logging.getLogger('').handlers = []
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG,
                            format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO,
                            format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = list(filter(None, [x for x in EXPECTED_VARS if not os.environ.get(x)]))
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
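    # Illustrative example: a run started at 2023-01-01 12:00:00 would create
    # something like <ATEST_RESULT_ROOT>/20230101_120000_<random suffix>
    # (the suffix is added by tempfile.mkdtemp).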
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        os.makedirs(constants.ATEST_RESULT_ROOT)
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
    test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
                                       dir=constants.ATEST_RESULT_ROOT)
    return test_result_dir


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: arg parsed object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    if not parse_steps(args).has_install():
        extra_args[constants.DISABLE_INSTALL] = None
    # The key and its value of the dict can be called via:
    # if args.aaaa:
    #     extra_args[constants.AAAA] = args.aaaa
    arg_maps = {'all_abi': constants.ALL_ABI,
                'annotation_filter': constants.ANNOTATION_FILTER,
                'bazel_arg': constants.BAZEL_ARG,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'experimental_coverage': constants.COVERAGE,
                'custom_args': constants.CUSTOM_ARGS,
                'device_only': constants.DEVICE_ONLY,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'disable_upload_result': constants.DISABLE_UPLOAD_RESULT,
                'dry_run': constants.DRY_RUN,
                'enable_device_preparer': constants.ENABLE_DEVICE_PREPARER,
                'flakes_info': constants.FLAKES_INFO,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'no_enable_root': constants.NO_ENABLE_ROOT,
                'request_upload_result': constants.REQUEST_UPLOAD_RESULT,
                'bazel_mode_features': constants.BAZEL_MODE_FEATURES,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'auto_ld_library_path': constants.LD_LIBRARY_PATH,
                'sharding': constants.SHARDING,
                'test_filter': constants.TEST_FILTER,
                'test_timeout': constants.TEST_TIMEOUT,
                'tf_early_device_release': constants.TF_EARLY_DEVICE_RELEASE,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE,
                'verbose': constants.VERBOSE,
                'verify_env_variable': constants.VERIFY_ENV_VARIABLE}
    not_match = [k for k in arg_maps if k not in vars(args)]
    if not_match:
        raise AttributeError('%s object has no attribute %s'
                             % (type(args).__name__, not_match))
    extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
                       if arg_maps.get(k) and v})
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate that the test execution modes are not in conflict.

    Exit the program with an error code if both device-only and host-only
    tests are present. If there is no conflict and the tests are host-side,
    set args.host=True.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = {x.get_supported_exec_mode() for x in test_infos}
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        device_only_tests = [x.test_name for x in test_infos
                             if x.get_supported_exec_mode() == constants.DEVICE_TEST]
        err_msg = ('Specified --host, but the following tests are device-only:\n  ' +
                   '\n  '.join(sorted(device_only_tests)) + '\nPlease remove the option '
                   'when running device-only tests.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(ExitCode.ERROR, logs=err_msg)
        sys.exit(ExitCode.ERROR)
    # 'adb' may not be available right after a fresh repo sync or on a clean
    # build; `adb devices` is run again after the build step.
    if at.has_command('adb'):
        _validate_adb_devices(args, test_infos)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        logging.debug('Appending "--host" for a deviceless test...')
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)

def _validate_adb_devices(args, test_infos):
    """Validate the availability of connected devices via the adb command.

    Exit the program with an error code if a device test is requested but no
    device is connected.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
    """
    # No need to check device availability if the user does not intend to run
    # the test step.
    if not parse_steps(args).has_test():
        return
    if args.no_checking_device:
        return
    all_device_modes = {x.get_supported_exec_mode() for x in test_infos}
    device_tests = [x.test_name for x in test_infos
        if x.get_supported_exec_mode() != constants.DEVICELESS_TEST]
    # Only block testing if it is a device test.
    if constants.DEVICE_TEST in all_device_modes:
        if (not any((args.host, args.start_avd, args.acloud_create))
            and not atest_utils.get_adb_devices()):
            err_msg = (f'Stop running test(s): '
                       f'{", ".join(device_tests)} require a device.')
            atest_utils.colorful_print(err_msg, constants.RED)
            logging.debug(atest_utils.colorize(
                constants.REQUIRE_DEVICES_MSG, constants.RED))
            metrics_utils.send_exit_event(ExitCode.DEVICE_NOT_FOUND,
                                          logs=err_msg)
            sys.exit(ExitCode.DEVICE_NOT_FOUND)


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate that the test execution modes are not in conflict.

    Split the tests in Test Mapping files into two groups: device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is found to be host-only.
    For deviceless tests, exit the program if any test is found to be device-only.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if atest command is set to only run host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used by detect_regression to skip the test if just running
    regression detection.

    Args:
        args: An argparse.Namespace object.

    Returns:
        True if there are tests to run, false otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


# pylint: disable=no-else-return
# This method is going to be deprecated; ignore pylint for now.
def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for'
                          ' regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or'
                          ' --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline'
                              ', either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.annotation_filter, '--annotation-filter'),
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(atest_utils.colorize(
                OPTION_NOT_FOR_TEST_MAPPING.format(arg), constants.RED))
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(ExitCode.ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(ExitCode.ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(ExitCode.ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(ExitCode.ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print out the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of the module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = collections.OrderedDict()
    title_mapping[constants.MODULE_COMPATIBILITY_SUITES] = 'Compatibility suite'
    title_mapping[constants.MODULE_PATH] = 'Source code path'
    title_mapping[constants.MODULE_INSTALLED] = 'Installed path'
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return ExitCode.SUCCESS
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        sorted_build_targets = sorted(list(test_info.build_targets))
        print("\t\t{}".format(", ".join(sorted_build_targets)))
        for build_target in sorted_build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return ExitCode.SUCCESS


def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
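    # Illustrative example (hypothetical names): given infos for
    # HostUnitTestA(host=True) and DeviceTestB(host=False), this returns
    # ({DeviceTestB}, {HostUnitTestA}).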
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args, mod_info):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.
        mod_info: ModuleInfo object.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # The `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    elif extra_args.get(constants.DEVICE_ONLY):
        test_runs = [(device_test_infos, extra_args, DEVICE_TESTS)]
        atest_utils.colorful_print(
            'Option `--device-only` specified. Skip running deviceless tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, mod_info, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = ExitCode.SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            atest_utils.delimiter('=', 30, prenl=1), constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code

def _dry_run(results_dir, extra_args, test_infos, mod_info):
    """Only print the commands of the target tests rather than actually running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.
        mod_info: ModuleInfo object.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(
            test_infos):
        runner = test_runner(results_dir, mod_info=mod_info,
                             extra_args=extra_args)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds

def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print(atest_utils.delimiter('-'))
    for module in sorted(testable_modules):
        print('\t%s' % module)

def _is_inside_android_root():
    """Identify whether the cwd is inside of the Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
    return build_top in os.getcwd()

def _non_action_validator(args):
    """Handle non-action arguments such as --version, --help, --history,
    --latest_result, etc.

    Args:
        args: An argparse.Namespace object.
    """
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        sys.exit(ExitCode.OUTSIDE_ROOT)
    if args.version:
        print(atest_utils.get_atest_version())
        sys.exit(ExitCode.SUCCESS)
    if args.help:
        atest_arg_parser.print_epilog_text()
        sys.exit(ExitCode.SUCCESS)
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        sys.exit(ExitCode.SUCCESS)
    if args.latest_result:
        atest_execution_info.print_test_result_by_path(
            constants.LATEST_RESULT_FILE)
        sys.exit(ExitCode.SUCCESS)
    # TODO(b/131879842): remove the below statement after the arguments are fully removed.
    if any((args.detect_regression,
            args.generate_baseline,
            args.generate_new_metrics)):
        stop_msg = ('Please STOP using the arguments below -- they are obsolete '
                    'and will be removed in the near future:\n'
                    '\t--detect-regression\n'
                    '\t--generate-baseline\n'
                    '\t--generate-new-metrics\n')
        msg = ('Please use the following arguments instead:\n'
               '\t--iterations\n'
               '\t--rerun-until-failure\n'
               '\t--retry-any-failure\n')
        atest_utils.colorful_print(stop_msg, constants.RED)
        atest_utils.colorful_print(msg, constants.CYAN)

def _dry_run_validator(args, results_dir, extra_args, test_infos, mod_info):
    """Process the --dry-run argument.

    Args:
        args: An argparse.Namespace class instance holding parsed args.
        results_dir: A string path of the results dir.
        extra_args: A dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.
        mod_info: ModuleInfo object.
    Returns:
        Exit code.
    """
    dry_run_cmds = _dry_run(results_dir, extra_args, test_infos, mod_info)
    if args.generate_runner_cmd:
        dry_run_cmd_str = ' '.join(dry_run_cmds)
        tests_str = ' '.join(args.tests)
        test_commands = atest_utils.gen_runner_cmd_to_file(tests_str,
                                                           dry_run_cmd_str)
        print("Add command %s to file %s" % (
            atest_utils.colorize(test_commands, constants.GREEN),
            atest_utils.colorize(constants.RUNNER_COMMAND_PATH,
                                 constants.GREEN)))
    else:
        test_commands = atest_utils.get_verify_key(args.tests, extra_args)
    if args.verify_cmd_mapping:
        try:
            atest_utils.handle_test_runner_cmd(test_commands,
                                               dry_run_cmds,
                                               do_verification=True)
        except atest_error.DryRunVerificationError as e:
            atest_utils.colorful_print(str(e), constants.RED)
            return ExitCode.VERIFY_FAILURE
    if args.update_cmd_mapping:
        atest_utils.handle_test_runner_cmd(test_commands,
                                           dry_run_cmds)
    return ExitCode.SUCCESS

def _exclude_modules_in_targets(build_targets):
    """Method that excludes MODULES-IN-* targets.

    Args:
        build_targets: A set of build targets.

    Returns:
        A set of build targets that excludes MODULES-IN-*.
    """
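    # Illustrative example (hypothetical target names):
    #   {'MODULES-IN-some-dir', 'hello_world_test'} -> {'hello_world_test'}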
    shrank_build_targets = build_targets.copy()
    logging.debug('Will exclude all "%s*" from the build targets.',
                  constants.MODULES_IN)
    for target in build_targets:
        if target.startswith(constants.MODULES_IN):
            logging.debug('Ignore %s.', target)
            shrank_build_targets.remove(target)
    return shrank_build_targets

# pylint: disable=protected-access
def need_rebuild_module_info(args: atest_arg_parser.AtestArgParser) -> bool:
    """Method that tells whether we need to rebuild module-info.json or not.

    Args:
        args: an AtestArgParser object.

        +-----------------+
        | Explicitly pass |  yes
        |    '--test'     +-------> False (won't rebuild)
        +--------+--------+
                 | no
                 V
        +-------------------------+
        | Explicitly pass         |  yes
        | '--rebuild-module-info' +-------> True (forcibly rebuild)
        +--------+----------------+
                 | no
                 V
        +-------------------+
        |    Build files    |  no
        | integrity is good +-------> True (smartly rebuild)
        +--------+----------+
                 | yes
                 V
               False (won't rebuild)

    Returns:
        True for a forced/smart rebuild, otherwise False without rebuilding.
    """
    if not parse_steps(args).has_build():
        logging.debug('"--test" mode detected, will not rebuild module-info.')
        return False
    if args.rebuild_module_info:
        msg = (f'`{constants.REBUILD_MODULE_INFO_FLAG}` is no longer needed '
               f'since Atest can smartly rebuild {module_info._MODULE_INFO} '
               r'only when needed.')
        atest_utils.colorful_print(msg, constants.YELLOW)
        return True
    logging.debug('Examining the consistency of build files...')
    if not atest_utils.build_files_integrity_is_ok():
        logging.debug('Found that build files were changed.')
        return True
    return False

def need_run_index_targets(args, extra_args):
    """Method that determines whether Atest needs to run index_targets or not.

    There are 3 conditions under which Atest does not run index_targets():
    1. dry-run flags were found.
    2. VERIFY_ENV_VARIABLE was found in extra_args.
    3. --test flag was found.

    Args:
        args: An argparse.Namespace object.
        extra_args: A dict of extra args for test runners to utilize.

    Returns:
        True when none of the above conditions were found.
    """
    ignore_args = (args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)
    if any(ignore_args):
        return False
    if extra_args.get(constants.VERIFY_ENV_VARIABLE, False):
        return False
    if not parse_steps(args).has_build():
        return False
    return True

def _all_tests_are_bazel_buildable(
    roboleaf_tests: Dict[str, TestInfo],
    tests: List[str]) -> bool:
    """Method that determines whether all tests have been fully converted to
    bazel mode (roboleaf).

    If all tests are fully converted, then indexing, generating mod-info, and
    generating the atest bazel workspace can be skipped since dependencies are
    already mapped with `b`.

    Args:
        roboleaf_tests: A dictionary keyed by test name of roboleaf tests.
        tests: A list of test names.

    Returns:
        True when all given tests are roboleaf (bazel buildable) tests.
    """
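    # Illustrative example (hypothetical names): roboleaf_tests =
    # {'foo_test': TestInfo(...)} and tests = ['foo_test'] -> True;
    # tests = ['foo_test', 'bar_test'] -> False.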
    return roboleaf_tests and set(tests) == set(roboleaf_tests)

def perm_consistency_metrics(test_infos, mod_info, args):
    """Collect inconsistencies between the preparer and device root permission.

    Args:
        test_infos: A list of TestInfo objects.
        mod_info: ModuleInfo obj.
        args: An argparse.Namespace class instance holding parsed args.
    """
    try:
        # Whether the device has root permission.
        adb_root = atest_utils.is_adb_root(args)
        logging.debug('is_adb_root: %s', adb_root)
        for test_info in test_infos:
            config_path, _ = test_finder_utils.get_test_config_and_srcs(
                test_info, mod_info)
            atest_utils.perm_metrics(config_path, adb_root)
    # pylint: disable=broad-except
    except Exception as err:
        logging.debug('perm_consistency_metrics raised exception: %s', err)
        return


def set_build_output_mode(mode: atest_utils.BuildOutputMode):
    """Update the environment variable dict according to args.build_output."""
    # Changing this variable does not retrigger builds.
    atest_utils.update_build_env(
        {'ANDROID_QUIET_BUILD': 'true',
         # (b/271654778) Show the reasons why the ninja file was regenerated.
         'SOONG_UI_NINJA_ARGS': '-d explain',
         'BUILD_OUTPUT_MODE': mode.value})


def get_device_count_config(test_infos, mod_info):
    """Get the number of desired devices from the test config.

    Args:
        test_infos: A set of TestInfo instances.
        mod_info: ModuleInfo object.

    Returns: the count of devices in the test config. If there is more than
             one config, return the maximum.
    """
    max_count = 0
    for tinfo in test_infos:
        test_config, _ = test_finder_utils.get_test_config_and_srcs(
            tinfo, mod_info)
        if test_config:
            devices = atest_utils.get_config_device(test_config)
            if devices:
                max_count = max(len(devices), max_count)
    return max_count


def _is_auto_shard_test(test_infos):
    """Determine whether the given tests are in the shardable test list.

    Args:
        test_infos: TestInfo objects.

    Returns:
        True if a test is in the auto-shardable list.
    """
    shardable_tests = atest_utils.get_local_auto_shardable_tests()
    for test_info in test_infos:
        if test_info.test_name in shardable_tests:
            return True
    return False


# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argparse.Namespace class instance holding parsed args.

    Returns:
        Exit code.
    """
    _begin_time = time.time()

    # Sets coverage environment variables.
    if args.experimental_coverage:
        atest_utils.update_build_env(coverage.build_env_vars())
    set_build_output_mode(args.build_output)

    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    os_pyver = (f'{platform.platform()}:{platform.python_version()}/'
                f'{atest_utils.get_manifest_branch(True)}:'
                f'{atest_utils.get_atest_version()}')
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=os_pyver)
    _non_action_validator(args)

    proc_acloud, report_file = None, None
    if any((args.acloud_create, args.start_avd)):
        proc_acloud, report_file = at.acloud_create_validator(results_dir, args)
    is_clean = not os.path.exists(
        os.environ.get(constants.ANDROID_PRODUCT_OUT, ''))
    extra_args = get_extra_args(args)
    verify_env_variables = extra_args.get(constants.VERIFY_ENV_VARIABLE, False)

    # Gather roboleaf tests now to see if we can skip mod info generation.
    mod_info = module_info.ModuleInfo(no_generate=True)
    if args.roboleaf_mode != roboleaf_test_runner.BazelBuildMode.OFF:
        mod_info.roboleaf_tests = roboleaf_test_runner.RoboleafTestRunner(
            results_dir).roboleaf_eligible_tests(
                args.roboleaf_mode,
                args.tests)
    all_tests_are_bazel_buildable = _all_tests_are_bazel_buildable(
                                mod_info.roboleaf_tests,
                                args.tests)

    # Run Test Mapping or coverage by no-bazel-mode.
    if atest_utils.is_test_mapping(args) or args.experimental_coverage:
        atest_utils.colorful_print('Not running using bazel-mode.', constants.YELLOW)
        args.bazel_mode = False

    proc_idx = None
    if not all_tests_are_bazel_buildable:
        # Do not index targets while the users intend to dry-run tests.
        if need_run_index_targets(args, extra_args):
            proc_idx = atest_utils.run_multi_proc(at.index_targets)
        smart_rebuild = need_rebuild_module_info(args)

        mod_start = time.time()
        mod_info = module_info.ModuleInfo(force_build=smart_rebuild)
        mod_stop = time.time() - mod_start
        metrics.LocalDetectEvent(detect_type=DetectType.MODULE_INFO_INIT_MS,
                                 result=int(mod_stop * 1000))
        atest_utils.run_multi_proc(func=mod_info._save_module_info_checksum)
        atest_utils.run_multi_proc(
            func=atest_utils.generate_buildfiles_checksum,
            args=[mod_info.module_index.parent])

        if args.bazel_mode:
            start = time.time()
            bazel_mode.generate_bazel_workspace(
                mod_info,
                enabled_features=set(args.bazel_mode_features or []))
            metrics.LocalDetectEvent(
                detect_type=DetectType.BAZEL_WORKSPACE_GENERATE_TIME,
                result=int(time.time() - start))

    translator = cli_translator.CLITranslator(
        mod_info=mod_info,
        print_cache_msg=not args.clear_cache,
        bazel_mode_enabled=args.bazel_mode,
        host=args.host,
        bazel_mode_features=args.bazel_mode_features)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return ExitCode.SUCCESS
    test_infos = set()
    dry_run_args = (args.update_cmd_mapping, args.verify_cmd_mapping,
                    args.dry_run, args.generate_runner_cmd)
    if _will_run_tests(args):
        # (b/242567487) index_targets may finish after cli_translator; to
        # mitigate the overhead, main waits for it to finish when no index
        # files are available (e.g. after a fresh repo sync).
        if proc_idx and not atest_utils.has_index_files():
            proc_idx.join()
        find_start = time.time()
        test_infos = translator.translate(args)
        given_amount = len(args.serial) if args.serial else 0
        required_amount = get_device_count_config(test_infos, mod_info)
        args.device_count_config = required_amount
        # Only check when both given_amount and required_amount are non-zero.
        if all((given_amount, required_amount)):
            # Based on TF rules, given_amount can be greater than or equal to
            # required_amount.
            if required_amount > given_amount:
                atest_utils.colorful_print(
                    f'The test requires {required_amount} devices, '
                    f'but {given_amount} were given.',
                    constants.RED)
                return 0

        find_duration = time.time() - find_start
        if not test_infos:
            return ExitCode.TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            if not (any(dry_run_args) or verify_env_variables):
                _validate_exec_mode(args, test_infos)
                # _validate_exec_mode appends --host automatically for pure
                # host-side tests, so re-parsing extra_args is a must.
                extra_args = get_extra_args(args)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
        # Detect auto sharding and trigger creating AVDs.
        if args.auto_sharding and _is_auto_shard_test(test_infos):
            extra_args.update({constants.SHARDING: constants.SHARD_NUM})
            if not (any(dry_run_args) or verify_env_variables):
                # TODO: check existing devices.
                args.acloud_create = [f'--num-instances={constants.SHARD_NUM}']
                proc_acloud, report_file = at.acloud_create_validator(
                    results_dir, args)

    # TODO: change to another approach that puts constants.CUSTOM_ARGS at the
    # end of the command to make sure that customized args can override
    # default options.
    # For TEST_MAPPING, set timeout to 600000ms.
    custom_timeout = False
    for custom_args in args.custom_args:
        if '-timeout' in custom_args:
            custom_timeout = True
    if args.test_timeout is None and not custom_timeout:
        if is_from_test_mapping(test_infos):
            extra_args.update({constants.TEST_TIMEOUT: 600000})
            logging.debug(
                'Set test timeout to %sms to align it in TEST_MAPPING.',
                extra_args.get(constants.TEST_TIMEOUT))

    if args.info:
        return _print_test_info(mod_info, test_infos)

    build_targets = test_runner_handler.get_test_runner_reqs(
        mod_info, test_infos, extra_args=extra_args)
    # Remove MODULE-IN-* from build targets by default.
    if not args.use_modules_in:
        build_targets = _exclude_modules_in_targets(build_targets)

    if any(dry_run_args):
        if not verify_env_variables:
            return _dry_run_validator(args, results_dir, extra_args, test_infos,
                                      mod_info)
    if verify_env_variables:
        # Check environment variables.
        verify_key = atest_utils.get_verify_key(args.tests, extra_args)
        if not atest_utils.handle_test_env_var(verify_key, pre_verify=True):
            print('No environment variables need to verify.')
            return 0
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs([]))

    steps = parse_steps(args)
    if build_targets and steps.has_build():
        if args.experimental_coverage:
            build_targets.add('jacoco_to_lcov_converter')

        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)

        build_start = time.time()
        success = atest_utils.build(build_targets)
        build_duration = time.time() - build_start
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(build_duration),
            success=success,
            targets=build_targets)
        metrics.LocalDetectEvent(
            detect_type=DetectType.BUILD_TIME_PER_TARGET,
            result=int(build_duration/len(build_targets))
        )
        rebuild_module_info = DetectType.NOT_REBUILD_MODULE_INFO
        if is_clean:
            rebuild_module_info = DetectType.CLEAN_BUILD
        elif args.rebuild_module_info:
            rebuild_module_info = DetectType.REBUILD_MODULE_INFO
        elif smart_rebuild:
            rebuild_module_info = DetectType.SMART_REBUILD_MODULE_INFO
        metrics.LocalDetectEvent(
            detect_type=rebuild_module_info,
            result=int(build_duration))
        if not success:
            return ExitCode.BUILD_FAILURE
        if proc_acloud:
            proc_acloud.join()
            status = at.probe_acloud_status(
                report_file, find_duration + build_duration)
            if status != 0:
                return status
        # After the build step the 'adb' command is available; stop before
        # forwarding to Tradefed if the tests require a device that is not
        # connected.
        _validate_adb_devices(args, test_infos)

    tests_exit_code = ExitCode.SUCCESS
    test_start = time.time()
    if steps.has_test():
        # Only send the duration to metrics when there is no --build.
        if not steps.has_build():
            _init_and_find = time.time() - _begin_time
            logging.debug('Initiation and finding tests took %ss', _init_and_find)
            metrics.LocalDetectEvent(
                detect_type=DetectType.INIT_AND_FIND_MS,
                result=int(_init_and_find*1000))
        perm_consistency_metrics(test_infos, mod_info, args)
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args, mod_info)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args, mod_info)
        if args.experimental_coverage:
            coverage.generate_coverage_report(results_dir, test_infos, mod_info)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter(
            collect_only=extra_args.get(constants.COLLECT_TESTS_ONLY))
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
    if tests_exit_code != ExitCode.SUCCESS:
        tests_exit_code = ExitCode.TEST_FAILURE

    return tests_exit_code

if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    if END_OF_OPTION in sys.argv:
        end_position = sys.argv.index(END_OF_OPTION)
        final_args = [*sys.argv[1:end_position],
                      *_get_args_from_config(),
                      *sys.argv[end_position:]]
    else:
        final_args = [*sys.argv[1:], *_get_args_from_config()]
    if final_args != sys.argv[1:]:
        print('The actual cmd will be: \n\t{}\n'.format(
            atest_utils.colorize("atest " + " ".join(final_args),
                                 constants.CYAN)))
        metrics.LocalDetectEvent(
            detect_type=DetectType.ATEST_CONFIG, result=1)
        if HAS_IGNORED_ARGS:
            atest_utils.colorful_print(
                'Please correct the config and try again.', constants.YELLOW)
            sys.exit(ExitCode.EXIT_BEFORE_MAIN)
    else:
        metrics.LocalDetectEvent(
            detect_type=DetectType.ATEST_CONFIG, result=0)
    atest_configs.GLOBAL_ARGS = _parse_args(final_args)
    with atest_execution_info.AtestExecutionInfo(
            final_args, RESULTS_DIR,
            atest_configs.GLOBAL_ARGS) as result_file:
        if not atest_configs.GLOBAL_ARGS.no_metrics:
            metrics_utils.print_data_collection_notice()
            USER_FROM_TOOL = os.getenv(constants.USER_FROM_TOOL, '')
            if USER_FROM_TOOL == '':
                metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
            else:
                metrics_base.MetricsBase.tool_name = USER_FROM_TOOL
            USER_FROM_SUB_TOOL = os.getenv(constants.USER_FROM_SUB_TOOL, '')
            if USER_FROM_SUB_TOOL == '':
                metrics_base.MetricsBase.sub_tool_name = constants.SUB_TOOL_NAME
            else:
                metrics_base.MetricsBase.sub_tool_name = USER_FROM_SUB_TOOL

        EXIT_CODE = main(final_args, RESULTS_DIR, atest_configs.GLOBAL_ARGS)
        DETECTOR = bug_detector.BugDetector(final_args, EXIT_CODE)
        if EXIT_CODE not in EXIT_CODES_BEFORE_TEST:
            metrics.LocalDetectEvent(
                detect_type=DetectType.BUG_DETECTED,
                result=DETECTOR.caught_result)
            if result_file:
                print("Run 'atest --history' to review test result history.")

    # Only ask internal Google users to do this survey.
    if metrics_base.get_user_type() == metrics_base.INTERNAL_USER:
        # The bazel_mode value will only be false if the user applies --no-bazel-mode.
        if not atest_configs.GLOBAL_ARGS.bazel_mode:
            MESSAGE = ('\nDear `--no-bazel-mode` users,\n'
                       'We are conducting a survey to understand why you are '
                       'still using `--no-bazel-mode`. The survey should '
                       'take less than 3 minutes and your responses will be '
                       'kept confidential and will only be used to improve '
                       'our understanding of the situation. Please click on '
                       'the link below to begin the survey:\n\n'
                       'http://go/atest-no-bazel-survey\n\n'
                       'Thanks for your time and feedback.\n\n'
                       'Sincerely,\n'
                       'The ATest Team')

            print(atest_utils.colorize(MESSAGE, constants.BLACK, bp_color=constants.CYAN))

    sys.exit(EXIT_CODE)