#!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test type that can be run by TradeFederation.
"""

# pylint: disable=line-too-long

from __future__ import print_function

import collections
import logging
import os
import platform
import sys
import tempfile
import time

from multiprocessing import Process

import atest_arg_parser
import atest_configs
import atest_error
import atest_execution_info
import atest_utils
import bug_detector
import cli_translator
import constants
import module_info
import result_reporter
import test_runner_handler

from metrics import metrics
from metrics import metrics_base
from metrics import metrics_utils
from test_runners import regression_test_runner
from tools import atest_tools as at

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
# Tasks that must run at build time but cannot be built by soong
# (e.g. subprocesses that invoke host commands).
ACLOUD_CREATE = at.acloud_create
INDEX_TARGETS = at.index_targets


def _run_multi_proc(func, *args, **kwargs):
    """Start a process with multiprocessing and return the Process object.

    Args:
        func: The callable to run as the target of the new process.
        args/kwargs: see the documentation of multiprocessing.Process:
        https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

    Returns:
        multiprocessing.Process object.
    """

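    # Note: keyword arguments such as args=, kwargs= and daemon= are forwarded
    # directly to multiprocessing.Process; see the callers in this file for
    # usage examples.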
    proc = Process(target=func, *args, **kwargs)
    proc.start()
    return proc


def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace class instance holding parsed args.
    """
    # Store everything after '--' in custom_args.
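    # Illustrative example: for argv of ['HelloWorldTests', '--', '--abi', 'x86'],
    # only ['HelloWorldTests'] is handed to the parser, while '--abi' and 'x86'
    # are appended (via atest_utils.quote) to args.custom_args.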
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        for arg in argv[custom_args_index+1:]:
            logging.debug('Quoting regex argument %s', arg)
            args.custom_args.append(atest_utils.quote(arg))
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If True, display DEBUG level logs.
    """
    # Clear the handlers to prevent logging.basicConfig from being called twice.
    logging.getLogger('').handlers = []
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG,
                            format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO,
                            format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        os.makedirs(constants.ATEST_RESULT_ROOT)
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
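    # The result dir name is the timestamp prefix plus the random suffix that
    # mkdtemp appends, e.g. 20230101_123456_a1b2c3 (example name is
    # illustrative only).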
    test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
                                       dir=constants.ATEST_RESULT_ROOT)
    return test_result_dir


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # Each entry in arg_maps below is the equivalent of:
    # if args.aaaa:
    #     extra_args[constants.AAAA] = args.aaaa
    arg_maps = {'all_abi': constants.ALL_ABI,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'custom_args': constants.CUSTOM_ARGS,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'dry_run': constants.DRY_RUN,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'sharding': constants.SHARDING,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE,
                'flakes_info': constants.FLAKES_INFO,
                'tf_early_device_release': constants.TF_EARLY_DEVICE_RELEASE,
                'request_upload_result': constants.REQUEST_UPLOAD_RESULT}
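    # Guard against arg_maps drifting out of sync with the argument parser:
    # every key above is expected to exist as an attribute on args.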
    not_match = [k for k in arg_maps if k not in vars(args)]
    if not_match:
        raise AttributeError('%s object has no attribute %s'
                             % (type(args).__name__, not_match))
    extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
                       if arg_maps.get(k) and v})
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate that the test execution modes are not in conflict.

    Exit the program with an error code if both device-only and host-only
    tests are present. If there is no conflict and host-side tests are
    present, set args.host to True.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        device_only_tests = [x.test_name for x in test_infos
                             if x.get_supported_exec_mode() == constants.DEVICE_TEST]
        err_msg = ('Specified --host, but the following tests are device-only:\n  ' +
                   '\n  '.join(sorted(device_only_tests)) + '\nPlease remove the option '
                   'when running device-only tests.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate that the test execution modes are not in conflict.

    Split the tests in TEST_MAPPING files into two groups, device tests and
    deviceless tests running on host, and validate their host settings.
    For device tests, exit the program if any test is found to be host-only.
    For deviceless tests, exit the program if any test is found to be
    device-only.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if atest command is set to only run host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used by detect_regression to skip the test if just running
    regression detection.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, False otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


# pylint: disable=no-else-return
# This method is slated for removal, so ignore pylint for now.
def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for'
                          ' regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or'
                          ' --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline'
                              ', either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of the module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = collections.OrderedDict()
    title_mapping[constants.MODULE_COMPATIBILITY_SUITES] = 'Compatibility suite'
    title_mapping[constants.MODULE_PATH] = 'Source code path'
    title_mapping[constants.MODULE_INSTALLED] = 'Installed path'
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always returns EXIT_CODE_SUCCESS.
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        sorted_build_targets = sorted(list(test_info.build_targets))
        print("\t\t{}".format(", ".join(sorted_build_targets)))
        for build_target in sorted_build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS


def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args, mod_info):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.
        mod_info: ModuleInfo object.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, mod_info, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
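    # Exit codes are OR'ed together, so any failing group leaves the overall
    # exit code non-zero.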
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            atest_utils.delimiter('=', 30, prenl=1), constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code


def _dry_run(results_dir, extra_args, test_infos, mod_info):
    """Only print the commands for the target tests rather than actually running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.
        mod_info: ModuleInfo object.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = test_runner(results_dir, module_info=mod_info)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds

def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print(atest_utils.delimiter('-'))
    for module in sorted(testable_modules):
        print('\t%s' % module)

def _is_inside_android_root():
    """Identify whether the cwd is inside of Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
    return build_top in os.getcwd()

def _non_action_validator(args):
    """Method for non-action arguments such as --version, --help, --history,
    --latest_result, etc.

    Args:
        args: An argparse.Namespace class instance holding parsed args.
    """
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        sys.exit(constants.EXIT_CODE_OUTSIDE_ROOT)
    if args.version:
        if os.path.isfile(constants.VERSION_FILE):
            with open(constants.VERSION_FILE) as version_file:
                print(version_file.read())
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.help:
        atest_arg_parser.print_epilog_text()
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.latest_result:
        atest_execution_info.print_test_result_by_path(
            constants.LATEST_RESULT_FILE)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    # TODO(b/131879842): remove the statement below after these args are fully
    # removed.
    if any((args.detect_regression,
            args.generate_baseline,
            args.generate_new_metrics)):
        stop_msg = ('Please STOP using the arguments below -- they are obsolete '
                    'and will be removed in the near future:\n'
                    '\t--detect-regression\n'
                    '\t--generate-baseline\n'
                    '\t--generate-new-metrics\n')
        msg = ('Please use the arguments below instead:\n'
               '\t--iterations\n'
               '\t--rerun-until-failure\n'
               '\t--retry-any-failure\n')
        atest_utils.colorful_print(stop_msg, constants.RED)
        atest_utils.colorful_print(msg, constants.CYAN)

def _dry_run_validator(args, results_dir, extra_args, test_infos, mod_info):
    """Method which processes the --dry-run argument.

    Args:
        args: An argparse.Namespace class instance holding parsed args.
        results_dir: A string path of the results dir.
        extra_args: A dict of extra args for test runners to utilize.
        test_infos: A list of test_info.
        mod_info: ModuleInfo object.

    Returns:
        Exit code.
    """
    args.tests.sort()
    dry_run_cmds = _dry_run(results_dir, extra_args, test_infos, mod_info)
    if args.verify_cmd_mapping:
        try:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds,
                                               do_verification=True)
        except atest_error.DryRunVerificationError as e:
            atest_utils.colorful_print(str(e), constants.RED)
            return constants.EXIT_CODE_VERIFY_FAILURE
    if args.update_cmd_mapping:
        atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                           dry_run_cmds)
    return constants.EXIT_CODE_SUCCESS

def _exclude_modules_in_targets(build_targets):
    """Method that excludes MODULES-IN-* targets.

    Args:
        build_targets: A set of build targets.

    Returns:
        A set of build targets that excludes MODULES-IN-*.
    """
    shrank_build_targets = build_targets.copy()
    logging.debug('Will exclude all "%s*" from the build targets.',
                  constants.MODULES_IN)
    for target in build_targets:
        if target.startswith(constants.MODULES_IN):
            logging.debug('Ignore %s.', target)
            shrank_build_targets.remove(target)
    return shrank_build_targets

def acloud_create_validator(results_dir, args):
    """Check the lunch'd target before running 'acloud create'.

    Args:
        results_dir: A string of the results directory.
        args: parsed args object.

    Returns:
        If the target is valid:
            A tuple of (multiprocessing.Process,
                        string of report file path)
        else:
            None, None
    """
    if not any((args.acloud_create, args.start_avd)):
        return None, None
    if args.start_avd:
        args.acloud_create = ['--num=1']
    acloud_args = ' '.join(args.acloud_create)
    target = os.getenv('TARGET_PRODUCT', "")
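    # AVDs are only created automatically for Cuttlefish x86 targets
    # (TARGET_PRODUCT containing 'cf_x86'); other targets skip acloud create.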
    if 'cf_x86' in target:
        report_file = at.get_report_file(results_dir, acloud_args)
        acloud_proc = _run_multi_proc(
            func=ACLOUD_CREATE,
            args=[report_file],
            kwargs={'args': acloud_args,
                    'no_metrics_notice': args.no_metrics})
        return acloud_proc, report_file
    atest_utils.colorful_print(
        '{} is not cf_x86 family; will not create any AVD.'.format(target),
        constants.RED)
    return None, None

# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argparse.Namespace class instance holding parsed args.

    Returns:
        Exit code.
    """
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=os_pyver)
    _non_action_validator(args)
    proc_acloud, report_file = acloud_create_validator(results_dir, args)
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    if args.rebuild_module_info:
        proc_idx = _run_multi_proc(INDEX_TARGETS)
        proc_idx.join()
    translator = cli_translator.CLITranslator(
        module_info=mod_info,
        print_cache_msg=not args.clear_cache)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    build_targets = set()
    test_infos = set()
    if _will_run_tests(args):
        find_start = time.time()
        build_targets, test_infos = translator.translate(args)
        if args.no_modules_in:
            build_targets = _exclude_modules_in_targets(build_targets)
        find_duration = time.time() - find_start
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
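    # Add the build targets required by the selected test runners themselves.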
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if any((args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)):
        return _dry_run_validator(args, results_dir, extra_args, test_infos,
                                  mod_info)
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps is None if none of -b/-i/-t is set; otherwise it is the list
    # of requested steps.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps and not args.rebuild_module_info:
            # Run extra tasks along with the build step concurrently. Note that
            # atest won't index targets when only "-b" is given (without -t).
            proc_idx = _run_multi_proc(INDEX_TARGETS, daemon=True)
        # Add the module-info.json target to the list of build targets to keep
        # the file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose)
        build_duration = time.time() - build_start
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(build_duration),
            success=success,
            targets=build_targets)
        rebuild_module_info = constants.DETECT_TYPE_NOT_REBUILD_MODULE_INFO
        if args.rebuild_module_info:
            rebuild_module_info = constants.DETECT_TYPE_REBUILD_MODULE_INFO
        metrics.LocalDetectEvent(
            detect_type=rebuild_module_info,
            result=int(build_duration))
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
        # Always reload module-info after the build finishes.
        # TODO(b/178675689) Move it to a thread when running tests.
        mod_info.generate_atest_merged_dep_file()
        if proc_acloud:
            proc_acloud.join()
            status = at.probe_acloud_status(report_file)
            if status != 0:
                return status
            acloud_duration = at.get_acloud_duration(report_file)
            find_build_duration = find_duration + build_duration
            if find_build_duration - acloud_duration >= 0:
                # find+build took longer; acloud create time was saved.
                logging.debug('Saved acloud create time: %ss.',
                              acloud_duration)
                metrics.LocalDetectEvent(
                    detect_type=constants.DETECT_TYPE_ACLOUD_CREATE,
                    result=round(acloud_duration))
            else:
                # acloud create took longer; find+build time was saved.
                logging.debug('Saved Find and Build time: %ss.',
                              find_build_duration)
                metrics.LocalDetectEvent(
                    detect_type=constants.DETECT_TYPE_FIND_BUILD,
                    result=round(find_build_duration))
    elif constants.TEST_STEP not in steps:
        logging.warning('Install step without test step is currently not '
                        'supported; installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args, mod_info)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args, mod_info)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter(
            collect_only=extra_args.get(constants.COLLECT_TESTS_ONLY))
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code

if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    atest_configs.GLOBAL_ARGS = _parse_args(sys.argv[1:])
    with atest_execution_info.AtestExecutionInfo(
            sys.argv[1:], RESULTS_DIR,
            atest_configs.GLOBAL_ARGS) as result_file:
        if not atest_configs.GLOBAL_ARGS.no_metrics:
            atest_utils.print_data_collection_notice()
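            # Attribute metrics to the wrapping tool when USER_FROM_TOOL is
            # set; otherwise fall back to atest's own tool name (assumption:
            # constants.TOOL_NAME identifies atest itself).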
            USER_FROM_TOOL = os.getenv(constants.USER_FROM_TOOL, '')
            if USER_FROM_TOOL == '':
                metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
            else:
                metrics_base.MetricsBase.tool_name = USER_FROM_TOOL

        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, atest_configs.GLOBAL_ARGS)
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        if EXIT_CODE not in constants.EXIT_CODES_BEFORE_TEST:
            metrics.LocalDetectEvent(
                detect_type=constants.DETECT_TYPE_BUG_DETECTED,
                result=DETECTOR.caught_result)
            if result_file:
                print("Run 'atest --history' to review test result history.")
    sys.exit(EXIT_CODE)
