#!/usr/bin/env python
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test types that can be run by TradeFederation.
24"""
25
26from __future__ import print_function
27
28import logging
29import os
30import sys
31import tempfile
32import time
33import platform
34
35from multiprocessing import Process
36
37import atest_arg_parser
38import atest_error
39import atest_execution_info
40import atest_utils
41import bug_detector
42import cli_translator
43# pylint: disable=import-error
44import constants
45import module_info
46import result_reporter
47import test_runner_handler
48
49from metrics import metrics
50from metrics import metrics_base
51from metrics import metrics_utils
52from test_runners import regression_test_runner
53from tools import atest_tools
54
55EXPECTED_VARS = frozenset([
56    constants.ANDROID_BUILD_TOP,
57    'ANDROID_TARGET_OUT_TESTCASES',
58    constants.ANDROID_OUT])
59TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
60CUSTOM_ARG_FLAG = '--'
61OPTION_NOT_FOR_TEST_MAPPING = (
62    'Option `%s` does not work for running tests in TEST_MAPPING files')
63
64DEVICE_TESTS = 'tests that require device'
65HOST_TESTS = 'tests that do NOT require device'
66RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
67RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
68TEST_COUNT = 'test_count'
69TEST_TYPE = 'test_type'
# Tasks that must run at build time but cannot be built by Soong
# (e.g. subprocesses that invoke host commands).
EXTRA_TASKS = {
    'index-targets': atest_tools.index_targets
}


def _run_extra_tasks(join=False):
    """Execute EXTRA_TASKS with multiprocessing.

    Args:
        join: A boolean that indicates whether to wait for the subprocesses.
            True means the main process blocks until all subprocesses finish;
            False means the subprocesses are daemonized and are killed when
            the main process exits.
    """
    _running_procs = []
    for task in EXTRA_TASKS.values():
        proc = Process(target=task)
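        # Daemon processes are killed when the main process exits.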
        proc.daemon = not join
        proc.start()
        _running_procs.append(proc)
    if join:
        for proc in _running_procs:
            proc.join()


def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace instance holding parsed args.
    """
    # Store everything after '--' in custom_args.
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        args.custom_args = argv[custom_args_index+1:]
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If True, display DEBUG level logs.
    """
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        os.makedirs(constants.ATEST_RESULT_ROOT)
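    # Prefix the run directory with a timestamp so runs sort chronologically.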
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
    test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
                                       dir=constants.ATEST_RESULT_ROOT)
    return test_result_dir


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # The key and its value of the dict can be called via:
    # if args.aaaa:
    #     extra_args[constants.AAAA] = args.aaaa
    arg_maps = {'all_abi': constants.ALL_ABI,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'custom_args': constants.CUSTOM_ARGS,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'dry_run': constants.DRY_RUN,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'sharding': constants.SHARDING,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE}
    not_match = [k for k in arg_maps if k not in vars(args)]
    if not_match:
        raise AttributeError('%s object has no attribute %s'
                             %(type(args).__name__, not_match))
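    # Only forward args the user actually set; falsy values are skipped.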
    extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
                       if arg_maps.get(k) and v})
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate all test execution modes are not in conflict.

    Exit the program with an error code if both device-only and host-only
    tests are present. If there is no conflict and the tests are host tests,
    set args.host to True.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfos.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        err_msg = ('Test side and the --host option conflict. Please remove '
                   '--host if the test runs on the device side.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate all test execution modes are not in conflict.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is found for host-only.
    For deviceless tests, exit the program if any test is found for device-only.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfos.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if atest command is set to only run host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used to skip the test run when only regression detection is requested.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, false otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline, '
                              'either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of the module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = {
        constants.MODULE_PATH: "Source code path",
        constants.MODULE_INSTALLED: "Installed path",
        constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return EXIT_CODE_SUCCESS
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(test_info.build_targets)))
        for build_target in test_info.build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS


def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = set([info for info in test_infos if info.host])
    device_test_infos = set([info for info in test_infos if not info.host])
    return device_test_infos, host_test_infos


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
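        # OR the runner exit code with the summary so any failure is kept.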
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            '\n==============================', constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code


def _dry_run(results_dir, extra_args, test_infos):
    """Print the run commands for the target tests without running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = test_runner(results_dir)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds


def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print('-------')
    for module in sorted(testable_modules):
        print('\t%s' % module)


def _is_inside_android_root():
    """Identify whether the cwd is inside the Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
    return build_top in os.getcwd()


# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argparse.Namespace instance holding parsed args.

    Returns:
        Exit code.
    """
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=os_pyver)
    if args.version:
        if os.path.isfile(constants.VERSION_FILE):
            with open(constants.VERSION_FILE) as version_file:
                print(version_file.read())
        return constants.EXIT_CODE_SUCCESS
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        return constants.EXIT_CODE_OUTSIDE_ROOT
    if args.help:
        atest_arg_parser.print_epilog_text()
        return constants.EXIT_CODE_SUCCESS
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        return constants.EXIT_CODE_SUCCESS
    if args.latest_result:
        atest_execution_info.print_test_result_by_path(
            constants.LATEST_RESULT_FILE)
        return constants.EXIT_CODE_SUCCESS
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    if args.rebuild_module_info:
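        # Re-run the indexing tasks and wait for them before proceeding.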
        _run_extra_tasks(join=True)
    translator = cli_translator.CLITranslator(module_info=mod_info,
                                              print_cache_msg=not args.clear_cache)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    build_targets = set()
    test_infos = set()
    # Clear the cache if the user passes the -c option.
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if args.update_cmd_mapping or args.verify_cmd_mapping:
        args.dry_run = True
    if args.dry_run:
        args.tests.sort()
        dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
        if args.verify_cmd_mapping:
            try:
                atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                                   dry_run_cmds,
                                                   do_verification=True)
            except atest_error.DryRunVerificationError as e:
                atest_utils.colorful_print(str(e), constants.RED)
                return constants.EXIT_CODE_VERIFY_FAILURE
        if args.update_cmd_mapping:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds)
        return constants.EXIT_CODE_SUCCESS
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps is None if none of -b/-i/-t is set, else the list of steps.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps and not args.rebuild_module_info:
            # Run the extra tasks concurrently with the build step. Note that
            # atest won't index targets when only "-b" is given (without -t).
            _run_extra_tasks(join=False)
        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose)
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(time.time() - build_start),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        logging.warning('Install step without test step currently not '
                        'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
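    # Collapse any non-zero result into the generic test-failure exit code.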
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code


if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    ARGS = _parse_args(sys.argv[1:])
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR,
                                                 ARGS) as result_file:
        metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, ARGS)
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        if result_file:
            print("Run 'atest --history' to review test result history.")
    sys.exit(EXIT_CODE)