1#!/usr/bin/env python3.4
2#
3#   Copyright 2016 - The Android Open Source Project
4#
5#   Licensed under the Apache License, Version 2.0 (the "License");
6#   you may not use this file except in compliance with the License.
7#   You may obtain a copy of the License at
8#
9#       http://www.apache.org/licenses/LICENSE-2.0
10#
11#   Unless required by applicable law or agreed to in writing, software
12#   distributed under the License is distributed on an "AS IS" BASIS,
13#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14#   See the License for the specific language governing permissions and
15#   limitations under the License.
16
17from builtins import str
18
19import argparse
20import multiprocessing
21import os
22import signal
23import sys
24import traceback
25
26from acts.keys import Config
27from acts.signals import TestAbortAll
28from acts.test_runner import TestRunner
29from acts.test_runner import USERError
30from acts.utils import abs_path
31from acts.utils import concurrent_exec
32from acts.utils import load_config
33from acts.utils import valid_filename_chars
34
35
def _validate_test_config(test_config):
    """Checks that the raw config loaded from file has every required field.

    Args:
        test_config: The json object loaded from the config file.

    Raises:
        USERError: If any reserved key is absent from test_config.
    """
    for required_key in Config.reserved_keys.value:
        if required_key in test_config:
            continue
        raise USERError(
            "Required key {} missing in test config.".format(required_key))
45
def _validate_testbed_name(name):
    """Validates the name of a test bed.

    Since test bed names are used as part of the test run id, they need to
    meet certain requirements: non-empty, a string, and composed only of
    characters allowed in file names.

    Args:
        name: The test bed's name specified in config file.

    Raises:
        USERError: If the name does not meet any criteria.
    """
    if not name:
        raise USERError("Test bed names can't be empty.")
    if not isinstance(name, str):
        raise USERError("Test bed names have to be string.")
    # Find the first character (if any) that is not file-name safe.
    bad_char = next(
        (c for c in name if c not in valid_filename_chars), None)
    if bad_char is not None:
        raise USERError(
            "Char '%s' is not allowed in test bed names." % bad_char)
65
def _validate_testbed_configs(testbed_configs):
    """Validates a collection of testbed configurations.

    Each test bed name must be individually valid, and no two test beds may
    share a name, since names are used to identify concurrent runs.

    Args:
        testbed_configs: A list of testbed configuration json objects.

    Raises:
        USERError: If any part of the configuration is invalid.
    """
    names_in_use = set()
    for tb_config in testbed_configs:
        tb_name = tb_config[Config.key_testbed_name.value]
        _validate_testbed_name(tb_name)
        # Names double as run identifiers, so duplicates are rejected.
        if tb_name in names_in_use:
            raise USERError(
                "Duplicate testbed name {} found.".format(tb_name))
        names_in_use.add(tb_name)
86
def _verify_test_class_name(test_cls_name):
    """Raises USERError unless the class name follows the *Test convention."""
    if test_cls_name.endswith("Test"):
        return
    raise USERError(("Requested test class '%s' does not follow the test "
                     "class naming convention *Test.") % test_cls_name)
91
def _parse_one_test_specifier(item):
    """Parse one test specifier from command line input.

    This also verifies that the test class name and test case names follow
    ACTS's naming conventions. A test class name has to end with "Test"; a
    test case name has to start with "test_".

    Args:
        item: A string that specifies a test class or test cases in one test
            class to run.

    Returns:
        A tuple of a string and a list of strings. The string is the test
        class name, the list of strings is a list of test case names. The
        list can be None.

    Raises:
        USERError: If the specifier is malformed or a name violates the
            naming conventions.
    """
    tokens = item.split(':')
    if len(tokens) > 2:
        raise USERError("Syntax error in test specifier %s" % item)
    # The first token is always the test class name; validate it up front.
    test_cls_name = tokens[0]
    _verify_test_class_name(test_cls_name)
    if len(tokens) == 1:
        # Class-only specifier: run every test case in the class.
        return (test_cls_name, None)
    # Class plus a comma-separated list of test case names.
    clean_names = []
    for raw_name in tokens[1].split(','):
        case_name = raw_name.strip()
        if not case_name.startswith("test_"):
            raise USERError(("Requested test case '%s' in test class "
                            "'%s' does not follow the test case "
                            "naming convention test_*.") % (
                            case_name, test_cls_name))
        clean_names.append(case_name)
    return (test_cls_name, clean_names)
131
def parse_test_list(test_list):
    """Parse user provided test list into internal format for test_runner.

    Args:
        test_list: A list of test classes/cases.

    Returns:
        A list of (test class name, test case names) tuples, one per
        specifier, as produced by _parse_one_test_specifier.
    """
    # Pure construction: a comprehension replaces the manual append loop.
    return [_parse_one_test_specifier(elem) for elem in test_list]
142
def load_test_config_file(test_config_path, tb_filters=None):
    """Processes the test configuration file provided by user.

    Loads the configuration file into a json object, unpacks each testbed
    config into its own json object, and validate the configuration in the
    process.

    Args:
        test_config_path: Path to the test configuration file.
        tb_filters: Optional list of test bed names. When given, only the
            named test beds are kept, and every name must match exactly one
            test bed config.

    Returns:
        A list of test configuration json objects to be passed to TestRunner,
        or None if the configuration is invalid or a filter name is unmatched.
    """
    try:
        configs = load_config(test_config_path)
        if tb_filters:
            # Keep only the test beds the user asked for; every requested
            # name must be found, otherwise abort with a hint.
            tbs = []
            for tb in configs[Config.key_testbed.value]:
                if tb[Config.key_testbed_name.value] in tb_filters:
                    tbs.append(tb)
            if len(tbs) != len(tb_filters):
                print("Expect to find %d test bed configs, found %d." % (
                    len(tb_filters), len(tbs)))
                print("Check if you have the correct test bed names.")
                return None
            configs[Config.key_testbed.value] = tbs
        _validate_test_config(configs)
        _validate_testbed_configs(configs[Config.key_testbed.value])
        k_log_path = Config.key_log_path.value
        configs[k_log_path] = abs_path(configs[k_log_path])
        # Reading the test paths doubles as a check that the key exists; the
        # value itself is not used further in this function.
        tps = configs[Config.key_test_paths.value]
    except USERError as e:
        # Configuration-level problems get a short, user-facing message.
        print("Something is wrong in the test configurations.")
        print(str(e))
        return None
    except Exception as e:
        # Anything else (I/O, json, missing keys) gets a full traceback.
        print("Error loading test config {}".format(test_config_path))
        print(traceback.format_exc())
        return None
    # Unpack testbeds into separate json objects.
    beds = configs.pop(Config.key_testbed.value)
    config_jsons = []
    # TODO: See if there is a better way to do this: b/29836695
    config_path, _ = os.path.split(abs_path(test_config_path))
    # NOTE(review): the enum member itself is used as the dict key here,
    # while every other config access in this file uses `.value` — confirm
    # consumers look this up with the enum member, not its string value.
    configs[Config.key_config_path] = config_path
    for original_bed_config in beds:
        new_test_config = dict(configs)
        new_test_config[Config.key_testbed.value] = original_bed_config
        # Keys in each test bed config will be copied to a level up to be
        # picked up for user_params. If the key already exists in the upper
        # level, the local one defined in test bed config overwrites the
        # general one.
        new_test_config.update(original_bed_config)
        config_jsons.append(new_test_config)
    return config_jsons
198
199def _run_test(test_runner, repeat=1):
200    """Instantiate and runs TestRunner.
201
202    This is the function to start separate processes with.
203
204    Args:
205        test_runner: The test_runner instance to be executed.
206        repeat: Number of times to iterate the specified tests.
207    """
208    try:
209        for i in range(repeat):
210            test_runner.run()
211    except TestAbortAll:
212        return
213    except:
214        print("Exception when executing {}, iteration {}.".format(
215            test_runner.testbed_name, i))
216        print(traceback.format_exc())
217        return False
218    finally:
219        test_runner.stop()
220
221def _gen_term_signal_handler(test_runners):
222    def termination_sig_handler(signal_num, frame):
223        for t in test_runners:
224            t.stop()
225        sys.exit(1)
226    return termination_sig_handler
227
def _run_tests_parallel(process_args):
    """Executes all test runs concurrently via concurrent_exec.

    Args:
        process_args: A list of (test_runner, repeat) tuples, one per run.

    Returns:
        False if any run failed or raised; None otherwise.
    """
    print("Executing {} concurrent test runs.".format(len(process_args)))
    results = concurrent_exec(_run_test, process_args)
    # A run is bad if it explicitly returned False or surfaced an exception.
    if any(r is False or isinstance(r, Exception) for r in results):
        return False
234
def _run_tests_sequential(process_args):
    """Executes all test runs one after another.

    Args:
        process_args: A list of (test_runner, repeat) tuples, one per run.

    Returns:
        True if every run succeeded, False if any run returned False.
    """
    # Run everything (no early exit), then report overall success.
    results = [_run_test(*one_run_args) for one_run_args in process_args]
    return all(r is not False for r in results)
241
242def _parse_test_file(fpath):
243    try:
244        with open(fpath, 'r') as f:
245            tf = []
246            for line in f:
247                line = line.strip()
248                if not line:
249                    continue
250                if len(tf) and (tf[-1].endswith(':') or tf[-1].endswith(',')):
251                    tf[-1] += line
252                else:
253                    tf.append(line)
254            return tf
255    except:
256        print("Error loading test file.")
257        raise
258
def main(argv):
    """Command line entry point: parse args, build runners, execute tests.

    Args:
        argv: List of command line argument strings (without program name).

    Exits the process with status 1 on any configuration, instantiation, or
    test failure, and status 0 on success.
    """
    parser = argparse.ArgumentParser(description=("Specify tests to run. If "
                 "nothing specified, run all test cases found."))
    parser.add_argument('-c', '--config', nargs=1, type=str, required=True,
        metavar="<PATH>", help="Path to the test configuration file.")
    parser.add_argument('--test_args', nargs='+', type=str,
        metavar="Arg1 Arg2 ...",
        help=("Command-line arguments to be passed to every test case in a "
              "test run. Use with caution."))
    parser.add_argument('-d', '--debug', action="store_true",
        help=("Set this flag if manual debugging is required."))
    parser.add_argument('-p', '--parallel', action="store_true",
        help=("If set, tests will be executed on all testbeds in parallel. "
              "Otherwise, tests are executed iteratively testbed by testbed."))
    parser.add_argument('-r', '--repeat', type=int,
        metavar="<NUMBER>",
        help="Number of times to run the specified test cases.")
    parser.add_argument('-tb', '--testbed', nargs='+', type=str,
        metavar="[<TEST BED NAME1> <TEST BED NAME2> ...]",
        help="Specify which test beds to run tests on.")
    # -tc and -tf are mutually exclusive ways to name the tests to run.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-tc', '--testclass', nargs='+', type=str,
        metavar="[TestClass1 TestClass2:test_xxx ...]",
        help="A list of test classes/cases to run.")
    group.add_argument('-tf', '--testfile', nargs=1, type=str,
        metavar="<PATH>",
        help=("Path to a file containing a comma delimited list of test "
              "classes to run."))

    args = parser.parse_args(argv)
    test_list = None
    repeat = 1
    if args.testfile:
        test_list = _parse_test_file(args.testfile[0])
    elif args.testclass:
        test_list = args.testclass
    if args.repeat:
        repeat = args.repeat
    parsed_configs = load_test_config_file(args.config[0], args.testbed)
    if not parsed_configs:
        print("Encountered error when parsing the config file, abort!")
        sys.exit(1)
    # Prepare args for test runs
    test_identifiers = parse_test_list(test_list)
    test_runners = []
    process_args = []
    try:
        # One TestRunner per testbed config; each gets the shared CLI args.
        for c in parsed_configs:
            c[Config.ikey_cli_args.value] = args.test_args
            t = TestRunner(c, test_identifiers)
            test_runners.append(t)
            process_args.append((t, repeat))
    except:
        # Broad on purpose: any constructor failure aborts the whole run.
        print("Failed to instantiate test runner, abort.")
        print(traceback.format_exc())
        sys.exit(1)
    # Register handlers for termination signals unless manual debugging
    # (-d/--debug) is requested, so SIGTERM/SIGINT stop all runners cleanly.
    if not args.debug:
        handler = _gen_term_signal_handler(test_runners)
        signal.signal(signal.SIGTERM, handler)
        signal.signal(signal.SIGINT, handler)
    # Execute test runners.
    if args.parallel and len(process_args) > 1:
        exec_result = _run_tests_parallel(process_args)
    else:
        exec_result = _run_tests_sequential(process_args)
    # Only an explicit False is a failure; None counts as success.
    if exec_result is False:
        sys.exit(1)
    sys.exit(0)
328
# Script entry point: forward CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
331
332