#!/usr/bin/env python3.4
#
#   Copyright 2016 - The Android Open Source Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

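"""Command-line launcher for ACTS test runs.

Loads a user-provided test configuration, validates it, and dispatches the
requested test classes/cases to acts.test_runner.TestRunner, either
sequentially or in parallel across testbeds.
"""
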
from builtins import str

import argparse
import multiprocessing
import signal
import sys
import traceback

from acts.keys import Config
from acts.signals import TestAbortAll
from acts.test_runner import TestRunner
from acts.test_runner import USERError
from acts.utils import abs_path
from acts.utils import concurrent_exec
from acts.utils import load_config
from acts.utils import valid_filename_chars


def _validate_test_config(test_config):
    """Validates the raw configuration loaded from the config file.

    Ensures that all the required fields exist.
    """
    for k in Config.reserved_keys.value:
        if k not in test_config:
            raise USERError(
                "Required key {} missing in test config.".format(k))

def _validate_testbed_name(name):
    """Validates the name of a test bed.

    Since test bed names are used as part of the test run id, they need to
    meet certain requirements.

    Args:
        name: The test bed's name specified in the config file.

    Raises:
        USERError: If the name does not meet the requirements.
    """
    if not name:
        raise USERError("Test bed names can't be empty.")
    if not isinstance(name, str):
        raise USERError("Test bed names have to be strings.")
    for l in name:
        if l not in valid_filename_chars:
            raise USERError("Char '%s' is not allowed in test bed names." % l)

def _validate_testbed_configs(testbed_configs):
    """Validates the testbed configurations.

    Args:
        testbed_configs: A list of testbed configuration json objects.

    Raises:
        USERError: If any part of the configuration is invalid.
    """
    seen_names = set()
    # Cross-check testbed configs for resource conflicts.
    for config in testbed_configs:
        name = config[Config.key_testbed_name.value]
        _validate_testbed_name(name)
        # Test bed names are used as identifiers, so they must be unique.
        if name in seen_names:
            raise USERError("Duplicate testbed name {} found.".format(name))
        seen_names.add(name)

def _verify_test_class_name(test_cls_name):
    """Checks that a test class name follows the *Test naming convention."""
    if not test_cls_name.endswith("Test"):
        raise USERError(("Requested test class '%s' does not follow the test "
                         "class naming convention *Test.") % test_cls_name)

def _parse_one_test_specifier(item):
    """Parse one test specifier from command line input.

    This also verifies that the test class name and test case names follow
    ACTS's naming conventions. A test class name has to end with "Test"; a
    test case name has to start with "test_".

    Args:
        item: A string that specifies a test class or test cases in one test
            class to run.

    Returns:
        A tuple of a string and a list of strings. The string is the test class
        name, the list of strings is a list of test case names. The list can be
        None.

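    Examples (illustrative; class and case names are hypothetical):
        "SampleTest" parses to ("SampleTest", None).
        "SampleTest:test_a,test_b" parses to
            ("SampleTest", ["test_a", "test_b"]).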
    """
    tokens = item.split(':')
    if len(tokens) > 2:
        raise USERError("Syntax error in test specifier %s" % item)
    if len(tokens) == 1:
        # This should be considered a test class name.
        test_cls_name = tokens[0]
        _verify_test_class_name(test_cls_name)
        return (test_cls_name, None)
    elif len(tokens) == 2:
        # This should be considered a test class name followed by
        # a list of test case names.
        test_cls_name, test_case_names = tokens
        clean_names = []
        _verify_test_class_name(test_cls_name)
        for elem in test_case_names.split(','):
            test_case_name = elem.strip()
            if not test_case_name.startswith("test_"):
                raise USERError(("Requested test case '%s' in test class '%s' "
                                 "does not follow the test case naming "
                                 "convention test_*.") % (test_case_name,
                                                          test_cls_name))
            clean_names.append(test_case_name)
        return (test_cls_name, clean_names)

def parse_test_list(test_list):
    """Parse user-provided test list into internal format for test_runner.

    Args:
        test_list: A list of test classes/cases.

    Returns:
        A list of tuples as returned by _parse_one_test_specifier, one per
        entry in test_list.
    """
    result = []
    for elem in test_list:
        result.append(_parse_one_test_specifier(elem))
    return result

def load_test_config_file(test_config_path, tb_filters=None):
    """Processes the test configuration file provided by the user.

    Loads the configuration file into a json object, unpacks each testbed
    config into its own json object, and validates the configuration in the
    process.

    Args:
        test_config_path: Path to the test configuration file.
        tb_filters: A list of test bed names. If provided, only configs for
            the named test beds are kept; all others are filtered out.

    Returns:
        A list of test configuration json objects to be passed to TestRunner.

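    For example (key names here are illustrative; the real ones come from
    acts.keys.Config), a config with two testbeds such as
        {"testbed": [{"name": "tb1"}, {"name": "tb2"}], "logpath": "/tmp/logs"}
    is unpacked into two json objects, one per testbed, each also carrying the
    shared top-level keys.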
    """
    try:
        configs = load_config(test_config_path)
        if tb_filters:
            tbs = []
            for tb in configs[Config.key_testbed.value]:
                if tb[Config.key_testbed_name.value] in tb_filters:
                    tbs.append(tb)
            if len(tbs) != len(tb_filters):
                print("Expected to find %d test bed configs, found %d." % (
                    len(tb_filters), len(tbs)))
                print("Check if you have the correct test bed names.")
                return None
            configs[Config.key_testbed.value] = tbs
        _validate_test_config(configs)
        _validate_testbed_configs(configs[Config.key_testbed.value])
        k_log_path = Config.key_log_path.value
        configs[k_log_path] = abs_path(configs[k_log_path])
        tps = configs[Config.key_test_paths.value]
    except USERError as e:
        print("Something is wrong in the test configurations.")
        print(str(e))
        return None
    except Exception as e:
        print("Error loading test config {}".format(test_config_path))
        print(traceback.format_exc())
        return None
    # Unpack testbeds into separate json objects.
    beds = configs.pop(Config.key_testbed.value)
    config_jsons = []
    for original_bed_config in beds:
        new_test_config = dict(configs)
        new_test_config[Config.key_testbed.value] = original_bed_config
        # Keys in each test bed config will be copied to a level up to be
        # picked up for user_params. If the key already exists in the upper
        # level, the local one defined in test bed config overwrites the
        # general one.
        new_test_config.update(original_bed_config)
        config_jsons.append(new_test_config)
    return config_jsons

def _run_test(test_runner, repeat=1):
    """Runs a TestRunner instance.

    This is the function to start separate processes with.

    Args:
        test_runner: The TestRunner instance to be executed.
        repeat: Number of times to iterate the specified tests.

    Returns:
        False if an unexpected exception aborted the run, None otherwise.
    """
    try:
        for i in range(repeat):
            test_runner.run()
    except TestAbortAll:
        return
    except:
        print("Exception when executing {}, iteration {}.".format(
            test_runner.testbed_name, i))
        print(traceback.format_exc())
        return False
    finally:
        test_runner.stop()

def _gen_term_signal_handler(test_runners):
    """Creates a signal handler that stops all test runners, then exits."""
    def termination_sig_handler(signal_num, frame):
        for t in test_runners:
            t.stop()
        sys.exit(1)
    return termination_sig_handler

def _run_tests_parallel(process_args):
    """Executes all test runs concurrently via concurrent_exec.

    Returns:
        False if any test run failed or raised an exception, None otherwise.
    """
    print("Executing {} concurrent test runs.".format(len(process_args)))
    results = concurrent_exec(_run_test, process_args)
    for r in results:
        if r is False or isinstance(r, Exception):
            return False

def _run_tests_sequential(process_args):
    """Executes all test runs one after another.

    Returns:
        True if every test run succeeded, False otherwise.
    """
    ok = True
    for args in process_args:
        if _run_test(*args) is False:
            ok = False
    return ok

def _parse_test_file(fpath):
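    """Parses a test list file into a list of test specifier strings.

    Blank lines are skipped. A specifier can be split across lines: a line
    ending with ':' or ',' is merged with the line that follows it.

    Args:
        fpath: Path to the test list file.
    """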
    try:
        with open(fpath, 'r') as f:
            tf = []
            for line in f:
                line = line.strip()
                if not line:
                    continue
                if len(tf) and (tf[-1].endswith(':') or tf[-1].endswith(',')):
                    tf[-1] += line
                else:
                    tf.append(line)
            return tf
    except:
        print("Error loading test file.")
        raise

def main(argv):
    parser = argparse.ArgumentParser(description=("Specify tests to run. If "
                 "nothing is specified, run all test cases found."))
    parser.add_argument('-c', '--config', nargs=1, type=str, required=True,
        metavar="<PATH>", help="Path to the test configuration file.")
    parser.add_argument('--test_args', nargs='+', type=str,
        metavar="Arg1 Arg2 ...",
        help=("Command-line arguments to be passed to every test case in a "
              "test run. Use with caution."))
    parser.add_argument('-d', '--debug', action="store_true",
        help=("Set this flag if manual debugging is required."))
    parser.add_argument('-p', '--parallel', action="store_true",
        help=("If set, tests will be executed on all testbeds in parallel. "
              "Otherwise, tests are executed iteratively testbed by testbed."))
    parser.add_argument('-r', '--repeat', type=int,
        metavar="<NUMBER>",
        help="Number of times to run the specified test cases.")
    parser.add_argument('-tb', '--testbed', nargs='+', type=str,
        metavar="[<TEST BED NAME1> <TEST BED NAME2> ...]",
        help="Specify which test beds to run tests on.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-tc', '--testclass', nargs='+', type=str,
        metavar="[TestClass1 TestClass2:test_xxx ...]",
        help="A list of test classes/cases to run.")
    group.add_argument('-tf', '--testfile', nargs=1, type=str,
        metavar="<PATH>",
        help=("Path to a file containing a comma delimited list of test "
              "classes to run."))

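    # Example invocation (illustrative; script name, config path and test
    # names are placeholders):
    #   act.py -c /path/to/config.json -tb MyTestBed -tc SampleTest:test_a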
    args = parser.parse_args(argv)
    test_list = None
    repeat = 1
    if args.testfile:
        test_list = _parse_test_file(args.testfile[0])
    elif args.testclass:
        test_list = args.testclass
    if args.repeat:
        repeat = args.repeat
    parsed_configs = load_test_config_file(args.config[0], args.testbed)
    if not parsed_configs:
        print("Encountered error when parsing the config file, abort!")
        sys.exit(1)
    # Prepare args for test runs.
    test_identifiers = parse_test_list(test_list)
    test_runners = []
    process_args = []
    try:
        for c in parsed_configs:
            c[Config.ikey_cli_args.value] = args.test_args
            t = TestRunner(c, test_identifiers)
            test_runners.append(t)
            process_args.append((t, repeat))
    except:
        print("Failed to instantiate test runner, abort.")
        print(traceback.format_exc())
        sys.exit(1)
    # Register handlers for termination signals unless in debug mode.
    if not args.debug:
        handler = _gen_term_signal_handler(test_runners)
        signal.signal(signal.SIGTERM, handler)
        signal.signal(signal.SIGINT, handler)
    # Execute test runners.
    if args.parallel and len(process_args) > 1:
        exec_result = _run_tests_parallel(process_args)
    else:
        exec_result = _run_tests_sequential(process_args)
    if exec_result is False:
        sys.exit(1)
    sys.exit(0)

if __name__ == "__main__":
    main(sys.argv[1:])