# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ATest execution info generator."""

# pylint: disable=line-too-long

from __future__ import print_function

import glob
import json
import logging
import os
import sys

import atest_utils as au
import constants

from atest_enum import ExitCode
from metrics import metrics_utils

_ARGS_KEY = 'args'
_STATUS_PASSED_KEY = 'PASSED'
_STATUS_FAILED_KEY = 'FAILED'
_STATUS_IGNORED_KEY = 'IGNORED'
_SUMMARY_KEY = 'summary'
_TOTAL_SUMMARY_KEY = 'total_summary'
_TEST_RUNNER_KEY = 'test_runner'
_TEST_NAME_KEY = 'test_name'
_TEST_TIME_KEY = 'test_time'
_TEST_DETAILS_KEY = 'details'
_TEST_RESULT_NAME = 'test_result'
_TEST_RESULT_LINK = 'test_result_link'
_EXIT_CODE_ATTR = 'EXIT_CODE'
_MAIN_MODULE_KEY = '__main__'
_UUID_LEN = 30
_RESULT_LEN = 20
_RESULT_URL_LEN = 35
_COMMAND_LEN = 50
_LOGCAT_FMT = '{}/log/invocation_*/{}*logcat-on-failure*'

_SUMMARY_MAP_TEMPLATE = {_STATUS_PASSED_KEY: 0,
                         _STATUS_FAILED_KEY: 0,
                         _STATUS_IGNORED_KEY: 0}

PREPARE_END_TIME = None


def preparation_time(start_time):
    """Return the preparation time.

    Args:
        start_time: The timestamp when preparation started.

    Returns:
        The preparation time if PREPARE_END_TIME is set, None otherwise.
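
    Example (a minimal doctest sketch; importing this file as
    atest_execution_info is an assumption about its module name):
        >>> import atest_execution_info as aei
        >>> aei.PREPARE_END_TIME = 12.5
        >>> aei.preparation_time(10.0)
        2.5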
    """
    return PREPARE_END_TIME - start_time if PREPARE_END_TIME else None


def symlink_latest_result(test_result_dir):
    """Make a symbolic link named LATEST that points to the latest result.

    Args:
        test_result_dir: A string of the result directory path.
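
    Example (illustrative; the result directory name is made up):
        symlink_latest_result('/path/to/atest_results/20201211_105858_2755')
        # constants.ATEST_RESULT_ROOT/LATEST now points to that directory.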
    """
    symlink = os.path.join(constants.ATEST_RESULT_ROOT, 'LATEST')
    if os.path.exists(symlink) or os.path.islink(symlink):
        os.remove(symlink)
    os.symlink(test_result_dir, symlink)


def print_test_result(root, history_arg):
    """Print the latest test results.

    Args:
        root: A string of the test result root path.
        history_arg: A string of an integer or a uuid. If it's an integer
                     string, that many of the latest test results will be
                     listed; otherwise it will be treated as a uuid and the
                     corresponding test result will be printed in detail.
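
    Example (illustrative calls; the uuid is a made-up directory name):
        print_test_result(constants.ATEST_RESULT_ROOT, '5')
        print_test_result(constants.ATEST_RESULT_ROOT,
                          '20201211_105858_275500')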
    """
    if not history_arg.isdigit():
        path = os.path.join(constants.ATEST_RESULT_ROOT, history_arg,
                            'test_result')
        print_test_result_by_path(path)
        return
    target = '%s/20*_*_*' % root
    paths = glob.glob(target)
    paths.sort(reverse=True)
    if has_url_results():
        print('{:-^{uuid_len}} {:-^{result_len}} {:-^{result_url_len}} {:-^{command_len}}'
              .format('uuid', 'result', 'result_url', 'command',
                      uuid_len=_UUID_LEN,
                      result_len=_RESULT_LEN,
                      result_url_len=_RESULT_URL_LEN,
                      command_len=_COMMAND_LEN))
    else:
        print('{:-^{uuid_len}} {:-^{result_len}} {:-^{command_len}}'
              .format('uuid', 'result', 'command',
                      uuid_len=_UUID_LEN,
                      result_len=_RESULT_LEN,
                      command_len=_COMMAND_LEN))
    for path in paths[0: int(history_arg)+1]:
        result_path = os.path.join(path, 'test_result')
        if os.path.isfile(result_path):
            try:
                with open(result_path) as json_file:
                    result = json.load(json_file)
                    total_summary = result.get(_TOTAL_SUMMARY_KEY, {})
                    summary_str = ', '.join([k[:1]+':'+str(v)
                                             for k, v in total_summary.items()])
                    test_result_url = result.get(_TEST_RESULT_LINK, '')
                    if has_url_results():
                        print('{:<{uuid_len}} {:<{result_len}} '
                              '{:<{result_url_len}} atest {:<{command_len}}'
                              .format(os.path.basename(path),
                                      summary_str,
                                      test_result_url,
                                      result.get(_ARGS_KEY, ''),
                                      uuid_len=_UUID_LEN,
                                      result_len=_RESULT_LEN,
                                      result_url_len=_RESULT_URL_LEN,
                                      command_len=_COMMAND_LEN))
                    else:
                        print('{:<{uuid_len}} {:<{result_len}} atest {:<{command_len}}'
                              .format(os.path.basename(path),
                                      summary_str,
                                      result.get(_ARGS_KEY, ''),
                                      uuid_len=_UUID_LEN,
                                      result_len=_RESULT_LEN,
                                      command_len=_COMMAND_LEN))
            except ValueError:
                pass


def print_test_result_by_path(path):
    """Print the test result under the given path in detail.

    Args:
        path: A string of the test result path.
    """
    if os.path.isfile(path):
        with open(path) as json_file:
            result = json.load(json_file)
            print('\natest {}'.format(result.get(_ARGS_KEY, '')))
            test_result_url = result.get(_TEST_RESULT_LINK, '')
            if test_result_url:
                print('\nTest Result Link: {}'.format(test_result_url))
            print('\nTotal Summary:\n{}'.format(au.delimiter('-')))
            total_summary = result.get(_TOTAL_SUMMARY_KEY, {})
            print(', '.join([(k+':'+str(v))
                             for k, v in total_summary.items()]))
            # Default to 0/[] so a result file without FAILED entries does
            # not raise on the comparison or iteration below.
            fail_num = total_summary.get(_STATUS_FAILED_KEY, 0)
            if fail_num > 0:
                message = '%d test failed' % fail_num
                print('\n')
                print(au.colorize(message, constants.RED))
                print('-' * len(message))
                test_runner = result.get(_TEST_RUNNER_KEY, {})
                for runner_name in test_runner.keys():
                    test_dict = test_runner.get(runner_name, {})
                    for test_name in test_dict:
                        test_details = test_dict.get(test_name, {})
                        for fail in test_details.get(_STATUS_FAILED_KEY, []):
                            print(au.colorize('{}'.format(
                                fail.get(_TEST_NAME_KEY)), constants.RED))
                            failure_files = glob.glob(_LOGCAT_FMT.format(
                                os.path.dirname(path), fail.get(_TEST_NAME_KEY)
                                ))
                            if failure_files:
                                print('{} {}'.format(
                                    au.colorize('LOGCAT-ON-FAILURES:',
                                                constants.CYAN),
                                    failure_files[0]))
                            print('{} {}'.format(
                                au.colorize('STACKTRACE:\n', constants.CYAN),
                                fail.get(_TEST_DETAILS_KEY)))


def has_non_test_options(args):
    """Check whether any non-test option is in the args.

    Args:
        args: An argparse.Namespace class instance holding parsed args.

    Returns:
        True if args has at least one non-test option, False otherwise.
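
    Example (a doctest sketch; the Namespace carries only the options this
    function reads, which is an assumption made for illustration):
        >>> import argparse
        >>> ns = argparse.Namespace(
        ...     collect_tests_only=False, dry_run=True, help=False,
        ...     history=None, info=False, version=False, latest_result=False)
        >>> has_non_test_options(ns)
        True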
    """
    return (args.collect_tests_only
            or args.dry_run
            or args.help
            or args.history
            or args.info
            or args.version
            or args.latest_result)


def has_url_results():
    """Check whether any of the test_result files contains a url link."""
    for root, _, files in os.walk(constants.ATEST_RESULT_ROOT):
        for file in files:
            if file != 'test_result':
                continue
            json_file = os.path.join(root, 'test_result')
            with open(json_file) as result_file:
                try:
                    result = json.load(result_file)
                    url_link = result.get(_TEST_RESULT_LINK, '')
                    if url_link:
                        return True
                except ValueError:
                    pass
    return False


class AtestExecutionInfo:
    """Class that stores the whole test progress information in JSON format.

    ----
    For example, running command
        atest hello_world_test HelloWorldTest

    will result in storing the execution detail in JSON:
    {
      "args": "hello_world_test HelloWorldTest",
      "test_runner": {
          "AtestTradefedTestRunner": {
              "hello_world_test": {
                  "FAILED": [
                      {"test_time": "(5ms)",
                       "details": "Hello, Wor...",
                       "test_name": "HelloWorldTest#PrintHelloWorld"}
                      ],
                  "summary": {"FAILED": 1, "PASSED": 0, "IGNORED": 0}
              },
              "HelloWorldTests": {
                  "PASSED": [
                      {"test_time": "(27ms)",
                       "details": null,
                       "test_name": "...HelloWorldTest#testHalloWelt"},
                      {"test_time": "(1ms)",
                       "details": null,
                       "test_name": "....HelloWorldTest#testHelloWorld"}
                      ],
                  "summary": {"FAILED": 0, "PASSED": 2, "IGNORED": 0}
              }
          }
      },
      "total_summary": {"FAILED": 1, "PASSED": 2, "IGNORED": 0}
    }
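
    Typical use is as a context manager around a test run (a sketch; the
    args, work_dir and args_ns values are illustrative, not from the source):
        args = ['hello_world_test']
        work_dir = '/tmp/atest_run'  # hypothetical result directory
        with AtestExecutionInfo(args, work_dir, args_ns) as result_file:
            pass  # run tests; reporters land in result_reporters
        # On exit, the JSON detail is written to <work_dir>/test_result
        # and the LATEST symlink is updated to point at work_dir.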
    """

    result_reporters = []

    def __init__(self, args, work_dir, args_ns):
        """Initialise an AtestExecutionInfo instance.

        Args:
            args: Command line parameters.
            work_dir: The directory for saving information.
            args_ns: An argparse.Namespace class instance holding parsed args.
        """
        self.args = args
        self.work_dir = work_dir
        self.result_file = None
        self.args_ns = args_ns

    def __enter__(self):
        """Create and return information file object."""
        full_file_name = os.path.join(self.work_dir, _TEST_RESULT_NAME)
        try:
            self.result_file = open(full_file_name, 'w')
        except IOError:
            logging.error('Cannot open file %s', full_file_name)
        return self.result_file

    def __exit__(self, exit_type, value, traceback):
        """Write execution information and close information file."""
        if self.result_file and not has_non_test_options(self.args_ns):
            self.result_file.write(AtestExecutionInfo.
                                   _generate_execution_detail(self.args))
            self.result_file.close()
            symlink_latest_result(self.work_dir)
        main_module = sys.modules.get(_MAIN_MODULE_KEY)
        main_exit_code = getattr(main_module, _EXIT_CODE_ATTR, ExitCode.ERROR)
        if main_exit_code == ExitCode.SUCCESS:
            metrics_utils.send_exit_event(main_exit_code)
        else:
            metrics_utils.handle_exc_and_send_exit_event(main_exit_code)

    @staticmethod
    def _generate_execution_detail(args):
        """Generate execution detail.

        Args:
            args: Command line parameters that you want to save.

        Returns:
            A json format string.
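
        Example (doctest sketch; with no registered reporters only the args
        and empty summaries appear; key order assumes Python 3.7+ dicts):
            >>> AtestExecutionInfo.result_reporters = []
            >>> AtestExecutionInfo._generate_execution_detail(['a', 'b'])
            '{"args": "a b", "test_runner": {}, "total_summary": {"PASSED": 0, "FAILED": 0, "IGNORED": 0}}'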
        """
        info_dict = {_ARGS_KEY: ' '.join(args)}
        try:
            AtestExecutionInfo._arrange_test_result(
                info_dict,
                AtestExecutionInfo.result_reporters)
            return json.dumps(info_dict)
        except ValueError as err:
            logging.warning('Parsing test result failed due to: %s', err)
        # Return an empty JSON object string (not a dict) so that __exit__
        # can still write valid content to the result file.
        return '{}'

    @staticmethod
    def _arrange_test_result(info_dict, reporters):
        """Append test result information to the given dict.

        Arrange test information as below:
        "test_runner": {
            "test runner name": {
                "test name": {
                    "FAILED": [
                        {"test time": "",
                         "details": "",
                         "test name": ""}
                    ],
                    "summary": {"FAILED": 0, "PASSED": 0, "IGNORED": 0}
                }
            }
        },
        "total_summary": {"FAILED": 0, "PASSED": 0, "IGNORED": 0}

        Args:
            info_dict: A dict you want to add result information in.
            reporters: A list of result_reporter.

        Returns:
            A dict containing test result information data.
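
        Example (doctest sketch; the SimpleNamespace stubs carry only the
        attributes this method reads and are purely illustrative):
            >>> from types import SimpleNamespace
            >>> test = SimpleNamespace(
            ...     runner_name='Runner', group_name='mod', status='PASSED',
            ...     test_name='mod#t1', test_time='(1ms)', details=None)
            >>> reporter = SimpleNamespace(
            ...     test_result_link=None, all_test_results=[test])
            >>> info = AtestExecutionInfo._arrange_test_result({}, [reporter])
            >>> info['total_summary']['PASSED']
            1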
        """
        info_dict[_TEST_RUNNER_KEY] = {}
        for reporter in reporters:
            if reporter.test_result_link:
                info_dict[_TEST_RESULT_LINK] = reporter.test_result_link
            for test in reporter.all_test_results:
                runner = info_dict[_TEST_RUNNER_KEY].setdefault(
                    test.runner_name, {})
                group = runner.setdefault(test.group_name, {})
                result_dict = {_TEST_NAME_KEY: test.test_name,
                               _TEST_TIME_KEY: test.test_time,
                               _TEST_DETAILS_KEY: test.details}
                group.setdefault(test.status, []).append(result_dict)

        total_test_group_summary = _SUMMARY_MAP_TEMPLATE.copy()
        for runner in info_dict[_TEST_RUNNER_KEY]:
            for group in info_dict[_TEST_RUNNER_KEY][runner]:
                group_summary = _SUMMARY_MAP_TEMPLATE.copy()
                for status in info_dict[_TEST_RUNNER_KEY][runner][group]:
                    count = len(info_dict[_TEST_RUNNER_KEY][runner][group][status])
                    if status in _SUMMARY_MAP_TEMPLATE:
                        group_summary[status] = count
                        total_test_group_summary[status] += count
                info_dict[_TEST_RUNNER_KEY][runner][group][_SUMMARY_KEY] = group_summary
        info_dict[_TOTAL_SUMMARY_KEY] = total_test_group_summary
        return info_dict