# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""
15Result Reporter
16
17The result reporter formats and prints test results.
18
19----
20Example Output for command to run following tests:
21CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests
22
23Running Tests ...
24
25CtsAnimationTestCases
26---------------------
27
28android.animation.cts.EvaluatorTest.UnitTests (7 Tests)
29[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms)
30[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms)
31[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms)
32[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms)
33[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms)
34[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms)
35[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms)
36
37HelloWorldTests
38---------------
39
40android.test.example.helloworld.UnitTests(2 Tests)
41[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms)
42[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED (1ms)
43
44WmTests
45-------
46
47com.android.tradefed.targetprep.UnitTests (1 Test)
48RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError:
49Failed to install WmTests.apk on 127.0.0.1:54373. Reason:
50    error message ...
51
52
53Summary
54-------
55CtsAnimationTestCases: Passed: 7, Failed: 0
56HelloWorldTests: Passed: 2, Failed: 0
57WmTests: Passed: 0, Failed: 0 (Completed With ERRORS)
58
591 test failed
60"""

from __future__ import print_function
from collections import OrderedDict

import constants
import atest_utils as au

from test_runners import test_runner_base

UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
FAILURE_FLAG = 'RUNNER_FAILURE'
BENCHMARK_ESSENTIAL_KEYS = {'repetition_index', 'cpu_time', 'name', 'repetitions',
                            'run_type', 'threads', 'time_unit', 'iterations',
                            'run_name', 'real_time'}
# TODO(b/146875480): handle the optional benchmark events
BENCHMARK_OPTIONAL_KEYS = {'bytes_per_second', 'label'}
BENCHMARK_EVENT_KEYS = BENCHMARK_ESSENTIAL_KEYS.union(BENCHMARK_OPTIONAL_KEYS)
INT_KEYS = {'cpu_time', 'real_time'}

class PerfInfo(object):
    """Class for storing performance results of a test run."""

    def __init__(self):
        """Initialize a new instance of PerfInfo class."""
        # perf_info: A list of benchmark_info(dict).
        self.perf_info = []

    def update_perf_info(self, test):
        """Update perf_info with the given result of a single test.

        Args:
            test: A TestResult namedtuple.
        """
        all_additional_keys = set(test.additional_info.keys())
        # Skip results that are missing any essential benchmark key.
        if not BENCHMARK_ESSENTIAL_KEYS.issubset(all_additional_keys):
            return
        benchmark_info = {}
        benchmark_info['test_name'] = test.test_name
        for key, data in test.additional_info.items():
            if key in INT_KEYS:
                data_to_int = data.split('.')[0]
                benchmark_info[key] = data_to_int
            elif key in BENCHMARK_EVENT_KEYS:
                benchmark_info[key] = data
        if benchmark_info:
            self.perf_info.append(benchmark_info)
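
    # Illustrative usage (a rough sketch; the additional_info values below are
    # made up and only mirror the key set in BENCHMARK_ESSENTIAL_KEYS):
    #
    #   perf = PerfInfo()
    #   perf.update_perf_info(test)  # test.additional_info might carry, e.g.,
    #                                # {'name': 'BM_MapWrite', 'real_time': '1530',
    #                                #  'cpu_time': '1522', 'time_unit': 'ns',
    #                                #  'iterations': '460517', ...}
    #   perf.print_perf_info()       # prints the aligned benchmark table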

    def print_perf_info(self):
        """Print summary of a perf_info."""
        if not self.perf_info:
            return
        classify_perf_info, max_len = self._classify_perf_info()
        separator = '-' * au.get_terminal_size()[0]
        print(separator)
        print("{:{name}}    {:^{real_time}}    {:^{cpu_time}}    "
              "{:>{iterations}}".format(
                  'Benchmark', 'Time', 'CPU', 'Iteration',
                  name=max_len['name']+3,
                  real_time=max_len['real_time']+max_len['time_unit']+1,
                  cpu_time=max_len['cpu_time']+max_len['time_unit']+1,
                  iterations=max_len['iterations']))
        print(separator)
        for module_name, module_perf_info in classify_perf_info.items():
            print("{}:".format(module_name))
            for benchmark_info in module_perf_info:
                # BpfBenchMark/MapWriteNewEntry/1    1530 ns     1522 ns   460517
                print("  #{:{name}}    {:>{real_time}} {:{time_unit}}    "
                      "{:>{cpu_time}} {:{time_unit}}    "
                      "{:>{iterations}}".format(benchmark_info['name'],
                                                benchmark_info['real_time'],
                                                benchmark_info['time_unit'],
                                                benchmark_info['cpu_time'],
                                                benchmark_info['time_unit'],
                                                benchmark_info['iterations'],
                                                name=max_len['name'],
                                                real_time=max_len['real_time'],
                                                time_unit=max_len['time_unit'],
                                                cpu_time=max_len['cpu_time'],
                                                iterations=max_len['iterations']))

    def _classify_perf_info(self):
        """Classify the perf_info by test module name.

        Returns:
            A tuple of (classified_perf_info, max_len), where
            classified_perf_info: A dict that maps each module name to the
                                  list of perf_info entries belonging to it.
                e.g.
                    { module_name_01: [perf_info of module_1],
                      module_name_02: [perf_info of module_2], ...}
            max_len: A dict which stores the max length of each event.
                     It contains the max string length of 'name', 'real_time',
                     'time_unit', 'cpu_time', 'iterations'.
                e.g.
                    {name: 56, real_time: 9, time_unit: 2, cpu_time: 8,
                     iterations: 12}
        """
        module_categories = set()
        max_len = {}
        all_name = []
        all_real_time = []
        all_time_unit = []
        all_cpu_time = []
        all_iterations = ['Iteration']
        for benchmark_info in self.perf_info:
            module_categories.add(benchmark_info['test_name'].split('#')[0])
            all_name.append(benchmark_info['name'])
            all_real_time.append(benchmark_info['real_time'])
            all_time_unit.append(benchmark_info['time_unit'])
            all_cpu_time.append(benchmark_info['cpu_time'])
            all_iterations.append(benchmark_info['iterations'])
        classified_perf_info = {}
        for module_name in module_categories:
            module_perf_info = []
            for benchmark_info in self.perf_info:
                if benchmark_info['test_name'].split('#')[0] == module_name:
                    module_perf_info.append(benchmark_info)
            classified_perf_info[module_name] = module_perf_info
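        # Track the widest string seen for each column so print_perf_info()
        # can align the table, e.g. len(max(['5', '1530'], key=len)) == 4.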
        max_len = {'name': len(max(all_name, key=len)),
                   'real_time': len(max(all_real_time, key=len)),
                   'time_unit': len(max(all_time_unit, key=len)),
                   'cpu_time': len(max(all_cpu_time, key=len)),
                   'iterations': len(max(all_iterations, key=len))}
        return classified_perf_info, max_len


class RunStat(object):
    """Class for storing stats of a test run."""

    def __init__(self, passed=0, failed=0, ignored=0, run_errors=False,
                 assumption_failed=0):
        """Initialize a new instance of RunStat class.

        Args:
            passed: Count of passing tests.
            failed: Count of failed tests.
            ignored: Count of ignored tests.
            assumption_failed: Count of assumption failure tests.
            run_errors: A boolean; True if there were run errors.
        """
        # TODO(b/109822985): Track group and run estimated totals for updating
        # summary line
        self.passed = passed
        self.failed = failed
        self.ignored = ignored
        self.assumption_failed = assumption_failed
        self.perf_info = PerfInfo()
        # Run errors are not for particular tests, they are runner errors.
        self.run_errors = run_errors

    @property
    def total(self):
        """Getter for total tests actually run. Accessed via self.total."""
        return self.passed + self.failed
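
    # For example (illustrative counts, not from a real run):
    #
    #   stat = RunStat(passed=2, failed=1)
    #   stat.total  # -> 3; ignored and assumption failures are not counted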


class ResultReporter(object):
    """Result Reporter class.

    As each test is run, the test runner will call self.process_test_result()
    with a TestResult namedtuple that contains the following information:
    - runner_name:   Name of the test runner.
    - group_name:    Name of the test group if any.
                     In Tradefed that's the Module name.
    - test_name:     Name of the test.
                     In Tradefed that's qualified.class#Method
    - status:        The test status string, e.g. PASSED or FAILED.
    - stacktrace:    The stacktrace if the test failed.
    - group_total:   The total tests scheduled to be run for a group.
                     In Tradefed this is provided when the Module starts.
    - runner_total:  The total tests scheduled to be run for the runner.
                     In Tradefed this is not available, so it is None.

    The Result Reporter will print the results of this test and then update
    its stats state.

    Test stats are stored in the following structure:
    - self.run_stats: A RunStat instance containing stats for the overall run.
                      This includes pass/fail counts across ALL test runners.

    - self.runners:  Is of the form: {RunnerName: {GroupName: RunStat Instance}}
                     Where {} is an ordered dict.

                     The stats instance contains stats for each test group.
                     If the runner doesn't support groups, then the group
                     name will be None.

    For example this could be a state of ResultReporter:

    run_stats: RunStat(passed:10, failed:5)
    runners: {'AtestTradefedTestRunner':
                            {'Module1': RunStat(passed:1, failed:1),
                             'Module2': RunStat(passed:0, failed:4)},
              'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)},
              'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}}
    """

    def __init__(self, silent=False):
        """Init ResultReporter.

        Args:
            silent: A boolean; if True, suppress printing of individual test
                    results and group titles.
        """
        self.run_stats = RunStat()
        self.runners = OrderedDict()
        self.failed_tests = []
        self.all_test_results = []
        self.pre_test = None
        self.log_path = None
        self.silent = silent
        self.rerun_options = ''

    def process_test_result(self, test):
        """Given the results of a single test, update stats and print results.

        Args:
            test: A TestResult namedtuple.
        """
        if test.runner_name not in self.runners:
            self.runners[test.runner_name] = OrderedDict()
        assert self.runners[test.runner_name] != FAILURE_FLAG
        self.all_test_results.append(test)
        if test.group_name not in self.runners[test.runner_name]:
            self.runners[test.runner_name][test.group_name] = RunStat()
            self._print_group_title(test)
        self._update_stats(test,
                           self.runners[test.runner_name][test.group_name])
        self._print_result(test)
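
    # Typical driving sequence (a rough sketch; 'results' and its TestResult
    # items stand in for whatever the test runner actually produces):
    #
    #   reporter = ResultReporter()
    #   reporter.print_starting_text()
    #   for result in results:
    #       reporter.process_test_result(result)
    #   exit_code = reporter.print_summary()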

    def runner_failure(self, runner_name, failure_msg):
        """Report a runner failure.

        Use this instead of process_test_result() when the runner fails
        separately from any particular test, e.g. during runner setup.

        Args:
            runner_name: A string of the name of the runner.
            failure_msg: A string of the failure message to pass to the user.
        """
        self.runners[runner_name] = FAILURE_FLAG
        print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
        print('Runner encountered a critical failure. Skipping.\n'
              'FAILURE: %s' % failure_msg)
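
    # For instance (hypothetical runner name and message, shown only to
    # illustrate the call):
    #
    #   reporter.runner_failure('AtestTradefedTestRunner',
    #                           'Failed to install WmTests.apk')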

    def register_unsupported_runner(self, runner_name):
        """Register an unsupported runner.

           Prints the following to the screen:

           RunnerName
           ----------
           This runner does not support normal results formatting.
           Below is the raw output of the test runner.

           RAW OUTPUT:
           <Raw Runner Output>

           Args:
              runner_name: A String of the test runner's name.
        """
        assert runner_name not in self.runners
        self.runners[runner_name] = UNSUPPORTED_FLAG
        print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
        print('This runner does not support normal results formatting. Below '
              'is the raw output of the test runner.\n\nRAW OUTPUT:')

    def print_starting_text(self):
        """Print starting text for running tests."""
        print(au.colorize('\nRunning Tests...', constants.CYAN))

    def print_summary(self):
        """Print summary of all test runs.

        Returns:
            0 if all tests pass, non-zero otherwise.
        """
        tests_ret = constants.EXIT_CODE_SUCCESS
        if not self.runners:
            return tests_ret
        print('\n%s' % au.colorize('Summary', constants.CYAN))
        print('-------')
        if self.rerun_options:
            print(self.rerun_options)
        failed_sum = len(self.failed_tests)
        for runner_name, groups in self.runners.items():
            if groups == UNSUPPORTED_FLAG:
                print(runner_name, 'Unsupported. See raw output above.')
                continue
            if groups == FAILURE_FLAG:
                tests_ret = constants.EXIT_CODE_TEST_FAILURE
                print(runner_name, 'Crashed. No results to report.')
                failed_sum += 1
                continue
            for group_name, stats in groups.items():
                name = group_name if group_name else runner_name
                summary = self.process_summary(name, stats)
                if stats.failed > 0:
                    tests_ret = constants.EXIT_CODE_TEST_FAILURE
                if stats.run_errors:
                    tests_ret = constants.EXIT_CODE_TEST_FAILURE
                    failed_sum += 1 if not stats.failed else 0
                print(summary)
        self.run_stats.perf_info.print_perf_info()
        print()
        if tests_ret == constants.EXIT_CODE_SUCCESS:
            print(au.colorize('All tests passed!', constants.GREEN))
        else:
            message = '%d %s failed' % (failed_sum,
                                        'tests' if failed_sum > 1 else 'test')
            print(au.colorize(message, constants.RED))
            print('-'*len(message))
            self.print_failed_tests()
        if self.log_path:
            print('Test logs have been saved in %s' % self.log_path)
        return tests_ret

    def print_failed_tests(self):
        """Print the failed tests, if any."""
        if self.failed_tests:
            for test_name in self.failed_tests:
                print('%s' % test_name)

    def process_summary(self, name, stats):
        """Process the summary line.

        Strategy:
            Error status happens ->
                SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red>
                SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With ERRORS)</red>
            One or more tests fail ->
                SomeTests: Passed: 2, <red>Failed</red>: 5
            No test fails ->
                SomeTests: <green>Passed</green>: 2, Failed: 0

        Args:
            name: A string of the test name.
            stats: A RunStat instance for a test group.

        Returns:
            A summary string of the test result.
        """
        passed_label = 'Passed'
        failed_label = 'Failed'
        ignored_label = 'Ignored'
        assumption_failed_label = 'Assumption Failed'
        error_label = ''
        if stats.failed > 0:
            failed_label = au.colorize(failed_label, constants.RED)
        if stats.run_errors:
            error_label = au.colorize('(Completed With ERRORS)', constants.RED)
        elif stats.failed == 0:
            passed_label = au.colorize(passed_label, constants.GREEN)
        summary = '%s: %s: %s, %s: %s, %s: %s, %s: %s %s' % (name,
                                                             passed_label,
                                                             stats.passed,
                                                             failed_label,
                                                             stats.failed,
                                                             ignored_label,
                                                             stats.ignored,
                                                             assumption_failed_label,
                                                             stats.assumption_failed,
                                                             error_label)
        return summary
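
    # A produced line looks roughly like the following (colors omitted,
    # counts illustrative):
    #
    #   'HelloWorldTests: Passed: 2, Failed: 0, Ignored: 0, Assumption Failed: 0 '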

    def _update_stats(self, test, group):
        """Given the results of a single test, update test run stats.

        Args:
            test: a TestResult namedtuple.
            group: a RunStat instance for a test group.
        """
        # TODO(b/109822985): Track group and run estimated totals for updating
        # summary line
        if test.status == test_runner_base.PASSED_STATUS:
            self.run_stats.passed += 1
            group.passed += 1
        elif test.status == test_runner_base.IGNORED_STATUS:
            self.run_stats.ignored += 1
            group.ignored += 1
        elif test.status == test_runner_base.ASSUMPTION_FAILED:
            self.run_stats.assumption_failed += 1
            group.assumption_failed += 1
        elif test.status == test_runner_base.FAILED_STATUS:
            self.run_stats.failed += 1
            self.failed_tests.append(test.test_name)
            group.failed += 1
        elif test.status == test_runner_base.ERROR_STATUS:
            self.run_stats.run_errors = True
            group.run_errors = True
        self.run_stats.perf_info.update_perf_info(test)

    def _print_group_title(self, test):
        """Print the title line for a test group.

        Test Group/Runner Name
        ----------------------

        Args:
            test: A TestResult namedtuple.
        """
        if self.silent:
            return
        title = test.group_name or test.runner_name
        underline = '-' * (len(title))
        print('\n%s\n%s' % (title, underline))

    def _print_result(self, test):
        """Print the results of a single test.

           Looks like:
           fully.qualified.class#TestMethod: PASSED/FAILED

        Args:
            test: a TestResult namedtuple.
        """
        if self.silent:
            return
        if not self.pre_test or (test.test_run_name !=
                                 self.pre_test.test_run_name):
            print('%s (%s %s)' % (au.colorize(test.test_run_name,
                                              constants.BLUE),
                                  test.group_total,
                                  'Test' if test.group_total <= 1 else 'Tests'))
        if test.status == test_runner_base.ERROR_STATUS:
            print('RUNNER ERROR: %s\n' % test.details)
            self.pre_test = test
            return
        if test.test_name:
            if test.status == test_runner_base.PASSED_STATUS:
                # Example of output:
                # [78/92] test_name: PASSED (92ms)
                print('[%s/%s] %s: %s %s' % (test.test_count,
                                             test.group_total,
                                             test.test_name,
                                             au.colorize(
                                                 test.status,
                                                 constants.GREEN),
                                             test.test_time))
                for key, data in test.additional_info.items():
                    if key not in BENCHMARK_EVENT_KEYS:
                        print('\t%s: %s' % (au.colorize(key, constants.BLUE), data))
            elif test.status == test_runner_base.IGNORED_STATUS:
                # Example: [33/92] test_name: IGNORED (12ms)
                print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
                                             test.test_name, au.colorize(
                                                 test.status, constants.MAGENTA),
                                             test.test_time))
            elif test.status == test_runner_base.ASSUMPTION_FAILED:
                # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms)
                print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
                                             test.test_name, au.colorize(
                                                 test.status, constants.MAGENTA),
                                             test.test_time))
            else:
                # Example: [26/92] test_name: FAILED (32ms)
                print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
                                             test.test_name, au.colorize(
                                                 test.status, constants.RED),
                                             test.test_time))
        if test.status == test_runner_base.FAILED_STATUS:
            print('\nSTACKTRACE:\n%s' % test.details)
        self.pre_test = test