# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=import-outside-toplevel
# pylint: disable=line-too-long

"""
Result Reporter

The result reporter formats and prints test results.

----
Example output for a command that runs the following tests:
CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests

Running Tests ...

CtsAnimationTestCases
---------------------

android.animation.cts.EvaluatorTest.UnitTests (7 Tests)
[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms)
[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms)
[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms)
[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms)
[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms)
[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms)
[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms)

HelloWorldTests
---------------

android.test.example.helloworld.UnitTests (2 Tests)
[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms)
[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED (1ms)

WmTests
-------

com.android.tradefed.targetprep.UnitTests (1 Test)
RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError:
Failed to install WmTests.apk on 127.0.0.1:54373. Reason:
    error message ...


Summary
-------
CtsAnimationTestCases: Passed: 7, Failed: 0
HelloWorldTests: Passed: 2, Failed: 0
WmTests: Passed: 0, Failed: 0 (Completed With ERRORS)

1 test failed
"""
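# Typical flow (illustrative sketch only, not executed here): a test runner
# builds a ResultReporter, feeds it one TestResult namedtuple per finished
# test, and asks for the final summary. The loop variable below is
# hypothetical; the real call sites live in the individual test runners.
#
#     reporter = ResultReporter()
#     reporter.print_starting_text()
#     for test in results_from_runner:   # hypothetical iterable of TestResult
#         reporter.process_test_result(test)
#     exit_code = reporter.print_summary()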

from __future__ import print_function

import logging
import os
import re
import zipfile

from collections import OrderedDict

from atest import constants
from atest import atest_configs
from atest import atest_utils as au

from atest.atest_enum import ExitCode
from atest.test_runners import test_runner_base

UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
FAILURE_FLAG = 'RUNNER_FAILURE'
BENCHMARK_ESSENTIAL_KEYS = {'repetition_index', 'cpu_time', 'name', 'repetitions',
                            'run_type', 'threads', 'time_unit', 'iterations',
                            'run_name', 'real_time'}
# TODO(b/146875480): handle the optional benchmark events
BENCHMARK_OPTIONAL_KEYS = {'bytes_per_second', 'label'}
BENCHMARK_EVENT_KEYS = BENCHMARK_ESSENTIAL_KEYS.union(BENCHMARK_OPTIONAL_KEYS)
INT_KEYS = {'cpu_time', 'real_time'}
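# Illustrative (assumed) shape of the additional_info payload for a single
# benchmark test. Values arrive as strings; the INT_KEYS entries are truncated
# to their integer part by PerfInfo.update_perf_info():
#     {'name': 'BpfBenchMark/MapWriteNewEntry/1', 'real_time': '1530.5',
#      'cpu_time': '1522.0', 'time_unit': 'ns', 'iterations': '460517',
#      'run_name': 'BpfBenchMark/MapWriteNewEntry/1', 'run_type': 'iteration',
#      'repetitions': '0', 'repetition_index': '0', 'threads': '1'}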
ITER_SUMMARY = {}

class PerfInfo():
    """Class for storing performance test results of a test run."""

    def __init__(self):
        """Initialize a new instance of PerfInfo class."""
        # perf_info: A list of benchmark_info(dict).
        self.perf_info = []

    def update_perf_info(self, test):
        """Update perf_info with the given result of a single test.

        Args:
            test: A TestResult namedtuple.
        """
        all_additional_keys = set(test.additional_info.keys())
        # Skip tests that are missing any of the essential benchmark keys.
        if not BENCHMARK_ESSENTIAL_KEYS.issubset(all_additional_keys):
            return
        benchmark_info = {}
        benchmark_info['test_name'] = test.test_name
        for key, data in test.additional_info.items():
            if key in INT_KEYS:
                data_to_int = data.split('.')[0]
                benchmark_info[key] = data_to_int
            elif key in BENCHMARK_EVENT_KEYS:
                benchmark_info[key] = data
        if benchmark_info:
            self.perf_info.append(benchmark_info)

    def print_perf_info(self):
        """Print a summary of perf_info."""
        if not self.perf_info:
            return
        classify_perf_info, max_len = self._classify_perf_info()
        separator = '-' * au.get_terminal_size()[0]
        print(separator)
        print("{:{name}}    {:^{real_time}}    {:^{cpu_time}}    "
              "{:>{iterations}}".format(
                  'Benchmark', 'Time', 'CPU', 'Iteration',
                  name=max_len['name']+3,
                  real_time=max_len['real_time']+max_len['time_unit']+1,
                  cpu_time=max_len['cpu_time']+max_len['time_unit']+1,
                  iterations=max_len['iterations']))
        print(separator)
        for module_name, module_perf_info in classify_perf_info.items():
            print("{}:".format(module_name))
            for benchmark_info in module_perf_info:
                # BpfBenchMark/MapWriteNewEntry/1    1530 ns     1522 ns   460517
                print("  #{:{name}}    {:>{real_time}} {:{time_unit}}    "
                      "{:>{cpu_time}} {:{time_unit}}    "
                      "{:>{iterations}}".format(benchmark_info['name'],
                                                benchmark_info['real_time'],
                                                benchmark_info['time_unit'],
                                                benchmark_info['cpu_time'],
                                                benchmark_info['time_unit'],
                                                benchmark_info['iterations'],
                                                name=max_len['name'],
                                                real_time=max_len['real_time'],
                                                time_unit=max_len['time_unit'],
                                                cpu_time=max_len['cpu_time'],
                                                iterations=max_len['iterations']))

    def _classify_perf_info(self):
        """Classify the perf_info by test module name.

        Returns:
            A tuple of (classified_perf_info, max_len), where
            classified_perf_info: A dict that groups perf_info entries by the
                                  module they belong to.
                e.g.
                    { module_name_01: [perf_info of module_1],
                      module_name_02: [perf_info of module_2], ...}
            max_len: A dict which stores the max length of each event.
                     It contains the max string length of 'name', 'real_time',
                     'time_unit', 'cpu_time', 'iterations'.
                e.g.
                    {name: 56, real_time: 9, time_unit: 2, cpu_time: 8,
                     iterations: 12}
        """
        module_categories = set()
        max_len = {}
        all_name = []
        all_real_time = []
        all_time_unit = []
        all_cpu_time = []
        # Seed with the header text so the column is at least as wide as it.
        all_iterations = ['Iteration']
        for benchmark_info in self.perf_info:
            module_categories.add(benchmark_info['test_name'].split('#')[0])
            all_name.append(benchmark_info['name'])
            all_real_time.append(benchmark_info['real_time'])
            all_time_unit.append(benchmark_info['time_unit'])
            all_cpu_time.append(benchmark_info['cpu_time'])
            all_iterations.append(benchmark_info['iterations'])
        classified_perf_info = {}
        for module_name in module_categories:
            module_perf_info = []
            for benchmark_info in self.perf_info:
                if benchmark_info['test_name'].split('#')[0] == module_name:
                    module_perf_info.append(benchmark_info)
            classified_perf_info[module_name] = module_perf_info
        max_len = {'name': len(max(all_name, key=len)),
                   'real_time': len(max(all_real_time, key=len)),
                   'time_unit': len(max(all_time_unit, key=len)),
                   'cpu_time': len(max(all_cpu_time, key=len)),
                   'iterations': len(max(all_iterations, key=len))}
        return classified_perf_info, max_len


class RunStat:
    """Class for storing stats of a test run."""

    def __init__(self, passed=0, failed=0, ignored=0, run_errors=False,
                 assumption_failed=0):
        """Initialize a new instance of RunStat class.

        Args:
            passed: Count of passing tests.
            failed: Count of failed tests.
            ignored: Count of ignored tests.
            assumption_failed: Count of assumption failure tests.
            run_errors: A boolean indicating whether there were run errors.
        """
        # TODO(b/109822985): Track group and run estimated totals for updating
        # summary line
        self.passed = passed
        self.failed = failed
        self.ignored = ignored
        self.assumption_failed = assumption_failed
        self.perf_info = PerfInfo()
        # Run errors are not for particular tests, they are runner errors.
        self.run_errors = run_errors

    @property
    def total(self):
        """Getter for total tests actually run. Accessed via self.total."""
        return self.passed + self.failed


class ResultReporter:
    """Result Reporter class.

    As each test is run, the test runner will call self.process_test_result()
    with a TestResult namedtuple that contains the following information:
    - runner_name:   Name of the test runner.
    - group_name:    Name of the test group, if any.
                     In Tradefed that's the Module name.
    - test_name:     Name of the test.
                     In Tradefed that's qualified.class#Method.
    - status:        The strings FAILED or PASSED.
    - stacktrace:    The stacktrace if the test failed.
    - group_total:   The total tests scheduled to be run for a group.
                     In Tradefed this is provided when the Module starts.
    - runner_total:  The total tests scheduled to be run for the runner.
                     In Tradefed this is not available, so it is None.

    The Result Reporter will print the results of this test and then update
    its stats state.

    Test stats are stored in the following structure:
    - self.run_stats: A RunStat instance containing stats for the overall run.
                      This includes pass/fail counts across ALL test runners.

    - self.runners:  Is of the form: {RunnerName: {GroupName: RunStat Instance}}
                     where {} is an ordered dict.

                     The stats instance contains stats for each test group.
                     If the runner doesn't support groups, then the group
                     name will be None.

    For example, this could be a state of ResultReporter:

    run_stats: RunStat(passed:10, failed:5)
    runners: {'AtestTradefedTestRunner':
                            {'Module1': RunStat(passed:1, failed:1),
                             'Module2': RunStat(passed:0, failed:4)},
              'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)},
              'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}}
    """
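    # Illustrative sketch of a single PASSED result flowing into
    # process_test_result(). The keyword names mirror the fields documented
    # above plus the count/time fields used by _print_result(); the actual
    # TestResult constructor is defined in test_runner_base, so treat this as
    # an assumption rather than the canonical signature.
    #
    #     reporter.process_test_result(test_runner_base.TestResult(
    #         runner_name='AtestTradefedTestRunner',
    #         group_name='HelloWorldTests',
    #         test_run_name='android.test.example.helloworld.UnitTests',
    #         test_name='android.test.example.helloworld.HelloWorldTest#testHelloWorld',
    #         status=test_runner_base.PASSED_STATUS,
    #         details=None, test_count=1, test_time='(1ms)',
    #         runner_total=None, group_total=2, additional_info={}))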

    def __init__(self, silent=False, collect_only=False, flakes_info=False):
        """Init ResultReporter.

        Args:
            silent: A boolean; if True, suppress per-test and title output.
            collect_only: A boolean of whether this run collects tests only.
            flakes_info: A boolean of whether to query flakes information.
        """
        self.run_stats = RunStat()
        self.runners = OrderedDict()
        self.failed_tests = []
        self.all_test_results = []
        self.pre_test = None
        self.log_path = None
        self.silent = silent
        self.rerun_options = ''
        self.collect_only = collect_only
        self.flakes_info = flakes_info
        self.test_result_link = None
        self.device_count = 0

    def process_test_result(self, test):
        """Given the results of a single test, update stats and print results.

        Args:
            test: A TestResult namedtuple.
        """
        if test.runner_name not in self.runners:
            self.runners[test.runner_name] = OrderedDict()
        assert self.runners[test.runner_name] != FAILURE_FLAG
        self.all_test_results.append(test)
        if test.group_name not in self.runners[test.runner_name]:
            self.runners[test.runner_name][test.group_name] = RunStat()
            self._print_group_title(test)
        self._update_stats(test,
                           self.runners[test.runner_name][test.group_name])
        self._print_result(test)

    def runner_failure(self, runner_name, failure_msg):
        """Report a runner failure.

        Use instead of process_test_result() when a runner fails separately
        from any particular test, e.g. during setup of the runner.

        Args:
            runner_name: A string of the name of the runner.
            failure_msg: A string of the failure message to pass to user.
        """
        self.runners[runner_name] = FAILURE_FLAG
        print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
        print('Runner encountered a critical failure. Skipping.\n'
              'FAILURE: %s' % failure_msg)

    def register_unsupported_runner(self, runner_name):
        """Register an unsupported runner.

           Prints the following to the screen:

           RunnerName
           ----------
           This runner does not support normal results formatting.
           Below is the raw output of the test runner.

           RAW OUTPUT:
           <Raw Runner Output>

           Args:
              runner_name: A String of the test runner's name.
        """
        assert runner_name not in self.runners
        self.runners[runner_name] = UNSUPPORTED_FLAG
        print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
        print('This runner does not support normal results formatting. Below '
              'is the raw output of the test runner.\n\nRAW OUTPUT:')

    def print_starting_text(self):
        """Print starting text for running tests."""
        print(au.colorize('\nRunning Tests...', constants.CYAN))

    def set_current_summary(self, run_num):
        """Set current test summary to ITER_SUMMARY."""
        run_summary = []
        for runner_name, groups in self.runners.items():
            for group_name, stats in groups.items():
                name = group_name if group_name else runner_name
                summary = self.process_summary(name, stats)
                run_summary.append(summary)
        summary_list = ITER_SUMMARY.get(run_num, [])
        # Only add summaries that are not already recorded for this run.
        if not set(run_summary).issubset(set(summary_list)):
            summary_list.extend(run_summary)
            ITER_SUMMARY[run_num] = summary_list
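        # Illustrative (assumed) ITER_SUMMARY content after two iterations of
        # the same module, keyed by run number:
        #     {0: ['HelloWorldTests: Passed: 2, Failed: 0, ...'],
        #      1: ['HelloWorldTests: Passed: 2, Failed: 0, ...']}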

    # pylint: disable=too-many-branches
    def print_summary(self):
        """Print summary of all test runs.

        Returns:
            0 if all tests pass, non-zero otherwise.

        """
        if self.collect_only:
            return self.print_collect_tests()
        tests_ret = ExitCode.SUCCESS
        if not self.runners:
            return tests_ret
        if not self.device_count:
            device_detail = ''
        elif self.device_count == 1:
            device_detail = '(Test executed with 1 device.)'
        else:
            device_detail = f'(Test executed with {self.device_count} devices.)'
        print('\n{}'.format(au.colorize(f'Summary {device_detail}',
                                        constants.CYAN)))
        print(au.delimiter('-', 7))
        iterations = len(ITER_SUMMARY)
        for iter_num, summary_list in ITER_SUMMARY.items():
            if iterations > 1:
                print(au.colorize("ITERATION %s" % (int(iter_num) + 1),
                                  constants.BLUE))
            for summary in summary_list:
                print(summary)
        failed_sum = len(self.failed_tests)
        for runner_name, groups in self.runners.items():
            if groups == UNSUPPORTED_FLAG:
                print(f'Pretty output does not support {runner_name}. '
                      r'See raw output above.')
                continue
            if groups == FAILURE_FLAG:
                tests_ret = ExitCode.TEST_FAILURE
                print(runner_name, 'Crashed. No results to report.')
                failed_sum += 1
                continue
            for group_name, stats in groups.items():
                name = group_name if group_name else runner_name
                summary = self.process_summary(name, stats)
                if stats.failed > 0 or stats.run_errors:
                    tests_ret = ExitCode.TEST_FAILURE
                    if stats.run_errors:
                        failed_sum += 1 if not stats.failed else 0
                if not ITER_SUMMARY:
                    print(summary)
        self.run_stats.perf_info.print_perf_info()
        print()
        if UNSUPPORTED_FLAG not in self.runners.values():
            if tests_ret == ExitCode.SUCCESS:
                print(au.colorize('All tests passed!', constants.GREEN))
            else:
                message = '%d %s failed' % (failed_sum,
                                            'tests' if failed_sum > 1 else 'test')
                print(au.colorize(message, constants.RED))
                print('-'*len(message))
                self.print_failed_tests()
        if self.log_path:
            # Print aggregate result if any.
            self._print_aggregate_test_metrics()
            print('Test logs have been saved in %s' % self.log_path)
        # TODO(b/174535786): Handle unexpected exceptions when uploading test
        # results.
        # TODO(b/174627499): Save this information in atest history.
        if self.test_result_link:
            print('Test Result uploaded to %s'
                  % au.colorize(self.test_result_link, constants.GREEN))
        return tests_ret

    def _print_aggregate_test_metrics(self):
        """Print aggregate test metrics text content if metric files exist."""
        metric_files = au.find_files(
            self.log_path, file_name='*_aggregate_test_metrics_*.txt')

        if metric_files:
            print('\n{}'.format(au.colorize(
                'Aggregate test metrics', constants.CYAN)))
            print(au.delimiter('-', 7))
            for metric_file in metric_files:
                self._print_test_metric(metric_file)

    def _print_test_metric(self, metric_file):
        """Print the content of the input metric file."""
        test_metrics_re = re.compile(
            r'test_results.*\s(.*)_aggregate_test_metrics_.*\.txt')
        if not os.path.isfile(metric_file):
            return
        matches = re.findall(test_metrics_re, metric_file)
        test_name = matches[0] if matches else ''
        if test_name:
            print('{}:'.format(au.colorize(test_name, constants.CYAN)))
            with open(metric_file, 'r') as f:
                matched = False
                filter_res = atest_configs.GLOBAL_ARGS.aggregate_metric_filter
                logging.debug('Aggregate metric filters: %s', filter_res)
                test_methods = []
                # Collect all test methods.
                if filter_res:
                    test_re = re.compile(r'\n\n(\S+)\n\n', re.MULTILINE)
                    test_methods = re.findall(test_re, f.read())
                    f.seek(0)
                    # The first line of the file is also a test method but
                    # cannot be parsed by test_re; add it manually.
                    first_line = f.readline()
                    test_methods.insert(0, str(first_line).strip())
                    f.seek(0)
                for line in f.readlines():
                    stripped_line = str(line).strip()
                    if filter_res:
                        if stripped_line in test_methods:
                            print()
                            au.colorful_print(
                                ' ' * 4 + stripped_line, constants.MAGENTA)
                        for filter_re in filter_res:
                            if re.match(re.compile(filter_re), line):
                                matched = True
                                print(' ' * 4 + stripped_line)
                    else:
                        matched = True
                        print(' ' * 4 + stripped_line)
                if not matched:
                    au.colorful_print(
                        '  Warning: Nothing returned by the pattern: {}'.format(
                            filter_res), constants.RED)
                print()

    def print_collect_tests(self):
        """Print summary of collect tests only.

        Returns:
            0 if test collection is done.

        """
        tests_ret = ExitCode.SUCCESS
        if not self.runners:
            return tests_ret
        print('\n{}'.format(au.colorize('Summary:' + constants.COLLECT_TESTS_ONLY,
                                        constants.CYAN)))
        print(au.delimiter('-', 26))
        for runner_name, groups in self.runners.items():
            for group_name, _ in groups.items():
                name = group_name if group_name else runner_name
                print(name)
        print()
        if self.log_path:
            print('Test logs have been saved in %s' % self.log_path)
        return ExitCode.SUCCESS

    def print_failed_tests(self):
        """Print the failed tests, if any exist."""
        if self.failed_tests:
            for test_name in self.failed_tests:
                failed_details = test_name
                if self.flakes_info:
                    flakes_method = test_name.replace('#', '.')
                    flakes_info = au.get_flakes(test_method=flakes_method)
                    if (flakes_info and
                            flakes_info.get(constants.FLAKE_PERCENT, None)):
                        failed_details += (
                            ': flakes percent: {}%, flakes postsubmit per week:'
                            ' {}'.format(float(flakes_info.get(
                                constants.FLAKE_PERCENT)),
                                         flakes_info.get(
                                             constants.FLAKE_POSTSUBMIT, '0')))
                print(failed_details)

    # pylint: disable=too-many-locals
    def process_summary(self, name, stats):
        """Process the summary line.

        Strategy:
            Error status happens ->
                SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red>
                SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With ERRORS)</red>
            More than 1 test fails ->
                SomeTests: Passed: 2, <red>Failed</red>: 5
            No test fails ->
                SomeTests: <green>Passed</green>: 2, Failed: 0

        Args:
            name: A string of the test name.
            stats: A RunStat instance for a test group.

        Returns:
            A summary of the test result.
        """
        passed_label = 'Passed'
        failed_label = 'Failed'
        flakes_label = ''
        ignored_label = 'Ignored'
        assumption_failed_label = 'Assumption Failed'
        error_label = ''
        host_log_content = ''
        flakes_percent = ''
        if stats.failed > 0:
            failed_label = au.colorize(failed_label, constants.RED)
            mod_list = name.split()
            module = ''
            if len(mod_list) > 1:
                module = mod_list[1]
            if module and self.flakes_info:
                flakes_info = au.get_flakes(test_module=module)
                if (flakes_info and
                        flakes_info.get(constants.FLAKE_PERCENT, None)):
                    flakes_label = au.colorize('Flakes Percent:',
                                               constants.RED)
                    flakes_percent = '{:.2f}%'.format(float(flakes_info.get(
                        constants.FLAKE_PERCENT)))
        if stats.run_errors:
            error_label = au.colorize('(Completed With ERRORS)', constants.RED)
            # Only extract host_log_content if the runner is Tradefed.
            # Import here to prevent circular-import error.
            from atest.test_runners import atest_tf_test_runner
            if name == atest_tf_test_runner.AtestTradefedTestRunner.NAME:
                find_logs = au.find_files(self.log_path,
                                          file_name=constants.TF_HOST_LOG)
                if find_logs:
                    host_log_content = au.colorize(
                        '\n\nTradefederation host log:\n', constants.RED)
                for tf_log in find_logs:
                    if zipfile.is_zipfile(tf_log):
                        host_log_content = (host_log_content +
                                            au.extract_zip_text(tf_log))
                    else:
                        with open(tf_log, 'r') as f:
                            for line in f.readlines():
                                host_log_content = host_log_content + line

            # Print the content of the standard error file for a single module.
            if name and self.log_path and len(str(name).split()) > 1:
                log_name = str(name).split()[1] + '-stderr_*.txt'
                module_logs = au.find_files(self.log_path, file_name=log_name)
                for log_file in module_logs:
                    print(' ' * 2 + au.colorize(
                        f'Logs in {os.path.basename(log_file)}:',
                        constants.MAGENTA))
                    with open(log_file, 'r') as f:
                        for line in f.readlines():
                            print(' ' * 2 + str(line), end='')
        elif stats.failed == 0:
            passed_label = au.colorize(passed_label, constants.GREEN)
        summary = ('%s: %s: %s, %s: %s, %s: %s, %s: %s, %s %s %s %s'
                   % (name,
                      passed_label,
                      stats.passed,
                      failed_label,
                      stats.failed,
                      ignored_label,
                      stats.ignored,
                      assumption_failed_label,
                      stats.assumption_failed,
                      flakes_label,
                      flakes_percent,
                      error_label,
                      host_log_content))
        return summary

    def _update_stats(self, test, group):
        """Given the results of a single test, update test run stats.

        Args:
            test: a TestResult namedtuple.
            group: a RunStat instance for a test group.
        """
        # TODO(109822985): Track group and run estimated totals for updating
        # summary line
        if test.status == test_runner_base.PASSED_STATUS:
            self.run_stats.passed += 1
            group.passed += 1
        elif test.status == test_runner_base.IGNORED_STATUS:
            self.run_stats.ignored += 1
            group.ignored += 1
        elif test.status == test_runner_base.ASSUMPTION_FAILED:
            self.run_stats.assumption_failed += 1
            group.assumption_failed += 1
        elif test.status == test_runner_base.FAILED_STATUS:
            self.run_stats.failed += 1
            self.failed_tests.append(test.test_name)
            group.failed += 1
        elif test.status == test_runner_base.ERROR_STATUS:
            self.run_stats.run_errors = True
            group.run_errors = True
        self.run_stats.perf_info.update_perf_info(test)

    def _print_group_title(self, test):
        """Print the title line for a test group.

        Test Group/Runner Name
        ----------------------

        Args:
            test: A TestResult namedtuple.
        """
        if self.silent:
            return
        title = test.group_name or test.runner_name
        underline = '-' * (len(title))
        print('\n%s\n%s' % (title, underline))

    # pylint: disable=too-many-branches
    def _print_result(self, test):
        """Print the results of a single test.

           Looks like:
           fully.qualified.class#TestMethod: PASSED/FAILED

        Args:
            test: a TestResult namedtuple.
        """
        if self.silent:
            return
        if not self.pre_test or (test.test_run_name !=
                                 self.pre_test.test_run_name):
            print('%s (%s %s)' % (au.colorize(test.test_run_name,
                                              constants.BLUE),
                                  test.group_total,
                                  'Test' if test.group_total == 1 else 'Tests'))
        if test.status == test_runner_base.ERROR_STATUS:
            print('RUNNER ERROR: %s\n' % test.details)
            self.pre_test = test
            return
        if test.test_name:
            color = ''
            if test.status == test_runner_base.PASSED_STATUS:
                # Example of output:
                # [78/92] test_name: PASSED (92ms)
                color = constants.GREEN
            elif test.status in (test_runner_base.IGNORED_STATUS,
                                 test_runner_base.ASSUMPTION_FAILED):
                # Example: [33/92] test_name: IGNORED (12ms)
                # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms)
                color = constants.MAGENTA
            else:
                # Example: [26/92] test_name: FAILED (32ms)
                color = constants.RED
            print('[{}/{}] {}'.format(test.test_count,
                                      test.group_total,
                                      test.test_name), end='')
            if self.collect_only:
                print()
            else:
                print(': {} {}'.format(au.colorize(test.status, color),
                                       test.test_time))
            if test.status == test_runner_base.PASSED_STATUS:
                for key, data in sorted(test.additional_info.items()):
                    if key not in BENCHMARK_EVENT_KEYS:
                        print('\t%s: %s' % (au.colorize(key, constants.BLUE),
                                            data))
            if test.status == test_runner_base.FAILED_STATUS:
                print(f'\nSTACKTRACE:\n{test.details}')
        self.pre_test = test