1# Copyright 2018, The Android Open Source Project 2# 3# Licensed under the Apache License, Version 2.0 (the "License"); 4# you may not use this file except in compliance with the License. 5# You may obtain a copy of the License at 6# 7# http://www.apache.org/licenses/LICENSE-2.0 8# 9# Unless required by applicable law or agreed to in writing, software 10# distributed under the License is distributed on an "AS IS" BASIS, 11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12# See the License for the specific language governing permissions and 13# limitations under the License. 14""" 15Result Reporter 16 17The result reporter formats and prints test results. 18 19---- 20Example Output for command to run following tests: 21CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests 22 23Running Tests ... 24 25CtsAnimationTestCases (7 Tests) 26------------------------------ 27[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms) 28[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms) 29[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms) 30[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms) 31[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms) 32[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms) 33[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms) 34 35HelloWorldTests (2 Tests) 36------------------------ 37[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms) 38[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED (1ms) 39 40WmTests (1 Test) 41--------------- 42RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError: 43Failed to install WmTests.apk on 127.0.0.1:54373. Reason: 44 error message ... 45 46 47Summary 48------- 49CtsAnimationTestCases: Passed: 7, Failed: 0 50HelloWorldTests: Passed: 2, Failed: 0 51WmTests: Passed: 0, Failed: 0 (Completed With ERRORS) 52 531 test failed 54""" 55 56from __future__ import print_function 57from collections import OrderedDict 58 59import constants 60import atest_utils as au 61 62from test_runners import test_runner_base 63 64UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER' 65FAILURE_FLAG = 'RUNNER_FAILURE' 66 67 68class RunStat(object): 69 """Class for storing stats of a test run.""" 70 71 def __init__(self, passed=0, failed=0, ignored=0, run_errors=False, 72 assumption_failed=0): 73 """Initialize a new instance of RunStat class. 74 75 Args: 76 passed: Count of passing tests. 77 failed: Count of failed tests. 78 ignored: Count of ignored tests. 79 assumption_failed: Count of assumption failure tests. 80 run_errors: A boolean if there were run errors 81 """ 82 # TODO(b/109822985): Track group and run estimated totals for updating 83 # summary line 84 self.passed = passed 85 self.failed = failed 86 self.ignored = ignored 87 self.assumption_failed = assumption_failed 88 # Run errors are not for particular tests, they are runner errors. 89 self.run_errors = run_errors 90 91 @property 92 def total(self): 93 """Getter for total tests actually ran. Accessed via self.total""" 94 return self.passed + self.failed 95 96 97class ResultReporter(object): 98 """Result Reporter class. 99 100 As each test is run, the test runner will call self.process_test_result() 101 with a TestResult namedtuple that contains the following information: 102 - runner_name: Name of the test runner 103 - group_name: Name of the test group if any. 104 In Tradefed that's the Module name. 105 - test_name: Name of the test. 106 In Tradefed that's qualified.class#Method 107 - status: The strings FAILED or PASSED. 108 - stacktrace: The stacktrace if the test failed. 109 - group_total: The total tests scheduled to be run for a group. 110 In Tradefed this is provided when the Module starts. 111 - runner_total: The total tests scheduled to be run for the runner. 112 In Tradefed this is not available so is None. 113 114 The Result Reporter will print the results of this test and then update 115 its stats state. 116 117 Test stats are stored in the following structure: 118 - self.run_stats: Is RunStat instance containing stats for the overall run. 119 This include pass/fail counts across ALL test runners. 120 121 - self.runners: Is of the form: {RunnerName: {GroupName: RunStat Instance}} 122 Where {} is an ordered dict. 123 124 The stats instance contains stats for each test group. 125 If the runner doesn't support groups, then the group 126 name will be None. 127 128 For example this could be a state of ResultReporter: 129 130 run_stats: RunStat(passed:10, failed:5) 131 runners: {'AtestTradefedTestRunner': 132 {'Module1': RunStat(passed:1, failed:1), 133 'Module2': RunStat(passed:0, failed:4)}, 134 'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)}, 135 'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}} 136 """ 137 138 def __init__(self): 139 self.run_stats = RunStat() 140 self.runners = OrderedDict() 141 self.failed_tests = [] 142 self.all_test_results = [] 143 144 def process_test_result(self, test): 145 """Given the results of a single test, update stats and print results. 146 147 Args: 148 test: A TestResult namedtuple. 149 """ 150 if test.runner_name not in self.runners: 151 self.runners[test.runner_name] = OrderedDict() 152 assert self.runners[test.runner_name] != FAILURE_FLAG 153 self.all_test_results.append(test) 154 if test.group_name not in self.runners[test.runner_name]: 155 self.runners[test.runner_name][test.group_name] = RunStat() 156 self._print_group_title(test) 157 self._update_stats(test, 158 self.runners[test.runner_name][test.group_name]) 159 self._print_result(test) 160 161 def runner_failure(self, runner_name, failure_msg): 162 """Report a runner failure. 163 164 Use instead of process_test_result() when runner fails separate from 165 any particular test, e.g. during setup of runner. 166 167 Args: 168 runner_name: A string of the name of the runner. 169 failure_msg: A string of the failure message to pass to user. 170 """ 171 self.runners[runner_name] = FAILURE_FLAG 172 print('\n', runner_name, '\n', '-' * len(runner_name), sep='') 173 print('Runner encountered a critical failure. Skipping.\n' 174 'FAILURE: %s' % failure_msg) 175 176 def register_unsupported_runner(self, runner_name): 177 """Register an unsupported runner. 178 179 Prints the following to the screen: 180 181 RunnerName 182 ---------- 183 This runner does not support normal results formatting. 184 Below is the raw output of the test runner. 185 186 RAW OUTPUT: 187 <Raw Runner Output> 188 189 Args: 190 runner_name: A String of the test runner's name. 191 """ 192 assert runner_name not in self.runners 193 self.runners[runner_name] = UNSUPPORTED_FLAG 194 print('\n', runner_name, '\n', '-' * len(runner_name), sep='') 195 print('This runner does not support normal results formatting. Below ' 196 'is the raw output of the test runner.\n\nRAW OUTPUT:') 197 198 def print_starting_text(self): 199 """Print starting text for running tests.""" 200 print(au.colorize('\nRunning Tests...', constants.CYAN)) 201 202 def print_summary(self): 203 """Print summary of all test runs. 204 205 Returns: 206 0 if all tests pass, non-zero otherwise. 207 208 """ 209 tests_ret = constants.EXIT_CODE_SUCCESS 210 if not self.runners: 211 return tests_ret 212 print('\n%s' % au.colorize('Summary', constants.CYAN)) 213 print('-------') 214 failed_sum = len(self.failed_tests) 215 for runner_name, groups in self.runners.items(): 216 if groups == UNSUPPORTED_FLAG: 217 print(runner_name, 'Unsupported. See raw output above.') 218 continue 219 if groups == FAILURE_FLAG: 220 tests_ret = constants.EXIT_CODE_TEST_FAILURE 221 print(runner_name, 'Crashed. No results to report.') 222 failed_sum += 1 223 continue 224 for group_name, stats in groups.items(): 225 name = group_name if group_name else runner_name 226 summary = self.process_summary(name, stats) 227 if stats.failed > 0: 228 tests_ret = constants.EXIT_CODE_TEST_FAILURE 229 if stats.run_errors: 230 tests_ret = constants.EXIT_CODE_TEST_FAILURE 231 failed_sum += 1 if not stats.failed else 0 232 print(summary) 233 print() 234 if tests_ret == constants.EXIT_CODE_SUCCESS: 235 print(au.colorize('All tests passed!', constants.GREEN)) 236 else: 237 message = '%d %s failed' % (failed_sum, 238 'tests' if failed_sum > 1 else 'test') 239 print(au.colorize(message, constants.RED)) 240 print('-'*len(message)) 241 self.print_failed_tests() 242 return tests_ret 243 244 def print_failed_tests(self): 245 """Print the failed tests if existed.""" 246 if self.failed_tests: 247 for test_name in self.failed_tests: 248 print('%s' % test_name) 249 250 def process_summary(self, name, stats): 251 """Process the summary line. 252 253 Strategy: 254 Error status happens -> 255 SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red> 256 SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With ERRORS)</red> 257 More than 1 test fails -> 258 SomeTests: Passed: 2, <red>Failed</red>: 5 259 No test fails -> 260 SomeTests: <green>Passed</green>: 2, Failed: 0 261 262 Args: 263 name: A string of test name. 264 stats: A RunStat instance for a test group. 265 266 Returns: 267 A summary of the test result. 268 """ 269 passed_label = 'Passed' 270 failed_label = 'Failed' 271 ignored_label = 'Ignored' 272 assumption_failed_label = 'Assumption Failed' 273 error_label = '' 274 if stats.failed > 0: 275 failed_label = au.colorize(failed_label, constants.RED) 276 if stats.run_errors: 277 error_label = au.colorize('(Completed With ERRORS)', constants.RED) 278 elif stats.failed == 0: 279 passed_label = au.colorize(passed_label, constants.GREEN) 280 summary = '%s: %s: %s, %s: %s, %s: %s, %s: %s %s' % (name, 281 passed_label, 282 stats.passed, 283 failed_label, 284 stats.failed, 285 ignored_label, 286 stats.ignored, 287 assumption_failed_label, 288 stats.assumption_failed, 289 error_label) 290 return summary 291 292 def _update_stats(self, test, group): 293 """Given the results of a single test, update test run stats. 294 295 Args: 296 test: a TestResult namedtuple. 297 group: a RunStat instance for a test group. 298 """ 299 # TODO(109822985): Track group and run estimated totals for updating 300 # summary line 301 if test.status == test_runner_base.PASSED_STATUS: 302 self.run_stats.passed += 1 303 group.passed += 1 304 elif test.status == test_runner_base.IGNORED_STATUS: 305 self.run_stats.ignored += 1 306 group.ignored += 1 307 elif test.status == test_runner_base.ASSUMPTION_FAILED: 308 self.run_stats.assumption_failed += 1 309 group.assumption_failed += 1 310 elif test.status == test_runner_base.FAILED_STATUS: 311 self.run_stats.failed += 1 312 self.failed_tests.append(test.test_name) 313 group.failed += 1 314 elif test.status == test_runner_base.ERROR_STATUS: 315 self.run_stats.run_errors = True 316 group.run_errors = True 317 318 def _print_group_title(self, test): 319 """Print the title line for a test group. 320 321 Test Group/Runner Name (## Total) 322 --------------------------------- 323 324 Args: 325 test: A TestResult namedtuple. 326 """ 327 title = test.group_name or test.runner_name 328 total = '' 329 if test.group_total: 330 if test.group_total > 1: 331 total = '(%s Tests)' % test.group_total 332 else: 333 total = '(%s Test)' % test.group_total 334 underline = '-' * (len(title) + len(total)) 335 print('\n%s %s\n%s' % (title, total, underline)) 336 337 def _print_result(self, test): 338 """Print the results of a single test. 339 340 Looks like: 341 fully.qualified.class#TestMethod: PASSED/FAILED 342 343 Args: 344 test: a TestResult namedtuple. 345 """ 346 if test.status == test_runner_base.ERROR_STATUS: 347 print('RUNNER ERROR: %s\n' % test.details) 348 return 349 if test.test_name: 350 if test.status == test_runner_base.PASSED_STATUS: 351 # Example of output: 352 # [78/92] test_name: PASSED (92ms) 353 print('[%s/%s] %s: %s %s' % (test.test_count, 354 test.group_total, 355 test.test_name, 356 au.colorize( 357 test.status, 358 constants.GREEN), 359 test.test_time)) 360 if test.perf_info.keys(): 361 print('\t%s: %s(ns) %s: %s(ns) %s: %s' 362 %(au.colorize('cpu_time', constants.BLUE), 363 test.perf_info['cpu_time'], 364 au.colorize('real_time', constants.BLUE), 365 test.perf_info['real_time'], 366 au.colorize('iterations', constants.BLUE), 367 test.perf_info['iterations'])) 368 elif test.status == test_runner_base.IGNORED_STATUS: 369 # Example: [33/92] test_name: IGNORED (12ms) 370 print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total, 371 test.test_name, au.colorize( 372 test.status, constants.MAGENTA), 373 test.test_time)) 374 elif test.status == test_runner_base.ASSUMPTION_FAILED: 375 # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms) 376 print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total, 377 test.test_name, au.colorize( 378 test.status, constants.MAGENTA), 379 test.test_time)) 380 else: 381 # Example: [26/92] test_name: FAILED (32ms) 382 print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total, 383 test.test_name, au.colorize( 384 test.status, constants.RED), 385 test.test_time)) 386 if test.status == test_runner_base.FAILED_STATUS: 387 print('\nSTACKTRACE:\n%s' % test.details) 388