1 /* 2 * Copyright (C) 2018 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 package com.android.tradefed.result; 17 18 import com.android.annotations.VisibleForTesting; 19 import com.android.tradefed.log.LogUtil.CLog; 20 import com.android.tradefed.metrics.proto.MetricMeasurement.Metric; 21 import com.android.tradefed.result.skipped.SkipReason; 22 import com.android.tradefed.retry.MergeStrategy; 23 import com.android.tradefed.util.MultiMap; 24 import com.android.tradefed.util.proto.TfMetricProtoUtil; 25 26 import java.util.ArrayList; 27 import java.util.Arrays; 28 import java.util.Collection; 29 import java.util.HashMap; 30 import java.util.LinkedHashMap; 31 import java.util.LinkedHashSet; 32 import java.util.List; 33 import java.util.Map; 34 import java.util.Set; 35 36 /** 37 * Holds results from a single test run. 38 * 39 * <p>Maintains an accurate count of tests, and tracks incomplete tests. 40 * 41 * <p>Not thread safe! The test* callbacks must be called in order 42 */ 43 public class TestRunResult { 44 45 public static final String ERROR_DIVIDER = "\n====Next Error====\n"; 46 private String mTestRunName; 47 // Uses a LinkedHashMap to have predictable iteration order 48 private Map<TestDescription, TestResult> mTestResults = 49 new LinkedHashMap<TestDescription, TestResult>(); 50 // Store the metrics for the run 51 private Map<String, String> mRunMetrics = new HashMap<>(); 52 private HashMap<String, Metric> mRunProtoMetrics = new HashMap<>(); 53 // Log files associated with the test run itself (testRunStart / testRunEnd). 54 private MultiMap<String, LogFile> mRunLoggedFiles; 55 private boolean mIsRunComplete = false; 56 private long mElapsedTime = 0L; 57 private long mStartTime = 0L; 58 59 private TestResult mCurrentTestResult; 60 61 /** represents sums of tests in each TestStatus state. Indexed by TestStatus.ordinal() */ 62 private int[] mStatusCounts = new int[com.android.tradefed.result.TestStatus.values().length]; 63 /** tracks if mStatusCounts is accurate, or if it needs to be recalculated */ 64 private boolean mIsCountDirty = true; 65 66 private FailureDescription mRunFailureError = null; 67 68 private boolean mAggregateMetrics = false; 69 70 private int mExpectedTestCount = 0; 71 72 /** Create an empty{@link TestRunResult}. */ TestRunResult()73 public TestRunResult() { 74 mTestRunName = "not started"; 75 mRunLoggedFiles = new MultiMap<String, LogFile>(); 76 } 77 setAggregateMetrics(boolean metricAggregation)78 public void setAggregateMetrics(boolean metricAggregation) { 79 mAggregateMetrics = metricAggregation; 80 } 81 82 /** @return the test run name */ getName()83 public String getName() { 84 return mTestRunName; 85 } 86 87 /** Returns a map of the test results. */ getTestResults()88 public Map<TestDescription, TestResult> getTestResults() { 89 return mTestResults; 90 } 91 92 /** @return a {@link Map} of the test run metrics. */ getRunMetrics()93 public Map<String, String> getRunMetrics() { 94 return mRunMetrics; 95 } 96 97 /** @return a {@link Map} of the test run metrics with the new proto format. */ getRunProtoMetrics()98 public HashMap<String, Metric> getRunProtoMetrics() { 99 return mRunProtoMetrics; 100 } 101 102 /** Gets the set of completed tests. */ getCompletedTests()103 public Set<TestDescription> getCompletedTests() { 104 List<TestStatus> completedStatuses = new ArrayList<>(); 105 for (TestStatus s : TestStatus.values()) { 106 if (!s.equals(TestStatus.INCOMPLETE)) { 107 completedStatuses.add(s); 108 } 109 } 110 return getTestsInState(completedStatuses); 111 } 112 113 /** Gets the set of failed tests. */ getFailedTests()114 public Set<TestDescription> getFailedTests() { 115 return getTestsInState(Arrays.asList(TestStatus.FAILURE)); 116 } 117 118 /** Gets the set of passed tests. */ getPassedTests()119 public Set<TestDescription> getPassedTests() { 120 return getTestsInState(Arrays.asList(TestStatus.PASSED)); 121 } 122 123 /** Gets the set of tests in given statuses. */ getTestsInState(List<TestStatus> statuses)124 public Set<TestDescription> getTestsInState(List<TestStatus> statuses) { 125 Set<TestDescription> tests = new LinkedHashSet<>(); 126 for (Map.Entry<TestDescription, TestResult> testEntry : getTestResults().entrySet()) { 127 TestStatus status = testEntry.getValue().getResultStatus(); 128 if (statuses.contains(status)) { 129 tests.add(testEntry.getKey()); 130 } 131 } 132 return tests; 133 } 134 135 /** Gets the set of tests in given statuses. */ getTestEntriesInState(Collection<TestStatus> statuses)136 public Map<TestDescription, TestResult> getTestEntriesInState(Collection<TestStatus> statuses) { 137 Map<TestDescription, TestResult> tests = new LinkedHashMap<>(); 138 for (Map.Entry<TestDescription, TestResult> testEntry : getTestResults().entrySet()) { 139 TestStatus status = testEntry.getValue().getResultStatus(); 140 if (statuses.contains(status)) { 141 tests.put(testEntry.getKey(), testEntry.getValue()); 142 } 143 } 144 return tests; 145 } 146 147 /** @return <code>true</code> if test run failed. */ isRunFailure()148 public boolean isRunFailure() { 149 return mRunFailureError != null; 150 } 151 152 /** @return <code>true</code> if test run finished. */ isRunComplete()153 public boolean isRunComplete() { 154 return mIsRunComplete; 155 } 156 157 /** Decides whether or not considering an aggregation of runs completed or not. */ isRunComplete( boolean isAtLeastOneCompleted, boolean areAllCompleted, MergeStrategy strategy)158 private static boolean isRunComplete( 159 boolean isAtLeastOneCompleted, boolean areAllCompleted, MergeStrategy strategy) { 160 switch (strategy) { 161 case ANY_PASS_IS_PASS: 162 case ONE_TESTRUN_PASS_IS_PASS: 163 return isAtLeastOneCompleted; 164 case ONE_TESTCASE_PASS_IS_PASS: 165 case ANY_FAIL_IS_FAIL: 166 default: 167 return areAllCompleted; 168 } 169 } 170 setRunComplete(boolean runComplete)171 public void setRunComplete(boolean runComplete) { 172 mIsRunComplete = runComplete; 173 } 174 175 /** 176 * Gets the number of test cases this TestRunResult expects to have. The actual number may be 177 * less than the expected number due to test crashes. Normally, such a mismatch indicates a test 178 * run failure. 179 */ getExpectedTestCount()180 public int getExpectedTestCount() { 181 return mExpectedTestCount; 182 } 183 184 /** FOR COMPATIBILITY with older status. Use {@link #getNumTestsInState(TestStatus)} instead. */ getNumTestsInState( com.android.ddmlib.testrunner.TestResult.TestStatus ddmlibStatus)185 public int getNumTestsInState( 186 com.android.ddmlib.testrunner.TestResult.TestStatus ddmlibStatus) { 187 return getNumTestsInState(TestStatus.convertFromDdmlibType(ddmlibStatus)); 188 } 189 190 /** Gets the number of tests in given state for this run. */ getNumTestsInState(TestStatus status)191 public int getNumTestsInState(TestStatus status) { 192 if (mIsCountDirty) { 193 // clear counts 194 for (int i = 0; i < mStatusCounts.length; i++) { 195 mStatusCounts[i] = 0; 196 } 197 // now recalculate 198 for (TestResult r : mTestResults.values()) { 199 mStatusCounts[r.getResultStatus().ordinal()]++; 200 } 201 mIsCountDirty = false; 202 } 203 return mStatusCounts[status.ordinal()]; 204 } 205 206 /** Returns all the {@link TestResult} in a particular state. */ getTestsResultsInState(TestStatus status)207 public List<TestResult> getTestsResultsInState(TestStatus status) { 208 List<TestResult> results = new ArrayList<>(); 209 for (TestResult r : mTestResults.values()) { 210 if (r.getResultStatus().equals(status)) { 211 results.add(r); 212 } 213 } 214 return results; 215 } 216 217 /** Gets the number of tests in this run. */ getNumTests()218 public int getNumTests() { 219 return mTestResults.size(); 220 } 221 222 /** Gets the number of complete tests in this run ie with status != incomplete. */ getNumCompleteTests()223 public int getNumCompleteTests() { 224 return getNumTests() - getNumTestsInState(TestStatus.INCOMPLETE); 225 } 226 227 /** @return <code>true</code> if test run had any failed or error tests. */ hasFailedTests()228 public boolean hasFailedTests() { 229 return getNumAllFailedTests() > 0; 230 } 231 232 /** Return total number of tests in a failure state (failed, assumption failure) */ getNumAllFailedTests()233 public int getNumAllFailedTests() { 234 return getNumTestsInState(TestStatus.FAILURE); 235 } 236 237 /** Returns the current run elapsed time. */ getElapsedTime()238 public long getElapsedTime() { 239 return mElapsedTime; 240 } 241 242 /** Returns the start time of the first testRunStart call. */ getStartTime()243 public long getStartTime() { 244 return mStartTime; 245 } 246 247 /** Return the run failure error message, <code>null</code> if run did not fail. */ getRunFailureMessage()248 public String getRunFailureMessage() { 249 if (mRunFailureError == null) { 250 return null; 251 } 252 return mRunFailureError.getErrorMessage(); 253 } 254 255 /** Returns the run failure descriptor, <code>null</code> if run did not fail. */ getRunFailureDescription()256 public FailureDescription getRunFailureDescription() { 257 return mRunFailureError; 258 } 259 260 /** 261 * Reset the run failure status. 262 * 263 * <p>Resetting the run failure status is sometimes required when retrying. This should be done 264 * with care to avoid clearing a real failure. 265 */ resetRunFailure()266 public void resetRunFailure() { 267 mRunFailureError = null; 268 } 269 270 /** 271 * Notify that a test run started. 272 * 273 * @param runName the name associated to the test run for tracking purpose. 274 * @param testCount the number of expected test cases associated with the test run. 275 */ testRunStarted(String runName, int testCount)276 public void testRunStarted(String runName, int testCount) { 277 testRunStarted(runName, testCount, System.currentTimeMillis()); 278 } 279 280 /** 281 * Notify that a test run started. 282 * 283 * @param runName the name associated to the test run for tracking purpose. 284 * @param testCount the number of expected test cases associated with the test run. 285 */ testRunStarted(String runName, int testCount, long startTime)286 public void testRunStarted(String runName, int testCount, long startTime) { 287 // A run may be started multiple times due to crashes or other reasons. Normally the first 288 // run reflect the expected number of test "testCount". To avoid latter TestRunStarted 289 // overrides the expected count, only the first testCount will be recorded. 290 // mExpectedTestCount is initialized as 0. 291 if (mExpectedTestCount == 0) { 292 mExpectedTestCount = testCount; 293 } else { 294 CLog.w( 295 "%s calls testRunStarted more than once. Previous expected count: %s. " 296 + "New Expected count: %s", 297 runName, mExpectedTestCount, mExpectedTestCount + testCount); 298 mExpectedTestCount += testCount; 299 } 300 mTestRunName = runName; 301 mIsRunComplete = false; 302 if (mStartTime == 0L) { 303 mStartTime = startTime; 304 } 305 // Do not reset mRunFailureError since for re-run we want to preserve previous failures. 306 } 307 testStarted(TestDescription test)308 public void testStarted(TestDescription test) { 309 testStarted(test, System.currentTimeMillis()); 310 } 311 testStarted(TestDescription test, long startTime)312 public void testStarted(TestDescription test, long startTime) { 313 mCurrentTestResult = new TestResult(); 314 mCurrentTestResult.setStartTime(startTime); 315 addTestResult(test, mCurrentTestResult); 316 } 317 318 @VisibleForTesting addTestResult(TestDescription test, TestResult testResult)319 public void addTestResult(TestDescription test, TestResult testResult) { 320 mIsCountDirty = true; 321 mTestResults.put(test, testResult); 322 } 323 updateTestResult( TestDescription test, com.android.tradefed.result.TestStatus status, FailureDescription failure)324 private void updateTestResult( 325 TestDescription test, 326 com.android.tradefed.result.TestStatus status, 327 FailureDescription failure) { 328 updateTestResult(test, status, failure, null); 329 } 330 updateTestResult( TestDescription test, com.android.tradefed.result.TestStatus status, FailureDescription failure, SkipReason reason)331 private void updateTestResult( 332 TestDescription test, 333 com.android.tradefed.result.TestStatus status, 334 FailureDescription failure, 335 SkipReason reason) { 336 TestResult r = mTestResults.get(test); 337 if (r == null) { 338 CLog.d("received test event without test start for %s", test); 339 r = new TestResult(); 340 } 341 r.setStatus(status); 342 if (failure != null) { 343 r.setFailure(failure); 344 } 345 if (reason != null) { 346 r.setSkipReason(reason); 347 } 348 addTestResult(test, r); 349 } 350 testFailed(TestDescription test, String trace)351 public void testFailed(TestDescription test, String trace) { 352 updateTestResult( 353 test, 354 com.android.tradefed.result.TestStatus.FAILURE, 355 FailureDescription.create(trace)); 356 } 357 testFailed(TestDescription test, FailureDescription failure)358 public void testFailed(TestDescription test, FailureDescription failure) { 359 updateTestResult(test, com.android.tradefed.result.TestStatus.FAILURE, failure); 360 } 361 testAssumptionFailure(TestDescription test, String trace)362 public void testAssumptionFailure(TestDescription test, String trace) { 363 updateTestResult( 364 test, 365 com.android.tradefed.result.TestStatus.ASSUMPTION_FAILURE, 366 FailureDescription.create(trace)); 367 } 368 testAssumptionFailure(TestDescription test, FailureDescription failure)369 public void testAssumptionFailure(TestDescription test, FailureDescription failure) { 370 updateTestResult(test, com.android.tradefed.result.TestStatus.ASSUMPTION_FAILURE, failure); 371 } 372 testIgnored(TestDescription test)373 public void testIgnored(TestDescription test) { 374 updateTestResult(test, com.android.tradefed.result.TestStatus.IGNORED, null); 375 } 376 testSkipped(TestDescription test, SkipReason reason)377 public void testSkipped(TestDescription test, SkipReason reason) { 378 updateTestResult(test, com.android.tradefed.result.TestStatus.SKIPPED, null, reason); 379 } 380 testEnded(TestDescription test, HashMap<String, Metric> testMetrics)381 public void testEnded(TestDescription test, HashMap<String, Metric> testMetrics) { 382 testEnded(test, System.currentTimeMillis(), testMetrics); 383 } 384 testEnded(TestDescription test, long endTime, HashMap<String, Metric> testMetrics)385 public void testEnded(TestDescription test, long endTime, HashMap<String, Metric> testMetrics) { 386 TestResult result = mTestResults.get(test); 387 if (result == null) { 388 result = new TestResult(); 389 } 390 if (result.getResultStatus().equals(TestStatus.INCOMPLETE)) { 391 result.setStatus(com.android.tradefed.result.TestStatus.PASSED); 392 } 393 result.setEndTime(endTime); 394 result.setMetrics(TfMetricProtoUtil.compatibleConvert(testMetrics)); 395 result.setProtoMetrics(testMetrics); 396 addTestResult(test, result); 397 mCurrentTestResult = null; 398 } 399 400 // TODO: Remove when done updating testRunFailed(String errorMessage)401 public void testRunFailed(String errorMessage) { 402 if (errorMessage == null) { 403 testRunFailed((FailureDescription) null); 404 } else { 405 testRunFailed(FailureDescription.create(errorMessage)); 406 } 407 } 408 testRunFailed(FailureDescription failureDescription)409 public void testRunFailed(FailureDescription failureDescription) { 410 if (failureDescription == null) { 411 failureDescription = FailureDescription.create("testRunFailed(null) was called."); 412 } 413 414 if (mRunFailureError != null) { 415 if (mRunFailureError instanceof MultiFailureDescription) { 416 ((MultiFailureDescription) mRunFailureError).addFailure(failureDescription); 417 } else { 418 MultiFailureDescription aggregatedFailure = 419 new MultiFailureDescription(mRunFailureError, failureDescription); 420 mRunFailureError = aggregatedFailure; 421 } 422 } else { 423 mRunFailureError = failureDescription; 424 } 425 } 426 testRunStopped(long elapsedTime)427 public void testRunStopped(long elapsedTime) { 428 mElapsedTime += elapsedTime; 429 mIsRunComplete = true; 430 } 431 testRunEnded(long elapsedTime, Map<String, String> runMetrics)432 public void testRunEnded(long elapsedTime, Map<String, String> runMetrics) { 433 if (mAggregateMetrics) { 434 for (Map.Entry<String, String> entry : runMetrics.entrySet()) { 435 String existingValue = mRunMetrics.get(entry.getKey()); 436 String combinedValue = combineValues(existingValue, entry.getValue()); 437 mRunMetrics.put(entry.getKey(), combinedValue); 438 } 439 } else { 440 mRunMetrics.putAll(runMetrics); 441 } 442 // Also add to the new interface: 443 mRunProtoMetrics.putAll(TfMetricProtoUtil.upgradeConvert(runMetrics)); 444 445 mElapsedTime += elapsedTime; 446 mIsRunComplete = true; 447 } 448 449 /** New interface using the new proto metrics. */ testRunEnded(long elapsedTime, HashMap<String, Metric> runMetrics)450 public void testRunEnded(long elapsedTime, HashMap<String, Metric> runMetrics) { 451 // Internally store the information as backward compatible format 452 testRunEnded(elapsedTime, TfMetricProtoUtil.compatibleConvert(runMetrics)); 453 // Store the new format directly too. 454 // TODO: See if aggregation should/can be done with the new format. 455 mRunProtoMetrics.putAll(runMetrics); 456 457 // TODO: when old format is deprecated, do not forget to uncomment the next two lines 458 // mElapsedTime += elapsedTime; 459 // mIsRunComplete = true; 460 } 461 462 /** 463 * Combine old and new metrics value 464 * 465 * @param existingValue 466 * @param newValue 467 * @return the combination of the two string as Long or Double value. 468 */ combineValues(String existingValue, String newValue)469 private String combineValues(String existingValue, String newValue) { 470 if (existingValue != null) { 471 try { 472 long existingLong = Long.parseLong(existingValue); 473 long newLong = Long.parseLong(newValue); 474 return Long.toString(existingLong + newLong); 475 } catch (NumberFormatException e) { 476 // not a long, skip to next 477 } 478 try { 479 double existingDouble = Double.parseDouble(existingValue); 480 double newDouble = Double.parseDouble(newValue); 481 return Double.toString(existingDouble + newDouble); 482 } catch (NumberFormatException e) { 483 // not a double either, fall through 484 } 485 } 486 // default to overriding existingValue 487 return newValue; 488 } 489 490 /** Returns a user friendly string describing results. */ getTextSummary()491 public String getTextSummary() { 492 StringBuilder builder = new StringBuilder(); 493 builder.append(String.format("Total tests %d, ", getNumTests())); 494 for (TestStatus status : TestStatus.values()) { 495 int count = getNumTestsInState(status); 496 // only add descriptive state for states that have non zero values, to avoid cluttering 497 // the response 498 if (count > 0) { 499 builder.append(String.format("%s %d, ", status.toString().toLowerCase(), count)); 500 } 501 } 502 return builder.toString(); 503 } 504 505 /** 506 * Information about a file being logged are stored and associated to the test case or test run 507 * in progress. 508 * 509 * @param dataName the name referencing the data. 510 * @param logFile The {@link LogFile} object representing where the object was saved and and 511 * information about it. 512 */ testLogSaved(String dataName, LogFile logFile)513 public void testLogSaved(String dataName, LogFile logFile) { 514 if (mCurrentTestResult != null) { 515 // We have a test case in progress, we can associate the log to it. 516 mCurrentTestResult.addLoggedFile(dataName, logFile); 517 } else { 518 mRunLoggedFiles.put(dataName, logFile); 519 } 520 } 521 522 /** Returns a copy of the map containing all the logged file associated with that test case. */ getRunLoggedFiles()523 public MultiMap<String, LogFile> getRunLoggedFiles() { 524 return new MultiMap<>(mRunLoggedFiles); 525 } 526 527 /** @see #merge(List, MergeStrategy) */ merge(List<TestRunResult> testRunResults)528 public static TestRunResult merge(List<TestRunResult> testRunResults) { 529 return merge(testRunResults, MergeStrategy.ONE_TESTCASE_PASS_IS_PASS); 530 } 531 532 /** 533 * Merge multiple TestRunResults of the same testRunName. If a testcase shows up in multiple 534 * TestRunResults but has different results (e.g. "boottest-device" runs three times with result 535 * FAIL-FAIL-PASS), we concatenate all the stack traces from the FAILED runs and trust the final 536 * run result for status, metrics, log files, start/end time. 537 * 538 * @param testRunResults A list of TestRunResult to merge. 539 * @param strategy the merging strategy adopted for merging results. 540 * @return the final TestRunResult containing the merged data from the testRunResults. 541 */ merge(List<TestRunResult> testRunResults, MergeStrategy strategy)542 public static TestRunResult merge(List<TestRunResult> testRunResults, MergeStrategy strategy) { 543 if (testRunResults.isEmpty()) { 544 return null; 545 } 546 if (MergeStrategy.NO_MERGE.equals(strategy)) { 547 throw new IllegalArgumentException( 548 "TestRunResult#merge cannot be called with NO_MERGE strategy."); 549 } 550 if (testRunResults.size() == 1) { 551 // No merging is needed in case of a single test run result. 552 return testRunResults.get(0); 553 } 554 TestRunResult finalRunResult = new TestRunResult(); 555 556 String testRunName = testRunResults.get(0).getName(); 557 Map<String, String> finalRunMetrics = new HashMap<>(); 558 HashMap<String, Metric> finalRunProtoMetrics = new HashMap<>(); 559 MultiMap<String, LogFile> finalRunLoggedFiles = new MultiMap<>(); 560 Map<TestDescription, List<TestResult>> testResultsAttempts = new LinkedHashMap<>(); 561 562 // Keep track of if one of the run is not complete 563 boolean isAtLeastOneCompleted = false; 564 boolean areAllCompleted = true; 565 // Keep track of whether we have run failure or not 566 List<FailureDescription> runErrors = new ArrayList<>(); 567 boolean atLeastOneFailure = false; 568 boolean allFailure = true; 569 // Keep track of elapsed time 570 long elapsedTime = 0L; 571 int maxExpectedTestCount = 0; 572 573 for (TestRunResult eachRunResult : testRunResults) { 574 // Check all mTestRunNames are the same. 575 if (!testRunName.equals(eachRunResult.getName())) { 576 throw new IllegalArgumentException( 577 String.format( 578 "Unabled to merge TestRunResults: The run results names are " 579 + "different (%s, %s)", 580 testRunName, eachRunResult.getName())); 581 } 582 elapsedTime += eachRunResult.getElapsedTime(); 583 // Evaluate the run failures 584 if (eachRunResult.isRunFailure()) { 585 atLeastOneFailure = true; 586 FailureDescription currentFailure = eachRunResult.getRunFailureDescription(); 587 if (currentFailure instanceof MultiFailureDescription) { 588 runErrors.addAll(((MultiFailureDescription) currentFailure).getFailures()); 589 } else { 590 runErrors.add(currentFailure); 591 } 592 } else { 593 allFailure = false; 594 } 595 // Evaluate the run completion 596 if (eachRunResult.isRunComplete()) { 597 isAtLeastOneCompleted = true; 598 } else { 599 areAllCompleted = false; 600 } 601 602 // A run may start multiple times. Normally the first run shows the expected count 603 // (max value). 604 maxExpectedTestCount = 605 Math.max(maxExpectedTestCount, eachRunResult.getExpectedTestCount()); 606 607 // Keep the last TestRunResult's RunMetrics, ProtoMetrics 608 finalRunMetrics.putAll(eachRunResult.getRunMetrics()); 609 finalRunProtoMetrics.putAll(eachRunResult.getRunProtoMetrics()); 610 finalRunLoggedFiles.putAll(eachRunResult.getRunLoggedFiles()); 611 // TODO: We are not handling the TestResult log files in the merging logic (different 612 // from the TestRunResult log files). Need to improve in the future. 613 for (Map.Entry<TestDescription, TestResult> testResultEntry : 614 eachRunResult.getTestResults().entrySet()) { 615 if (!testResultsAttempts.containsKey(testResultEntry.getKey())) { 616 testResultsAttempts.put(testResultEntry.getKey(), new ArrayList<>()); 617 } 618 List<TestResult> results = testResultsAttempts.get(testResultEntry.getKey()); 619 results.add(testResultEntry.getValue()); 620 } 621 } 622 623 // Evaluate test cases based on strategy 624 finalRunResult.mTestResults = evaluateTestCases(testResultsAttempts, strategy); 625 // Evaluate the run error status based on strategy 626 boolean isRunFailure = isRunFailed(atLeastOneFailure, allFailure, strategy); 627 if (isRunFailure) { 628 if (runErrors.size() == 1) { 629 finalRunResult.mRunFailureError = runErrors.get(0); 630 } else { 631 finalRunResult.mRunFailureError = new MultiFailureDescription(runErrors); 632 } 633 } 634 // Evaluate run completion from all the attempts based on strategy 635 finalRunResult.mIsRunComplete = 636 isRunComplete(isAtLeastOneCompleted, areAllCompleted, strategy); 637 638 finalRunResult.mTestRunName = testRunName; 639 finalRunResult.mRunMetrics = finalRunMetrics; 640 finalRunResult.mRunProtoMetrics = finalRunProtoMetrics; 641 finalRunResult.mRunLoggedFiles = finalRunLoggedFiles; 642 643 finalRunResult.mExpectedTestCount = maxExpectedTestCount; 644 // Report total elapsed times 645 finalRunResult.mElapsedTime = elapsedTime; 646 return finalRunResult; 647 } 648 649 /** Merge the different test cases attempts based on the strategy. */ evaluateTestCases( Map<TestDescription, List<TestResult>> results, MergeStrategy strategy)650 private static Map<TestDescription, TestResult> evaluateTestCases( 651 Map<TestDescription, List<TestResult>> results, MergeStrategy strategy) { 652 Map<TestDescription, TestResult> finalTestResults = new LinkedHashMap<>(); 653 for (TestDescription description : results.keySet()) { 654 List<TestResult> attemptRes = results.get(description); 655 TestResult aggResult = TestResult.merge(attemptRes, strategy); 656 finalTestResults.put(description, aggResult); 657 } 658 return finalTestResults; 659 } 660 661 /** Decides whether or not considering an aggregation of runs a pass or fail. */ isRunFailed( boolean atLeastOneFailure, boolean allFailures, MergeStrategy strategy)662 private static boolean isRunFailed( 663 boolean atLeastOneFailure, boolean allFailures, MergeStrategy strategy) { 664 switch (strategy) { 665 case ANY_PASS_IS_PASS: 666 case ONE_TESTRUN_PASS_IS_PASS: 667 return allFailures; 668 case ONE_TESTCASE_PASS_IS_PASS: 669 case ANY_FAIL_IS_FAIL: 670 default: 671 return atLeastOneFailure; 672 } 673 } 674 } 675