/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.android.tradefed.result;

import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.metrics.proto.MetricMeasurement.Metric;
import com.android.tradefed.result.skipped.SkipReason;
import com.android.tradefed.retry.MergeStrategy;
import com.android.tradefed.util.MultiMap;
import com.android.tradefed.util.proto.TfMetricProtoUtil;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Holds results from a single test run.
 *
 * <p>Maintains an accurate count of tests, and tracks incomplete tests.
 *
 * <p>Not thread safe! The test* callbacks must be called in order.
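 *
 * <p>Illustrative usage, assuming the callbacks are driven in the usual start/test/end order (a
 * minimal sketch, not the only valid sequence):
 *
 * <pre>{@code
 * TestRunResult result = new TestRunResult();
 * result.testRunStarted("example-run", 1);
 * TestDescription test = new TestDescription("com.example.FooTest", "testBar");
 * result.testStarted(test);
 * result.testEnded(test, new HashMap<String, Metric>());
 * result.testRunEnded(0L, new HashMap<String, Metric>());
 * }</pre>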
 */
public class TestRunResult {

    public static final String ERROR_DIVIDER = "\n====Next Error====\n";
    private String mTestRunName;
    // Uses a LinkedHashMap to have predictable iteration order
    private Map<TestDescription, TestResult> mTestResults =
            new LinkedHashMap<TestDescription, TestResult>();
    // Store the metrics for the run
    private Map<String, String> mRunMetrics = new HashMap<>();
    private HashMap<String, Metric> mRunProtoMetrics = new HashMap<>();
    // Log files associated with the test run itself (testRunStart / testRunEnd).
    private MultiMap<String, LogFile> mRunLoggedFiles;
    private boolean mIsRunComplete = false;
    private long mElapsedTime = 0L;
    private long mStartTime = 0L;

    private TestResult mCurrentTestResult;

    /** Counts of tests in each TestStatus state, indexed by TestStatus.ordinal(). */
    private int[] mStatusCounts = new int[com.android.tradefed.result.TestStatus.values().length];
    /** Tracks whether mStatusCounts is accurate, or whether it needs to be recalculated. */
    private boolean mIsCountDirty = true;

    private FailureDescription mRunFailureError = null;

    private boolean mAggregateMetrics = false;

    private int mExpectedTestCount = 0;

    /** Creates an empty {@link TestRunResult}. */
    public TestRunResult() {
        mTestRunName = "not started";
        mRunLoggedFiles = new MultiMap<String, LogFile>();
    }

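    /** Sets whether run metrics should be aggregated when testRunEnded is called multiple times. */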
    public void setAggregateMetrics(boolean metricAggregation) {
        mAggregateMetrics = metricAggregation;
    }

    /** @return the test run name */
    public String getName() {
        return mTestRunName;
    }

    /** Returns a map of the test results. */
    public Map<TestDescription, TestResult> getTestResults() {
        return mTestResults;
    }

    /** @return a {@link Map} of the test run metrics. */
    public Map<String, String> getRunMetrics() {
        return mRunMetrics;
    }

    /** @return a {@link Map} of the test run metrics with the new proto format. */
    public HashMap<String, Metric> getRunProtoMetrics() {
        return mRunProtoMetrics;
    }

    /** Gets the set of completed tests. */
    public Set<TestDescription> getCompletedTests() {
        List<TestStatus> completedStatuses = new ArrayList<>();
        for (TestStatus s : TestStatus.values()) {
            if (!s.equals(TestStatus.INCOMPLETE)) {
                completedStatuses.add(s);
            }
        }
        return getTestsInState(completedStatuses);
    }

    /** Gets the set of failed tests. */
    public Set<TestDescription> getFailedTests() {
        return getTestsInState(Arrays.asList(TestStatus.FAILURE));
    }

    /** Gets the set of passed tests. */
    public Set<TestDescription> getPassedTests() {
        return getTestsInState(Arrays.asList(TestStatus.PASSED));
    }

    /** Gets the set of tests in given statuses. */
    public Set<TestDescription> getTestsInState(List<TestStatus> statuses) {
        Set<TestDescription> tests = new LinkedHashSet<>();
        for (Map.Entry<TestDescription, TestResult> testEntry : getTestResults().entrySet()) {
            TestStatus status = testEntry.getValue().getResultStatus();
            if (statuses.contains(status)) {
                tests.add(testEntry.getKey());
            }
        }
        return tests;
    }

    /** @return <code>true</code> if test run failed. */
    public boolean isRunFailure() {
        return mRunFailureError != null;
    }

    /** @return <code>true</code> if test run finished. */
    public boolean isRunComplete() {
        return mIsRunComplete;
    }

    /** Decides whether an aggregation of runs should be considered complete. */
    private static boolean isRunComplete(
            boolean isAtLeastOneCompleted, boolean areAllCompleted, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return isAtLeastOneCompleted;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return areAllCompleted;
        }
    }

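    /** Sets whether the run is considered complete. */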
    public void setRunComplete(boolean runComplete) {
        mIsRunComplete = runComplete;
    }

    /**
     * Gets the number of test cases this TestRunResult expects to have. The actual number may be
     * less than the expected number due to test crashes. Normally, such a mismatch indicates a test
     * run failure.
     */
    public int getExpectedTestCount() {
        return mExpectedTestCount;
    }

    /** FOR COMPATIBILITY with older status. Use {@link #getNumTestsInState(TestStatus)} instead. */
    public int getNumTestsInState(
            com.android.ddmlib.testrunner.TestResult.TestStatus ddmlibStatus) {
        return getNumTestsInState(TestStatus.convertFromDdmlibType(ddmlibStatus));
    }

    /** Gets the number of tests in given state for this run. */
    public int getNumTestsInState(TestStatus status) {
        if (mIsCountDirty) {
            // clear counts
            for (int i = 0; i < mStatusCounts.length; i++) {
                mStatusCounts[i] = 0;
            }
            // now recalculate
            for (TestResult r : mTestResults.values()) {
                mStatusCounts[r.getResultStatus().ordinal()]++;
            }
            mIsCountDirty = false;
        }
        return mStatusCounts[status.ordinal()];
    }

    /** Returns all the {@link TestResult} in a particular state. */
    public List<TestResult> getTestsResultsInState(TestStatus status) {
        List<TestResult> results = new ArrayList<>();
        for (TestResult r : mTestResults.values()) {
            if (r.getResultStatus().equals(status)) {
                results.add(r);
            }
        }
        return results;
    }

    /** Gets the number of tests in this run. */
    public int getNumTests() {
        return mTestResults.size();
    }

    /** Gets the number of complete tests in this run, i.e. those with status != INCOMPLETE. */
    public int getNumCompleteTests() {
        return getNumTests() - getNumTestsInState(TestStatus.INCOMPLETE);
    }

    /** @return <code>true</code> if test run had any failed or error tests. */
    public boolean hasFailedTests() {
        return getNumAllFailedTests() > 0;
    }

    /** Returns the total number of tests in the FAILURE state. */
    public int getNumAllFailedTests() {
        return getNumTestsInState(TestStatus.FAILURE);
    }

    /** Returns the current run elapsed time. */
    public long getElapsedTime() {
        return mElapsedTime;
    }

    /** Returns the start time of the first testRunStart call. */
    public long getStartTime() {
        return mStartTime;
    }

    /** Return the run failure error message, <code>null</code> if run did not fail. */
    public String getRunFailureMessage() {
        if (mRunFailureError == null) {
            return null;
        }
        return mRunFailureError.getErrorMessage();
    }

    /** Returns the run failure descriptor, <code>null</code> if run did not fail. */
    public FailureDescription getRunFailureDescription() {
        return mRunFailureError;
    }

    /**
     * Reset the run failure status.
     *
     * <p>Resetting the run failure status is sometimes required when retrying. This should be done
     * with care to avoid clearing a real failure.
     */
    public void resetRunFailure() {
        mRunFailureError = null;
    }

    /**
     * Notify that a test run started.
     *
     * @param runName the name associated with the test run for tracking purposes.
     * @param testCount the number of expected test cases associated with the test run.
     */
    public void testRunStarted(String runName, int testCount) {
        testRunStarted(runName, testCount, System.currentTimeMillis());
    }

    /**
     * Notify that a test run started.
     *
     * @param runName the name associated with the test run for tracking purposes.
     * @param testCount the number of expected test cases associated with the test run.
     * @param startTime the start time of the run, in milliseconds.
     */
    public void testRunStarted(String runName, int testCount, long startTime) {
        // A run may be started multiple times due to crashes or other reasons. Normally the first
        // run reflects the expected number of tests ("testCount"). To avoid a later
        // testRunStarted call overriding the expected count, only the first testCount is
        // recorded; mExpectedTestCount is initialized to 0.
        if (mExpectedTestCount == 0) {
            mExpectedTestCount = testCount;
        } else {
            CLog.w(
                    "%s calls testRunStarted more than once. Previous expected count: %s. "
                            + "New expected count: %s",
                    runName, mExpectedTestCount, mExpectedTestCount + testCount);
            mExpectedTestCount += testCount;
        }
        mTestRunName = runName;
        mIsRunComplete = false;
        if (mStartTime == 0L) {
            mStartTime = startTime;
        }
        // Do not reset mRunFailureError since for re-runs we want to preserve previous failures.
    }

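    /** Notify that a test case started, using the current time as its start time. */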
    public void testStarted(TestDescription test) {
        testStarted(test, System.currentTimeMillis());
    }

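    /** Notify that a test case started at the given start time (in milliseconds). */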
    public void testStarted(TestDescription test, long startTime) {
        mCurrentTestResult = new TestResult();
        mCurrentTestResult.setStartTime(startTime);
        addTestResult(test, mCurrentTestResult);
    }

    private void addTestResult(TestDescription test, TestResult testResult) {
        mIsCountDirty = true;
        mTestResults.put(test, testResult);
    }

    private void updateTestResult(
            TestDescription test,
            com.android.tradefed.result.TestStatus status,
            FailureDescription failure) {
        updateTestResult(test, status, failure, null);
    }

    private void updateTestResult(
            TestDescription test,
            com.android.tradefed.result.TestStatus status,
            FailureDescription failure,
            SkipReason reason) {
        TestResult r = mTestResults.get(test);
        if (r == null) {
            CLog.d("received test event without test start for %s", test);
            r = new TestResult();
        }
        r.setStatus(status);
        if (failure != null) {
            r.setFailure(failure);
        }
        if (reason != null) {
            r.setSkipReason(reason);
        }
        addTestResult(test, r);
    }

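    /** Marks the test as failed with the given stack trace. */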
    public void testFailed(TestDescription test, String trace) {
        updateTestResult(
                test,
                com.android.tradefed.result.TestStatus.FAILURE,
                FailureDescription.create(trace));
    }

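    /** Marks the test as failed with the given {@link FailureDescription}. */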
    public void testFailed(TestDescription test, FailureDescription failure) {
        updateTestResult(test, com.android.tradefed.result.TestStatus.FAILURE, failure);
    }

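    /** Marks the test as an assumption failure with the given stack trace. */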
    public void testAssumptionFailure(TestDescription test, String trace) {
        updateTestResult(
                test,
                com.android.tradefed.result.TestStatus.ASSUMPTION_FAILURE,
                FailureDescription.create(trace));
    }

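    /** Marks the test as an assumption failure with the given {@link FailureDescription}. */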
    public void testAssumptionFailure(TestDescription test, FailureDescription failure) {
        updateTestResult(test, com.android.tradefed.result.TestStatus.ASSUMPTION_FAILURE, failure);
    }

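    /** Marks the test as ignored. */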
    public void testIgnored(TestDescription test) {
        updateTestResult(test, com.android.tradefed.result.TestStatus.IGNORED, null);
    }

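    /** Marks the test as skipped with the given {@link SkipReason}. */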
    public void testSkipped(TestDescription test, SkipReason reason) {
        updateTestResult(test, com.android.tradefed.result.TestStatus.SKIPPED, null, reason);
    }

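    /** Reports the end of the test case, using the current time as its end time. */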
    public void testEnded(TestDescription test, HashMap<String, Metric> testMetrics) {
        testEnded(test, System.currentTimeMillis(), testMetrics);
    }

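    /**
     * Reports the end of the test case at the given end time with its final metrics. A test still
     * in the INCOMPLETE state at this point is marked PASSED.
     */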
    public void testEnded(TestDescription test, long endTime, HashMap<String, Metric> testMetrics) {
        TestResult result = mTestResults.get(test);
        if (result == null) {
            result = new TestResult();
        }
        if (result.getResultStatus().equals(TestStatus.INCOMPLETE)) {
            result.setStatus(com.android.tradefed.result.TestStatus.PASSED);
        }
        result.setEndTime(endTime);
        result.setMetrics(TfMetricProtoUtil.compatibleConvert(testMetrics));
        result.setProtoMetrics(testMetrics);
        addTestResult(test, result);
        mCurrentTestResult = null;
    }

    // TODO: Remove when done updating
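    /** Reports a run-level failure from an error message; a null message becomes a placeholder failure. */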
    public void testRunFailed(String errorMessage) {
        if (errorMessage == null) {
            testRunFailed((FailureDescription) null);
        } else {
            testRunFailed(FailureDescription.create(errorMessage));
        }
    }

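    /** Reports a run-level failure; successive failures are aggregated into a {@link MultiFailureDescription}. */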
    public void testRunFailed(FailureDescription failureDescription) {
        if (failureDescription == null) {
            failureDescription = FailureDescription.create("testRunFailed(null) was called.");
        }

        if (mRunFailureError != null) {
            if (mRunFailureError instanceof MultiFailureDescription) {
                ((MultiFailureDescription) mRunFailureError).addFailure(failureDescription);
            } else {
                MultiFailureDescription aggregatedFailure =
                        new MultiFailureDescription(mRunFailureError, failureDescription);
                mRunFailureError = aggregatedFailure;
            }
        } else {
            mRunFailureError = failureDescription;
        }
    }

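    /** Reports that the run was stopped; adds the elapsed time and marks the run complete. */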
    public void testRunStopped(long elapsedTime) {
        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

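    /** Reports the end of the run with legacy string metrics; values are combined when metric aggregation is enabled. */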
    public void testRunEnded(long elapsedTime, Map<String, String> runMetrics) {
        if (mAggregateMetrics) {
            for (Map.Entry<String, String> entry : runMetrics.entrySet()) {
                String existingValue = mRunMetrics.get(entry.getKey());
                String combinedValue = combineValues(existingValue, entry.getValue());
                mRunMetrics.put(entry.getKey(), combinedValue);
            }
        } else {
            mRunMetrics.putAll(runMetrics);
        }
        // Also add to the new interface:
        mRunProtoMetrics.putAll(TfMetricProtoUtil.upgradeConvert(runMetrics));

        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

    /** New interface using the new proto metrics. */
    public void testRunEnded(long elapsedTime, HashMap<String, Metric> runMetrics) {
        // Internally store the information in the backward-compatible format
        testRunEnded(elapsedTime, TfMetricProtoUtil.compatibleConvert(runMetrics));
        // Store the new format directly too.
        // TODO: See if aggregation should/can be done with the new format.
        mRunProtoMetrics.putAll(runMetrics);

        // TODO: when old format is deprecated, do not forget to uncomment the next two lines
        // mElapsedTime += elapsedTime;
        // mIsRunComplete = true;
    }

    /**
     * Combines an existing and a new metric value.
     *
     * @param existingValue the previously stored value, may be null.
     * @param newValue the newly reported value.
     * @return the sum of the two values, as a Long or Double string, if both parse as numbers;
     *     otherwise the new value.
     */
    private String combineValues(String existingValue, String newValue) {
        if (existingValue != null) {
            try {
                long existingLong = Long.parseLong(existingValue);
                long newLong = Long.parseLong(newValue);
                return Long.toString(existingLong + newLong);
            } catch (NumberFormatException e) {
                // not a long, try parsing as a double
            }
            try {
                double existingDouble = Double.parseDouble(existingValue);
                double newDouble = Double.parseDouble(newValue);
                return Double.toString(existingDouble + newDouble);
            } catch (NumberFormatException e) {
                // not a double either, fall through
            }
        }
        // default to overriding existingValue
        return newValue;
    }

    /** Returns a user-friendly string describing results. */
    public String getTextSummary() {
        StringBuilder builder = new StringBuilder();
        builder.append(String.format("Total tests %d, ", getNumTests()));
        for (TestStatus status : TestStatus.values()) {
            int count = getNumTestsInState(status);
            // only add descriptive state for states that have non-zero values, to avoid cluttering
            // the response
            if (count > 0) {
                builder.append(String.format("%s %d, ", status.toString().toLowerCase(), count));
            }
        }
        return builder.toString();
    }

    /**
     * Information about a file being logged is stored and associated with the test case or test
     * run in progress.
     *
     * @param dataName the name referencing the data.
     * @param logFile The {@link LogFile} object representing where the object was saved and
     *     information about it.
     */
    public void testLogSaved(String dataName, LogFile logFile) {
        if (mCurrentTestResult != null) {
            // We have a test case in progress, we can associate the log to it.
            mCurrentTestResult.addLoggedFile(dataName, logFile);
        } else {
            mRunLoggedFiles.put(dataName, logFile);
        }
    }

    /** Returns a copy of the map containing all the files logged at the test run level. */
    public MultiMap<String, LogFile> getRunLoggedFiles() {
        return new MultiMap<>(mRunLoggedFiles);
    }

    /** @see #merge(List, MergeStrategy) */
    public static TestRunResult merge(List<TestRunResult> testRunResults) {
        return merge(testRunResults, MergeStrategy.ONE_TESTCASE_PASS_IS_PASS);
    }

    /**
     * Merge multiple TestRunResults of the same testRunName. If a testcase shows up in multiple
     * TestRunResults but has different results (e.g. "boottest-device" runs three times with result
     * FAIL-FAIL-PASS), we concatenate all the stack traces from the FAILED runs and trust the final
     * run result for status, metrics, log files, start/end time.
     *
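     * <p>Illustrative example, assuming {@code attempt1} and {@code attempt2} are results of the
     * same run (hypothetical variables, not part of this class):
     *
     * <pre>{@code
     * TestRunResult merged =
     *         TestRunResult.merge(Arrays.asList(attempt1, attempt2), MergeStrategy.ANY_FAIL_IS_FAIL);
     * }</pre>
     *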
     * @param testRunResults A list of TestRunResult to merge.
     * @param strategy the merging strategy adopted for merging results.
     * @return the final TestRunResult containing the merged data from the testRunResults.
     */
    public static TestRunResult merge(List<TestRunResult> testRunResults, MergeStrategy strategy) {
        if (testRunResults.isEmpty()) {
            return null;
        }
        if (MergeStrategy.NO_MERGE.equals(strategy)) {
            throw new IllegalArgumentException(
                    "TestRunResult#merge cannot be called with NO_MERGE strategy.");
        }
        if (testRunResults.size() == 1) {
            // No merging is needed in case of a single test run result.
            return testRunResults.get(0);
        }
        TestRunResult finalRunResult = new TestRunResult();

        String testRunName = testRunResults.get(0).getName();
        Map<String, String> finalRunMetrics = new HashMap<>();
        HashMap<String, Metric> finalRunProtoMetrics = new HashMap<>();
        MultiMap<String, LogFile> finalRunLoggedFiles = new MultiMap<>();
        Map<TestDescription, List<TestResult>> testResultsAttempts = new LinkedHashMap<>();

        // Track whether at least one run is complete and whether all runs are complete
        boolean isAtLeastOneCompleted = false;
        boolean areAllCompleted = true;
        // Track whether we have run failures or not
        List<FailureDescription> runErrors = new ArrayList<>();
        boolean atLeastOneFailure = false;
        boolean allFailure = true;
        // Keep track of elapsed time
        long elapsedTime = 0L;
        int maxExpectedTestCount = 0;

        for (TestRunResult eachRunResult : testRunResults) {
            // Check all mTestRunNames are the same.
            if (!testRunName.equals(eachRunResult.getName())) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unable to merge TestRunResults: The run results names are "
                                        + "different (%s, %s)",
                                testRunName, eachRunResult.getName()));
            }
            elapsedTime += eachRunResult.getElapsedTime();
            // Evaluate the run failures
            if (eachRunResult.isRunFailure()) {
                atLeastOneFailure = true;
                FailureDescription currentFailure = eachRunResult.getRunFailureDescription();
                if (currentFailure instanceof MultiFailureDescription) {
                    runErrors.addAll(((MultiFailureDescription) currentFailure).getFailures());
                } else {
                    runErrors.add(currentFailure);
                }
            } else {
                allFailure = false;
            }
            // Evaluate the run completion
            if (eachRunResult.isRunComplete()) {
                isAtLeastOneCompleted = true;
            } else {
                areAllCompleted = false;
            }

            // A run may start multiple times. Normally the first run shows the expected count
            // (max value).
            maxExpectedTestCount =
                    Math.max(maxExpectedTestCount, eachRunResult.getExpectedTestCount());

            // Keep the last TestRunResult's RunMetrics, ProtoMetrics
            finalRunMetrics.putAll(eachRunResult.getRunMetrics());
            finalRunProtoMetrics.putAll(eachRunResult.getRunProtoMetrics());
            finalRunLoggedFiles.putAll(eachRunResult.getRunLoggedFiles());
            // TODO: We are not handling the TestResult log files in the merging logic (different
            // from the TestRunResult log files). Need to improve in the future.
            for (Map.Entry<TestDescription, TestResult> testResultEntry :
                    eachRunResult.getTestResults().entrySet()) {
                if (!testResultsAttempts.containsKey(testResultEntry.getKey())) {
                    testResultsAttempts.put(testResultEntry.getKey(), new ArrayList<>());
                }
                List<TestResult> results = testResultsAttempts.get(testResultEntry.getKey());
                results.add(testResultEntry.getValue());
            }
        }

        // Evaluate test cases based on strategy
        finalRunResult.mTestResults = evaluateTestCases(testResultsAttempts, strategy);
        // Evaluate the run error status based on strategy
        boolean isRunFailure = isRunFailed(atLeastOneFailure, allFailure, strategy);
        if (isRunFailure) {
            if (runErrors.size() == 1) {
                finalRunResult.mRunFailureError = runErrors.get(0);
            } else {
                finalRunResult.mRunFailureError = new MultiFailureDescription(runErrors);
            }
        }
        // Evaluate run completion from all the attempts based on strategy
        finalRunResult.mIsRunComplete =
                isRunComplete(isAtLeastOneCompleted, areAllCompleted, strategy);

        finalRunResult.mTestRunName = testRunName;
        finalRunResult.mRunMetrics = finalRunMetrics;
        finalRunResult.mRunProtoMetrics = finalRunProtoMetrics;
        finalRunResult.mRunLoggedFiles = finalRunLoggedFiles;

        finalRunResult.mExpectedTestCount = maxExpectedTestCount;
        // Report total elapsed times
        finalRunResult.mElapsedTime = elapsedTime;
        return finalRunResult;
    }

    /** Merges each test case's attempts based on the strategy. */
    private static Map<TestDescription, TestResult> evaluateTestCases(
            Map<TestDescription, List<TestResult>> results, MergeStrategy strategy) {
        Map<TestDescription, TestResult> finalTestResults = new LinkedHashMap<>();
        for (TestDescription description : results.keySet()) {
            List<TestResult> attemptRes = results.get(description);
            TestResult aggResult = TestResult.merge(attemptRes, strategy);
            finalTestResults.put(description, aggResult);
        }
        return finalTestResults;
    }

    /** Decides whether an aggregation of runs should be considered failed. */
    private static boolean isRunFailed(
            boolean atLeastOneFailure, boolean allFailures, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return allFailures;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return atLeastOneFailure;
        }
    }
}