/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.android.tradefed.result;

import com.android.ddmlib.testrunner.TestResult.TestStatus;
import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.metrics.proto.MetricMeasurement.Metric;
import com.android.tradefed.util.proto.TfMetricProtoUtil;

import com.google.common.base.Joiner;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Holds results from a single test run.
 *
 * <p>Maintains an accurate count of tests, and tracks incomplete tests.
 *
 * <p>Not thread-safe! The test* callbacks must be called in order.
 */
public class TestRunResult {

    public static final String ERROR_DIVIDER = "\n====Next Error====\n";
    private String mTestRunName;
    // Uses a LinkedHashMap to have predictable iteration order
    private Map<TestDescription, TestResult> mTestResults =
            new LinkedHashMap<TestDescription, TestResult>();
    // Store the metrics for the run
    private Map<String, String> mRunMetrics = new HashMap<>();
    private HashMap<String, Metric> mRunProtoMetrics = new HashMap<>();
    // Log files associated with the test run itself (testRunStart / testRunEnd).
    private Map<String, LogFile> mRunLoggedFiles;
    private boolean mIsRunComplete = false;
    private long mElapsedTime = 0;

    private TestResult mCurrentTestResult;

    /** Counts of tests in each TestStatus state. Indexed by TestStatus.ordinal(). */
    private int[] mStatusCounts = new int[TestStatus.values().length];
    /** Tracks whether mStatusCounts is accurate, or whether it needs to be recalculated. */
    private boolean mIsCountDirty = true;

    private String mRunFailureError = null;

    private boolean mAggregateMetrics = false;

    private int mExpectedTestCount = 0;

    /** Create an empty {@link TestRunResult}. */
    public TestRunResult() {
        mTestRunName = "not started";
        mRunLoggedFiles = new LinkedHashMap<String, LogFile>();
    }
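
    // Illustrative sketch (not part of the class): a typical event sequence as a
    // result reporter would drive it. The run name, test name and empty metrics
    // maps below are made up for the example.
    //
    //   TestRunResult run = new TestRunResult();
    //   run.testRunStarted("example-module", 1);
    //   TestDescription test = new TestDescription("com.example.FooTest", "testBar");
    //   run.testStarted(test);
    //   run.testEnded(test, new HashMap<String, Metric>());
    //   run.testRunEnded(0L, new HashMap<String, Metric>());
    //   // run.getNumTestsInState(TestStatus.PASSED) == 1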

    /** Sets whether run metrics with the same key should be aggregated across calls. */
    public void setAggregateMetrics(boolean metricAggregation) {
        mAggregateMetrics = metricAggregation;
    }

    /** @return the test run name */
    public String getName() {
        return mTestRunName;
    }

    /** Returns a map of the test results. */
    public Map<TestDescription, TestResult> getTestResults() {
        return mTestResults;
    }

    /** @return a {@link Map} of the test run metrics. */
    public Map<String, String> getRunMetrics() {
        return mRunMetrics;
    }

    /** @return a {@link Map} of the test run metrics with the new proto format. */
    public HashMap<String, Metric> getRunProtoMetrics() {
        return mRunProtoMetrics;
    }

    /** Gets the set of completed tests. */
    public Set<TestDescription> getCompletedTests() {
        List<TestStatus> completedStatuses = new ArrayList<>();
        for (TestStatus s : TestStatus.values()) {
            if (!s.equals(TestStatus.INCOMPLETE)) {
                completedStatuses.add(s);
            }
        }
        return getTestsInState(completedStatuses);
    }

    /** Gets the set of failed tests. */
    public Set<TestDescription> getFailedTests() {
        return getTestsInState(Arrays.asList(TestStatus.FAILURE));
    }

    /** Gets the set of tests in the given statuses. */
    private Set<TestDescription> getTestsInState(List<TestStatus> statuses) {
        Set<TestDescription> tests = new LinkedHashSet<>();
        for (Map.Entry<TestDescription, TestResult> testEntry : getTestResults().entrySet()) {
            TestStatus status = testEntry.getValue().getStatus();
            if (statuses.contains(status)) {
                tests.add(testEntry.getKey());
            }
        }
        return tests;
    }

    /** @return <code>true</code> if the test run failed. */
    public boolean isRunFailure() {
        return mRunFailureError != null;
    }

    /** @return <code>true</code> if the test run finished. */
    public boolean isRunComplete() {
        return mIsRunComplete;
    }

    /** Marks the run as complete or not complete. */
    public void setRunComplete(boolean runComplete) {
        mIsRunComplete = runComplete;
    }

    /**
     * Gets the number of test cases this TestRunResult expects to have. The actual number may be
     * less than the expected number due to test crashes. Normally, such a mismatch indicates a test
     * run failure.
     */
    public int getExpectedTestCount() {
        return mExpectedTestCount;
    }

    /** Gets the number of tests in a given state for this run. */
    public int getNumTestsInState(TestStatus status) {
        if (mIsCountDirty) {
            // clear counts
            for (int i = 0; i < mStatusCounts.length; i++) {
                mStatusCounts[i] = 0;
            }
            // now recalculate
            for (TestResult r : mTestResults.values()) {
                mStatusCounts[r.getStatus().ordinal()]++;
            }
            mIsCountDirty = false;
        }
        return mStatusCounts[status.ordinal()];
    }

    /** Gets the number of tests in this run. */
    public int getNumTests() {
        return mTestResults.size();
    }

    /** Gets the number of completed tests in this run, i.e. tests whose status is not incomplete. */
    public int getNumCompleteTests() {
        return getNumTests() - getNumTestsInState(TestStatus.INCOMPLETE);
    }

    /** @return <code>true</code> if the test run had any failed or error tests. */
    public boolean hasFailedTests() {
        return getNumAllFailedTests() > 0;
    }

    /** Return the total number of tests in the failed state. Assumption failures are not counted. */
    public int getNumAllFailedTests() {
        return getNumTestsInState(TestStatus.FAILURE);
    }

    /** Returns the current run elapsed time. */
    public long getElapsedTime() {
        return mElapsedTime;
    }

    /** Return the run failure error message, or <code>null</code> if the run did not fail. */
    public String getRunFailureMessage() {
        return mRunFailureError;
    }

    /**
     * Reset the run failure status.
     *
     * <p>Resetting the run failure status is sometimes required when retrying. This should be done
     * with care to avoid clearing a real failure.
     */
    public void resetRunFailure() {
        mRunFailureError = null;
    }

    /**
     * Notify that a test run started.
     *
     * @param runName the name associated with the test run for tracking purposes.
     * @param testCount the number of expected test cases associated with the test run.
     */
    public void testRunStarted(String runName, int testCount) {
        // A run may be started multiple times due to crashes or other reasons. Normally the first
        // invocation reflects the expected number of tests ("testCount"). If testRunStarted is
        // called again, the new count is added to the expected total and a warning is logged.
        // mExpectedTestCount is initialized as 0.
        if (mExpectedTestCount == 0) {
            mExpectedTestCount = testCount;
        } else {
            CLog.w(
                    "%s calls testRunStarted more than once. Previous expected count: %s. "
                            + "New expected count: %s",
                    runName, mExpectedTestCount, mExpectedTestCount + testCount);
            mExpectedTestCount += testCount;
        }
        mTestRunName = runName;
        mIsRunComplete = false;
        // Do not reset mRunFailureError since for re-runs we want to preserve previous failures.
    }
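
    // Illustrative sketch: expected counts accumulate across repeated starts of the
    // same run (e.g. after a device-side crash). Run name and counts are made up.
    //
    //   TestRunResult run = new TestRunResult();
    //   run.testRunStarted("example-module", 5);
    //   // ... crash after 2 tests, the runner restarts the run ...
    //   run.testRunStarted("example-module", 3); // logs a warning
    //   // run.getExpectedTestCount() == 8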

    /** Notify that a test case started, using the current time as the start time. */
    public void testStarted(TestDescription test) {
        testStarted(test, System.currentTimeMillis());
    }

    /** Notify that a test case started at the given start time. */
    public void testStarted(TestDescription test, long startTime) {
        mCurrentTestResult = new TestResult();
        mCurrentTestResult.setStartTime(startTime);
        addTestResult(test, mCurrentTestResult);
    }

    private void addTestResult(TestDescription test, TestResult testResult) {
        mIsCountDirty = true;
        mTestResults.put(test, testResult);
    }

    private void updateTestResult(TestDescription test, TestStatus status, String trace) {
        TestResult r = mTestResults.get(test);
        if (r == null) {
            CLog.d("received test event without test start for %s", test);
            r = new TestResult();
        }
        r.setStatus(status);
        r.setStackTrace(trace);
        addTestResult(test, r);
    }

    /** Marks the given test as failed with the provided stack trace. */
    public void testFailed(TestDescription test, String trace) {
        updateTestResult(test, TestStatus.FAILURE, trace);
    }

    /** Marks the given test as an assumption failure with the provided stack trace. */
    public void testAssumptionFailure(TestDescription test, String trace) {
        updateTestResult(test, TestStatus.ASSUMPTION_FAILURE, trace);
    }

    /** Marks the given test as ignored. */
    public void testIgnored(TestDescription test) {
        updateTestResult(test, TestStatus.IGNORED, null);
    }

    /** Notify that a test case ended, using the current time as the end time. */
    public void testEnded(TestDescription test, HashMap<String, Metric> testMetrics) {
        testEnded(test, System.currentTimeMillis(), testMetrics);
    }

    /** Notify that a test case ended. A test still marked INCOMPLETE at this point is PASSED. */
    public void testEnded(TestDescription test, long endTime, HashMap<String, Metric> testMetrics) {
        TestResult result = mTestResults.get(test);
        if (result == null) {
            result = new TestResult();
        }
        if (result.getStatus().equals(TestStatus.INCOMPLETE)) {
            result.setStatus(TestStatus.PASSED);
        }
        result.setEndTime(endTime);
        result.setMetrics(TfMetricProtoUtil.compatibleConvert(testMetrics));
        result.setProtoMetrics(testMetrics);
        addTestResult(test, result);
        mCurrentTestResult = null;
    }
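
    // Illustrative sketch: testEnded only upgrades INCOMPLETE to PASSED, so a test
    // reported as failed keeps its FAILURE status after it ends. Names are made up.
    //
    //   run.testStarted(test);                                // status: INCOMPLETE
    //   run.testFailed(test, "java.lang.AssertionError...");  // status: FAILURE
    //   run.testEnded(test, new HashMap<String, Metric>());   // status stays FAILURE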

    /** Notify that the test run failed. Multiple failures are concatenated. */
    public void testRunFailed(String errorMessage) {
        if (errorMessage == null) {
            // Record a placeholder message so a null argument still marks the run failed.
            errorMessage = "testRunFailed(null) was called.";
        }

        if (mRunFailureError != null) {
            mRunFailureError += (ERROR_DIVIDER + errorMessage);
        } else {
            mRunFailureError = errorMessage;
        }
    }
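
    // Illustrative sketch: repeated run failures are joined with ERROR_DIVIDER.
    // The error messages are made up.
    //
    //   run.testRunFailed("device lost");
    //   run.testRunFailed("instrumentation crashed");
    //   // run.getRunFailureMessage() is
    //   //   "device lost\n====Next Error====\ninstrumentation crashed"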

    /** Notify that the test run was stopped before completion. */
    public void testRunStopped(long elapsedTime) {
        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

    /** Notify that the test run ended, reporting run metrics in the old string format. */
    public void testRunEnded(long elapsedTime, Map<String, String> runMetrics) {
        if (mAggregateMetrics) {
            for (Map.Entry<String, String> entry : runMetrics.entrySet()) {
                String existingValue = mRunMetrics.get(entry.getKey());
                String combinedValue = combineValues(existingValue, entry.getValue());
                mRunMetrics.put(entry.getKey(), combinedValue);
            }
        } else {
            mRunMetrics.putAll(runMetrics);
        }
        // Also add to the new interface:
        mRunProtoMetrics.putAll(TfMetricProtoUtil.upgradeConvert(runMetrics));

        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

    /** New interface using the new proto metrics. */
    public void testRunEnded(long elapsedTime, HashMap<String, Metric> runMetrics) {
        // Internally store the information in the backward compatible format
        testRunEnded(elapsedTime, TfMetricProtoUtil.compatibleConvert(runMetrics));
        // Store the new format directly too.
        // TODO: See if aggregation should/can be done with the new format.
        mRunProtoMetrics.putAll(runMetrics);

        // TODO: when old format is deprecated, do not forget to uncomment the next two lines
        // mElapsedTime += elapsedTime;
        // mIsRunComplete = true;
    }

    /**
     * Combine an existing metric value with a new one.
     *
     * @param existingValue the currently stored value, or <code>null</code> if none exists.
     * @param newValue the incoming value.
     * @return the sum of the two values when both parse as Long or Double; otherwise
     *     <code>newValue</code>, overriding the existing value.
     */
    private String combineValues(String existingValue, String newValue) {
        if (existingValue != null) {
            try {
                Long existingLong = Long.parseLong(existingValue);
                Long newLong = Long.parseLong(newValue);
                return Long.toString(existingLong + newLong);
            } catch (NumberFormatException e) {
                // not a long, skip to next
            }
            try {
                Double existingDouble = Double.parseDouble(existingValue);
                Double newDouble = Double.parseDouble(newValue);
                return Double.toString(existingDouble + newDouble);
            } catch (NumberFormatException e) {
                // not a double either, fall through
            }
        }
        // default to overriding existingValue
        return newValue;
    }
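
    // Illustrative sketch of metric aggregation (the metric name "ops" is made up):
    //
    //   run.setAggregateMetrics(true);
    //   run.testRunEnded(0L, java.util.Collections.singletonMap("ops", "3"));
    //   run.testRunEnded(0L, java.util.Collections.singletonMap("ops", "4"));
    //   // run.getRunMetrics().get("ops") is "7"; non-numeric values are
    //   // overridden rather than combined.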

    /** Returns a user-friendly string describing results. */
    public String getTextSummary() {
        StringBuilder builder = new StringBuilder();
        builder.append(String.format("Total tests %d, ", getNumTests()));
        for (TestStatus status : TestStatus.values()) {
            int count = getNumTestsInState(status);
            // only add descriptive state for states that have non-zero values, to avoid
            // cluttering the response
            if (count > 0) {
                builder.append(String.format("%s %d, ", status.toString().toLowerCase(), count));
            }
        }
        return builder.toString();
    }
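
    // Illustrative sketch: for a run with two passed tests and one failed test,
    // getTextSummary() returns something like
    //
    //   "Total tests 3, failure 1, passed 2, "
    //
    // where status names follow the TestStatus enum and zero-count statuses are omitted.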

    /**
     * Information about a file being logged is stored and associated with the test case or test
     * run in progress.
     *
     * @param dataName the name referencing the data.
     * @param logFile the {@link LogFile} object representing where the file was saved and
     *     information about it.
     */
    public void testLogSaved(String dataName, LogFile logFile) {
        if (mCurrentTestResult != null) {
            // We have a test case in progress, we can associate the log to it.
            mCurrentTestResult.addLoggedFile(dataName, logFile);
        } else {
            mRunLoggedFiles.put(dataName, logFile);
        }
    }
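
    // Illustrative sketch: a log saved between testStarted and testEnded is attached
    // to that test case; otherwise it is attached to the run itself. The LogFile
    // instance is assumed to come from a log saver; its construction is omitted.
    //
    //   run.testStarted(test);
    //   run.testLogSaved("logcat", logFile);     // attached to the test case
    //   run.testEnded(test, new HashMap<String, Metric>());
    //   run.testLogSaved("bugreport", logFile);  // attached to the run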

    /** Returns a copy of the map containing all the files logged against the test run itself. */
    public Map<String, LogFile> getRunLoggedFiles() {
        return new LinkedHashMap<>(mRunLoggedFiles);
    }

    /** @see #merge(List, MergeStrategy) */
    public static TestRunResult merge(List<TestRunResult> testRunResults) {
        return merge(testRunResults, MergeStrategy.ONE_TESTCASE_PASS_IS_PASS);
    }

    /**
     * Merge multiple TestRunResults of the same testRunName. If a testcase shows up in multiple
     * TestRunResults but has different results (e.g. "boottest-device" runs three times with result
     * FAIL-FAIL-PASS), we concatenate all the stack traces from the FAILED runs and trust the final
     * run result for status, metrics, log files, start/end time.
     *
     * @param testRunResults A list of TestRunResult to merge.
     * @param strategy the merging strategy adopted for merging results.
     * @return the final TestRunResult containing the merged data from the testRunResults.
     */
    public static TestRunResult merge(List<TestRunResult> testRunResults, MergeStrategy strategy) {
        if (testRunResults.isEmpty()) {
            return null;
        }
        if (MergeStrategy.NO_MERGE.equals(strategy)) {
            throw new IllegalArgumentException(
                    "TestRunResult#merge cannot be called with NO_MERGE strategy.");
        }
        if (testRunResults.size() == 1) {
            // No merging is needed in case of a single test run result.
            return testRunResults.get(0);
        }
        TestRunResult finalRunResult = new TestRunResult();

        String testRunName = testRunResults.get(0).getName();
        Map<String, String> finalRunMetrics = new HashMap<>();
        HashMap<String, Metric> finalRunProtoMetrics = new HashMap<>();
        Map<String, LogFile> finalRunLoggedFiles = new HashMap<>();
        Map<TestDescription, List<TestResult>> testResultsAttempts = new LinkedHashMap<>();

        // Track whether at least one run completed and whether all of them did
        boolean isAtLeastOneCompleted = false;
        boolean areAllCompleted = true;
        // Track run failures across attempts
        List<String> runErrors = new ArrayList<>();
        boolean atLeastOneFailure = false;
        boolean allFailure = true;
        // Keep track of elapsed time
        long elapsedTime = 0L;
        int maxExpectedTestCount = 0;

        for (TestRunResult eachRunResult : testRunResults) {
            // Check all mTestRunNames are the same.
            if (!testRunName.equals(eachRunResult.getName())) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unable to merge TestRunResults: The run results names are "
                                        + "different (%s, %s)",
                                testRunName, eachRunResult.getName()));
            }
            elapsedTime += eachRunResult.getElapsedTime();
            // Evaluate the run failures
            if (eachRunResult.isRunFailure()) {
                atLeastOneFailure = true;
                runErrors.add(eachRunResult.getRunFailureMessage());
            } else {
                allFailure = false;
            }
            // Evaluate the run completion
            if (eachRunResult.isRunComplete()) {
                isAtLeastOneCompleted = true;
            } else {
                areAllCompleted = false;
            }

            // A run may start multiple times. Normally the first run shows the expected count
            // (max value).
            maxExpectedTestCount =
                    Math.max(maxExpectedTestCount, eachRunResult.getExpectedTestCount());

            // Keep the last TestRunResult's RunMetrics, ProtoMetrics and logFiles.
            // TODO: Currently we keep a single item when multiple TestRunResult have the same
            // keys. In the future, we may want to improve this logic.
            finalRunMetrics.putAll(eachRunResult.getRunMetrics());
            finalRunProtoMetrics.putAll(eachRunResult.getRunProtoMetrics());
            finalRunLoggedFiles.putAll(eachRunResult.getRunLoggedFiles());
            // TODO: We are not handling the TestResult log files in the merging logic (different
            // from the TestRunResult log files). Need to improve in the future.
            for (Map.Entry<TestDescription, TestResult> testResultEntry :
                    eachRunResult.getTestResults().entrySet()) {
                if (!testResultsAttempts.containsKey(testResultEntry.getKey())) {
                    testResultsAttempts.put(testResultEntry.getKey(), new ArrayList<>());
                }
                List<TestResult> results = testResultsAttempts.get(testResultEntry.getKey());
                results.add(testResultEntry.getValue());
            }
        }

        // Evaluate test cases based on strategy
        finalRunResult.mTestResults = evaluateTestCases(testResultsAttempts, strategy);
        // Evaluate the run error status based on strategy
        boolean isRunFailure = isRunFailed(atLeastOneFailure, allFailure, strategy);
        if (isRunFailure) {
            finalRunResult.mRunFailureError = Joiner.on("\n\n").join(runErrors);
        }
        // Evaluate run completion from all the attempts based on strategy
        finalRunResult.mIsRunComplete =
                isRunComplete(isAtLeastOneCompleted, areAllCompleted, strategy);

        finalRunResult.mTestRunName = testRunName;
        finalRunResult.mRunMetrics = finalRunMetrics;
        finalRunResult.mRunProtoMetrics = finalRunProtoMetrics;
        finalRunResult.mRunLoggedFiles = finalRunLoggedFiles;

        finalRunResult.mExpectedTestCount = maxExpectedTestCount;
        // Report total elapsed time
        finalRunResult.mElapsedTime = elapsedTime;
        return finalRunResult;
    }
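
    // Illustrative sketch (run names and statuses are made up): merging two attempts
    // of the same run where a test failed first and then passed.
    //
    //   TestRunResult attempt1 = ...; // "example-module": testBar FAILURE, run complete
    //   TestRunResult attempt2 = ...; // "example-module": testBar PASSED, run complete
    //   TestRunResult merged =
    //           TestRunResult.merge(
    //                   Arrays.asList(attempt1, attempt2),
    //                   MergeStrategy.ONE_TESTCASE_PASS_IS_PASS);
    //   // Each test case is merged via TestResult.merge; under this strategy a
    //   // single passing attempt makes the test case pass.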

    /** Merge the different test case attempts based on the strategy. */
    private static Map<TestDescription, TestResult> evaluateTestCases(
            Map<TestDescription, List<TestResult>> results, MergeStrategy strategy) {
        Map<TestDescription, TestResult> finalTestResults = new LinkedHashMap<>();
        for (TestDescription description : results.keySet()) {
            List<TestResult> attemptRes = results.get(description);
            TestResult aggResult = TestResult.merge(attemptRes, strategy);
            finalTestResults.put(description, aggResult);
        }
        return finalTestResults;
    }

    /** Decides whether an aggregation of runs should be considered failed under the strategy. */
    private static boolean isRunFailed(
            boolean atLeastOneFailure, boolean allFailures, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return allFailures;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return atLeastOneFailure;
        }
    }

    /** Decides whether an aggregation of runs should be considered complete under the strategy. */
    private static boolean isRunComplete(
            boolean isAtLeastOneCompleted, boolean areAllCompleted, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return isAtLeastOneCompleted;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return areAllCompleted;
        }
    }
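
    // Illustrative sketch of the strategy semantics above: given two attempts where
    // only the second succeeded (at least one failure, but not all failures):
    //
    //   isRunFailed(true, false, MergeStrategy.ONE_TESTRUN_PASS_IS_PASS) // false
    //   isRunFailed(true, false, MergeStrategy.ANY_FAIL_IS_FAIL)         // true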
}