• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Common file shared by test_push of autotest and skylab.

autotest: site_utils/test_push.py
skylab: venv/skylab_staging/test_push.py
"""

import collections
import re
# Expected status lists keyed by test-name regular expression.
# NOTE: key (insertion) order is preserved intentionally; callers iterate
# these patterns in order when matching results.
EXPECTED_TEST_RESULTS = {
        '^SERVER_JOB$': ['GOOD'],
        # This is related to dummy_Fail/control.dependency.
        'dummy_Fail.dependency$': ['TEST_NA'],
        'login_LoginSuccess.*': ['GOOD'],
        'provision_AutoUpdate.double': ['GOOD'],
        'dummy_Pass.*': ['GOOD'],
        'dummy_Fail.Fail$': ['FAIL'],
        'dummy_Fail.Error$': ['ERROR'],
        'dummy_Fail.Warn$': ['WARN'],
        'dummy_Fail.NAError$': ['TEST_NA'],
        'dummy_Fail.Crash$': ['GOOD'],
        'autotest_SyncCount$': ['GOOD'],
}
28
# Expected status lists for the dummy-only push, keyed by test-name regex.
EXPECTED_TEST_RESULTS_DUMMY = {
        '^SERVER_JOB$': ['GOOD'],
        'dummy_Pass.*': ['GOOD'],
        'dummy_Fail.Fail': ['FAIL'],
        'dummy_Fail.Warn': ['WARN'],
        'dummy_Fail.Crash': ['GOOD'],
        'dummy_Fail.Error': ['ERROR'],
        'dummy_Fail.NAError': ['TEST_NA'],
}
37
# Expected status lists for the powerwash push, keyed by test-name regex.
EXPECTED_TEST_RESULTS_POWERWASH = {
        'platform_Powerwash': ['GOOD'],
        'SERVER_JOB': ['GOOD'],
}
41
42_TestPushErrors = collections.namedtuple(
43        '_TestPushErrors',
44        [
45                'mismatch_errors',
46                'unknown_tests',
47                'missing_tests',
48        ]
49)
50
51
def summarize_push(test_views, expected_results, ignored_tests=None):
    """Summarize the test push errors.

    @param test_views: Mapping of test name to a list of test statuses.
    @param expected_results: Mapping of test-name pattern to the expected
                             list of statuses.
    @param ignored_tests: Optional list of test name patterns whose
                          mismatches are ignored. Defaults to no ignores.

    @return: A list of summary strings describing any push errors.
    """
    # Use a None sentinel instead of the original mutable default ([]):
    # a shared list default is evaluated once and reused across calls.
    if ignored_tests is None:
        ignored_tests = []
    test_push_errors = _match_test_results(test_views, expected_results,
                                           ignored_tests)
    return _generate_push_summary(test_push_errors)
57
58
def _match_test_results(test_views, expected_results, ignored_tests):
    """Match test results with expected results.

    @param test_views: A defaultdict where keys are test names and values are
                       lists of test statuses, e.g.,
                       {'dummy_Fail.Error': ['ERROR', 'ERROR'],
                        'dummy_Fail.NAError': ['TEST_NA'],
                        'dummy_Fail.RetrySuccess': ['ERROR', 'GOOD'],
                        }
    @param expected_results: A dictionary of test name to expected test result.
                             Has the same format as test_views.
    @param ignored_tests: A list of test name patterns. Any mismatch between
                          test results and expected test results that matches
                          one of these patterns is ignored.

    @return: A _TestPushErrors tuple.
    """
    mismatch_errors = []
    unknown_tests = []
    found_keys = set()
    # Use items() rather than the Python-2-only iteritems(): it behaves the
    # same here and matches the inner loop below, which already uses items().
    for test_name, test_status_list in test_views.items():
        test_found = False
        for test_name_pattern, expected_result in expected_results.items():
            if re.search(test_name_pattern, test_name):
                test_found = True
                found_keys.add(test_name_pattern)
                # Compare sorted copies: status order is not significant,
                # only the multiset of statuses.
                if (sorted(expected_result) != sorted(test_status_list) and
                    _is_significant(test_name, ignored_tests)):
                    error = ('%s Expected: %s, Actual: %s' %
                             (test_name, expected_result, test_status_list))
                    mismatch_errors.append(error)

        if not test_found and _is_significant(test_name, ignored_tests):
            unknown_tests.append(test_name)

    # Expected patterns that never matched any test that actually ran.
    missing_tests = set(expected_results.keys()) - found_keys
    missing_tests = [t for t in missing_tests
                     if _is_significant(t, ignored_tests)]
    return _TestPushErrors(mismatch_errors=mismatch_errors,
                           unknown_tests=unknown_tests,
                           missing_tests=missing_tests)
100
101
102def _is_significant(test, ignored_tests_patterns):
103    return all([test not in m for m in ignored_tests_patterns])
104
105
106def _generate_push_summary(test_push_errors):
107    """Generate a list of summary based on the test_push results."""
108    summary = []
109    if test_push_errors.mismatch_errors:
110        summary.append(('Results of %d test(s) do not match expected '
111                        'values:') % len(test_push_errors.mismatch_errors))
112        summary.extend(test_push_errors.mismatch_errors)
113        summary.append('\n')
114
115    if test_push_errors.unknown_tests:
116        summary.append('%d test(s) are not expected to be run:' %
117                       len(test_push_errors.unknown_tests))
118        summary.extend(test_push_errors.unknown_tests)
119        summary.append('\n')
120
121    if test_push_errors.missing_tests:
122        summary.append('%d test(s) are missing from the results:' %
123                       len(test_push_errors.missing_tests))
124        summary.extend(test_push_errors.missing_tests)
125        summary.append('\n')
126
127    return summary
128