# Lint as: python2, python3
# Copyright (c) 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Utility class to parse the output of a gtest suite run."""

import re


class gtest_parser(object):
    """This class knows how to understand GTest test output.

    The code was borrowed with minor changes from chrome utility gtest_command.
    """

    def __init__(self):
        # State tracking for log parsing
        self._current_test = ''
        self._failure_description = []
        self._current_suppression_hash = ''
        self._current_suppression = []

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self.internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout').
        # The description is a list of lines detailing the test's error, as
        # reported in the log.
        self._test_status = {}

        # Suppressions are stored here as 'hash': [suppression].
        self._suppressions = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self.disabled_tests = 0
        self.flaky_tests = 0

        # Regular expressions for parsing GTest logs. Test names look like
        #   SomeTestCase.SomeTest
        #   SomeName/SomeTestCase.SomeTest/1
        # This regexp also matches SomeName.SomeTest/1, which should be
        # harmless.
        test_name_regexp = r'((\w+/)?\w+\.\w+(\.\w+)?(/\d+)?)'
        self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
        self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
        self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
        self._test_timeout = re.compile(
                r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
        self._disabled = re.compile(r'  YOU HAVE (\d+) DISABLED TEST')
        self._flaky = re.compile(r'  YOU HAVE (\d+) FLAKY TEST')
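
        # For illustration, the patterns above are meant to match GTest log
        # lines such as (timings and counts vary):
        #   [ RUN      ] FooTest.Bar
        #   [       OK ] FooTest.Bar (5 ms)
        #   [  FAILED  ] FooTest.Bar (5 ms)
        #   YOU HAVE 2 DISABLED TESTS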

        self._suppression_start = re.compile(
                r'Suppression \(error hash=#([0-9A-F]+)#\):')
        self._suppression_end = re.compile(r'^}\s*$')

        # TODO b:169251326 terms below are set outside of this codebase
        # and should be updated when possible. ("master" -> "main")
        self._main_name_re = re.compile(
                r'\[Running for master: "([^"]*)"')  # nocheck
        self.main_name = ''

        self._error_logging_start_re = re.compile('=' * 70)
        # Matches summary lines such as 'FAIL: Foo.Bar' or 'ERROR: Foo.Bar'.
        self._error_logging_test_name_re = re.compile(
            '(?:FAIL|ERROR): ' + test_name_regexp)
        self._error_logging_end_re = re.compile('-' * 70)
        self._error_logging_first_dash_found = False
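
        # For illustration, the three _error_logging patterns above are meant
        # to bracket failure-summary blocks of roughly this form (details
        # elided):
        #   =========================  <- a row of 70 '=' characters
        #   FAIL: FooTest.Bar
        #   ...failure description lines...
        #   -------------------------  <- a row of 70 '-' characters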

    def _TestsByStatus(self, status, include_fails, include_flaky):
        """Returns list of tests with the given status.

        Args:
            status: test results status to search for.
            include_fails: If False, tests containing 'FAILS_' anywhere in
                their names will be excluded from the list.
            include_flaky: If False, tests containing 'FLAKY_' anywhere in
                their names will be excluded from the list.
        Returns:
            List of tests with the status.
        """
        test_list = [x[0] for x in self._test_status.items()
                     if self._StatusOfTest(x[0]) == status]

        if not include_fails:
            test_list = [x for x in test_list if x.find('FAILS_') == -1]
        if not include_flaky:
            test_list = [x for x in test_list if x.find('FLAKY_') == -1]

        return test_list

    def _StatusOfTest(self, test):
        """Returns the status code for the given test, or 'not known'."""
        test_status = self._test_status.get(test, ('not known', []))
        return test_status[0]

    def _RecordError(self, line, reason):
        """Record a log line that produced a parsing error.

        Args:
            line: text of the line at which the error occurred.
            reason: a string describing the error.
        """
        self.internal_error_lines.append("%s: %s [%s]" % (self._line_number,
                                                          line.strip(),
                                                          reason))

    def TotalTests(self):
        """Returns the number of parsed tests."""
        return len(self._test_status)

    def PassedTests(self):
        """Returns list of tests that passed."""
        return self._TestsByStatus('OK', False, False)

    def FailedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that failed, timed out, or didn't finish.

        This list will be incorrect until the complete log has been processed,
        because it will show currently running tests as having failed.

        Args:
            include_fails: If true, all failing tests with FAILS_ in their
                names will be included. Otherwise, they will only be included
                if they crashed.
            include_flaky: If true, all failing tests with FLAKY_ in their
                names will be included. Otherwise, they will only be included
                if they crashed.
        Returns:
            List of failed tests.
        """
        return (self._TestsByStatus('failed', include_fails, include_flaky) +
                self._TestsByStatus('timeout', include_fails, include_flaky) +
                self._TestsByStatus('started', include_fails, include_flaky))

    def FailureDescription(self, test):
        """Returns a list containing the failure description for the given test.

        If the test didn't fail or time out, returns [].
        Args:
            test: Name of the test whose failure reason to find.
        Returns:
            List of lines describing the test's failure.
        """
        test_status = self._test_status.get(test, ('', []))
        return test_status[1]

    def SuppressionHashes(self):
        """Returns list of suppression hashes found in the log."""
        return list(self._suppressions.keys())

    def Suppression(self, suppression_hash):
        """Returns a list containing the suppression for a given hash.

        If the suppression hash doesn't exist, returns [].

        Args:
            suppression_hash: hash identifying the suppression.
        Returns:
            List of lines making up the suppression.
        """
        return self._suppressions.get(suppression_hash, [])

    def ProcessLogLine(self, line):
        """This is called once with each line of the test log."""

        # Track line number for error messages.
        self._line_number += 1

        if not self.main_name:
            results = self._main_name_re.search(line)
            if results:
                self.main_name = results.group(1)

        # Is it a line reporting disabled tests?
        results = self._disabled.search(line)
        if results:
            try:
                disabled = int(results.group(1))
            except ValueError:
                disabled = 0
            if disabled > 0 and isinstance(self.disabled_tests, int):
                self.disabled_tests += disabled
            else:
                # If we can't parse the line, at least give a heads-up. This is
                # a safety net for a case that shouldn't happen but isn't a
                # fatal error.
                self.disabled_tests = 'some'
            return

        # Is it a line reporting flaky tests?
        results = self._flaky.search(line)
        if results:
            try:
                flaky = int(results.group(1))
            except ValueError:
                flaky = 0
            if flaky > 0 and isinstance(self.flaky_tests, int):
                self.flaky_tests = flaky
            else:
                # If we can't parse the line, at least give a heads-up. This is
                # a safety net for a case that shouldn't happen but isn't a
                # fatal error.
                self.flaky_tests = 'some'
            return

        # Is it the start of a test?
        results = self._test_start.search(line)
        if results:
            test_name = results.group(1)
            if test_name in self._test_status:
                self._RecordError(line, 'test started more than once')
                return
            if self._current_test:
                status = self._StatusOfTest(self._current_test)
                if status in ('OK', 'failed', 'timeout'):
                    self._RecordError(line, 'start while in status %s' % status)
                    return
                if status not in ('failed', 'timeout'):
                    self._test_status[self._current_test] = (
                        'failed', self._failure_description)
            self._test_status[test_name] = ('started', ['Did not complete.'])
            self._current_test = test_name
            self._failure_description = []
            return

        # Is it a test success line?
        results = self._test_ok.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status != 'started':
                self._RecordError(line, 'success while in status %s' % status)
                return
            self._test_status[test_name] = ('OK', [])
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test failure line?
        results = self._test_fail.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed', 'timeout'):
                self._RecordError(line, 'failure while in status %s' % status)
                return
            # Don't overwrite the failure description when a failing test is
            # listed a second time in the summary, or if it was already
            # recorded as timing out.
            if status not in ('failed', 'timeout'):
                self._test_status[test_name] = ('failed',
                                                self._failure_description)
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test timeout line?
        results = self._test_timeout.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed'):
                self._RecordError(line, 'timeout while in status %s' % status)
                return
            self._test_status[test_name] = (
                'timeout', self._failure_description + ['Killed (timed out).'])
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the start of a new valgrind suppression?
        results = self._suppression_start.search(line)
        if results:
            suppression_hash = results.group(1)
            if suppression_hash in self._suppressions:
                self._RecordError(line, 'suppression reported more than once')
                return
            self._suppressions[suppression_hash] = []
            self._current_suppression_hash = suppression_hash
            self._current_suppression = [line]
            return

        # Is it the end of a valgrind suppression?
        results = self._suppression_end.search(line)
        if results and self._current_suppression_hash:
            self._current_suppression.append(line)
            self._suppressions[self._current_suppression_hash] = (
                self._current_suppression)
            self._current_suppression_hash = ''
            self._current_suppression = []
            return

        # Is it the start of a test summary error message?
        results = self._error_logging_test_name_re.search(line)
        if results:
            test_name = results.group(1)
            self._test_status[test_name] = ('failed', ['Output not found.'])
            self._current_test = test_name
            self._failure_description = []
            self._error_logging_first_dash_found = False
            return

        # Is it the start of the next test summary signaling the end
        # of the previous message?
        results = self._error_logging_start_re.search(line)
        if results and self._current_test:
            self._test_status[self._current_test] = ('failed',
                                                     self._failure_description)
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the end of the extra test failure summaries?
        results = self._error_logging_end_re.search(line)
        if results and self._current_test:
            if self._error_logging_first_dash_found:
                self._test_status[self._current_test] = (
                    'failed', self._failure_description)
                self._failure_description = []
                self._current_test = ''
            self._error_logging_first_dash_found = True
            return

        # Random line: if we're in a suppression, collect it. Suppressions are
        # generated after all tests are finished, so this should always belong
        # to the current suppression hash.
        if self._current_suppression_hash:
            self._current_suppression.append(line)
            return

        # Random line: if we're in a test, collect it for the failure
        # description. Tests may run simultaneously, so this might be off, but
        # it's worth a try.
        if self._current_test:
            self._failure_description.append(line)
341