• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/python -u
2#
3# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6#
7# Site extension of the default parser. Generate JSON reports and stack traces.
8#
9# This site parser is used to generate a JSON report of test failures, crashes,
10# and the associated logs for later consumption by an Email generator. If any
11# crashes are found, the debug symbols for the build are retrieved (either from
12# Google Storage or local cache) and core dumps are symbolized.
13#
# The parser uses the test report generator which comes bundled with the Chrome
# OS source tree in order to maintain consistency, and to avoid keeping track
# of any secondary failure whitelists.
17#
# Stack trace generation is done by the minidump_stackwalk utility which is also
# bundled with the Chrome OS source tree. Requires gsutil and cros_sdk utilities
# to be present in the path.
21#
22# The path to the Chrome OS source tree is defined in global_config under the
23# CROS section as 'source_tree'.
24#
25# Existing parse behavior is kept completely intact. If the site parser is not
26# configured it will print a debug message and exit after default parser is
27# called.
28#
29
30import errno
31import json
32import os
33import sys
34
35import common
36from autotest_lib.client.bin import utils
37from autotest_lib.client.common_lib import global_config
38from autotest_lib.tko import models
39from autotest_lib.tko import parse
40from autotest_lib.tko import utils as tko_utils
41from autotest_lib.tko.parsers import version_0
42
43
# Name of the report file to produce upon completion.
_JSON_REPORT_FILE = 'results.json'

# Number of log lines to include from the error log with each test result.
_ERROR_LOG_LIMIT = 10

# Status information is generally more useful than the error log, so provide a lot.
_STATUS_LOG_LIMIT = 50
52
53
class StackTrace(object):
    """Handles stack trace generation related duties.

    NOTE(review): the original docstring referred to a generate() method,
    which is not defined in this file -- it may live in another chunk or
    have been removed.
    """

    # Cache dir relative to chroot.
    _CACHE_DIR = 'tmp/symbol-cache'

    # Flag file indicating symbols have completed processing. One is created in
    # each new symbols directory.
    _COMPLETE_FILE = '.completed'

    # Maximum cache age in days; all older cache entries will be deleted.
    _MAX_CACHE_AGE_DAYS = 1

    # Directory inside of tarball under which the actual symbols are stored.
    _SYMBOL_DIR = 'debug/breakpad'

    # Maximum time (seconds) to wait for another instance to finish processing
    # symbols.
    _SYMBOL_WAIT_TIMEOUT = 10 * 60


    def __init__(self, results_dir, cros_src_dir):
        """Initializes class variables.

        Args:
            results_dir: Full path to the results directory to process.
            cros_src_dir: Full path to Chrome OS source tree. Must have a
                working chroot.
        """
        self._results_dir = results_dir
        self._cros_src_dir = cros_src_dir
        self._chroot_dir = os.path.join(self._cros_src_dir, 'chroot')


    def _get_cache_dir(self):
        """Returns a path to the local cache dir, creating if nonexistent.

        Symbol cache is kept inside the chroot so we don't have to mount it
        into chroot for symbol generation each time.

        Returns:
            A path to the local cache dir.
        """
        cache_dir = os.path.join(self._chroot_dir, self._CACHE_DIR)
        if not os.path.exists(cache_dir):
            try:
                os.makedirs(cache_dir)
            # Fixed from the Python 2-only 'except OSError, e' form; 'as'
            # works on Python 2.6+ and 3.
            except OSError as e:
                # A concurrent parser may have created the dir between the
                # exists() check and makedirs(); only re-raise real failures.
                if e.errno != errno.EEXIST:
                    raise
        return cache_dir


    def _get_job_name(self):
        """Returns job name read from 'label' keyval in the results dir.

        Returns:
            Job name string, or None if the 'label' keyval is missing.
        """
        return models.job.read_keyval(self._results_dir).get('label')


    def _parse_job_name(self, job_name):
        """Returns a tuple of (board, rev, version) parsed from the job name.

        Handles job names of the form "<board-rev>-<version>...",
        "<board-rev>-<rev>-<version>...", and
        "<board-rev>-<rev>-<version_0>_to_<version>..."

        Args:
            job_name: A job name of the format detailed above.

        Returns:
            A tuple of (board, rev, version) parsed from the job name.
        """
        # The version is the second field counted from the right (after
        # splitting off the trailing build/attempt fields); for the
        # "<version_0>_to_<version>" form, keep only the target version.
        version = job_name.rsplit('-', 3)[1].split('_')[-1]
        arch, board, rev = job_name.split('-', 3)[:3]
        return '-'.join([arch, board]), rev, version
131
132
def parse_reason(path):
    """Process status.log or status and return a test-name: reason dict.

    Args:
        path: Directory expected to contain a 'status.log' (preferred) or
            'status' file.

    Returns:
        A dict mapping test name to failure reason (multi-line reasons are
        concatenated onto the same entry), or None if neither status file
        exists.
    """
    status_log = os.path.join(path, 'status.log')
    if not os.path.exists(status_log):
        status_log = os.path.join(path, 'status')
    if not os.path.exists(status_log):
        return

    reasons = {}
    last_test = None
    # 'with' guarantees the log file is closed even if parsing raises; the
    # original leaked the file handle via open(...).readlines().
    with open(status_log) as log_file:
        for line in log_file:
            try:
                # Since we just want the status line parser, it's okay to use
                # the version_0 parser directly; all other parsers extend it.
                status = version_0.status_line.parse_line(line)
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any parse failure still yields None.
            except Exception:
                status = None

            # Assemble multi-line reasons into a single reason.
            if not status and last_test:
                reasons[last_test] += line

            # Skip non-lines, empty lines, and successful tests.
            if not status or not status.reason.strip() or status.status == 'GOOD':
                continue

            # Update last_test name, so we know which reason to append
            # multi-line reasons to.
            last_test = status.testname
            reasons[last_test] = status.reason

    return reasons
165
166
def main():
    """Runs the default TKO parser, then generates a JSON failure report.

    The results directory is taken from the last command line argument. If
    the Chrome OS source tree is not configured ('source_tree' under the
    CROS section of global_config), only the default parser runs and a debug
    message is printed.
    """
    # Call the original parser.
    parse.main()

    # Results directory should be the last argument passed in.
    results_dir = sys.argv[-1]

    # Load the Chrome OS source tree location.
    cros_src_dir = global_config.global_config.get_config_value(
        'CROS', 'source_tree', default='')

    # We want the standard Autotest parser to keep working even if we haven't
    # been setup properly.
    if not cros_src_dir:
        tko_utils.dprint(
            'Unable to load required components for site parser. Falling back'
            ' to default parser.')
        return

    # Load ResultCollector from the Chrome OS source tree.
    sys.path.append(os.path.join(
        cros_src_dir, 'src/platform/crostestutils/utils_py'))
    from generate_test_report import ResultCollector

    # Collect results using the standard Chrome OS test report generator. Doing
    # so allows us to use the same crash white list and reporting standards the
    # VM based test instances use.
    # TODO(scottz): Reevaluate this code usage. crosbug.com/35282
    results = ResultCollector().RecursivelyCollectResults(results_dir)
    # We don't care about successful tests. We only want failed or crashing.
    # Note: list(results) makes a shallow copy of the list, so it's safe to
    # remove entries from |results| while iterating over the copy.
    for test_status in list(results):
        if test_status['crashes']:
            continue
        elif test_status['status'] == 'PASS':
            results.remove(test_status)

    # Filter results and collect logs. If we can't find a log for the test, skip
    # it. The Emailer will fill in the blanks using Database data later.
    filtered_results = {}
    for test_dict in results:
        result_log = ''
        test_name = os.path.basename(test_dict['testdir'])
        error = os.path.join(
                test_dict['testdir'], 'debug', '%s.ERROR' % test_name)

        # If the error log doesn't exist, we don't care about this test.
        if not os.path.isfile(error):
            continue

        # Parse failure reason for this test. parse_reason() returns None when
        # no status log exists, so guard before iterating (the original would
        # have crashed here). items() replaces py2-only iteritems().
        reasons = parse_reason(test_dict['testdir']) or {}
        for t, r in reasons.items():
            # Server tests may have subtests which will each have their own
            # reason, so display the test name for the subtest in that case.
            if t != test_name:
                result_log += '%s: ' % t
            result_log += '%s\n\n' % r.strip()

        # Trim result_log to the last _STATUS_LOG_LIMIT lines.
        short_result_log = '\n'.join(
            result_log.splitlines()[-1 * _STATUS_LOG_LIMIT:]).strip()

        # Let the reader know we've trimmed the log.
        if short_result_log != result_log.strip():
            short_result_log = (
                '[...displaying only the last %d status log lines...]\n%s' % (
                    _STATUS_LOG_LIMIT, short_result_log))

        # Pull out only the last _ERROR_LOG_LIMIT lines of the file.
        short_log = utils.system_output('tail -n %d %s' % (
            _ERROR_LOG_LIMIT, error))

        # Let the reader know we've trimmed the log.
        if len(short_log.splitlines()) == _ERROR_LOG_LIMIT:
            short_log = (
                '[...displaying only the last %d error log lines...]\n%s' % (
                    _ERROR_LOG_LIMIT, short_log))

        filtered_results[test_name] = test_dict
        filtered_results[test_name]['log'] = '%s\n\n%s' % (
            short_result_log, short_log)

    # Generate JSON dump of results. Store in results dir. 'with' ensures the
    # report file is flushed and closed even if json.dump raises.
    with open(os.path.join(results_dir, _JSON_REPORT_FILE), 'w') as json_file:
        json.dump(filtered_results, json_file)
253
254
# Invoke the site parser only when run as a script; the module can also be
# imported for its helpers (e.g. parse_reason) without side effects.
if __name__ == '__main__':
    main()
257