• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/python -u
2#
3# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6#
7# Site extension of the default parser. Generate JSON reports and stack traces.
8#
9# This site parser is used to generate a JSON report of test failures, crashes,
10# and the associated logs for later consumption by an Email generator. If any
11# crashes are found, the debug symbols for the build are retrieved (either from
12# Google Storage or local cache) and core dumps are symbolized.
13#
14# The parser uses the test report generator which comes bundled with the Chrome
15# OS source tree in order to maintain consistency. As well as not having to keep
16# track of any secondary failure white lists.
17#
18# Stack trace generation is done by the minidump_stackwalk utility which is also
19# bundled with the Chrome OS source tree. Requires gsutil and cros_sdk utilties
20# be present in the path.
21#
22# The path to the Chrome OS source tree is defined in global_config under the
23# CROS section as 'source_tree'.
24#
25# Existing parse behavior is kept completely intact. If the site parser is not
26# configured it will print a debug message and exit after default parser is
27# called.
28#
29
30import errno, os, json, shutil, sys, tempfile, time
31
32import common
33from autotest_lib.client.bin import os_dep, utils
34from autotest_lib.client.common_lib import global_config
35from autotest_lib.tko import models, parse, utils as tko_utils
36from autotest_lib.tko.parsers import version_0
37
38
# Name of the report file to produce upon completion (written into the
# results directory; consumed later by the Email generator).
_JSON_REPORT_FILE = 'results.json'

# Number of log lines to include from error log with each test results.
_ERROR_LOG_LIMIT = 10

# Status information is generally more useful than error log, so provide a lot.
_STATUS_LOG_LIMIT = 50
48
class StackTrace(object):
    """Handles all stack trace generation related duties.

    NOTE(review): the original docstring says "See generate()", but no
    generate() method is defined in this file -- confirm whether it was
    removed or lives elsewhere.
    """

    # Cache dir relative to chroot.
    _CACHE_DIR = 'tmp/symbol-cache'

    # Flag file indicating symbols have completed processing. One is created in
    # each new symbols directory.
    _COMPLETE_FILE = '.completed'

    # Maximum cache age in days; all older cache entries will be deleted.
    _MAX_CACHE_AGE_DAYS = 1

    # Directory inside of tarball under which the actual symbols are stored.
    _SYMBOL_DIR = 'debug/breakpad'

    # Maximum time (seconds) to wait for another instance to finish processing
    # symbols.
    _SYMBOL_WAIT_TIMEOUT = 10 * 60


    def __init__(self, results_dir, cros_src_dir):
        """Initializes class variables.

        Args:
            results_dir: Full path to the results directory to process.
            cros_src_dir: Full path to Chrome OS source tree. Must have a
                working chroot.
        """
        self._results_dir = results_dir
        self._cros_src_dir = cros_src_dir
        self._chroot_dir = os.path.join(self._cros_src_dir, 'chroot')


    def _get_cache_dir(self):
        """Returns a path to the local cache dir, creating if nonexistent.

        Symbol cache is kept inside the chroot so we don't have to mount it
        into chroot for symbol generation each time.

        Returns:
            A path to the local cache dir.
        """
        cache_dir = os.path.join(self._chroot_dir, self._CACHE_DIR)
        # EAFP: just try to create it and tolerate EEXIST. This replaces the
        # previous exists()-then-makedirs() pattern, which raced with other
        # parser instances, and fixes the Python-2-only `except OSError, e`
        # syntax (``as e`` is valid on Python 2.6+ and 3).
        try:
            os.makedirs(cache_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        return cache_dir


    def _get_job_name(self):
        """Returns job name read from 'label' keyval in the results dir.

        Returns:
            Job name string, or None if the 'label' keyval is absent.
        """
        return models.job.read_keyval(self._results_dir).get('label')


    def _parse_job_name(self, job_name):
        """Returns a tuple of (board, rev, version) parsed from the job name.

        Handles job names of the form "<board-rev>-<version>...",
        "<board-rev>-<rev>-<version>...", and
        "<board-rev>-<rev>-<version_0>_to_<version>..."

        Args:
            job_name: A job name of the format detailed above.

        Returns:
            A tuple of (board, rev, version) parsed from the job name.
        """
        # For "A_to_B" update-test names, keep only the target version B.
        version = job_name.rsplit('-', 3)[1].split('_')[-1]
        arch, board, rev = job_name.split('-', 3)[:3]
        return '-'.join([arch, board]), rev, version
126
127
def parse_reason(path):
    """Process status.log or status and return a test-name: reason dict.

    Args:
        path: Directory expected to contain a 'status.log' (preferred) or
            'status' file.

    Returns:
        A dict mapping test names to their (possibly multi-line) failure
        reasons, or None if neither status file exists.
    """
    status_log = os.path.join(path, 'status.log')
    if not os.path.exists(status_log):
        status_log = os.path.join(path, 'status')
    if not os.path.exists(status_log):
        return

    reasons = {}
    last_test = None
    # Use a context manager so the log file is always closed; iterating the
    # file directly also avoids loading the whole log into memory at once.
    with open(status_log) as status_file:
        for line in status_file:
            try:
                # Since we just want the status line parser, it's okay to use
                # the version_0 parser directly; all other parsers extend it.
                status = version_0.status_line.parse_line(line)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate; any parse failure means "not a status line".
                status = None

            # Assemble multi-line reasons into a single reason.
            if not status and last_test:
                reasons[last_test] += line

            # Skip non-lines, empty lines, and successful tests.
            if not status or not status.reason.strip() or status.status == 'GOOD':
                continue

            # Update last_test name, so we know which reason to append
            # multi-line reasons to.
            last_test = status.testname
            reasons[last_test] = status.reason

    return reasons
160
161
def main():
    """Runs the stock parser, then emits a JSON report of failures/crashes.

    Existing parse behavior is kept intact: if the CROS source_tree config
    value is unset, this prints a debug message and returns after the default
    parser has run.
    """
    # Call the original parser.
    parse.main()

    # Results directory should be the last argument passed in.
    results_dir = sys.argv[-1]

    # Load the Chrome OS source tree location.
    cros_src_dir = global_config.global_config.get_config_value(
        'CROS', 'source_tree', default='')

    # We want the standard Autotest parser to keep working even if we haven't
    # been setup properly.
    if not cros_src_dir:
        tko_utils.dprint(
            'Unable to load required components for site parser. Falling back'
            ' to default parser.')
        return

    # Load ResultCollector from the Chrome OS source tree.
    sys.path.append(os.path.join(
        cros_src_dir, 'src/platform/crostestutils/utils_py'))
    from generate_test_report import ResultCollector

    # Collect results using the standard Chrome OS test report generator. Doing
    # so allows us to use the same crash white list and reporting standards the
    # VM based test instances use.
    # TODO(scottz): Reevaluate this code usage. crosbug.com/35282
    results = ResultCollector().RecursivelyCollectResults(results_dir)
    # We don't care about successful tests. We only want failed or crashing.
    # Note: list(results) generates a copy, so it's safe to remove while
    # iterating.
    for test_status in list(results):
        if test_status['crashes']:
            continue
        elif test_status['status'] == 'PASS':
            results.remove(test_status)

    # Filter results and collect logs. If we can't find a log for the test,
    # skip it. The Emailer will fill in the blanks using Database data later.
    filtered_results = {}
    for test_dict in results:
        result_log = ''
        test_name = os.path.basename(test_dict['testdir'])
        error = os.path.join(
                test_dict['testdir'], 'debug', '%s.ERROR' % test_name)

        # If the error log doesn't exist, we don't care about this test.
        if not os.path.isfile(error):
            continue

        # Parse failure reason for this test. parse_reason() returns None when
        # neither status.log nor status exists; treat that as "no reasons"
        # instead of crashing on .iteritems() (bug fix).
        reasons = parse_reason(test_dict['testdir']) or {}
        for t, r in reasons.iteritems():
            # Server tests may have subtests which will each have their own
            # reason, so display the test name for the subtest in that case.
            if t != test_name:
                result_log += '%s: ' % t
            result_log += '%s\n\n' % r.strip()

        # Trim result_log to the last _STATUS_LOG_LIMIT lines.
        short_result_log = '\n'.join(
            result_log.splitlines()[-1 * _STATUS_LOG_LIMIT:]).strip()

        # Let the reader know we've trimmed the log.
        if short_result_log != result_log.strip():
            short_result_log = (
                '[...displaying only the last %d status log lines...]\n%s' % (
                    _STATUS_LOG_LIMIT, short_result_log))

        # Pull out only the last _ERROR_LOG_LIMIT lines of the file.
        short_log = utils.system_output('tail -n %d %s' % (
            _ERROR_LOG_LIMIT, error))

        # Let the reader know we've trimmed the log.
        if len(short_log.splitlines()) == _ERROR_LOG_LIMIT:
            short_log = (
                '[...displaying only the last %d error log lines...]\n%s' % (
                    _ERROR_LOG_LIMIT, short_log))

        filtered_results[test_name] = test_dict
        filtered_results[test_name]['log'] = '%s\n\n%s' % (
            short_result_log, short_log)

    # Generate JSON dump of results. Store in results dir. Use a context
    # manager so the report file is closed even if json.dump raises.
    with open(os.path.join(results_dir, _JSON_REPORT_FILE), 'w') as json_file:
        json.dump(filtered_results, json_file)
248
249
# Script entry point: run the site parser (which first invokes the stock
# Autotest parser) when executed directly.
if __name__ == '__main__':
    main()
252