1#!/usr/bin/python3 -u 2# 3# Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 4# Use of this source code is governed by a BSD-style license that can be 5# found in the LICENSE file. 6# 7# Site extension of the default parser. Generate JSON reports and stack traces. 8# 9# This site parser is used to generate a JSON report of test failures, crashes, 10# and the associated logs for later consumption by an Email generator. If any 11# crashes are found, the debug symbols for the build are retrieved (either from 12# Google Storage or local cache) and core dumps are symbolized. 13# 14# The parser uses the test report generator which comes bundled with the Chrome 15# OS source tree in order to maintain consistency. As well as not having to keep 16# track of any secondary failure allow lists. 17# 18# Stack trace generation is done by the minidump_stackwalk utility which is also 19# bundled with the ChromeOS source tree. Requires gsutil and cros_sdk utilties 20# be present in the path. 21# 22# The path to the ChromeOS source tree is defined in global_config under the 23# CROS section as 'source_tree'. 24# 25# Existing parse behavior is kept completely intact. If the site parser is not 26# configured it will print a debug message and exit after default parser is 27# called. 28# 29 30from __future__ import absolute_import 31from __future__ import division 32from __future__ import print_function 33 34import errno 35import json 36import os 37import sys 38 39import common 40from autotest_lib.client.bin import utils 41from autotest_lib.client.common_lib import global_config 42from autotest_lib.tko import models 43from autotest_lib.tko import parse 44from autotest_lib.tko import utils as tko_utils 45from autotest_lib.tko.parsers import version_0 46import six 47 48 49# Name of the report file to produce upon completion. 50_JSON_REPORT_FILE = 'results.json' 51 52# Number of log lines to include from error log with each test results. 53_ERROR_LOG_LIMIT = 10 54 55# Status information is generally more useful than error log, so provide a lot. 56_STATUS_LOG_LIMIT = 50 57 58 59class StackTrace(object): 60 """Handles all stack trace generation related duties. See generate().""" 61 62 # Cache dir relative to chroot. 63 _CACHE_DIR = 'tmp/symbol-cache' 64 65 # Flag file indicating symbols have completed processing. One is created in 66 # each new symbols directory. 67 _COMPLETE_FILE = '.completed' 68 69 # Maximum cache age in days; all older cache entries will be deleted. 70 _MAX_CACHE_AGE_DAYS = 1 71 72 # Directory inside of tarball under which the actual symbols are stored. 73 _SYMBOL_DIR = 'debug/breakpad' 74 75 # Maximum time to wait for another instance to finish processing symbols. 76 _SYMBOL_WAIT_TIMEOUT = 10 * 60 77 78 79 def __init__(self, results_dir, cros_src_dir): 80 """Initializes class variables. 81 82 Args: 83 results_dir: Full path to the results directory to process. 84 cros_src_dir: Full path to ChromeOS source tree. Must have a 85 working chroot. 86 """ 87 self._results_dir = results_dir 88 self._cros_src_dir = cros_src_dir 89 self._chroot_dir = os.path.join(self._cros_src_dir, 'chroot') 90 91 92 def _get_cache_dir(self): 93 """Returns a path to the local cache dir, creating if nonexistent. 94 95 Symbol cache is kept inside the chroot so we don't have to mount it into 96 chroot for symbol generation each time. 97 98 Returns: 99 A path to the local cache dir. 100 """ 101 cache_dir = os.path.join(self._chroot_dir, self._CACHE_DIR) 102 if not os.path.exists(cache_dir): 103 try: 104 os.makedirs(cache_dir) 105 except OSError as e: 106 if e.errno != errno.EEXIST: 107 raise 108 return cache_dir 109 110 111 def _get_job_name(self): 112 """Returns job name read from 'label' keyval in the results dir. 113 114 Returns: 115 Job name string. 116 """ 117 return models.job.read_keyval(self._results_dir).get('label') 118 119 120 def _parse_job_name(self, job_name): 121 """Returns a tuple of (board, rev, version) parsed from the job name. 122 123 Handles job names of the form "<board-rev>-<version>...", 124 "<board-rev>-<rev>-<version>...", and 125 "<board-rev>-<rev>-<version_0>_to_<version>..." 126 127 Args: 128 job_name: A job name of the format detailed above. 129 130 Returns: 131 A tuple of (board, rev, version) parsed from the job name. 132 """ 133 version = job_name.rsplit('-', 3)[1].split('_')[-1] 134 arch, board, rev = job_name.split('-', 3)[:3] 135 return '-'.join([arch, board]), rev, version 136 137 138def parse_reason(path): 139 """Process status.log or status and return a test-name: reason dict.""" 140 status_log = os.path.join(path, 'status.log') 141 if not os.path.exists(status_log): 142 status_log = os.path.join(path, 'status') 143 if not os.path.exists(status_log): 144 return 145 146 reasons = {} 147 last_test = None 148 for line in open(status_log).readlines(): 149 try: 150 # Since we just want the status line parser, it's okay to use the 151 # version_0 parser directly; all other parsers extend it. 152 status = version_0.status_line.parse_line(line) 153 except: 154 status = None 155 156 # Assemble multi-line reasons into a single reason. 157 if not status and last_test: 158 reasons[last_test] += line 159 160 # Skip non-lines, empty lines, and successful tests. 161 if not status or not status.reason.strip() or status.status == 'GOOD': 162 continue 163 164 # Update last_test name, so we know which reason to append multi-line 165 # reasons to. 166 last_test = status.testname 167 reasons[last_test] = status.reason 168 169 return reasons 170 171 172def main(): 173 # Call the original parser. 174 parse.main() 175 176 # Results directory should be the last argument passed in. 177 results_dir = sys.argv[-1] 178 179 # Load the ChromeOS source tree location. 180 cros_src_dir = global_config.global_config.get_config_value( 181 'CROS', 'source_tree', default='') 182 183 # We want the standard Autotest parser to keep working even if we haven't 184 # been setup properly. 185 if not cros_src_dir: 186 tko_utils.dprint( 187 'Unable to load required components for site parser. Falling back' 188 ' to default parser.') 189 return 190 191 # Load ResultCollector from the ChromeOS source tree. 192 sys.path.append(os.path.join( 193 cros_src_dir, 'src/platform/crostestutils/utils_py')) 194 from generate_test_report import ResultCollector 195 196 # Collect results using the standard ChromeOS test report generator. Doing 197 # so allows us to use the same crash allow list and reporting standards the 198 # VM based test instances use. 199 # TODO(scottz): Reevaluate this code usage. crosbug.com/35282 200 results = ResultCollector().RecursivelyCollectResults(results_dir) 201 # We don't care about successful tests. We only want failed or crashing. 202 # Note: list([]) generates a copy of the dictionary, so it's safe to delete. 203 for test_status in list(results): 204 if test_status['crashes']: 205 continue 206 elif test_status['status'] == 'PASS': 207 results.remove(test_status) 208 209 # Filter results and collect logs. If we can't find a log for the test, skip 210 # it. The Emailer will fill in the blanks using Database data later. 211 filtered_results = {} 212 for test_dict in results: 213 result_log = '' 214 test_name = os.path.basename(test_dict['testdir']) 215 error = os.path.join( 216 test_dict['testdir'], 'debug', '%s.ERROR' % test_name) 217 218 # If the error log doesn't exist, we don't care about this test. 219 if not os.path.isfile(error): 220 continue 221 222 # Parse failure reason for this test. 223 for t, r in six.iteritems(parse_reason(test_dict['testdir'])): 224 # Server tests may have subtests which will each have their own 225 # reason, so display the test name for the subtest in that case. 226 if t != test_name: 227 result_log += '%s: ' % t 228 result_log += '%s\n\n' % r.strip() 229 230 # Trim results_log to last _STATUS_LOG_LIMIT lines. 231 short_result_log = '\n'.join( 232 result_log.splitlines()[-1 * _STATUS_LOG_LIMIT:]).strip() 233 234 # Let the reader know we've trimmed the log. 235 if short_result_log != result_log.strip(): 236 short_result_log = ( 237 '[...displaying only the last %d status log lines...]\n%s' % ( 238 _STATUS_LOG_LIMIT, short_result_log)) 239 240 # Pull out only the last _LOG_LIMIT lines of the file. 241 short_log = utils.system_output('tail -n %d %s' % ( 242 _ERROR_LOG_LIMIT, error)) 243 244 # Let the reader know we've trimmed the log. 245 if len(short_log.splitlines()) == _ERROR_LOG_LIMIT: 246 short_log = ( 247 '[...displaying only the last %d error log lines...]\n%s' % ( 248 _ERROR_LOG_LIMIT, short_log)) 249 250 filtered_results[test_name] = test_dict 251 filtered_results[test_name]['log'] = '%s\n\n%s' % ( 252 short_result_log, short_log) 253 254 # Generate JSON dump of results. Store in results dir. 255 json_file = open(os.path.join(results_dir, _JSON_REPORT_FILE), 'w') 256 json.dump(filtered_results, json_file) 257 json_file.close() 258 259 260if __name__ == '__main__': 261 main() 262