import json, math, os, re

from autotest_lib.tko import models, status_lib, utils as tko_utils
from autotest_lib.tko.parsers import base, version_0


class job(version_0.job):
    """Represents a job."""

    def exit_status(self):
        """Returns the string exit status of this job."""

        # Find the .autoserv_execute path.
        top_dir = tko_utils.find_toplevel_job_dir(self.dir)
        if not top_dir:
            return 'ABORT'
        execute_path = os.path.join(top_dir, '.autoserv_execute')

        # If for some reason we can't read the status code, assume disaster.
        if not os.path.exists(execute_path):
            return 'ABORT'
        lines = open(execute_path).readlines()
        if len(lines) < 2:
            return 'ABORT'
        try:
            status_code = int(lines[1])
        except ValueError:
            return 'ABORT'

        if not os.WIFEXITED(status_code):
            # Looks like a signal - an ABORT.
            return 'ABORT'
        elif os.WEXITSTATUS(status_code) != 0:
            # Looks like a non-zero exit - a failure.
            return 'FAIL'
        else:
            # Looks like exit code == 0.
            return 'GOOD'


class kernel(models.kernel):
    """Represents a kernel."""

    def __init__(self, base, patches):
        if base:
            patches = [patch(*p.split()) for p in patches]
            hashes = [p.hash for p in patches]
            kernel_hash = self.compute_hash(base, hashes)
        else:
            base = 'UNKNOWN'
            patches = []
            kernel_hash = 'UNKNOWN'
        super(kernel, self).__init__(base, patches, kernel_hash)


class test(models.test):
    """Represents a test."""

    @staticmethod
    def load_iterations(keyval_path):
        return iteration.load_from_keyval(keyval_path)


    @staticmethod
    def load_perf_values(perf_values_file):
        return perf_value_iteration.load_from_perf_values_file(
                perf_values_file)


class iteration(models.iteration):
    """Represents an iteration."""

    @staticmethod
    def parse_line_into_dicts(line, attr_dict, perf_dict):
        key, val_type, value = "", "", ""

        # Figure out what the key, value and keyval type are.
        typed_match = re.search('^([^=]*)\{(\w*)\}=(.*)$', line)
        if typed_match:
            key, val_type, value = typed_match.groups()
        else:
            # Old-fashioned untyped match, assume perf.
            untyped_match = re.search('^([^=]*)=(.*)$', line)
            if untyped_match:
                key, value = untyped_match.groups()
                val_type = 'perf'

        # Parse the actual value into a dict.
        try:
            if val_type == 'attr':
                attr_dict[key] = value
            elif val_type == 'perf':
                perf_dict[key] = float(value)
            else:
                raise ValueError
        except ValueError:
            msg = ('WARNING: line "%s" found in test '
                   'iteration keyval could not be parsed')
            msg %= line
            tko_utils.dprint(msg)


class perf_value_iteration(models.perf_value_iteration):
    """Represents a perf value iteration."""

    @staticmethod
    def parse_line_into_dict(line):
        """
        Parse a perf measurement text line into a dictionary.

        The line is assumed to be a JSON-formatted string containing key/value
        pairs, where each pair represents a piece of information associated
        with a measured perf metric:

        'description': a string description for the perf metric.
        'value': a numeric value, or list of numeric values.
        'units': the string units associated with the perf metric.
        'higher_is_better': a boolean whether a higher value is considered
            better. If False, a lower value is considered better.
        'graph': a string indicating the name of the perf dashboard graph
            on which the perf data will be displayed.
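
        For example, a line might look roughly like this (illustrative values
        only; it is wrapped here for readability, but each measurement
        occupies a single line in the file):

            {"description": "load_time", "value": [38.2, 40.1],
             "units": "ms", "higher_is_better": false, "graph": "loading"}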

        The resulting dictionary will also have a standard deviation key/value
        pair, 'stddev'. If the perf measurement value is a list of values
        instead of a single value, then the average and standard deviation of
        the list of values is computed and stored. If a single value, the
        value itself is used, and is associated with a standard deviation of 0.

        @param line: A string line of JSON text from a perf measurements output
            file.

        @return A dictionary containing the parsed perf measurement information
            along with a computed standard deviation value (key 'stddev'), or
            an empty dictionary if the inputted line cannot be parsed.
        """
        try:
            perf_dict = json.loads(line)
        except ValueError:
            msg = 'Could not parse perf measurements line as json: "%s"' % line
            tko_utils.dprint(msg)
            return {}

        def mean_and_standard_deviation(data):
            """
            Computes the mean and standard deviation of a list of numbers.

            @param data: A list of numbers.

            @return A 2-tuple (mean, standard_deviation) computed from the list
                of numbers.

            """
            n = len(data)
            if n == 0:
                return 0.0, 0.0
            if n == 1:
                return data[0], 0.0
            mean = float(sum(data)) / n
            # Divide by n-1 to compute "sample standard deviation".
            variance = sum([(elem - mean) ** 2 for elem in data]) / (n - 1)
            return mean, math.sqrt(variance)

        value = perf_dict['value']
        perf_dict['stddev'] = 0.0
        if isinstance(value, list):
            value, stddev = mean_and_standard_deviation(map(float, value))
            perf_dict['value'] = value
            perf_dict['stddev'] = stddev

        return perf_dict


class status_line(version_0.status_line):
    """Represents a status line."""

    def __init__(self, indent, status, subdir, testname, reason,
                 optional_fields):
        # Handle INFO fields.
        if status == 'INFO':
            self.type = 'INFO'
            self.indent = indent
            self.status = self.subdir = self.testname = self.reason = None
            self.optional_fields = optional_fields
        else:
            # Everything else is backwards compatible.
            super(status_line, self).__init__(indent, status, subdir,
                                              testname, reason,
                                              optional_fields)


    def is_successful_reboot(self, current_status):
        """
        Checks whether the status represents a successful reboot.

        @param current_status: A string representing the current status.

        @return True, if the status represents a successful reboot, or False
            if not.

        """
        # Make sure this is a reboot line.
        if self.testname != 'reboot':
            return False

        # Make sure this was not a failure.
        if status_lib.is_worse_than_or_equal_to(current_status, 'FAIL'):
            return False

        # It must have been a successful reboot.
        return True


    def get_kernel(self):
        # Get the base kernel version.
        fields = self.optional_fields
        base = re.sub('-autotest$', '', fields.get('kernel', ''))
        # Get a list of patches.
        patches = []
        patch_index = 0
        while ('patch%d' % patch_index) in fields:
            patches.append(fields['patch%d' % patch_index])
            patch_index += 1
        # Create a new kernel instance.
        return kernel(base, patches)


    def get_timestamp(self):
        return tko_utils.get_timestamp(self.optional_fields, 'timestamp')


# The default implementations from version 0 will do for now.
patch = version_0.patch


class parser(base.parser):
    """Represents a parser."""

    @staticmethod
    def make_job(dir):
        return job(dir)


    @staticmethod
    def make_dummy_abort(indent, subdir, testname, timestamp, reason):
        """
        Creates an abort string.

        @param indent: The number of indentation levels for the string.
        @param subdir: The subdirectory name.
        @param testname: The test name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        @return A string describing the abort.

        """
        indent = '\t' * indent
        if not subdir:
            subdir = '----'
        if not testname:
            testname = '----'

        # There is no guarantee that this will be set.
        timestamp_field = ''
        if timestamp:
            timestamp_field = '\ttimestamp=%s' % timestamp

        msg = indent + 'END ABORT\t%s\t%s%s\t%s'
        return msg % (subdir, testname, timestamp_field, reason)


    @staticmethod
    def put_back_line_and_abort(
            line_buffer, line, indent, subdir, timestamp, reason):
        """
        Appends a line to the line buffer and aborts.

        @param line_buffer: A line_buffer object.
        @param line: A line to append to the line buffer.
        @param indent: The number of indentation levels.
        @param subdir: The subdirectory name.
        @param timestamp: The timestamp value.
        @param reason: The reason string.

        """
        tko_utils.dprint('Unexpected indent regression, aborting')
        line_buffer.put_back(line)
        abort = parser.make_dummy_abort(
                indent, subdir, subdir, timestamp, reason)
        line_buffer.put_back(abort)


    def state_iterator(self, buffer):
        line = None
        new_tests = []
        job_count, boot_count = 0, 0
        min_stack_size = 0
        stack = status_lib.status_stack()
        current_kernel = kernel("", [])  # UNKNOWN
        current_status = status_lib.statuses[-1]
        current_reason = None
        started_time_stack = [None]
        subdir_stack = [None]
        running_test = None
        running_reasons = set()
        yield []  # We're ready to start running.

        # Create a RUNNING SERVER_JOB entry to represent the entire test.
        running_job = test.parse_partial_test(self.job, '----', 'SERVER_JOB',
                                              '', current_kernel,
                                              self.job.started_time)
        new_tests.append(running_job)

        while True:
            # Are we finished with parsing?
            if buffer.size() == 0 and self.finished:
                if stack.size() == 0:
                    break
                # We have status lines left on the stack;
                # we need to implicitly abort them first.
                tko_utils.dprint('\nUnexpected end of job, aborting')
                abort_subdir_stack = list(subdir_stack)
                if self.job.aborted_by:
                    reason = 'Job aborted by %s' % self.job.aborted_by
                    reason += self.job.aborted_on.strftime(
                            ' at %b %d %H:%M:%S')
                else:
                    reason = 'Job aborted unexpectedly'

                timestamp = line.optional_fields.get('timestamp')
                for i in reversed(xrange(stack.size())):
                    if abort_subdir_stack:
                        subdir = abort_subdir_stack.pop()
                    else:
                        subdir = None
                    abort = self.make_dummy_abort(
                            i, subdir, subdir, timestamp, reason)
                    buffer.put(abort)

            # Stop processing once the buffer is empty.
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # Reinitialize the per-iteration state.
            started_time = None
            finished_time = None

            # Get the next line.
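            # (For reference, a status log line is tab-separated and looks
            # roughly like the following illustrative example, not taken from
            # a real log, with one leading tab per indentation level:
            #     GOOD\t<subdir>\t<testname>\ttimestamp=<secs>\t<reason>
            # which mirrors the 'END ABORT' template in make_dummy_abort.)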
            raw_line = status_lib.clean_raw_line(buffer.get())
            tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
            line = status_line.parse_line(raw_line)
            if line is None:
                tko_utils.dprint('non-status line, ignoring')
                continue

            # Do an initial sanity check of the indentation.
            expected_indent = stack.size()
            if line.type == 'END':
                expected_indent -= 1
            if line.indent < expected_indent:
                # ABORT the current level if indentation was unexpectedly low.
                self.put_back_line_and_abort(
                        buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                        line.optional_fields.get('timestamp'), line.reason)
                continue
            elif line.indent > expected_indent:
                # Ignore the log if the indent was unexpectedly high.
                tko_utils.dprint('unexpected extra indentation, ignoring')
                continue

            # Initial line processing.
            if line.type == 'START':
                stack.start()
                started_time = line.get_timestamp()
                if (line.testname is None and line.subdir is None
                        and not running_test):
                    # We just started a client; all tests are relative to here.
                    min_stack_size = stack.size()
                    # Start a "RUNNING" CLIENT_JOB entry.
                    job_name = 'CLIENT_JOB.%d' % job_count
                    running_client = test.parse_partial_test(self.job, None,
                                                             job_name,
                                                             '', current_kernel,
                                                             started_time)
                    msg = 'RUNNING: %s\n%s\n'
                    msg %= (running_client.status, running_client.testname)
                    tko_utils.dprint(msg)
                    new_tests.append(running_client)
                elif stack.size() == min_stack_size + 1 and not running_test:
                    # We just started a new test; insert a running record.
                    running_reasons = set()
                    if line.reason:
                        running_reasons.add(line.reason)
                    running_test = test.parse_partial_test(self.job,
                                                           line.subdir,
                                                           line.testname,
                                                           line.reason,
                                                           current_kernel,
                                                           started_time)
                    msg = 'RUNNING: %s\nSubdir: %s\nTestname: %s\n%s'
                    msg %= (running_test.status, running_test.subdir,
                            running_test.testname, running_test.reason)
                    tko_utils.dprint(msg)
                    new_tests.append(running_test)
                started_time_stack.append(started_time)
                subdir_stack.append(line.subdir)
                continue
            elif line.type == 'INFO':
                fields = line.optional_fields
                # Update the current kernel if one is defined in the info.
                if 'kernel' in fields:
                    current_kernel = line.get_kernel()
                # Update the SERVER_JOB reason if one was logged for an abort.
                if 'job_abort_reason' in fields:
                    running_job.reason = fields['job_abort_reason']
                    new_tests.append(running_job)
                continue
            elif line.type == 'STATUS':
                # Update the stacks.
                if line.subdir and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                if status_lib.is_worse_than_or_equal_to(line.status,
                                                        current_status):
                    if line.reason:
                        # Update the status of a currently running test.
                        if running_test:
                            running_reasons.add(line.reason)
                            running_reasons = tko_utils.drop_redundant_messages(
                                    running_reasons)
                            sorted_reasons = sorted(running_reasons)
                            running_test.reason = ', '.join(sorted_reasons)
                            current_reason = running_test.reason
                            new_tests.append(running_test)
                            msg = 'update RUNNING reason: %s' % line.reason
                            tko_utils.dprint(msg)
                        else:
                            current_reason = line.reason
                current_status = stack.current_status()
                started_time = None
                finished_time = line.get_timestamp()
                # If this is a non-test entry there's nothing else to do.
                if line.testname is None and line.subdir is None:
                    continue
            elif line.type == 'END':
                # Grab the current subdir off of the subdir stack, or, if this
                # is the end of a job, just pop it off.
                if (line.testname is None and line.subdir is None
                        and not running_test):
                    min_stack_size = stack.size() - 1
                    subdir_stack.pop()
                else:
                    line.subdir = subdir_stack.pop()
                    if not subdir_stack[-1] and stack.size() > min_stack_size:
                        subdir_stack[-1] = line.subdir
                # Update the status, start and finished times.
                stack.update(line.status)
                current_status = stack.end()
                if stack.size() > min_stack_size:
                    stack.update(current_status)
                    current_status = stack.current_status()
                started_time = started_time_stack.pop()
                finished_time = line.get_timestamp()
                # Update the current kernel.
                if line.is_successful_reboot(current_status):
                    current_kernel = line.get_kernel()
                # Adjust the testname if this is a reboot.
                if line.testname == 'reboot' and line.subdir is None:
                    line.testname = 'boot.%d' % boot_count
            else:
                assert False

            # Have we just finished a test?
            if stack.size() <= min_stack_size:
                # If there was no testname, just use the subdir.
                if line.testname is None:
                    line.testname = line.subdir
                # If there was no testname or subdir, use 'CLIENT_JOB'.
                if line.testname is None:
                    line.testname = 'CLIENT_JOB.%d' % job_count
                    running_test = running_client
                    job_count += 1
                    if not status_lib.is_worse_than_or_equal_to(
                            current_status, 'ABORT'):
                        # A job hasn't really failed just because some of the
                        # tests it ran have.
                        current_status = 'GOOD'

                if not current_reason:
                    current_reason = line.reason
                new_test = test.parse_test(self.job,
                                           line.subdir,
                                           line.testname,
                                           current_status,
                                           current_reason,
                                           current_kernel,
                                           started_time,
                                           finished_time,
                                           running_test)
                running_test = None
                current_status = status_lib.statuses[-1]
                current_reason = None
                if new_test.testname == ('boot.%d' % boot_count):
                    boot_count += 1
                msg = 'ADD: %s\nSubdir: %s\nTestname: %s\n%s'
                msg %= (new_test.status, new_test.subdir,
                        new_test.testname, new_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(new_test)

        # The job is finished; produce the final SERVER_JOB entry and exit.
        final_job = test.parse_test(self.job, '----', 'SERVER_JOB',
                                    self.job.exit_status(), running_job.reason,
                                    current_kernel,
                                    self.job.started_time,
                                    self.job.finished_time,
                                    running_job)
        new_tests.append(final_job)
        yield new_tests