import os
from xml.sax.saxutils import quoteattr
from json import JSONEncoder

from lit.BooleanExpression import BooleanExpression

# Test result codes.

class ResultCode(object):
    """Test result codes."""

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, isFailure):
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        self.name = name
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))

PASS = ResultCode('PASS', False)
FLAKYPASS = ResultCode('FLAKYPASS', False)
XFAIL = ResultCode('XFAIL', False)
FAIL = ResultCode('FAIL', True)
XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)

# Test metric values.

class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of
        the console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON
        test output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """
    def __init__(self, value):
        # Ensure the value is serializable by trying to encode it.
        # WARNING: The value may change before it is encoded again, and may
        # not be encodable after the change.
        try:
            e = JSONEncoder()
            e.encode(value)
        except TypeError:
            raise
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value

def toMetricValue(value):
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # 'long' is only present in Python 2.
        try:
            if isinstance(value, long):
                return IntMetricValue(value)
        except NameError:
            pass

        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)
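
# Usage sketch (illustrative only; everything referenced is defined above):
#
#   toMetricValue(3).format()        # '3'        -- IntMetricValue
#   toMetricValue(2.5).format()      # '2.5000'   -- RealMetricValue
#   toMetricValue([1, 2]).format()   # JSON text  -- JSONMetricValue
#   toMetricValue(object())          # raises TypeError (not serializable)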

# Test results.

class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code.
        self.code = code
        # The test output.
        self.output = output
        # The wall time taken to execute the test, if timing.
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}
        # The micro-test results reported by this test.
        self.microResults = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. It is an error to attempt to attach a metric with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

    def addMicroResult(self, name, microResult):
        """
        addMicroResult(name, microResult)

        Attach a micro-test result to the test result, with the given name
        and result. It is an error to attempt to attach a micro-test with
        the same name multiple times.

        Each micro-test result must be an instance of the Result class.
        """
        if name in self.microResults:
            raise ValueError("result already includes microResult for %r" % (
                name,))
        if not isinstance(microResult, Result):
            raise TypeError("unexpected microResult value: %r" % (
                microResult,))
        self.microResults[name] = microResult
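
# Usage sketch (illustrative; the metric and micro-test names below are
# hypothetical):
#
#   result = Result(PASS, output='', elapsed=1.5)
#   result.addMetric('compile_time', RealMetricValue(0.25))
#   result.addMicroResult('subtest-a', Result(PASS, elapsed=0.1))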

# Test classes.

class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        return os.path.join(self.exec_root, *components)

class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path=None):
        self.suite = suite
        self.path_in_suite = path_in_suite
        self.config = config
        self.file_path = file_path

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features and target
        # triple parts. These can optionally be provided by test format
        # handlers, and will be honored when the test result is supplied.
        self.xfails = []

        # A list of conditions that must be satisfied before running the
        # test. Each condition is a boolean expression of features. All of
        # them must be True for the test to run.
        # FIXME: should target triple parts count here too?
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features and target
        # triple parts. All of them must be False for the test to run.
        self.unsupported = []

        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise ValueError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        try:
            if self.isExpectedToFail():
                if self.result.code == PASS:
                    self.result.code = XPASS
                elif self.result.code == FAIL:
                    self.result.code = XFAIL
        except ValueError as e:
            # Syntax error in an XFAIL line.
            self.result.code = UNRESOLVED
            self.result.output = str(e)

    def getFullName(self):
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test's xfails property,
        which some test formats may not compute until the test has first
        been executed.
        Throws ValueError if an XFAIL line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        # Check if any of the xfails match an available feature or the
        # target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is a True expression of features and target triple
            # parts, it fails.
            try:
                if BooleanExpression.evaluate(item, features, triple):
                    return True
            except ValueError as e:
                raise ValueError('Error in XFAIL list:\n%s' % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
           features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """

        if not self.config.limit_to_features:
            return True  # No limits. Run it.

        # Check the requirements as-is (#1).
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2).
        featuresMinusLimits = [f for f in self.config.available_features
                               if f not in self.config.limit_to_features]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        try:
            return [item for item in self.requires
                    if not BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in REQUIRES list:\n%s' % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.
        Throws ValueError if a REQUIRES line has a syntax error.
        """

        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)
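
    # Sketch of how the xfails/requires/unsupported lists interact with the
    # methods in this class (illustrative; the feature names below are
    # hypothetical). Each entry is a boolean expression that
    # BooleanExpression.evaluate() checks against the available features
    # (and, for xfails and unsupported, the target triple):
    #
    #   test.xfails = ['*']                # expected to fail everywhere
    #   test.xfails = ['asan && windows']  # expected to fail only when both
    #                                      # terms match
    #   test.requires = ['shell']          # if missing, reported by
    #                                      # getMissingRequiredFeatures()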

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features or target triple.
        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        try:
            return [item for item in self.unsupported
                    if BooleanExpression.evaluate(item, features, triple)]
        except ValueError as e:
            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))

    def isEarlyTest(self):
        """
        isEarlyTest() -> bool

        Check whether this test should be executed early in a particular run.
        This can be used for test suites with long-running tests to maximize
        parallelism, or where it is desirable to surface their failures
        early.
        """
        return self.suite.config.is_early

    def writeJUnitXML(self, fil):
        """Write the test's JUnit XML representation to a file handle."""
        test_name = quoteattr(self.path_in_suite[-1])
        test_path = self.path_in_suite[:-1]
        safe_test_path = [x.replace(".", "_") for x in test_path]
        safe_name = self.suite.name.replace(".", "-")

        if safe_test_path:
            class_name = safe_name + "." + "/".join(safe_test_path)
        else:
            class_name = safe_name + "." + safe_name
        class_name = quoteattr(class_name)
        testcase_template = ('<testcase classname={class_name} '
                             'name={test_name} time="{time:.2f}"')
        elapsed_time = (self.result.elapsed
                        if self.result.elapsed is not None else 0.0)
        testcase_xml = testcase_template.format(class_name=class_name,
                                                test_name=test_name,
                                                time=elapsed_time)
        fil.write(testcase_xml)
        if self.result.code.isFailure:
            fil.write(">\n\t<failure><![CDATA[")
            # In Python 2, 'str' and 'unicode' are distinct types; in
            # Python 3, 'unicode' does not exist and 'bytes' is the distinct
            # type instead.
            if isinstance(self.result.output, str):
                encoded_output = self.result.output
            elif isinstance(self.result.output, bytes):
                encoded_output = self.result.output.decode("utf-8", 'ignore')
            else:
                encoded_output = self.result.output.encode("utf-8", 'ignore')
            # In the unlikely case that the output contains the CDATA
            # terminator, we wrap it by closing the block and opening a new
            # one.
            fil.write(encoded_output.replace("]]>", "]]]]><![CDATA[>"))
            fil.write("]]></failure>\n</testcase>")
        elif self.result.code == UNSUPPORTED:
            unsupported_features = self.getMissingRequiredFeatures()
            if unsupported_features:
                skip_message = ("Skipping because of: " +
                                ", ".join(unsupported_features))
            else:
                skip_message = "Skipping because of configuration."

            fil.write(">\n\t<skipped message={} />\n</testcase>\n".format(
                quoteattr(skip_message)))
        else:
            fil.write("/>")
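
# Usage sketch (illustrative; `test` is a fully constructed Test whose result
# has been set, and the file name is hypothetical). writeJUnitXML() emits a
# single <testcase> element; the caller is responsible for wrapping the
# output in enclosing <testsuite>/<testsuites> elements:
#
#   with open('results.xml', 'w') as f:
#       test.writeJUnitXML(f)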