import os
from xml.sax.saxutils import escape
from json import JSONEncoder

# Test result codes.

class ResultCode(object):
    """Test result codes."""

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}
    def __new__(cls, name, isFailure):
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res
    def __getnewargs__(self):
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        self.name = name
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))

PASS        = ResultCode('PASS', False)
FLAKYPASS   = ResultCode('FLAKYPASS', False)
XFAIL       = ResultCode('XFAIL', False)
FAIL        = ResultCode('FAIL', True)
XPASS       = ResultCode('XPASS', True)
UNRESOLVED  = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT     = ResultCode('TIMEOUT', True)

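# Illustrative sketch (editor's example, not part of the module): because
# ResultCode interns instances by name, reconstructing a code via pickling
# yields the same object within a given interpreter, so identity checks on
# the constants above keep working:
#
#   import pickle
#   assert pickle.loads(pickle.dumps(PASS)) is PASS
#   assert ResultCode('PASS', False) is PASS
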
# Test metric values.

class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """
    def __init__(self, value):
        # Ensure the value is serializable by trying to encode it; this raises
        # TypeError for unsupported types.
        # WARNING: The value may change before it is encoded again, and may
        #          not be encodable after the change.
        JSONEncoder().encode(value)
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value

def toMetricValue(value):
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # 'long' is only present in Python 2.
        try:
            if isinstance(value, long):
                return IntMetricValue(value)
        except NameError:
            pass

        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)


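# Illustrative sketch (editor's example): toMetricValue picks a wrapper based
# on the Python type of the value, falling back to JSONMetricValue for
# anything the JSON encoder accepts:
#
#   toMetricValue(3)          # -> IntMetricValue, formats as '3'
#   toMetricValue(0.25)       # -> RealMetricValue, formats as '0.2500'
#   toMetricValue({'n': 1})   # -> JSONMetricValue, formats as indented JSON
#   toMetricValue(object())   # -> TypeError from the JSON encoder
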
# Test results.

class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code.
        self.code = code
        # The test output.
        self.output = output
        # The wall time taken to execute the test, if timing was requested.
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and value.
        It is an error to attempt to attach a metric with the same name
        multiple times.

        The value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

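# Illustrative sketch (editor's example; the metric name is made up): a test
# format might record a timing metric on a result like this:
#
#   result = Result(PASS, output='', elapsed=0.1)
#   result.addMetric('compile_time', toMetricValue(1.25))
#   result.addMetric('compile_time', toMetricValue(2.5))  # raises ValueError
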
# Test classes.

class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        return os.path.join(self.exec_root, *components)

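# Illustrative sketch (editor's example; the roots, components, and config
# object are made up, and the joined paths assume a POSIX host):
#
#   suite = TestSuite('my-suite', '/src/tests', '/build/tests', config)
#   suite.getSourcePath(('sub', 'foo.txt'))  # -> '/src/tests/sub/foo.txt'
#   suite.getExecPath(('sub', 'foo.txt'))    # -> '/build/tests/sub/foo.txt'
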
class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path=None):
        self.suite = suite
        self.path_in_suite = path_in_suite
        self.config = config
        self.file_path = file_path
        # A list of conditions under which this test is expected to fail.
        # These can optionally be provided by test format handlers, and will
        # be honored when the test result is supplied.
        self.xfails = []
        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise TypeError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        if self.isExpectedToFail():
            if self.result.code == PASS:
                self.result.code = XPASS
            elif self.result.code == FAIL:
                self.result.code = XFAIL

    def getFullName(self):
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test's xfails property, which
        for some test formats may not be computed until the test has first
        been executed.
        """

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is an exact match for one of the features, it fails.
            if item in self.config.available_features:
                return True

            # If this is part of the target triple, it fails.
            if item and item in self.suite.config.target_triple:
                return True

        return False

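    # Illustrative sketch (editor's example; the feature name is made up): a
    # test whose xfails list matches an available feature has its raw result
    # remapped when the result is attached:
    #
    #   test = Test(suite, ('sub', 'foo.txt'), config)
    #   test.xfails = ['some-feature']
    #   # assuming 'some-feature' is in config.available_features:
    #   test.setResult(Result(FAIL))
    #   assert test.result.code is XFAIL  # expected failure, not a failure
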
    def isEarlyTest(self):
        """
        isEarlyTest() -> bool

        Check whether this test should be executed early in a particular run.
        This can be used for test suites with long-running tests to maximize
        parallelism, or where it is desirable to surface their failures early.
        """
        return self.suite.config.is_early

    def getJUnitXML(self):
        test_name = self.path_in_suite[-1]
        test_path = self.path_in_suite[:-1]
        safe_test_path = [x.replace(".", "_") for x in test_path]
        safe_name = self.suite.name.replace(".", "-")

        if safe_test_path:
            class_name = safe_name + "." + "/".join(safe_test_path)
        else:
            class_name = safe_name + "." + safe_name

        xml = "<testcase classname='" + class_name + "' name='" + \
            test_name + "'"
        if self.result.elapsed is not None:
            xml += " time='%.2f'" % (self.result.elapsed,)
        if self.result.code.isFailure:
            xml += ">\n\t<failure>\n" + escape(self.result.output)
            xml += "\n\t</failure>\n</testcase>"
        else:
            xml += "/>"
        return xml

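    # Illustrative sketch (editor's example; suite and test names are made
    # up): for a passing test 'sub/foo.txt' in a suite named 'my-suite' that
    # took 0.10 seconds, getJUnitXML() emits a self-closing element roughly
    # like:
    #
    #   <testcase classname='my-suite.sub' name='foo.txt' time='0.10'/>
    #
    # Failing results instead nest the escaped test output inside a <failure>
    # element before closing the testcase.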