# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

29"""Generates a fake TestExpectations file consisting of flaky tests from the bot
30corresponding to the give port."""

import json
import logging
import os.path
import urllib
import urllib2

from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.models.test_expectations import TestExpectationLine


_log = logging.getLogger(__name__)


# results.json v4 format:
# {
#   'version': 4,
#   'builder name': {
#     'blinkRevision': [],
#     'tests': {
#       'directory': {  # Each path component is a dictionary.
#         'testname.html': {
#           'expected': 'FAIL',  # Expectation name.
#           'results': [],  # Run-length encoded results.
#           'times': [],
#           'bugs': [],  # Bug urls.
#         }
#       }
#     },
#     'buildNumbers': [],
#     'secondsSinceEpoch': [],
#     'chromeRevision': [],
#     'failure_map': {}  # Map from letter code to expectation name.
#   }
# }
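#
# A trimmed, illustrative payload in that format (the builder name, test path
# and letter codes below are made up for the example):
#
#   {
#     'version': 4,
#     'WebKit Linux': {
#       'tests': {
#         'fast': {'dom': {'flaky.html': {
#           'expected': 'PASS',
#           'results': [[3, 'P'], [1, 'F']],
#           'times': [[4, 0]],
#           'bugs': [],
#         }}},
#       },
#       'failure_map': {'P': 'PASS', 'F': 'TEXT'},
#     },
#   }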
class ResultsJSON(object):
    TESTS_KEY = 'tests'
    FAILURE_MAP_KEY = 'failure_map'
    RESULTS_KEY = 'results'
    EXPECTATIONS_KEY = 'expected'
    BUGS_KEY = 'bugs'
    RLE_LENGTH = 0
    RLE_VALUE = 1

    # results.json was originally designed to support multiple builders in
    # one json file, so the builder_name is needed to figure out which builder
    # this json file refers to (and thus where the results are stored).
    def __init__(self, builder_name, json_dict):
        self.builder_name = builder_name
        self._json = json_dict

    def _walk_trie(self, trie, parent_path):
        for name, value in trie.items():
            full_path = os.path.join(parent_path, name)

            # FIXME: If we ever have a test directory named self.RESULTS_KEY
            # ("results"), this logic will break!
            if self.RESULTS_KEY not in value:
                for path, results in self._walk_trie(value, full_path):
                    yield path, results
            else:
                yield full_path, value

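    # Illustrative: with the trimmed payload in the module-level comment above,
    # walk_results() flattens the per-directory trie and yields one pair,
    # ('fast/dom/flaky.html', {'expected': 'PASS', 'results': [[3, 'P'], [1, 'F']], ...}).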
    def walk_results(self):
        tests_trie = self._json[self.builder_name][self.TESTS_KEY]
        return self._walk_trie(tests_trie, parent_path='')

    def expectation_for_type(self, type_char):
        return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]

    # Knowing how to parse the run-length-encoded values in results.json
    # is a detail of this class.
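    # Illustrative: an item [3, 'F'] says the result type 'F' occurred in
    # three consecutive runs, so this method returns (3, 'F').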
    def occurances_and_type_from_result_item(self, item):
        return item[self.RLE_LENGTH], item[self.RLE_VALUE]


class BotTestExpectationsFactory(object):
    RESULTS_URL_PREFIX = 'http://test-results.appspot.com/testfile?master=ChromiumWebkit&testtype=layout-tests&name=results-small.json&builder='

    def _results_json_for_port(self, port_name, builder_category):
        if builder_category == 'deps':
            builder = builders.deps_builder_name_for_port_name(port_name)
        else:
            builder = builders.builder_name_for_port_name(port_name)

        if not builder:
            return None
        return self._results_json_for_builder(builder)

    def _results_json_for_builder(self, builder):
        results_url = self.RESULTS_URL_PREFIX + urllib.quote(builder)
        try:
            _log.debug('Fetching flakiness data from appengine.')
            return ResultsJSON(builder, json.load(urllib2.urlopen(results_url)))
        except urllib2.URLError as error:
            _log.warning('Could not retrieve flakiness data from the bot. url: %s', results_url)
            _log.warning(error)

    def expectations_for_port(self, port_name, builder_category='layout'):
        # FIXME: This only grabs the release builder's flakiness data; if we're
        # running debug, we should grab the debug builder's data instead.
        # FIXME: What should this do if there is no debug builder for a port, e.g. we have
        # no debug XP builder? Should it use the release bot or another Windows debug bot?
        # At the very least, it should log an error.
        results_json = self._results_json_for_port(port_name, builder_category)
        if not results_json:
            return None
        return BotTestExpectations(results_json)

    def expectations_for_builder(self, builder):
        results_json = self._results_json_for_builder(builder)
        if not results_json:
            return None
        return BotTestExpectations(results_json)
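
# A minimal usage sketch (illustrative: the builder name below is made up, and
# the fetch only succeeds if the app engine endpoint above is reachable):
#
#   factory = BotTestExpectationsFactory()
#   expectations = factory.expectations_for_builder('WebKit Linux')
#   if expectations:
#       for line in expectations.expectation_lines():
#           print line.name, line.expectations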


class BotTestExpectations(object):
    # FIXME: Get this from the json instead of hard-coding it.
    # 'N', 'X' and 'Y' appear to correspond to NO DATA, SKIP and NOTRUN,
    # none of which carries a pass/fail signal.
    RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y']

    # The specifiers arg is used in unittests to avoid the static dependency on builders.
    def __init__(self, results_json, specifiers=None):
        self.results_json = results_json
        self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))

    def _line_from_test_and_flaky_types_and_bug_urls(self, test_path, flaky_types, bug_urls):
        line = TestExpectationLine()
        line.original_string = test_path
        line.name = test_path
        line.filename = test_path
        line.path = test_path  # FIXME: Should this be normpath?
        line.matching_tests = [test_path]
        line.bugs = bug_urls if bug_urls else ["Bug(gardener)"]
        line.expectations = sorted(map(self.results_json.expectation_for_type, flaky_types))
        line.specifiers = self.specifiers
        return line

    def flakes_by_path(self, only_ignore_very_flaky):
        """Returns a dictionary mapping test paths to the sorted distinct
        results seen on the bot, for tests with at least two distinct results."""
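        # A typical entry in the return value (illustrative):
        #   {'fast/dom/flaky.html': ['PASS', 'TEXT']}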
        flakes_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            results_dict = entry[self.results_json.RESULTS_KEY]
            flaky_types = self._flaky_types_in_results(results_dict, only_ignore_very_flaky)
            if len(flaky_types) <= 1:
                continue
            flakes_by_path[test_path] = sorted(map(self.results_json.expectation_for_type, flaky_types))
        return flakes_by_path

    def unexpected_results_by_path(self):
        """For tests with unexpected results, returns original expectations + results."""
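        # A typical entry (illustrative): a test expected to PASS that also
        # timed out comes back as {'fast/dom/slow.html': ['PASS', 'TIMEOUT']}.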
        def exp_to_string(exp):
            return TestExpectations.EXPECTATIONS_TO_STRING[exp].upper()

        def string_to_exp(string):
            # Needs a bit more logic than the method above,
            # since a PASS is 0 and evaluates to False.
            result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
            if result is not None:
                return result
            raise ValueError(string)

        unexpected_results_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            # Expectations for this test. A missing expectation defaults to PASS.
            exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')

            # All run-length-encoded results for this test.
            results_dict = entry.get(self.results_json.RESULTS_KEY, {})

            # Set of expectations for this test.
            expectations = set(map(string_to_exp, exp_string.split(' ')))

            # Set of distinct results for this test.
            result_types = self._flaky_types_in_results(results_dict)

            # Distinct results as non-encoded strings.
            result_strings = map(self.results_json.expectation_for_type, result_types)

            # Distinct resulting expectations.
            result_exp = map(string_to_exp, result_strings)

            expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)

            additional_expectations = set(e for e in result_exp if not expected(e))

            # Test did not have unexpected results.
            if not additional_expectations:
                continue

            expectations.update(additional_expectations)
            unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
        return unexpected_results_by_path

    def expectation_lines(self, only_ignore_very_flaky=False):
        lines = []
        for test_path, entry in self.results_json.walk_results():
            results_array = entry[self.results_json.RESULTS_KEY]
            flaky_types = self._flaky_types_in_results(results_array, only_ignore_very_flaky)
            if len(flaky_types) > 1:
                bug_urls = entry.get(self.results_json.BUGS_KEY)
                line = self._line_from_test_and_flaky_types_and_bug_urls(test_path, flaky_types, bug_urls)
                lines.append(line)
        return lines

    def _flaky_types_in_results(self, run_length_encoded_results, only_ignore_very_flaky=False):
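        """Returns the distinct result types seen in the given run-length
        encoded results, skipping RESULT_TYPES_TO_IGNORE.

        With only_ignore_very_flaky, a result type must show up in at least
        two separate runs before it counts; e.g. (illustrative) for
        [[1, 'F'], [10, 'P'], [1, 'F']] this returns just ['F'], since 'P'
        only ever appears in a single run.
        """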
        results_map = {}
        seen_results = {}

        for result_item in run_length_encoded_results:
            _, result_type = self.results_json.occurances_and_type_from_result_item(result_item)
            if result_type in self.RESULT_TYPES_TO_IGNORE:
                continue

            if only_ignore_very_flaky and result_type not in seen_results:
                # Only consider a short-lived result if we've seen it more than once.
                # Otherwise, we include lots of false positives due to tests that fail
                # for a couple of runs and then start passing.
                # FIXME: Maybe we should make this more liberal and consider it a flake
                # even if we only see that failure once.
                seen_results[result_type] = True
                continue

            results_map[result_type] = True

        return results_map.keys()