• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2# Copyright (C) 2010 Google Inc. All rights reserved.
3#
4# Redistribution and use in source and binary forms, with or without
5# modification, are permitted provided that the following conditions are
6# met:
7#
8#     * Redistributions of source code must retain the above copyright
9# notice, this list of conditions and the following disclaimer.
10#     * Redistributions in binary form must reproduce the above
11# copyright notice, this list of conditions and the following disclaimer
12# in the documentation and/or other materials provided with the
13# distribution.
14#     * Neither the name of Google Inc. nor the names of its
15# contributors may be used to endorse or promote products derived from
16# this software without specific prior written permission.
17#
18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30"""A helper class for reading in and dealing with tests expectations
31for layout tests.
32"""
33
34import logging
35import os
36import re
37import sys
38import time
39
40import simplejson
41
# Test expectation and modifier constants. Assigning sequential integers
# via range() gives each named state a distinct, comparable value.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
 DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)

# Test expectation file update action constants, returned by
# TestExpectationsFile._get_platform_update_action() to describe how a
# line in the expectations file should be rewritten.
(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
48
49
class TestExpectations:
    TEST_LIST = "test_expectations.txt"

    def __init__(self, port, tests, expectations, test_platform_name,
                 is_debug_mode, is_lint_mode, tests_are_present=True):
        """Loads and parses the test expectations given in the string.

        Args:
            port: handle to object containing platform-specific functionality
            tests: list of all of the test files
            expectations: test expectations as a string
            test_platform_name: name of the platform to match expectations
                against. Note that this may be different than
                port.test_platform_name() when is_lint_mode is True.
            is_debug_mode: whether to use the DEBUG or RELEASE modifiers
                in the expectations
            is_lint_mode: if True, just parse the expectations string
                looking for errors.
            tests_are_present: whether the test files exist in the file
                system and can be probed for. This is useful for
                distinguishing test files from directories, and is needed
                by the LTTF dashboard, where the files aren't actually
                locally present.
        """
        self._expected_failures = TestExpectationsFile(
            port, expectations, tests, test_platform_name, is_debug_mode,
            is_lint_mode, tests_are_present=tests_are_present)

    # TODO(ojan): Allow for removing skipped tests when getting the list of
    # tests to run, but not when getting metrics.
    # TODO(ojan): Replace the Get* calls here with the more sane API exposed
    # by TestExpectationsFile below. Maybe merge the two classes entirely?

    def get_expectations_json_for_all_platforms(self):
        """Returns the all-platform expectations serialized as JSON."""
        return (
            self._expected_failures.get_expectations_json_for_all_platforms())

    def get_rebaselining_failures(self):
        """Returns the union of all tests marked REBASELINE with any of
        the failing expectations (FAIL, IMAGE, TEXT, IMAGE+TEXT)."""
        failures = set()
        for expectation in (FAIL, IMAGE, TEXT, IMAGE_PLUS_TEXT):
            failures |= self._expected_failures.get_test_set(REBASELINE,
                                                             expectation)
        return failures

    def get_options(self, test):
        """Returns the raw option strings listed for the given test."""
        return self._expected_failures.get_options(test)

    def get_expectations(self, test):
        """Returns the set of expectation constants for the given test."""
        return self._expected_failures.get_expectations(test)

    def get_expectations_string(self, test):
        """Returns the expectations for the given test as an uppercase
        string. If there are no expectations for the test, then "PASS"
        is returned."""
        names = []
        for expectation in self.get_expectations(test):
            # Reverse-map the constant back to its keyword.
            for keyword, value in TestExpectationsFile.EXPECTATIONS.items():
                if value == expectation:
                    names.append(keyword)
                    break
        return " ".join(names).upper()

    def get_timeline_for_test(self, test):
        """Returns the timeline constant (WONTFIX/DEFER/NOW) for a test."""
        return self._expected_failures.get_timeline_for_test(test)

    def get_tests_with_result_type(self, result_type):
        """Returns the set of tests with the given result type."""
        return self._expected_failures.get_tests_with_result_type(result_type)

    def get_tests_with_timeline(self, timeline):
        """Returns the set of tests with the given timeline."""
        return self._expected_failures.get_tests_with_timeline(timeline)

    def matches_an_expected_result(self, test, result):
        """Returns whether we got one of the expected results for this test."""
        expected = self._expected_failures.get_expectations(test)
        if result in expected:
            return True
        # FAIL acts as a wildcard for the three concrete failure kinds.
        if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected:
            return True
        if result == MISSING and self.is_rebaselining(test):
            return True
        return result == SKIP and self._expected_failures.has_modifier(test,
                                                                       SKIP)

    def is_rebaselining(self, test):
        """Returns whether the test carries the REBASELINE modifier."""
        return self._expected_failures.has_modifier(test, REBASELINE)

    def has_modifier(self, test, modifier):
        """Returns whether the test carries the given modifier constant."""
        return self._expected_failures.has_modifier(test, modifier)

    def remove_platform_from_file(self, tests, platform, backup=False):
        """Delegates to TestExpectationsFile.remove_platform_from_file."""
        return self._expected_failures.remove_platform_from_file(tests,
                                                                 platform,
                                                                 backup)
140
141
def strip_comments(line):
    """Strips a trailing '//' comment from a line of the expectations file.

    Returns None if nothing remains after removing the comment and
    surrounding whitespace; otherwise returns the remaining text with
    leading and trailing spaces removed and all other runs of whitespace
    collapsed to single spaces.
    """
    comment_index = line.find('//')
    # Bug fix: this previously compared with 'is -1', which relies on
    # CPython's small-integer caching; use value equality instead.
    if comment_index == -1:
        comment_index = len(line)

    line = re.sub(r'\s+', ' ', line[:comment_index].strip())
    if line == '':
        return None
    return line
156
157
class ModifiersAndExpectations:
    """Record pairing a test's modifier string with its expectation string.

    Instances hold the raw (uppercased, space-joined) strings as listed in
    the expectations file and serialize to JSON.
    """

    def __init__(self, modifiers, expectations):
        self.expectations = expectations
        self.modifiers = modifiers
165
166
class ExpectationsJsonEncoder(simplejson.JSONEncoder):
    """JSON encoder that can handle ModifiersAndExpectations objects."""

    def default(self, obj):
        # Serialize our record type as a plain dict; defer everything
        # else to the base class (which raises TypeError for types it
        # does not know how to encode).
        if isinstance(obj, ModifiersAndExpectations):
            return {"modifiers": obj.modifiers,
                    "expectations": obj.expectations}
        # Bug fix: this previously referenced the bare name JSONEncoder,
        # which is never imported and raised NameError instead of
        # delegating to the base class.
        return simplejson.JSONEncoder.default(self, obj)
177
178
179class TestExpectationsFile:
180    """Test expectation files consist of lines with specifications of what
181    to expect from layout test cases. The test cases can be directories
182    in which case the expectations apply to all test cases in that
183    directory and any subdirectory. The format of the file is along the
184    lines of:
185
186      LayoutTests/fast/js/fixme.js = FAIL
187      LayoutTests/fast/js/flaky.js = FAIL PASS
188      LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
189      ...
190
191    To add other options:
192      SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
193      DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
194      DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
195      LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
196      DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
197
198    SKIP: Doesn't run the test.
199    SLOW: The test takes a long time to run, but does not timeout indefinitely.
200    WONTFIX: For tests that we never intend to pass on a given platform.
201    DEFER: Test does not count in our statistics for the current release.
202    DEBUG: Expectations apply only to the debug build.
203    RELEASE: Expectations apply only to release build.
204    LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
205        platforms.
206
207    Notes:
208      -A test cannot be both SLOW and TIMEOUT
209      -A test cannot be both DEFER and WONTFIX
210      -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
211       a migratory state that currently means either IMAGE, TEXT, or
212       IMAGE+TEXT. Once we have finished migrating the expectations, we will
213       change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
214       identifier.
215      -A test can be included twice, but not via the same path.
216      -If a test is included twice, then the more precise path wins.
217      -CRASH tests cannot be DEFER or WONTFIX
218    """
219
    # Maps the lowercase expectation keywords that may appear to the right
    # of the '=' in an expectations line to their constants.
    EXPECTATIONS = {'pass': PASS,
                    'fail': FAIL,
                    'text': TEXT,
                    'image': IMAGE,
                    'image+text': IMAGE_PLUS_TEXT,
                    'timeout': TIMEOUT,
                    'crash': CRASH,
                    'missing': MISSING}

    # Human-readable (singular, plural) descriptions for each outcome,
    # keyed by constant.
    EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
                                PASS: ('pass', 'passes'),
                                FAIL: ('failure', 'failures'),
                                TEXT: ('text diff mismatch',
                                       'text diff mismatch'),
                                IMAGE: ('image mismatch', 'image mismatch'),
                                IMAGE_PLUS_TEXT: ('image and text mismatch',
                                                  'image and text mismatch'),
                                CRASH: ('test shell crash',
                                        'test shell crashes'),
                                TIMEOUT: ('test timed out', 'tests timed out'),
                                MISSING: ('no expected result found',
                                          'no expected results found')}

    # Ordering of the expectation constants. NOTE(review): its consumer is
    # not visible in this chunk; presumably a display/priority order --
    # confirm before relying on it.
    EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
       TEXT, IMAGE, FAIL, SKIP)

    # Lowercase build-type options recognized in the options section of a
    # line (see _has_valid_modifiers_for_current_platform).
    BUILD_TYPES = ('debug', 'release')

    # Maps lowercase modifier keywords to their constants.
    MODIFIERS = {'skip': SKIP,
                 'wontfix': WONTFIX,
                 'defer': DEFER,
                 'slow': SLOW,
                 'rebaseline': REBASELINE,
                 'none': NONE}

    # Maps keywords to the timeline constants used to bucket tests by when
    # they are expected to be addressed.
    TIMELINES = {'wontfix': WONTFIX,
                 'now': NOW,
                 'defer': DEFER}

    # Maps keywords to the coarse result-type buckets used for reporting.
    RESULT_TYPES = {'skip': SKIP,
                    'pass': PASS,
                    'fail': FAIL,
                    'flaky': FLAKY}
263
    def __init__(self, port, expectations, full_test_list, test_platform_name,
        is_debug_mode, is_lint_mode, suppress_errors=False,
        tests_are_present=True):
        """Parses the expectations and builds the lookup tables used by
        the accessors below.

        Args:
            port: handle to object containing platform-specific
                functionality.
            expectations: contents of the expectations file.
            full_test_list: the list of all tests to be run pending
                processing of the expectations for those tests.
            test_platform_name: name of the platform to match expectations
                against. Note that this may be different than
                port.test_platform_name() when is_lint_mode is True.
            is_debug_mode: whether we are testing a test_shell built in
                debug mode.
            is_lint_mode: whether this is just linting
                test_expectations.txt.
            suppress_errors: whether to suppress lint errors.
            tests_are_present: whether the test files are present in the
                local filesystem. The LTTF Dashboard uses False here to
                avoid having to keep a local copy of the tree.

        Raises:
            SyntaxError: from _read(), if fatal parse errors were found
                and suppress_errors is False.
        """

        self._port = port
        self._expectations = expectations
        self._full_test_list = full_test_list
        self._test_platform_name = test_platform_name
        self._is_debug_mode = is_debug_mode
        self._is_lint_mode = is_lint_mode
        self._tests_are_present = tests_are_present
        self._suppress_errors = suppress_errors
        # Fatal and non-fatal messages accumulated while parsing; see
        # _read() for how they are reported.
        self._errors = []
        self._non_fatal_errors = []

        # Maps relative test paths as listed in the expectations file to a
        # list of maps containing modifiers and expectations for each time
        # the test is listed in the expectations file.
        self._all_expectations = {}

        # Maps a test to its list of expectations.
        self._test_to_expectations = {}

        # Maps a test to its list of options (string values)
        self._test_to_options = {}

        # Maps a test to its list of modifiers: the constants associated with
        # the options minus any bug or platform strings
        self._test_to_modifiers = {}

        # Maps a test to the base path that it was listed with in the list.
        self._test_list_paths = {}

        # Reverse indexes: constant -> set of tests carrying it.
        self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
        self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS)
        self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
        self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)

        # Parse immediately; all of the tables above must exist first.
        self._read(self._get_iterable_expectations())
317
318    def _dict_of_sets(self, strings_to_constants):
319        """Takes a dict of strings->constants and returns a dict mapping
320        each constant to an empty set."""
321        d = {}
322        for c in strings_to_constants.values():
323            d[c] = set()
324        return d
325
326    def _get_iterable_expectations(self):
327        """Returns an object that can be iterated over. Allows for not caring
328        about whether we're iterating over a file or a new-line separated
329        string."""
330        iterable = [x + "\n" for x in
331            self._expectations.split("\n")]
332        # Strip final entry if it's empty to avoid added in an extra
333        # newline.
334        if iterable[-1] == "\n":
335            return iterable[:-1]
336        return iterable
337
338    def get_test_set(self, modifier, expectation=None, include_skips=True):
339        if expectation is None:
340            tests = self._modifier_to_tests[modifier]
341        else:
342            tests = (self._expectation_to_tests[expectation] &
343                self._modifier_to_tests[modifier])
344
345        if not include_skips:
346            tests = tests - self.get_test_set(SKIP, expectation)
347
348        return tests
349
    def get_tests_with_result_type(self, result_type):
        """Returns the set of tests bucketed under the given result type
        (one of the RESULT_TYPES constants)."""
        return self._result_type_to_tests[result_type]

    def get_tests_with_timeline(self, timeline):
        """Returns the set of tests bucketed under the given timeline
        (one of the TIMELINES constants)."""
        return self._timeline_to_tests[timeline]

    def get_options(self, test):
        """This returns the entire set of options for the given test
        (the modifiers plus the BUGXXXX identifier). This is used by the
        LTTF dashboard."""
        return self._test_to_options[test]

    def has_modifier(self, test, modifier):
        """Returns whether the test carries the given modifier constant."""
        return test in self._modifier_to_tests[modifier]

    def get_expectations(self, test):
        """Returns the expectations recorded for the given test."""
        return self._test_to_expectations[test]

    def get_expectations_json_for_all_platforms(self):
        """Returns the raw per-line expectations for every platform,
        encoded as JSON for the dashboard."""
        # Specify separators in order to get compact encoding.
        return ExpectationsJsonEncoder(separators=(',', ':')).encode(
            self._all_expectations)

    def contains(self, test):
        """Returns whether any expectation was recorded for the test."""
        return test in self._test_to_expectations
375
376    def remove_platform_from_file(self, tests, platform, backup=False):
377        """Remove the platform option from test expectations file.
378
379        If a test is in the test list and has an option that matches the given
380        platform, remove the matching platform and save the updated test back
381        to the file. If no other platforms remaining after removal, delete the
382        test from the file.
383
384        Args:
385          tests: list of tests that need to update..
386          platform: which platform option to remove.
387          backup: if true, the original test expectations file is saved as
388                  [self.TEST_LIST].orig.YYYYMMDDHHMMSS
389
390        Returns:
391          no
392        """
393
394        # FIXME - remove_platform_from file worked by writing a new
395        # test_expectations.txt file over the old one. Now that we're just
396        # parsing strings, we need to change this to return the new
397        # expectations string.
398        raise NotImplementedException('remove_platform_from_file')
399
400        new_file = self._path + '.new'
401        logging.debug('Original file: "%s"', self._path)
402        logging.debug('New file: "%s"', new_file)
403        f_orig = self._get_iterable_expectations()
404        f_new = open(new_file, 'w')
405
406        tests_removed = 0
407        tests_updated = 0
408        lineno = 0
409        for line in f_orig:
410            lineno += 1
411            action = self._get_platform_update_action(line, lineno, tests,
412                                                      platform)
413            if action == NO_CHANGE:
414                # Save the original line back to the file
415                logging.debug('No change to test: %s', line)
416                f_new.write(line)
417            elif action == REMOVE_TEST:
418                tests_removed += 1
419                logging.info('Test removed: %s', line)
420            elif action == REMOVE_PLATFORM:
421                parts = line.split(':')
422                new_options = parts[0].replace(platform.upper() + ' ', '', 1)
423                new_line = ('%s:%s' % (new_options, parts[1]))
424                f_new.write(new_line)
425                tests_updated += 1
426                logging.info('Test updated: ')
427                logging.info('  old: %s', line)
428                logging.info('  new: %s', new_line)
429            elif action == ADD_PLATFORMS_EXCEPT_THIS:
430                parts = line.split(':')
431                new_options = parts[0]
432                for p in self._port.test_platform_names():
433                    p = p.upper()
434                    # This is a temp solution for rebaselining tool.
435                    # Do not add tags WIN-7 and WIN-VISTA to test expectations
436                    # if the original line does not specify the platform
437                    # option.
438                    # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
439                    # reliable Win 7 and Win Vista buildbots setup.
440                    if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
441                        new_options += p + ' '
442                new_line = ('%s:%s' % (new_options, parts[1]))
443                f_new.write(new_line)
444                tests_updated += 1
445                logging.info('Test updated: ')
446                logging.info('  old: %s', line)
447                logging.info('  new: %s', new_line)
448            else:
449                logging.error('Unknown update action: %d; line: %s',
450                              action, line)
451
452        logging.info('Total tests removed: %d', tests_removed)
453        logging.info('Total tests updated: %d', tests_updated)
454
455        f_orig.close()
456        f_new.close()
457
458        if backup:
459            date_suffix = time.strftime('%Y%m%d%H%M%S',
460                                        time.localtime(time.time()))
461            backup_file = ('%s.orig.%s' % (self._path, date_suffix))
462            if os.path.exists(backup_file):
463                os.remove(backup_file)
464            logging.info('Saving original file to "%s"', backup_file)
465            os.rename(self._path, backup_file)
466        else:
467            os.remove(self._path)
468
469        logging.debug('Saving new file to "%s"', self._path)
470        os.rename(new_file, self._path)
471        return True
472
473    def parse_expectations_line(self, line, lineno):
474        """Parses a line from test_expectations.txt and returns a tuple
475        with the test path, options as a list, expectations as a list."""
476        line = strip_comments(line)
477        if not line:
478            return (None, None, None)
479
480        options = []
481        if line.find(":") is -1:
482            test_and_expectation = line.split("=")
483        else:
484            parts = line.split(":")
485            options = self._get_options_list(parts[0])
486            test_and_expectation = parts[1].split('=')
487
488        test = test_and_expectation[0].strip()
489        if (len(test_and_expectation) is not 2):
490            self._add_error(lineno, "Missing expectations.",
491                           test_and_expectation)
492            expectations = None
493        else:
494            expectations = self._get_options_list(test_and_expectation[1])
495
496        return (test, options, expectations)
497
498    def _get_platform_update_action(self, line, lineno, tests, platform):
499        """Check the platform option and return the action needs to be taken.
500
501        Args:
502          line: current line in test expectations file.
503          lineno: current line number of line
504          tests: list of tests that need to update..
505          platform: which platform option to remove.
506
507        Returns:
508          NO_CHANGE: no change to the line (comments, test not in the list etc)
509          REMOVE_TEST: remove the test from file.
510          REMOVE_PLATFORM: remove this platform option from the test.
511          ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
512        """
513        test, options, expectations = self.parse_expectations_line(line,
514                                                                   lineno)
515        if not test or test not in tests:
516            return NO_CHANGE
517
518        has_any_platform = False
519        for option in options:
520            if option in self._port.test_platform_names():
521                has_any_platform = True
522                if not option == platform:
523                    return REMOVE_PLATFORM
524
525        # If there is no platform specified, then it means apply to all
526        # platforms. Return the action to add all the platforms except this
527        # one.
528        if not has_any_platform:
529            return ADD_PLATFORMS_EXCEPT_THIS
530
531        return REMOVE_TEST
532
    def _has_valid_modifiers_for_current_platform(self, options, lineno,
        test_and_expectations, modifiers):
        """Returns true if the current platform is in the options list or if
        no platforms are listed and if there are no fatal errors in the
        options list.

        Args:
          options: List of lowercase options.
          lineno: The line in the file where the test is listed.
          test_and_expectations: The path and expectations for the test.
          modifiers: The set to populate with modifiers (an output
              parameter; only MODIFIERS keywords are added to it).
        """
        has_any_platform = False
        has_bug_id = False
        for option in options:
            if option in self.MODIFIERS:
                modifiers.add(option)
            elif option in self._port.test_platform_names():
                has_any_platform = True
            elif option.startswith('bug'):
                has_bug_id = True
            elif option not in self.BUILD_TYPES:
                # Not a modifier, platform, bug id, or build type: a typo
                # in the expectations file, reported as a fatal error.
                self._add_error(lineno, 'Invalid modifier for test: %s' %
                                option, test_and_expectations)

        # If platforms are listed but none prefix-matches ours, the line
        # does not apply to this run.
        if has_any_platform and not self._match_platform(options):
            return False

        if not has_bug_id and 'wontfix' not in options:
            # TODO(ojan): Turn this into an AddError call once all the
            # tests have BUG identifiers.
            self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
                test_and_expectations)

        # Build-type filtering: if either build type is named, the line
        # only applies when it names the build we are running.
        if 'release' in options or 'debug' in options:
            if self._is_debug_mode and 'debug' not in options:
                return False
            if not self._is_debug_mode and 'release' not in options:
                return False

        if 'wontfix' in options and 'defer' in options:
            self._add_error(lineno, 'Test cannot be both DEFER and WONTFIX.',
                test_and_expectations)

        if self._is_lint_mode and 'rebaseline' in options:
            self._add_error(lineno,
                'REBASELINE should only be used for running rebaseline.py. '
                'Cannot be checked in.', test_and_expectations)

        return True
583
584    def _match_platform(self, options):
585        """Match the list of options against our specified platform. If any
586        of the options prefix-match self._platform, return True. This handles
587        the case where a test is marked WIN and the platform is WIN-VISTA.
588
589        Args:
590          options: list of options
591        """
592        for opt in options:
593            if self._test_platform_name.startswith(opt):
594                return True
595        return False
596
597    def _add_to_all_expectations(self, test, options, expectations):
598        # Make all paths unix-style so the dashboard doesn't need to.
599        test = test.replace('\\', '/')
600        if not test in self._all_expectations:
601            self._all_expectations[test] = []
602        self._all_expectations[test].append(
603            ModifiersAndExpectations(options, expectations))
604
    def _read(self, expectations):
        """For each test in an expectations iterable, generate the
        expectations for it.

        Populates all of the lookup tables built in __init__, reports any
        accumulated errors (raising SyntaxError for fatal ones unless
        suppressed), and finally registers a default PASS expectation for
        every test not mentioned in the file.
        """
        lineno = 0
        for line in expectations:
            lineno += 1

            # NOTE: 'expectations' is rebound below; from here on it is
            # the per-line list of expectation keywords, not the argument.
            test_list_path, options, expectations = \
                self.parse_expectations_line(line, lineno)
            if not expectations:
                continue

            # Record the raw strings for the dashboard before any
            # platform/build filtering is applied.
            self._add_to_all_expectations(test_list_path,
                                          " ".join(options).upper(),
                                          " ".join(expectations).upper())

            modifiers = set()
            if options and not self._has_valid_modifiers_for_current_platform(
                options, lineno, test_list_path, modifiers):
                continue

            # Rebind again: keywords -> set of expectation constants.
            expectations = self._parse_expectations(expectations, lineno,
                test_list_path)

            if 'slow' in options and TIMEOUT in expectations:
                self._add_error(lineno,
                    'A test can not be both slow and timeout. If it times out '
                    'indefinitely, then it should be just timeout.',
                    test_list_path)

            full_path = os.path.join(self._port.layout_tests_dir(),
                                     test_list_path)
            full_path = os.path.normpath(full_path)
            # WebKit's way of skipping tests is to add a -disabled suffix.
            # So we should consider the path existing if the path or the
            # -disabled version exists.
            if (self._tests_are_present and not os.path.exists(full_path)
                and not os.path.exists(full_path + '-disabled')):
                # Log a non fatal error here since you hit this case any
                # time you update test_expectations.txt without syncing
                # the LayoutTests directory
                self._log_non_fatal_error(lineno, 'Path does not exist.',
                                       test_list_path)
                continue

            # Without a full test list we cannot expand directories, so
            # the listed path stands for itself.
            if not self._full_test_list:
                tests = [test_list_path]
            else:
                tests = self._expand_tests(test_list_path)

            self._add_tests(tests, expectations, test_list_path, lineno,
                           modifiers, options)

        if not self._suppress_errors and (
            len(self._errors) or len(self._non_fatal_errors)):
            if self._is_debug_mode:
                build_type = 'DEBUG'
            else:
                build_type = 'RELEASE'
            print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
                % (self._test_platform_name.upper(), build_type)

            # Non-fatal errors are only logged; fatal ones abort parsing.
            for error in self._non_fatal_errors:
                logging.error(error)
            if len(self._errors):
                raise SyntaxError('\n'.join(map(str, self._errors)))

        # Now add in the tests that weren't present in the expectations file
        expectations = set([PASS])
        options = []
        modifiers = []
        if self._full_test_list:
            for test in self._full_test_list:
                if not test in self._test_list_paths:
                    self._add_test(test, modifiers, expectations, options)
680
681    def _get_options_list(self, listString):
682        return [part.strip().lower() for part in listString.strip().split(' ')]
683
684    def _parse_expectations(self, expectations, lineno, test_list_path):
685        result = set()
686        for part in expectations:
687            if not part in self.EXPECTATIONS:
688                self._add_error(lineno, 'Unsupported expectation: %s' % part,
689                    test_list_path)
690                continue
691            expectation = self.EXPECTATIONS[part]
692            result.add(expectation)
693        return result
694
695    def _expand_tests(self, test_list_path):
696        """Convert the test specification to an absolute, normalized
697        path and make sure directories end with the OS path separator."""
698        path = os.path.join(self._port.layout_tests_dir(), test_list_path)
699        path = os.path.normpath(path)
700        path = self._fix_dir(path)
701
702        result = []
703        for test in self._full_test_list:
704            if test.startswith(path):
705                result.append(test)
706        return result
707
708    def _fix_dir(self, path):
709        """Check to see if the path points to a directory, and if so, append
710        the directory separator if necessary."""
711        if self._tests_are_present:
712            if os.path.isdir(path):
713                path = os.path.join(path, '')
714        else:
715            # If we can't check the filesystem to see if this is a directory,
716            # we assume that files w/o an extension are directories.
717            # TODO(dpranke): What happens w/ LayoutTests/css2.1 ?
718            if os.path.splitext(path)[1] == '':
719                path = os.path.join(path, '')
720        return path
721
722    def _add_tests(self, tests, expectations, test_list_path, lineno,
723                   modifiers, options):
724        for test in tests:
725            if self._already_seen_test(test, test_list_path, lineno):
726                continue
727
728            self._clear_expectations_for_test(test, test_list_path)
729            self._add_test(test, modifiers, expectations, options)
730
731    def _add_test(self, test, modifiers, expectations, options):
732        """Sets the expected state for a given test.
733
734        This routine assumes the test has not been added before. If it has,
735        use _ClearExpectationsForTest() to reset the state prior to
736        calling this.
737
738        Args:
739          test: test to add
740          modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
741          expectations: sequence of expectations (PASS, IMAGE, etc.)
742          options: sequence of keywords and bug identifiers."""
743        self._test_to_expectations[test] = expectations
744        for expectation in expectations:
745            self._expectation_to_tests[expectation].add(test)
746
747        self._test_to_options[test] = options
748        self._test_to_modifiers[test] = set()
749        for modifier in modifiers:
750            mod_value = self.MODIFIERS[modifier]
751            self._modifier_to_tests[mod_value].add(test)
752            self._test_to_modifiers[test].add(mod_value)
753
754        if 'wontfix' in modifiers:
755            self._timeline_to_tests[WONTFIX].add(test)
756        elif 'defer' in modifiers:
757            self._timeline_to_tests[DEFER].add(test)
758        else:
759            self._timeline_to_tests[NOW].add(test)
760
761        if 'skip' in modifiers:
762            self._result_type_to_tests[SKIP].add(test)
763        elif expectations == set([PASS]):
764            self._result_type_to_tests[PASS].add(test)
765        elif len(expectations) > 1:
766            self._result_type_to_tests[FLAKY].add(test)
767        else:
768            self._result_type_to_tests[FAIL].add(test)
769
770    def _clear_expectations_for_test(self, test, test_list_path):
771        """Remove prexisting expectations for this test.
772        This happens if we are seeing a more precise path
773        than a previous listing.
774        """
775        if test in self._test_list_paths:
776            self._test_to_expectations.pop(test, '')
777            self._remove_from_sets(test, self._expectation_to_tests)
778            self._remove_from_sets(test, self._modifier_to_tests)
779            self._remove_from_sets(test, self._timeline_to_tests)
780            self._remove_from_sets(test, self._result_type_to_tests)
781
782        self._test_list_paths[test] = os.path.normpath(test_list_path)
783
784    def _remove_from_sets(self, test, dict):
785        """Removes the given test from the sets in the dictionary.
786
787        Args:
788          test: test to look for
789          dict: dict of sets of files"""
790        for set_of_tests in dict.itervalues():
791            if test in set_of_tests:
792                set_of_tests.remove(test)
793
794    def _already_seen_test(self, test, test_list_path, lineno):
795        """Returns true if we've already seen a more precise path for this test
796        than the test_list_path.
797        """
798        if not test in self._test_list_paths:
799            return False
800
801        prev_base_path = self._test_list_paths[test]
802        if (prev_base_path == os.path.normpath(test_list_path)):
803            self._add_error(lineno, 'Duplicate expectations.', test)
804            return True
805
806        # Check if we've already seen a more precise path.
807        return prev_base_path.startswith(os.path.normpath(test_list_path))
808
809    def _add_error(self, lineno, msg, path):
810        """Reports an error that will prevent running the tests. Does not
811        immediately raise an exception because we'd like to aggregate all the
812        errors so they can all be printed out."""
813        self._errors.append('\nLine:%s %s %s' % (lineno, msg, path))
814
815    def _log_non_fatal_error(self, lineno, msg, path):
816        """Reports an error that will not prevent running the tests. These are
817        still errors, but not bad enough to warrant breaking test running."""
818        self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
819