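"""Run the JSON test suite: feed each *.json test case found in a data
directory to the jsontestrunner executable, compare its *.actual and
*.actual-rewrite output against the matching *.expected file, and report
failures.  Optionally runs the executable under valgrind and/or includes
the JSONChecker test suite from json.org."""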
from __future__ import print_function
import sys
import os
import os.path
from glob import glob
import optparse

VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '

def compareOutputs( expected, actual, message ):
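    """Compare two blocks of text line by line, ignoring surrounding whitespace
    and carriage returns.  Return None if they match, otherwise a description
    of the first differing line."""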
    expected = expected.strip().replace('\r','').split('\n')
    actual = actual.strip().replace('\r','').split('\n')
    diff_line = 0
    max_line_to_compare = min( len(expected), len(actual) )
    for index in range(0,max_line_to_compare):
        if expected[index].strip() != actual[index].strip():
            diff_line = index + 1
            break
    if diff_line == 0 and len(expected) != len(actual):
        diff_line = max_line_to_compare+1
    if diff_line == 0:
        return None
    def safeGetLine( lines, index ):
        # Convert the 1-based line number into a 0-based list index.
        index -= 1
        if index >= len(lines):
            return ''
        return lines[index].strip()
    return """  Difference in %s at line %d:
  Expected: '%s'
  Actual:   '%s'
""" % (message, diff_line,
       safeGetLine(expected,diff_line),
       safeGetLine(actual,diff_line) )

def safeReadFile( path ):
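    """Return the content of the file at 'path', or a placeholder string if the
    file cannot be read."""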
    try:
        return open( path, 'rt' ).read()
    except IOError as e:
        return '<File "%s" is missing: %s>' % (path,e)

def runAllTests( jsontest_executable_path, input_dir = None,
                 use_valgrind=False, with_json_checker=False ):
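    """Run every test case found in input_dir (default: ./data) through the
    jsontestrunner executable and return 0 if all tests pass, 1 otherwise."""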
    if not input_dir:
        input_dir = os.path.join( os.getcwd(), 'data' )
    tests = glob( os.path.join( input_dir, '*.json' ) )
    if with_json_checker:
        test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
    else:
        test_jsonchecker = []
    failed_tests = []
    valgrind_path = use_valgrind and VALGRIND_CMD or ''
    for input_path in tests + test_jsonchecker:
        expect_failure = os.path.basename( input_path ).startswith( 'fail' )
        is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
        print('TESTING:', input_path, end=' ')
        options = is_json_checker_test and '--json-checker' or ''
        pipe = os.popen( "%s%s %s %s" % (
            valgrind_path, jsontest_executable_path, options,
            input_path) )
        process_output = pipe.read()
        status = pipe.close()
        # os.popen().close() returns None when the command exited successfully.
        if is_json_checker_test:
            if expect_failure:
                if status is None:
                    print('FAILED')
                    failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
                                          safeReadFile(input_path)) )
                else:
                    print('OK')
            else:
                if status is not None:
                    print('FAILED')
                    failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
                else:
                    print('OK')
        else:
            base_path = os.path.splitext(input_path)[0]
            actual_output = safeReadFile( base_path + '.actual' )
            actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
            open(base_path + '.process-output','wt').write( process_output )
            if status:
                print('parsing failed')
                failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
            else:
                expected_output_path = os.path.splitext(input_path)[0] + '.expected'
                expected_output = open( expected_output_path, 'rt' ).read()
                detail = ( compareOutputs( expected_output, actual_output, 'input' )
                            or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
                if detail:
                    print('FAILED')
                    failed_tests.append( (input_path, detail) )
                else:
                    print('OK')

    if failed_tests:
        print()
        print('Failure details:')
        for failed_test in failed_tests:
            print('* Test', failed_test[0])
            print(failed_test[1])
            print()
        # Count both the regular tests and the JSONChecker tests in the summary.
        print('Test results: %d passed, %d failed.' % (len(tests)+len(test_jsonchecker)-len(failed_tests),
                                                       len(failed_tests) ))
        return 1
    else:
        print('All %d tests passed.' % (len(tests)+len(test_jsonchecker)))
        return 0

def main():
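    """Parse the command line and run the test suite."""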
    from optparse import OptionParser
    parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
    parser.add_option("--valgrind",
                  action="store_true", dest="valgrind", default=False,
                  help="run all the tests using valgrind to detect memory leaks")
    parser.add_option("-c", "--with-json-checker",
                  action="store_true", dest="with_json_checker", default=False,
                  help="run all the tests from the official JSONChecker test suite of json.org")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()

    if len(args) < 1 or len(args) > 2:
        parser.error( 'Must provide at least the path to the jsontestrunner executable.' )
        sys.exit( 1 )

    jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
    if len(args) > 1:
        input_path = os.path.normpath( os.path.abspath( args[1] ) )
    else:
        input_path = None
    status = runAllTests( jsontest_executable_path, input_path,
                          use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
    sys.exit( status )

if __name__ == '__main__':
    main()