#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
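
# A typical invocation might look like the following; the test-suite path and
# the parameter name below are placeholders, not anything defined in this
# script:
#
#   ./lit.py -sv --param build_mode=Release path/to/test/suite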

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
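
# For reference, write_test_results() emits JSON shaped roughly like this
# (the concrete values are illustrative; 'metrics' appears only when a test
# recorded any):
#   {"__version__": [...], "elapsed": 12.34,
#    "tests": [{"name": "suite :: some/test", "code": "PASS",
#               "elapsed": 0.12, "output": "..."}]}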

def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))
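
# Together, the two helpers above implement --incremental: a failing test gets
# its file mtime bumped, so the next run sorts the most recently touched
# (i.e. recently failing or recently edited) tests to the front.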

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except on test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all commandlines and output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                      help=("Write XUnit-compatible XML test reports to the"
                            " specified file"), default=None)
    group.add_option("", "--timeout", dest="maxIndividualTestTime",
                     help="Maximum time to spend running a single test (in "
                          "seconds). 0 means no time limit. [Default: 0]",
                     type=int, default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                      help="Show all discovered tests",
                      action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                      help="Run tests in parallel with processes (not threads)",
                      action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                      help="Run tests in parallel with threads (not processes)",
                      action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')
    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
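
    # For example, '-D build_mode=Release' yields userParams['build_mode'] ==
    # 'Release', while a bare '-D check_all' yields userParams['check_all'] ==
    # '' (the parameter names here are purely illustrative).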

    # Decide what the requested maximum individual test time should be.
    if opts.maxIndividualTestTime is not None:
        maxIndividualTestTime = opts.maxIndividualTestTime
    else:
        # Default is zero
        maxIndividualTestTime = 0


    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix,
        maxIndividualTestTime = maxIndividualTestTime)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    # Test discovery may have changed maxIndividualTestTime in the
    # configuration. If a timeout was set explicitly on the command line,
    # it overrides whatever the test configuration requested.
    if opts.maxIndividualTestTime is not None:
        if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
            litConfig.note(('The test suite configuration requested an individual'
                ' test timeout of {0} seconds but a timeout of {1} seconds was'
                ' requested on the command line. Forcing timeout to be {1}'
                ' seconds')
                .format(litConfig.maxIndividualTestTime,
                        opts.maxIndividualTestTime))
            litConfig.maxIndividualTestTime = opts.maxIndividualTestTime

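    # With --show-suites, the block below prints a listing shaped roughly like
    # this (suite name, count, and paths are illustrative):
    #   -- Test Suites --
    #     MySuite - 42 tests
    #       Source Root: /path/to/suite
    #       Exec Root  : /path/to/build/suite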
    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)
                if ts.config.available_features:
                    print('    Available Features : %s' % ' '.join(
                        sorted(ts.config.available_features)))

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                               (max_procs_soft, desired_limit))
    except:
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)
    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)
    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL),
                       ('Timed Out Tests', lit.Test.TIMEOUT)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

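    # Print summary counts per result category; under --quiet, categories
    # that are not failures are suppressed.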
    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),
                      ('Individual Timeouts', lit.Test.TIMEOUT)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

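    # When --xunit-xml-output is given, the block below writes a report shaped
    # roughly like this (suite name and counts are illustrative):
    #   <?xml version="1.0" encoding="UTF-8" ?>
    #   <testsuites>
    #   <testsuite name='MySuite' tests='42' failures='1'>
    #     ... one entry per test, from result_test.getJUnitXML() ...
    #   </testsuite>
    #   </testsuites>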
    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                                   'passes'   : 0,
                                   'failures' : 0,
                                   'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
              suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
              "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()
