#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# Configuration files to look for when discovering test suites. These can be
# overridden with --config-prefix.
#
# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
gConfigName = 'lit.cfg'
gSiteConfigName = 'lit.site.cfg'

kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
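    """Console reporter shared by the Tester threads.

    Tracks how many tests have completed and prints a status line for each
    result (plus the failing test's output when --verbose is given), updating
    the progress bar when one is in use. update() serializes output with a
    lock so multiple testing threads can report safely.
    """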
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
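    """Thread-safe source of the tests to run.

    get() hands out one test at a time and returns None once the tests are
    exhausted or the --max-time deadline has passed.
    """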
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            item = self.iter.next()
        except StopIteration:
            item = None
        self.lock.release()
        return item

class Tester(threading.Thread):
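    """Worker thread that pulls tests from a TestProvider, runs each one
    through its configured test format, and reports the result to the
    display."""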
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0,9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
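    """Return the config file path that makes 'path' a test suite root.

    The site config (gSiteConfigName) is preferred over the source config
    (gConfigName); returns None if neither file exists in the directory.
    """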
    cfgpath = os.path.join(path, gSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, gConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
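    """getLocalConfig(ts, path_in_suite, litConfig, cache) -> TestingConfig

    Get the test configuration in effect for the given subdirectory of a test
    suite, cloning the parent configuration and applying any local config
    file (kLocalConfigName) found at each level of the path.
    """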
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
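    """Resolve an input path to its test suite and the tests it contains.

    Returns a (suite, tests iterator) pair, or ((), ()) when no test suite
    could be found for the path.
    """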
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return ts, getTestsInSuite(ts, path_in_suite, litConfig,
                               testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
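    """Generate the Test objects under path_in_suite within the suite ts.

    If the path names a single test file, yield just that test. Otherwise
    yield whatever the directory's test format produces and recurse into
    subdirectories, handing off to nested test suites where one is found.
    """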
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)

def runTests(numThreads, litConfig, provider, display):
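    """Run all tests from the provider across numThreads Tester threads.

    With a single thread the tests run directly on the calling thread, which
    keeps profiling simple; a KeyboardInterrupt while waiting on the worker
    threads exits with status 2.
    """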
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def load_test_suite(inputs):
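    """Discover the tests under the given inputs and return them as a
    unittest.TestSuite of LitTestCase objects, suitable for running with a
    standard unittest runner."""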
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    useTclAsSh = False,
                                    noExecute = False,
                                    ignoreStdErr = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    def get_test_fn(test):
        return unittest.FunctionTestCase(
            lambda: test.config.test_format.execute(
                test, litConfig),
            description = test.getFullName())

    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="EXPRESSION",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                      help="Don't run Tcl scripts using 'sh'",
                      action="store_false", default=True)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                      help="Repeat tests N times (for timing)",
                      action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName, kLocalConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix
        kLocalConfigName = '%s.local.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
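    # Each --param entry has the form NAME=VAL; a bare NAME maps to the empty
    # string.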
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    ignoreStdErr = False,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs.
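    # An argument of the form '@FILE' (when no file literally named '@FILE'
    # exists) is replaced by the non-empty lines of FILE, one input per line.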
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' %(ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        tests = [t for t in tests
                 if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()