#!/usr/bin/env python

#
# Copyright (c) 2009, 2011, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#

import sys
from machines import MachineFactory

# check interpreter version to avoid confusion over syntax/module errors
if sys.version_info < (2, 6):
    sys.stderr.write('Error: Python 2.6 or greater is required\n')
    sys.exit(1)

import os
import codecs
import optparse
import traceback
import datetime
import getpass
import fnmatch
import harness
import debug
import checkout
import builds
import tests
import machines
from tests.common import TimeoutError
from socket import gethostname

try:
    from junit_xml import TestSuite, TestCase
    have_junit_xml = True
except ImportError:
    have_junit_xml = False

def list_all():
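    """List the available build types, machines and tests (-L/--listall)."""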
    print 'Build types:\t', ', '.join([b.name for b in builds.all_builds])
    print 'Machines:\t', ', '.join(MachineFactory.machineFactories.keys())
    print 'Tests:'
    for t in sorted(tests.all_tests, key=lambda test: test.name):
        print '  %-20s %s' % (t.name, (t.__doc__ or '').strip())


def parse_args():
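    """Parse the command line and return the populated options object.

    Build, machine and test specs are resolved here: each spec is matched
    (glob-style) against the known names and instantiated, so the returned
    object carries ready-to-use options.builds, options.machines and
    options.tests.
    """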
    p = optparse.OptionParser(
        usage='Usage: %prog [options] SOURCEDIR RESULTDIR',
        description='Barrelfish regression/benchmark harness')

    g = optparse.OptionGroup(p, 'Basic options')
    g.add_option('-b', '--build', action='append', dest='buildspecs',
                 metavar='BUILD', help='build types to perform [default: test]')
    g.add_option('-B', '--buildbase', dest='buildbase', metavar='DIR',
                 help='places builds under DIR [default: SOURCEDIR/builds]')
    g.add_option('-e', '--existingbuild', dest='existingbuild', metavar='DIR',
                 help='existing build directory (may not be used with -b)')
    g.add_option('-m', '--machine', action='append', dest='machinespecs',
                 metavar='MACHINE', help='victim machines to use')
    g.add_option('-t', '--test', action='append', dest='testspecs',
                 metavar='TEST', help='tests/benchmarks to run')
    g.add_option('-c', '--comment', dest='comment',
                 help='comment to store with all collected data')
    g.add_option('-x', '--xml', dest='xml', action='store_true',
                 default=False,
                 help='output summary of tests in Junit XML format')
    p.add_option_group(g)

    g = optparse.OptionGroup(p, 'Debugging options')
    g.add_option('-L', '--listall', action='store_true', dest='listall',
                 help='list available builds, machines and tests')
    debug.addopts(g, 'debuglevel')
    g.add_option('-k', '--keepgoing', action='store_true', dest='keepgoing',
                 help='attempt to continue on errors')
    p.add_option_group(g)
    p.set_defaults(debuglevel=debug.NORMAL)

    options, args = p.parse_args()

    debug.current_level = options.debuglevel

    if options.listall:
        list_all()
        sys.exit(0)

    if len(args) != 2:
        p.error('source and results directories must be specified')
    options.sourcedir, options.resultsdir = args

    # determine default buildbase if needed
    if options.buildbase is None:
        options.buildbase = os.path.join(options.sourcedir, 'builds')

    # check validity of source and results dirs
    if not os.path.isdir(os.path.join(options.sourcedir, 'hake')):
        p.error('invalid source directory %s' % options.sourcedir)
    if not (os.path.isdir(options.resultsdir)
            and os.access(options.resultsdir, os.W_OK)):
        p.error('invalid results directory %s' % options.resultsdir)

    if options.xml and not have_junit_xml:
        p.error('--xml requires junit-xml.\n'
                'Please install junit-xml through pip or easy_install')

    # resolve and instantiate all builds
    def _lookup(spec, classes, nameFn=lambda c: c.name.lower()):
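        """Return the entries of classes whose name matches the shell-style
        glob pattern spec (case-insensitive); e.g. 'memtest*' matches every
        test whose name starts with 'memtest'."""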
        spec = spec.lower()
        return [c for c in classes if fnmatch.fnmatch(nameFn(c), spec)]

    if options.existingbuild:
        if options.buildspecs:
            p.error('existing build directory cannot be used together'
                    ' with build types (-b)')
        options.builds = [builds.existingbuild(options, options.existingbuild)]
        options.buildbase = options.existingbuild
    else:
        options.builds = []
        if not options.buildspecs:
            options.buildspecs = ['test']
        for spec in options.buildspecs:
            matches = _lookup(spec, builds.all_builds)
            if matches == []:
                p.error('no builds match "%s" (try -L for a list)' % spec)
            options.builds.extend(
                [b for b in matches if b not in options.builds])
        options.builds = [b(options) for b in options.builds]

    # resolve and instantiate all machines
    if options.machinespecs is None:
        p.error('no machines specified')
    options.machines = []
    for spec in options.machinespecs:
        matches = _lookup(spec, MachineFactory.machineFactories,
                          nameFn=lambda fac: fac.lower())
        if matches == []:
            p.error('no machines match "%s" (try -L for a list)' % spec)
        options.machines.extend(
            [m for m in matches if m not in options.machines])
    options.machines = [MachineFactory.createMachineByName(m, options)
                        for m in options.machines]

    # resolve and instantiate all tests
    if options.testspecs:
        options.tests = []
        for spec in options.testspecs:
            matches = _lookup(spec, tests.all_tests)
            if matches == []:
                p.error('no tests match "%s" (try -L for a list)' % spec)
            options.tests.extend(
                [t for t in matches if t not in options.tests])
    else:
        p.error('no tests specified (try -t memtest if unsure)')
    options.tests = [t(options) for t in options.tests]

    debug.verbose('Host:     ' + gethostname())
    debug.verbose('Builds:   ' + ', '.join([b.name for b in options.builds]))
    debug.verbose('Machines: ' + ', '.join([m.getName() for m in options.machines]))
    debug.verbose('Tests:    ' + ', '.join([t.name for t in options.tests]))

    return options

class Scalebench:
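    """Top-level test driver: configures each requested build, runs every
    selected test on every selected machine via harness.Harness, and writes
    per-test result directories plus an optional JUnit XML report."""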

    def __init__(self, options):
        self._harness = harness.Harness()
        self._options = options

    def make_results_dir(self, build, machine, test):
        # Create a unique directory for the output from this test
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        dirname = '-'.join([test.name, build.name, machine.getName(), timestamp])
        path = os.path.join(self._options.resultsdir,
                            str(datetime.datetime.now().year), dirname)
        debug.verbose('create result directory %s' % path)
        os.makedirs(path)
        return path

    def make_run_dir(self, build, machine):
        # Create a unique directory for the output of this run (build/machine pair)
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        dirname = '-'.join([build.name, machine.getName(), timestamp])
        path = os.path.join(self._options.resultsdir,
                            str(datetime.datetime.now().year), dirname)
        debug.verbose('create result directory %s' % path)
        os.makedirs(path)
        return path

    def write_description(self, checkout, build, machine, test, path):
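        """Write description.txt into the result directory, recording the
        test, revision, build, machine, start time, user, checkout metadata
        and any --comment; a non-empty checkout diff is saved as
        changes.patch."""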
        debug.verbose('write description file')
        with codecs.open(os.path.join(path, 'description.txt'), 'w', 'utf-8') as f:
            f.write('test: %s\n' % test.name)
            f.write('revision: %s\n' % checkout.get_revision())
            f.write('build: %s\n' % build.name)
            f.write('machine: %s\n' % machine.getName())
            f.write('start time: %s\n' % datetime.datetime.now())
            f.write('user: %s\n' % getpass.getuser())
            for item in checkout.get_meta().items():
                f.write("%s: %s\n" % item)

            if self._options.comment:
                f.write('\n' + self._options.comment + '\n')

        diff = checkout.get_diff()
        if diff:
            with codecs.open(os.path.join(path, 'changes.patch'), 'w', 'utf-8') as f:
                f.write(diff)

    def write_errorcase(self, build, machine, test, path, msg, start_ts, end_ts):
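        """Build a test-case record for a test that aborted with an error or
        timeout. Returns a junit_xml.TestCase carrying the error message when
        junit_xml is available, otherwise a plain dict with the same fields."""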
        delta = end_ts - start_ts
        tc = { 'name': test.name,
               'time_elapsed': delta.total_seconds(),
               'class': machine.getName(),
               'stdout': '\n'.join(self._harness.process_output(test, path)),
               'stderr': "",
               'passed': False
        }
        if have_junit_xml:
            ju_tc = TestCase(
                    tc['name'],
                    tc['class'],
                    tc['time_elapsed'],
                    tc['stdout'],
                    )
            ju_tc.add_error_info(message=msg)
            return ju_tc
        else:
            return tc

    def write_testcase(self, build, machine, test, path, passed,
            start_ts, end_ts):
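        """Build a test-case record for a completed test run. On failure, the
        error lines extracted by the harness are attached as failure info.
        Returns a junit_xml.TestCase when junit_xml is available, else a dict."""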
        delta = end_ts - start_ts
        tc = { 'name': test.name,
               'class': machine.getName(),
               'time_elapsed': delta.total_seconds(),
               'stdout': '\n'.join(self._harness.process_output(test, path)),
               'stderr': "",
               'passed': passed
        }
        if have_junit_xml:
            ju_tc = TestCase(
                    tc['name'],
                    tc['class'],
                    tc['time_elapsed'],
                    tc['stdout'],
                    )
            if not passed:
                errors = self._harness.extract_errors(test, path)
                errorstr = 'Failed'
                if errors is not None and len(errors) > 0:
                    errorstr += ': ' + ''.join(
                        [unicode(l, errors='replace') for l in errors])
                ju_tc.add_failure_info(message=errorstr)
            return ju_tc
        else:
            return tc

    def testcase_passed(self, testcase):
        if have_junit_xml:
            return not (testcase.is_failure() or testcase.is_error()
                        or testcase.is_skipped())
        else:
            return testcase['passed']

    def testcase_name(self, testcase):
        if have_junit_xml:
            return testcase.name
        else:
            return testcase['name']

    def write_xml_report(self, testcases, path):
        assert have_junit_xml
        debug.log("producing junit-xml report")
        ts = TestSuite('harness suite', testcases)
        with open(os.path.join(path, 'report.xml'), 'w') as f:
            TestSuite.to_file(f, [ts], prettyprint=False)

    def run_test(self, build, machine, test, co, testcases):
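        """Run a single test on one machine and record the outcome.

        Appends a test-case record to testcases and returns True iff the test
        passed; timeouts and exceptions while running are recorded as error
        cases and yield False.
        """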
        debug.log('running test %s on %s, cwd is %s'
                  % (test.name, machine.getName(), os.getcwd()))
        path = self.make_results_dir(build, machine, test)
        self.write_description(co, build, machine, test, path)
        start_timestamp = datetime.datetime.now()
        try:
            self._harness.run_test(build, machine, test, path)
        except TimeoutError:
            msg = 'Timeout while running test'
            if self._options.keepgoing:
                msg += ' (attempting to continue)'
            debug.error(msg)
            end_timestamp = datetime.datetime.now()
            testcases.append(self.write_errorcase(build, machine, test, path,
                msg + "\n" + traceback.format_exc(), start_timestamp, end_timestamp)
                )
            return False
        except Exception as e:
            msg = 'Exception while running test'
            if self._options.keepgoing:
                msg += ' (attempting to continue):'
            debug.error(msg)
            debug.error(str(e))
            end_timestamp = datetime.datetime.now()
            testcases.append(self.write_errorcase(build, machine, test, path,
                msg + "\n" + traceback.format_exc(), start_timestamp, end_timestamp)
                )
            traceback.print_exc()
            return False

        end_timestamp = datetime.datetime.now()
        debug.log('test complete, processing results')
        try:
            passed = self._harness.process_results(test, path)
            debug.log('result: %s' % ("PASS" if passed else "FAIL"))
        except Exception:
            msg = 'Exception while processing results'
            if self._options.keepgoing:
                msg += ' (attempting to continue):'
            debug.error(msg)
            # treat a result-processing failure as a test failure so that a
            # test-case record is still written below
            passed = False
            if self._options.keepgoing:
                traceback.print_exc()

        testcases.append(
                self.write_testcase(build, machine, test, path, passed,
                    start_timestamp, end_timestamp))
        return passed

    def execute_tests(self, co, buildarchs, testcases):
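        """Configure each requested build and run every selected test on every
        selected machine, appending result records to testcases. A JUnit XML
        report is written per build/machine pair when --xml is given; unless
        --keepgoing is set, the run stops at the first failure.
        """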
        for build in self._options.builds:
            debug.log('starting build: %s' % build.name)
            build.configure(co, buildarchs)
            for machine in self._options.machines:
                passed = True
                for test in self._options.tests:
                    passed = self.run_test(build, machine, test, co, testcases)
                    if not passed and not self._options.keepgoing:
                        # stop running further tests on this machine if there
                        # was an error and --keepgoing was not given
                        break
                # produce JUnit-style XML report if requested
                if self._options.xml:
                    path = self.make_run_dir(build, machine)
                    self.write_xml_report(testcases, path)
                # Did we encounter an error?
                if not passed and not self._options.keepgoing:
                    return

    def main(self):
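        """Obtain a checkout handle for the source directory, compute the
        union of build architectures required by the selected machines, run
        all tests and log a summary. Returns False if any test failed, True
        otherwise.
        """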
        retval = True  # everything was OK
        co = checkout.create_for_dir(self._options.sourcedir)

        # determine build architectures
        buildarchs = set()
        for m in self._options.machines:
            buildarchs |= set(m.get_buildarchs())
        buildarchs = list(buildarchs)

        testcases = []

        self.execute_tests(co, buildarchs, testcases)

        pcount = len([t for t in testcases if self.testcase_passed(t)])
        debug.log('\n%d/%d tests passed' % (pcount, len(testcases)))
        if pcount < len(testcases):
            debug.log('Failed tests:')
            for t in [t for t in testcases if not self.testcase_passed(t)]:
                debug.log(' * %s' % self.testcase_name(t))
            # return False if we had test failures
            retval = False
        debug.log('all done!')
        return retval

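# Example invocation (machine names depend on the local configuration; use -L
# to list the available builds, machines and tests):
#
#   <this script> -m <machine> -t memtest SOURCEDIR RESULTDIR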
if __name__ == "__main__":
    options = parse_args()
    scalebench = Scalebench(options)
    if not scalebench.main():
        sys.exit(1)  # one or more tests failed