#!/usr/bin/env python3
#
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: BSD-2-Clause
#

#
# Very simple command-line test runner.
#
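# Example invocation (test names here are placeholders; real names come
# from the tests.xml files discovered under --directory):
#
#   ./run_tests.py -j 4 --junit-report report.xml TestA TestB
#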

from __future__ import print_function

import argparse
import atexit
import datetime
import collections
import cpuusage
import fnmatch
import memusage
import os
try:
    import Queue
except ImportError:
    import queue
    Queue = queue
import signal
import subprocess
import sys
import testspec
import threading
import time
import traceback
import warnings
import xml.etree.ElementTree as ET

import psutil
if not hasattr(psutil.Process, "children"):
    # psutil API change: older versions call this "get_children".
    psutil.Process.children = getattr(psutil.Process, "get_children")

ANSI_RESET = "\033[0m"
ANSI_RED = "\033[31;1m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_WHITE = "\033[37m"
ANSI_BOLD = "\033[1m"


def output_color(color, s):
    """Wrap the given string in the given color."""
    if sys.stdout.isatty():
        return color + s + ANSI_RESET
    return s


def which(filename):
    """Find a command in the PATH."""
    for path in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path, filename)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

#
# Kill a process and all of its children.
#
# We attempt to handle races where a PID goes away while we
# are looking at it, but not where a PID has been reused.
#


def kill_family(grace_period, parent_pid):
    # Find process.
    try:
        process = psutil.Process(parent_pid)
    except psutil.NoSuchProcess:
        # Race. Nothing more to do.
        return

    process_list = [process]
    for child in process.children(recursive=True):
        process_list.append(child)

    # Grace period for processes to clean up.
    if grace_period > 0:
        for p in process_list[:]:
            try:
                p.send_signal(signal.SIGINT)
            except psutil.NoSuchProcess:
                # Race.
                process_list.remove(p)

        # Sleep up to grace_period, but stop early if the parent exits.
        slept = 0
        intvl = min(grace_period, 1.0)
        while slept < grace_period:
            if not process.is_running():
                break
            time.sleep(intvl)
            slept += intvl

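    # Suspend the whole family before delivering SIGKILL, so that no
    # surviving parent can respawn children (and no child can double-fork
    # away) in between the individual kills below.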
    # SIGSTOP everyone first.
    for p in process_list[:]:
        try:
            p.suspend()
        except psutil.NoSuchProcess:
            # Race.
            process_list.remove(p)

    # Now SIGKILL everyone, children first.
    process_list.reverse()
    for p in process_list:
        try:
            p.send_signal(signal.SIGKILL)
        except psutil.NoSuchProcess:
            # Race.
            pass


# Process statuses
(RUNNING,      # Still running
 PASSED,       # Passed
 FAILED,       # Failed
 SKIPPED,      # Failed dependencies
 ERROR,        # Failed to run test at all
 TIMEOUT,      # Wall-clock timeout
 CPU_TIMEOUT,  # CPU timeout
 STUCK,        # No CPU activity detected
 CANCELLED     # Cancelled for external reasons
 ) = range(9)

# Status names for print_test_line; indices must match the codes above.
status_name = ['RUNNING (***bug***)',
               'passed',
               'FAILED',
               'SKIPPED',
               'ERROR',
               'TIMEOUT',
               'TIMEOUT',
               'STUCK',
               'CANCELLED']
status_maxlen = max(len(s) for s in status_name[1:]) + len(" *")


def run_test(test, status_queue, kill_switch,
             verbose=False, stuck_timeout=None,
             timeout_scale=1.0, timeouts_enabled=True,
             grace_period=0):
    '''
    Run a single test.

    Places a dict with keys (name, status, output, real_time, cpu_time,
    mem_usage) onto the given status_queue.

    The 'output' field contains the test's output only if verbose is
    *false*; otherwise the output goes straight to stdout, where we can't
    easily capture it.

    kill_switch is a threading.Event that is set if the
    --fail-fast feature is triggered from some other thread.
    '''

    # Construct the base command.
    command = ["bash", "-c", test.command]

    # If we have a "pidspace" program, use that to ensure that programs
    # that double-fork can't continue running after the parent command
    # dies.
    if which("pidspace") is not None:
        command = [which("pidspace"), "--"] + command

    # Print command and path.
    if verbose:
        print("\n")
        if os.path.abspath(test.cwd) != os.path.abspath(os.getcwd()):
            path = " [%s]" % os.path.relpath(test.cwd)
        else:
            path = ""
        print("    command: %s%s" % (test.command, path))

    # Determine where stdout should go. We can't print it live to stdout and
    # also capture it, unfortunately.
    output = sys.stdout if verbose else subprocess.PIPE

    # Start timing.
    start_time = datetime.datetime.now()

    # Start the command.
    peak_mem_usage = None
    try:
        process = subprocess.Popen(command,
                                   stdout=output, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
                                   cwd=test.cwd)
    except Exception:
        output = "Exception while running test:\n\n%s" % (traceback.format_exc())
        if verbose:
            print(output)
        status_queue.put({'name': test.name,
                          'status': ERROR,
                          'output': output,
                          'real_time': datetime.datetime.now() - start_time,
                          'cpu_time': 0,
                          'mem_usage': peak_mem_usage})
        return

    # Now running the test.
    # Wrap in a list to prevent nested functions getting the wrong scope.
    test_status = [RUNNING]

    # If we exit for some reason, attempt to kill our test processes.
    def emergency_stop():
        if test_status[0] is RUNNING:
            kill_family(grace_period, process.pid)
    atexit.register(emergency_stop)

    # Set up a timer for the wall-clock timeout.
    def do_timeout():
        if test_status[0] is RUNNING:
            test_status[0] = TIMEOUT
            kill_family(grace_period, process.pid)

    scaled_test_timeout = test.timeout * timeout_scale
    timer = None
    if timeouts_enabled and scaled_test_timeout > 0:
        timer = threading.Timer(scaled_test_timeout, do_timeout)
        timer.start()

    # Poll the kill switch, cancelling the test if it fires.
    def watch_kill_switch():
        while test_status[0] is RUNNING:
            # Wait up to a second for the kill switch to be set.
            if kill_switch.wait(1.0):
                if test_status[0] is RUNNING:
                    test_status[0] = CANCELLED
                    kill_family(grace_period, process.pid)
                break
    kill_switch_thread = threading.Thread(target=watch_kill_switch)
    kill_switch_thread.daemon = True
    kill_switch_thread.start()

    scaled_cpu_timeout = test.cpu_timeout * timeout_scale
    with cpuusage.process_poller(process.pid) as c:
        # Inactivity timeout
        low_cpu_usage = 0.05  # 5%
        cpu_history = collections.deque()  # sliding window
        cpu_usage_total = [0]  # workaround for variable scope

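        # Stuck detection: each poll appends (timestamp, CPU usage rate) to
        # cpu_history, which is trimmed so that it always spans at least
        # stuck_timeout seconds. If the average rate over that window drops
        # below low_cpu_usage (5%), the test is declared STUCK and killed.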
        # Also set a CPU timeout. We poll the CPU usage periodically.
        def cpu_timeout():
            last_cpu_usage = 0
            interval = min(0.5, scaled_cpu_timeout / 10.0)
            while test_status[0] is RUNNING:
                thread_cpu_usage = c.cpu_usage()

                if stuck_timeout:
                    # Append to the window.
                    now = time.time()
                    if not cpu_history:
                        cpu_history.append((now, thread_cpu_usage / interval))
                    else:
                        real_interval = now - cpu_history[-1][0]
                        cpu_increment = thread_cpu_usage - last_cpu_usage
                        cpu_history.append((now, cpu_increment / real_interval))
                    cpu_usage_total[0] += cpu_history[-1][1]

                    # Pop from the window, ensuring that it still covers at
                    # least a stuck_timeout interval.
                    while len(cpu_history) > 1 and cpu_history[1][0] + stuck_timeout <= now:
                        cpu_usage_total[0] -= cpu_history[0][1]
                        cpu_history.popleft()

                    if (now - cpu_history[0][0] >= stuck_timeout and
                            cpu_usage_total[0] / len(cpu_history) < low_cpu_usage):
                        test_status[0] = STUCK
                        kill_family(grace_period, process.pid)
                        break

                if thread_cpu_usage > scaled_cpu_timeout:
                    test_status[0] = CPU_TIMEOUT
                    kill_family(grace_period, process.pid)
                    break

                last_cpu_usage = thread_cpu_usage
                time.sleep(interval)

        cpu_timer = None
        if timeouts_enabled and scaled_cpu_timeout > 0:
            cpu_timer = threading.Thread(target=cpu_timeout)
            cpu_timer.daemon = True
            cpu_timer.start()

        with memusage.process_poller(process.pid) as m:
            # Wait for the command to finish.
            (output, _) = process.communicate()
            peak_mem_usage = m.peak_mem_usage()
            cpu_usage = c.cpu_usage()

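        # A zero exit code counts as a pass; otherwise, keep any status a
        # timeout/cancel handler has already set, and treat the rest as a
        # plain failure.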
        if process.returncode == 0:
            test_status[0] = PASSED
        elif test_status[0] is RUNNING:
            # No special status, so assume it failed by itself.
            test_status[0] = FAILED

        if cpu_timer is not None:
            # Prevent cpu_timer from using c after it goes away.
            cpu_timer.join()

    # Cancel the timer. Small race here (if the timer fires just after the
    # process finished), but the return code of our process should still be 0,
    # and hence we won't interpret the result as a timeout.
    if timer is not None and test_status[0] is not TIMEOUT:
        timer.cancel()

    if output is None:
        output = b''
    output = output.decode(encoding='utf8', errors='replace')
    if test_status[0] in [STUCK, TIMEOUT, CPU_TIMEOUT]:
        output = output + extra_timeout_output(test.name)

    status_queue.put({'name': test.name,
                      'status': test_status[0],
                      'output': output,
                      'real_time': datetime.datetime.now() - start_time,
                      'cpu_time': cpu_usage,
                      'mem_usage': peak_mem_usage})


def extra_timeout_output(test_name):
    # Run a heuristic script to get some output on a timeout.
    # We expect the script to be in the same directory as run_tests.py.
    here = os.path.dirname(os.path.abspath(__file__))
    command = [os.path.join(here, 'timeout_output'), test_name]
    try:
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (output, _) = process.communicate()
        return output.decode('utf8')
    except Exception as e:
        return ("Exception launching timeout_output: %s" % str(e))

# Print a status line.


def print_test_line_start(test_name):
    if sys.stdout.isatty():
        print("  Started %-25s " % (test_name + " ..."))
        sys.stdout.flush()


def print_test_line(test_name, color, status, real_time=None, cpu_time=None, mem=None):
    if mem is not None:
        # Report memory usage in gigabytes.
        mem = '%5.2fGB' % round(float(mem) / 1024 / 1024 / 1024, 2)

    if real_time is not None:
        # Format times as H:MM:SS; strip milliseconds for better printing.
        real_time = datetime.timedelta(seconds=int(real_time.total_seconds()))
        real_time = '%8s real' % real_time

    if cpu_time is not None:
        cpu_time = datetime.timedelta(seconds=int(cpu_time))
        cpu_time = '%8s cpu' % cpu_time

    extras = ', '.join(filter(None, [real_time, cpu_time, mem]))

    # Print status line.
    front = '  Finished %-25s ' % test_name
    status_str = status_name[status]
    if status is not PASSED:
        status_str += " *"
    print(front +
          output_color(color, "{:<{}} ".format(status_str, status_maxlen)) +
          ('(%s)' % extras if extras else ''))
    sys.stdout.flush()

#
# Recursive glob
#
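# Any file named "extra_tests" found during the walk is read as a list of
# additional directories (one per line, relative to that file's location)
# to search recursively as well.
#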


def rglob(base_dir, pattern):
    matches = []
    extras = []
    for root, dirnames, filenames in os.walk(base_dir):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, 'extra_tests'):
            f = os.path.join(root, filename)
            with open(f) as extra_file:
                extras.extend([os.path.join(root, l.strip())
                               for l in extra_file if l.strip()])
    matches.extend([f for e in extras for f in rglob(e, pattern)])
    return sorted(set(matches))

# Print info about tests.


def print_tests(msg, tests, verbose):
    if verbose:
        print('%d tests %s:' % (len(tests), msg))
    for t in tests:
        print(t.name)
        if verbose:
            print('  #> cd ' + t.cwd)
            print('  #> ' + t.command)
            print('  -- depends: %s' % list(t.depends))


def print_test_deps(tests):
    print('digraph "tests" {')
    for t in tests:
        for t_dep in t.depends:
            print('  "%s" -> "%s";' % (t_dep, t.name))
    print('}')

#
# Run tests.
#


def main():
    # Parse arguments.
    parser = argparse.ArgumentParser(description="Parallel Regression Framework",
                                     epilog="RUN_TESTS_DEFAULT can be used to override the default set of tests")
    parser.add_argument("-d", "--directory", action="store",
                        metavar="DIR", help="directory to search for test files",
                        default=os.getcwd())
    parser.add_argument("--brief", action="store_true",
                        help="don't print failure logs at end of test run")
    parser.add_argument("-f", "--fail-fast", action="store_true",
                        help="exit once the first failure is detected")
    parser.add_argument("-j", "--jobs", type=int, default=1,
                        help="number of tests to run in parallel")
    parser.add_argument("-l", "--list", action="store_true",
                        help="list all known tests (-v for details)")
    parser.add_argument("-L", "--dry-run", action="store_true",
                        help="list tests to be run (-v for details)")
    parser.add_argument("--no-dependencies", action="store_true",
                        help="don't check for dependencies when running specific tests")
    parser.add_argument("-x", "--exclude", action="append", metavar="TEST", default=[],
                        help="exclude the given test; tests depending on it may still run")
    parser.add_argument("-r", "--remove", action="append", metavar="TEST", default=[],
                        help="remove the given test and tests that depend on it")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="print test output or list more details")
    parser.add_argument("--dot", action="store_true",
                        help="for -l or -L, output test dependencies in GraphViz format")
    parser.add_argument("--junit-report", metavar="FILE",
                        help="write JUnit-style test report")
    parser.add_argument("--stuck-timeout", type=int, default=600, metavar='N',
                        help="timeout tests if not using CPU for N seconds (default: 600)")
    timeout_mod_args = parser.add_mutually_exclusive_group()
    timeout_mod_args.add_argument("--scale-timeouts", type=float, default=1, metavar='N',
                                  help="multiply test timeouts by N (e.g. 2 provides twice as much time)")
    timeout_mod_args.add_argument("--no-timeouts", action="store_true",
                                  help="do not enforce any test timeouts")
    parser.add_argument("--grace-period", type=float, default=5, metavar='N',
                        help="interrupt over-time processes N seconds before killing them (default: 5)")
    parser.add_argument("tests", metavar="TESTS",
                        help="select these tests to run (defaults to all tests)",
                        nargs="*")
    args = parser.parse_args()

    if args.jobs < 1:
        parser.error("Number of parallel jobs must be at least 1")

    if args.scale_timeouts <= 0:
        parser.error("--scale-timeouts value must be greater than 0")

    # Search for test files.
    test_xml = sorted(rglob(args.directory, "tests.xml"))
    test_info = testspec.process_test_files(test_xml)
    all_tests = test_info.tests

    # List test names if requested.
    if args.list:
        if args.dot:
            print_test_deps(all_tests)
        else:
            print_tests('total', all_tests, args.verbose)
        sys.exit(0)

    # Calculate which tests should be run.
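    # An empty command-line selection falls back to the RUN_TESTS_DEFAULT
    # environment variable; if that is empty too, all tests are run.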
    desired_names = set(args.tests) or set(os.environ.get('RUN_TESTS_DEFAULT', '').split())
    bad_names = desired_names - set([t.name for t in all_tests])
    if bad_names:
        parser.error("These tests are requested, but do not exist: %s" %
                     (", ".join(sorted(bad_names))))

    def get_tests(names):
        '''Given a set of names, return the corresponding set of Tests.'''
        return {t for t in all_tests if t.name in names}

    def add_deps(x):
        '''Given a set of Tests, add all dependencies to it.'''
        x.update({t for w in x for t in add_deps(get_tests(w.depends))})
        return x

    if desired_names:
        tests_to_run_set = get_tests(desired_names)
    else:
        tests_to_run_set = set(all_tests)

    # Are we skipping dependencies? If not, add them.
    if not args.no_dependencies:
        add_deps(tests_to_run_set)

    # Preserve the order of the original set of Tests.
    tests_to_run = [t for t in all_tests if t in tests_to_run_set]

    # Process --exclude'd tests.
    exclude_tests = set(args.exclude)
    tests_to_run = [t for t in tests_to_run if t.name not in exclude_tests]

    # Process --remove'd tests transitively.
    remove_trans = frozenset.union(frozenset(), *[
        test_info.reverse_deps.rtrans(r, lambda x: frozenset())
        for r in args.remove])
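    # (reverse_deps.rtrans walks the reverse-dependency graph transitively,
    # so removing a test also removes every test that depends on it.)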
    conflict_names = {t for t in desired_names if t not in exclude_tests and t in remove_trans}
    if conflict_names:
        # It's unclear what we should do if a selected test has been --removed.
        # For now, just bail out.
        parser.error(
            "Cannot run these tests because they depend on removed tests: %s\n" %
            ", ".join(sorted(conflict_names))
            + "(The removed tests are: %s)" %
            ", ".join(args.remove))
    tests_to_run = [t for t in tests_to_run if t.name not in remove_trans]

    bad_names = set.union(exclude_tests, set(args.remove)) - {t.name for t in all_tests}
    if bad_names:
        sys.stderr.write("Warning: These tests are excluded/removed, but do not exist: %s\n" %
                         (", ".join(sorted(bad_names))))

    if args.dry_run:
        if args.dot:
            print_test_deps(tests_to_run)
        else:
            print_tests('selected', tests_to_run, args.verbose)
        sys.exit(0)

    # Run the tests.
    print("Running %d test(s)..." % len(tests_to_run))
    failed_tests = set()
    passed_tests = set()
    test_results = {}

    # Use a simple list to store the pending queue. We track the dependencies separately.
    tests_queue = tests_to_run[:]
    # Current jobs.
    current_jobs = {}
    # Newly finished jobs.
    status_queue = Queue.Queue()

    # If run from a tty and -v is off, we also track
    # current jobs on the bottom line of the tty.
    # We cache this status line to help us wipe it later.
    tty_status_line = [""]

    def wipe_tty_status():
        if tty_status_line[0]:
            print(" " * len(tty_status_line[0]) + "\r", end="")
            sys.stdout.flush()
            tty_status_line[0] = ""

    # Handle --fail-fast.
    kill_switch = threading.Event()

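    # Main scheduling loop: start at most --jobs tests whose dependencies
    # have all passed, then drain the status queue for completed tests,
    # repeating until both the pending queue and the running set are empty.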
    while tests_queue or current_jobs:
        # Update status line with pending jobs.
        if current_jobs and sys.stdout.isatty() and not args.verbose:
            tty_status_line[0] = "Running: " + ", ".join(sorted(current_jobs.keys()))
            print(tty_status_line[0] + "\r", end="")
            sys.stdout.flush()

        # Check if we have a job slot.
        if len(current_jobs) < args.jobs:
            # Find the first non-blocked test and handle it.
            for i, t in enumerate(tests_queue):
                # Leave out dependencies that were excluded at the command line.
                real_depends = t.depends & set(u.name for u in tests_to_run)
                # Non-blocked but depends on a failed test. Remove it.
                if (len(real_depends & failed_tests) > 0
                    # --fail-fast triggered, fail all subsequent tests
                        or kill_switch.is_set()):

                    wipe_tty_status()
                    print_test_line(t.name, ANSI_YELLOW, SKIPPED)
                    failed_tests.add(t.name)
                    del tests_queue[i]
                    break
                # Non-blocked and open. Start it.
                if real_depends.issubset(passed_tests):
                    test_thread = threading.Thread(
                        target=run_test, name=t.name,
                        args=(t, status_queue, kill_switch,
                              args.verbose, args.stuck_timeout,
                              args.scale_timeouts, not args.no_timeouts,
                              args.grace_period))
                    wipe_tty_status()
                    print_test_line_start(t.name)
                    test_thread.start()
                    current_jobs[t.name] = test_thread
                    del tests_queue[i]
                    break

        # Wait for jobs to complete. The short timeout doubles as the polling
        # interval of the scheduling loop above.
        try:
            while True:
                info = status_queue.get(block=True, timeout=0.1337)
                name, status = info['name'], info['status']

                test_results[name] = info
                del current_jobs[name]

                # Print result.
                wipe_tty_status()
                if status is PASSED:
                    passed_tests.add(name)
                    colour = ANSI_GREEN
                elif status is CANCELLED:
                    failed_tests.add(name)
                    colour = ANSI_YELLOW
                else:
                    failed_tests.add(name)
                    colour = ANSI_RED
                print_test_line(name, colour, status,
                                real_time=info['real_time'],
                                cpu_time=info['cpu_time'],
                                mem=info['mem_usage'])
                if args.fail_fast and status is not PASSED:
                    # Notify current threads and future tests.
                    kill_switch.set()
        except Queue.Empty:
            pass
    wipe_tty_status()

    # Print failure summaries unless requested not to.
    if not args.brief and len(failed_tests) > 0:
        LINE_LIMIT = 40

        def print_line():
            print("-" * 72)
        print("")
        # Sort failed_tests according to tests_to_run.
        for t in tests_to_run:
            if t.name not in failed_tests:
                continue
            if t.name not in test_results:
                continue

            print_line()
            print("TEST %s: %s" % (status_name[test_results[t.name]['status']], t.name))
            print("")
            output = test_results[t.name]['output'].rstrip("\n")
            if output:
                lines = output.split("\n") + ['']
            else:
                lines = ['(no output)']
            if len(lines) > LINE_LIMIT:
                lines = ["..."] + lines[-LINE_LIMIT:]
            print("\n".join(lines))
        print_line()

    # Print JUnit-style test report.
    # Reference: https://github.com/notnoop/hudson-tools/blob/master/toJunitXML/sample-junit.xml
    if args.junit_report is not None:
        testsuite = ET.Element("testsuite")
        for t in tests_to_run:
            if t.name not in test_results:
                # Test was skipped.
                testcase = ET.SubElement(testsuite, "testcase",
                                         classname="", name=t.name, time="0")
                if t.depends & failed_tests:
                    ET.SubElement(testcase, "error", type="error").text = (
                        "Failed dependencies: " + ', '.join(t.depends & failed_tests))
                else:
                    ET.SubElement(testcase, "error", type="error").text = "Cancelled"
            else:
                info = test_results[t.name]
                testcase = ET.SubElement(testsuite, "testcase",
                                         classname="", name=t.name, time='%f' % info['real_time'].total_seconds())
                if info['status'] is PASSED:
                    if not args.verbose:
                        ET.SubElement(testcase, "system-out").text = info['output']
                elif info['status'] is FAILED:
                    ET.SubElement(testcase, "failure", type="failure").text = info['output']
                elif info['status'] in (TIMEOUT, CPU_TIMEOUT):
                    ET.SubElement(testcase, "error", type="timeout").text = info['output']
                elif info['status'] is STUCK:
                    ET.SubElement(testcase, "error", type="stuck").text = info['output']
                elif info['status'] is CANCELLED:
                    ET.SubElement(testcase, "error", type="cancelled").text = info['output']
                elif info['status'] is ERROR:
                    ET.SubElement(testcase, "error", type="error").text = info['output']
                else:
                    warnings.warn("Unknown status code: {}".format(info['status']))
                    ET.SubElement(testcase, "error", type="unknown").text = info['output']

        ET.ElementTree(testsuite).write(args.junit_report)

    # Print summary.
    print(("\n\n"
           + output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
          % (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
    if len(failed_tests) > 0:
        print(output_color(ANSI_RED, "Tests failed.") + "\n")
        if kill_switch.is_set():
            print("Exiting early due to --fail-fast.")
        sys.exit(1)
    else:
        print(output_color(ANSI_GREEN, "All tests passed.") + "\n")
        sys.exit(0)


if __name__ == "__main__":
    main()