1# -*- Mode: cperl; cperl-indent-level: 4 -*-
2
3package Test::Harness;
4
5require 5.00405;
6use Test::Harness::Straps;
7use Test::Harness::Assert;
8use Exporter;
9use Benchmark;
10use Config;
11use strict;
12
13
14use vars qw(
15    $VERSION
16    @ISA @EXPORT @EXPORT_OK
17    $Verbose $Switches $Debug
18    $verbose $switches $debug
19    $Columns
20    $Timer
21    $ML $Last_ML_Print
22    $Strap
23    $has_time_hires
24);
25
# Probe for Time::HiRes at load time so per-test timing can use
# sub-second resolution when available.  $has_time_hires records
# whether the import succeeded; the string eval keeps a missing
# Time::HiRes from being a fatal error.
BEGIN {
    eval q{use Time::HiRes 'time'};
    $has_time_hires = !$@;
}
30
31=head1 NAME
32
33Test::Harness - Run Perl standard test scripts with statistics
34
35=head1 VERSION
36
37Version 2.64
38
39=cut
40
$VERSION = '2.64';

# Backwards compatibility for exportable variable names.
# Glob aliasing makes $verbose/$switches/$debug the same variables
# as $Verbose/$Switches/$Debug.
*verbose  = *Verbose;
*switches = *Switches;
*debug    = *Debug;

# Let running test scripts detect that they are under the harness.
$ENV{HARNESS_ACTIVE} = 1;
$ENV{HARNESS_VERSION} = $VERSION;

END {
    # For VMS.
    delete $ENV{HARNESS_ACTIVE};
    delete $ENV{HARNESS_VERSION};
}

# Directory watched for files leaked by tests (see HARNESS_FILELEAK_IN_DIR).
my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};
58
59# Stolen from Params::Util
# Stolen from Params::Util: returns its argument when it is a plain
# (non-reference) string that looks like a valid Perl class name,
# otherwise returns undef.
sub _CLASS {
    my $candidate = $_[0];
    return undef unless defined $candidate and !ref $candidate;
    return $candidate =~ m/^[^\W\d]\w*(?:::\w+)*$/s ? $candidate : undef;
}
63
# Strap Overloading
# Honor HARNESS_STRAP_CLASS (a class name or a .pm filename) to let the
# user substitute a Test::Harness::Straps subclass for TAP parsing.
if ( $ENV{HARNESS_STRAPS_CLASS} ) {
    die 'Set HARNESS_STRAP_CLASS, singular, not HARNESS_STRAPS_CLASS';
}
my $HARNESS_STRAP_CLASS  = $ENV{HARNESS_STRAP_CLASS} || 'Test::Harness::Straps';
if ( $HARNESS_STRAP_CLASS =~ /\.pm$/ ) {
    # "Class" is actually a filename, that should return the
    # class name as its true return value.
    $HARNESS_STRAP_CLASS = require $HARNESS_STRAP_CLASS;
    if ( !_CLASS($HARNESS_STRAP_CLASS) ) {
        die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' is not a valid class name";
    }
}
else {
    # It is a class name within the current @INC
    if ( !_CLASS($HARNESS_STRAP_CLASS) ) {
        die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' is not a valid class name";
    }
    eval "require $HARNESS_STRAP_CLASS";
    die $@ if $@;
}
# Whatever was loaded must still honor the Straps interface.
if ( !$HARNESS_STRAP_CLASS->isa('Test::Harness::Straps') ) {
    die "HARNESS_STRAP_CLASS '$HARNESS_STRAP_CLASS' must be a Test::Harness::Straps subclass";
}

# The single strap instance used for all TAP parsing in this process.
$Strap = $HARNESS_STRAP_CLASS->new;

# Accessor for the shared strap object.
sub strap { return $Strap };
92
@ISA = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw(&execute_tests $verbose $switches);

# Runtime configuration, overridable from the environment.
$Verbose  = $ENV{HARNESS_VERBOSE} || 0;
$Debug    = $ENV{HARNESS_DEBUG} || 0;
$Switches = '-w';
$Columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
$Columns--;             # Some shells have trouble with a full line of text.
$Timer    = $ENV{HARNESS_TIMER} || 0;
103
104=head1 SYNOPSIS
105
106  use Test::Harness;
107
108  runtests(@test_files);
109
110=head1 DESCRIPTION
111
112B<STOP!> If all you want to do is write a test script, consider
113using Test::Simple.  Test::Harness is the module that reads the
114output from Test::Simple, Test::More and other modules based on
115Test::Builder.  You don't need to know about Test::Harness to use
116those modules.
117
118Test::Harness runs tests and expects output from the test in a
119certain format.  That format is called TAP, the Test Anything
120Protocol.  It is defined in L<Test::Harness::TAP>.
121
122C<Test::Harness::runtests(@tests)> runs all the testscripts named
123as arguments and checks standard output for the expected strings
124in TAP format.
125
126The F<prove> utility is a thin wrapper around Test::Harness.
127
128=head2 Taint mode
129
130Test::Harness will honor the C<-T> or C<-t> in the #! line on your
131test files.  So if you begin a test with:
132
133    #!perl -T
134
135the test will be run with taint mode on.
136
137=head2 Configuration variables.
138
139These variables can be used to configure the behavior of
140Test::Harness.  They are exported on request.
141
142=over 4
143
144=item C<$Test::Harness::Verbose>
145
146The package variable C<$Test::Harness::Verbose> is exportable and can be
147used to let C<runtests()> display the standard output of the script
148without altering the behavior otherwise.  The F<prove> utility's C<-v>
149flag will set this.
150
151=item C<$Test::Harness::switches>
152
153The package variable C<$Test::Harness::switches> is exportable and can be
154used to set perl command line options used for running the test
155script(s). The default value is C<-w>. It overrides C<HARNESS_PERL_SWITCHES>.
156
157=item C<$Test::Harness::Timer>
158
159If set to true, and C<Time::HiRes> is available, print elapsed seconds
160after each test file.
161
162=back
163
164
165=head2 Failure
166
167When tests fail, analyze the summary report:
168
169  t/base..............ok
170  t/nonumbers.........ok
171  t/ok................ok
172  t/test-harness......ok
173  t/waterloo..........dubious
174          Test returned status 3 (wstat 768, 0x300)
175  DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
176          Failed 10/20 tests, 50.00% okay
177  Failed Test  Stat Wstat Total Fail  List of Failed
178  ---------------------------------------------------------------
179  t/waterloo.t    3   768    20   10  1 3 5 7 9 11 13 15 17 19
180  Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.
181
182Everything passed but F<t/waterloo.t>.  It failed 10 of 20 tests and
183exited with non-zero status indicating something dubious happened.
184
185The columns in the summary report mean:
186
187=over 4
188
189=item B<Failed Test>
190
191The test file which failed.
192
193=item B<Stat>
194
195If the test exited with non-zero, this is its exit status.
196
197=item B<Wstat>
198
199The wait status of the test.
200
201=item B<Total>
202
203Total number of tests expected to run.
204
205=item B<Fail>
206
207Number which failed, either from "not ok" or because they never ran.
208
209=item B<List of Failed>
210
211A list of the tests which failed.  Successive failures may be
212abbreviated (ie. 15-20 to indicate that tests 15, 16, 17, 18, 19 and
21320 failed).
214
215=back
216
217
218=head1 FUNCTIONS
219
220The following functions are available.
221
222=head2 runtests( @test_files )
223
224This runs all the given I<@test_files> and divines whether they passed
225or failed based on their output to STDOUT (details above).  It prints
226out each individual test which failed along with a summary report and
how long it all took.
228
229It returns true if everything was ok.  Otherwise it will C<die()> with
230one of the messages in the DIAGNOSTICS section.
231
232=cut
233
# Run every test file given, print the summary report, and die with a
# failure summary unless the whole run passed.  Returns true on success.
sub runtests {
    my @tests = @_;

    # Neutralize output separators so report formatting is predictable.
    local( $\, $, );

    my( $tot, $failedtests, $todo_passed ) = execute_tests( tests => \@tests );
    print get_results( $tot, $failedtests, $todo_passed );

    my $all_passed = _all_ok($tot);

    # Sanity check: overall success must agree with the failure list.
    assert( ( $all_passed xor keys %$failedtests ),
            q{ok status jives with $failedtests} );

    unless ($all_passed) {
        my $failed_subtests = $tot->{max} - $tot->{ok};
        die "Failed $tot->{bad}/$tot->{tests} test programs. " .
            "$failed_subtests/$tot->{max} subtests failed.\n";
    }

    return $all_passed;
}
254
255# my $ok = _all_ok(\%tot);
256# Tells you if this test run is overall successful or not.
257
# my $ok = _all_ok(\%tot);
# A run is successful when no test scripts failed and at least one
# test was actually run or deliberately skipped.
sub _all_ok {
    my $tot = shift;

    return 0 if $tot->{bad};
    return ( $tot->{max} || $tot->{skipped} ) ? 1 : 0;
}
263
264# Returns all the files in a directory.  This is shorthand for backwards
265# compatibility on systems where C<glob()> doesn't work right.
266
# Returns all the files in a directory.  This is shorthand for backwards
# compatibility on systems where C<glob()> doesn't work right.
# Returns the empty list when the directory cannot be opened, instead of
# warning from readdir() on an invalid handle as it previously did.
sub _globdir {
    local *DIRH;

    opendir DIRH, shift or return ();
    my @f = readdir DIRH;
    closedir DIRH;

    return @f;
}
276
277=head2 execute_tests( tests => \@test_files, out => \*FH )
278
279Runs all the given C<@test_files> (just like C<runtests()>) but
280doesn't generate the final report.  During testing, progress
281information will be written to the currently selected output
282filehandle (usually C<STDOUT>), or to the filehandle given by the
283C<out> parameter.  The I<out> is optional.
284
285Returns a list of two values, C<$total> and C<$failed>, describing the
286results.  C<$total> is a hash ref summary of all the tests run.  Its
287keys and values are this:
288
289    bonus           Number of individual todo tests unexpectedly passed
290    max             Number of individual tests ran
291    ok              Number of individual tests passed
292    sub_skipped     Number of individual tests skipped
293    todo            Number of individual todo tests
294
295    files           Number of test files ran
296    good            Number of test files passed
297    bad             Number of test files failed
298    tests           Number of test files originally given
299    skipped         Number of test files skipped
300
301If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
302got a successful test.
303
304C<$failed> is a hash ref of all the test scripts that failed.  Each key
305is the name of a test script, each value is another hash representing
306how that script failed.  Its keys are these:
307
308    name        Name of the test which failed
309    estat       Script's exit value
310    wstat       Script's wait status
311    max         Number of individual tests
312    failed      Number which failed
313    canon       List of tests which failed (as string).
314
315C<$failed> should be empty if everything passed.
316
317=cut
318
# Runs every test file through $Strap, printing per-file progress to the
# selected (or supplied) filehandle, and accumulates run-wide totals.
# Returns (\%tot, \%failedtests, \%todo_passed) as documented in the POD
# above.  See that POD for the meaning of each key.
sub execute_tests {
    my %args = @_;
    my @tests = @{$args{tests}};
    my $out = $args{out} || select();

    # We allow filehandles that are symbolic refs
    no strict 'refs';
    _autoflush($out);
    _autoflush(\*STDERR);

    my %failedtests;
    my %todo_passed;

    # Test-wide totals.
    my(%tot) = (
                bonus    => 0,
                max      => 0,
                ok       => 0,
                files    => 0,
                bad      => 0,
                good     => 0,
                tests    => scalar @tests,
                sub_skipped  => 0,
                todo     => 0,
                skipped  => 0,
                bench    => 0,
               );

    # Snapshot of the watched directory for file-leak detection.
    my @dir_files;
    @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
    my $run_start_time = new Benchmark;

    my $width = _leader_width(@tests);
    foreach my $tfile (@tests) {
        $Last_ML_Print = 0;  # so each test prints at least once
        my($leader, $ml) = _mk_leader($tfile, $width);
        local $ML = $ml;

        print $out $leader;

        $tot{files}++;

        $Strap->{_seen_header} = 0;
        if ( $Test::Harness::Debug ) {
            print $out "# Running: ", $Strap->_command_line($tfile), "\n";
        }
        my $test_start_time = $Timer ? time : 0;
        # analyze_file() runs the script and parses its TAP output; on
        # failure to even start it, warn and move on to the next file.
        my $results = $Strap->analyze_file($tfile) or
          do { warn $Strap->{error}, "\n";  next };
        # Format elapsed time: milliseconds with Time::HiRes, whole
        # seconds (or "<1") without it, empty string when timing is off.
        my $elapsed;
        if ( $Timer ) {
            $elapsed = time - $test_start_time;
            if ( $has_time_hires ) {
                $elapsed = sprintf( " %8d ms", $elapsed*1000 );
            }
            else {
                $elapsed = sprintf( " %8s s", $elapsed ? $elapsed : "<1" );
            }
        }
        else {
            $elapsed = "";
        }

        # state of the current test.
        my @failed = grep { !$results->details->[$_-1]{ok} }
                     1..@{$results->details};
        my @todo_pass = grep { $results->details->[$_-1]{actual_ok} &&
                               $results->details->[$_-1]{type} eq 'todo' }
                        1..@{$results->details};

        my %test = (
            ok          => $results->ok,
            'next'      => $Strap->{'next'},
            max         => $results->max,
            failed      => \@failed,
            todo_pass   => \@todo_pass,
            todo        => $results->todo,
            bonus       => $results->bonus,
            skipped     => $results->skip,
            skip_reason => $results->skip_reason,
            skip_all    => $Strap->{skip_all},
            ml          => $ml,
        );

        $tot{bonus}       += $results->bonus;
        $tot{max}         += $results->max;
        $tot{ok}          += $results->ok;
        $tot{todo}        += $results->todo;
        $tot{sub_skipped} += $results->skip;

        my $estatus = $results->exit;
        my $wstatus = $results->wait;

        if ( $results->passing ) {
            # Passing file: report "ok", any skip/TODO notes, or "skipped".
            # XXX Combine these first two
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                if ($test{bonus}) {
                    my ($txt, $canon) = _canondetail($test{todo},0,'TODO passed',
                                                    @{$test{todo_pass}});
                    $todo_passed{$tfile} = {
                        canon   => $canon,
                        max     => $test{todo},
                        failed  => $test{bonus},
                        name    => $tfile,
                        estat   => '',
                        wstat   => '',
                    };

                    push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded\n$txt");
                }
                print $out "$test{ml}ok$elapsed\n        ".join(', ', @msg)."\n";
            }
            elsif ( $test{max} ) {
                print $out "$test{ml}ok$elapsed\n";
            }
            elsif ( defined $test{skip_all} and length $test{skip_all} ) {
                print $out "skipped\n        all skipped: $test{skip_all}\n";
                $tot{skipped}++;
            }
            else {
                print $out "skipped\n        all skipped: no reason given\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        }
        else {
            # List unrun tests as failures.
            if ($test{'next'} <= $test{max}) {
                push @{$test{failed}}, $test{'next'}..$test{max};
            }
            # List overruns as failures.
            else {
                my $details = $results->details;
                foreach my $overrun ($test{max}+1..@$details) {
                    next unless ref $details->[$overrun-1];
                    push @{$test{failed}}, $overrun
                }
            }

            # Classify the failure: abnormal exit, normal exit with bad
            # tests, or no TAP output at all.
            if ($wstatus) {
                $failedtests{$tfile} = _dubious_return(\%test, \%tot,
                                                       $estatus, $wstatus);
                $failedtests{$tfile}{name} = $tfile;
            }
            elsif ( $results->seen ) {
                if (@{$test{failed}} and $test{max}) {
                    my ($txt, $canon) = _canondetail($test{max},$test{skipped},'Failed',
                                                    @{$test{failed}});
                    print $out "$test{ml}$txt";
                    $failedtests{$tfile} = { canon   => $canon,
                                             max     => $test{max},
                                             failed  => scalar @{$test{failed}},
                                             name    => $tfile,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                else {
                    print $out "Don't know which tests failed: got $test{ok} ok, ".
                          "expected $test{max}\n";
                    $failedtests{$tfile} = { canon   => '??',
                                             max     => $test{max},
                                             failed  => '??',
                                             name    => $tfile,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                $tot{bad}++;
            }
            else {
                print $out "FAILED before any test output arrived\n";
                $tot{bad}++;
                $failedtests{$tfile} = { canon       => '??',
                                         max         => '??',
                                         failed      => '??',
                                         name        => $tfile,
                                         estat       => '',
                                         wstat       => '',
                                       };
            }
        }

        # Report any files that appeared in the watched directory since
        # the last check (HARNESS_FILELEAK_IN_DIR).
        if (defined $Files_In_Dir) {
            my @new_dir_files = _globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print $out "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    } # foreach test
    $tot{bench} = timediff(new Benchmark, $run_start_time);

    $Strap->_restore_PERL5LIB;

    return(\%tot, \%failedtests, \%todo_passed);
}
523
524# Turns on autoflush for the handle passed
# Turns on autoflush for the handle passed, restoring the previously
# selected default output handle afterwards.
sub _autoflush {
    my $handle   = shift;
    my $previous = select $handle;
    $| = 1;                 # enable autoflush on the now-selected handle
    select $previous;
}
531
532=for private _mk_leader
533
534    my($leader, $ml) = _mk_leader($test_file, $width);
535
536Generates the 't/foo........' leader for the given C<$test_file> as well
537as a similar version which will overwrite the current line (by use of
538\r and such).  C<$ml> may be empty if Test::Harness doesn't think you're
539on TTY.
540
541The C<$width> is the width of the "yada/blah.." string.
542
543=cut
544
# my($leader, $ml) = _mk_leader($test_file, $width);
# Builds the "t/foo........" progress leader for a test file, plus an
# overwriting variant ($ml, using \r) when attached to a terminal.
# $ml is empty when not on a TTY, when HARNESS_NOTTY is set, or in
# verbose mode.
sub _mk_leader {
    my( $test_name, $width ) = @_;
    chomp $test_name;
    $test_name =~ s/\.\w+$/./;    # collapse the file suffix to one dot

    if ( $^O eq 'VMS' ) {
        $test_name =~ s/^.*\.t\./\[.t./s;
    }

    my $padding = '.' x ( $width - length $test_name );
    my $leader  = $test_name . $padding;

    my $ml = "";
    if ( -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose ) {
        $ml = "\r" . ( ' ' x 77 ) . "\r$leader";
    }

    return( $leader, $ml );
}
562
563=for private _leader_width
564
565  my($width) = _leader_width(@test_files);
566
567Calculates how wide the leader should be based on the length of the
568longest test name.
569
570=cut
571
# my($width) = _leader_width(@test_files);
# Computes the leader width from the longest test name, subtracting the
# longest suffix (which _mk_leader collapses to a single dot).
sub _leader_width {
    my $longest_name   = 0;
    my $longest_suffix = 0;

    for my $test_file (@_) {
        my $suffix_len = $test_file =~ /\.(\w+)$/ ? length $1 : 0;
        my $name_len   = length $test_file;
        $longest_name   = $name_len   if $name_len   > $longest_name;
        $longest_suffix = $suffix_len if $suffix_len > $longest_suffix;
    }

    # + 3 : we want three dots between the test name and the "ok"
    return $longest_name + 3 - $longest_suffix;
}
585
# my $report = get_results(\%tot, \%failedtests, \%todo_passed);
# Builds the final summary report string from the totals returned by
# execute_tests(): the "All tests successful" line (plus a table of
# unexpectedly-passing TODOs), or the failure table built with the
# format strings from _create_fmts().  Dies when no tests ran or no
# output was ever seen.  Always ends with the Files/Tests/timing line.
sub get_results {
    my $tot = shift;
    my $failedtests = shift;
    my $todo_passed = shift;

    my $out = '';

    my $bonusmsg = _bonusmsg($tot);

    if (_all_ok($tot)) {
        $out .= "All tests successful$bonusmsg.\n";
        if ($tot->{bonus}) {
            my($fmt_top, $fmt) = _create_fmts("Passed TODO",$todo_passed);
            # Now write to formats
            $out .= swrite( $fmt_top );
            for my $script (sort keys %{$todo_passed||{}}) {
                my $Curtest = $todo_passed->{$script};
                $out .= swrite( $fmt, @{ $Curtest }{qw(name estat wstat max failed canon)} );
            }
        }
    }
    elsif (!$tot->{tests}){
        die "FAILED--no tests were run for some reason.\n";
    }
    elsif (!$tot->{max}) {
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    }
    else {
        my $subresults = sprintf( " %d/%d subtests failed.",
                              $tot->{max} - $tot->{ok}, $tot->{max} );

        my($fmt_top, $fmt1, $fmt2) = _create_fmts("Failed Test",$failedtests);

        # Now write to formats
        $out .= swrite( $fmt_top );
        for my $script (sort keys %$failedtests) {
            my $Curtest = $failedtests->{$script};
            $out .= swrite( $fmt1, @{ $Curtest }{qw(name estat wstat max failed canon)} );
            # $fmt2 holds the continuation line for overlong canon lists.
            $out .= swrite( $fmt2, $Curtest->{canon} );
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            $out .= "$bonusmsg.\n" if $bonusmsg;
            $out .= "Failed $tot->{bad}/$tot->{tests} test scripts.$subresults\n";
        }
    }

    $out .= sprintf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
    return $out;
}
639
# Formats @_ against a format picture string and returns the result,
# leaving the format accumulator ($^A) cleared afterwards.
sub swrite {
    my( $picture, @values ) = @_;
    $^A = '';                       # formline() appends to $^A
    formline( $picture, @values );
    ( my $formatted, $^A ) = ( $^A, '' );
    return $formatted;
}
648
649
# Dispatch table from strap event type to its handler sub.
my %Handlers = (
    header  => \&header_handler,
    test    => \&test_handler,
    bailout => \&bailout_handler,
);

$Strap->set_callback(\&strap_callback);

# Invoked by the strap for every line of TAP output: echo the raw line
# in verbose mode, then dispatch to the registered handler, if any,
# for this line type.
sub strap_callback {
    my( $self, $line, $type, $totals ) = @_;
    print $line if $Verbose;

    my $handler = $Handlers{$type} or return;
    $handler->( $self, $line, $type, $totals );
}
664
665
# Handles a TAP plan line ("1..M").  Warns on duplicate plans and on
# plans that appear in the middle of the test output.
sub header_handler {
    my( $self, $line, $type, $totals ) = @_;

    warn "Test header seen more than once!\n" if $self->{_seen_header};
    $self->{_seen_header}++;

    # A plan is only legal at the very start or very end of the stream.
    if ( $totals->seen and $totals->max < $totals->seen ) {
        warn "1..M can only appear at the beginning or end of tests\n";
    }
}
676
# Handles an individual "ok"/"not ok" TAP line: updates the progress
# display and complains when the test numbering is out of sequence.
sub test_handler {
    my( $self, $line, $type, $totals ) = @_;

    my $seen   = $totals->seen;
    my $next   = $self->{'next'};
    my $max    = $totals->max;
    my $detail = $totals->details->[-1];

    if ( !$detail->{ok} ) {
        # Failures always refresh the progress line.
        _print_ml("NOK $seen/$max");
    }
    else {
        _print_ml_less("ok $seen/$max");

        if ( $detail->{type} eq 'skip' ) {
            # Remember the first skip reason; if later skips differ,
            # fall back to a generic reason.
            $totals->set_skip_reason( $detail->{reason} )
              unless defined $totals->skip_reason;
            $totals->set_skip_reason( 'various reasons' )
              if $totals->skip_reason ne $detail->{reason};
        }
    }

    if ( $seen > $next ) {
        print "Test output counter mismatch [test $seen]\n";
    }
    elsif ( $seen < $next ) {
        print "Confused test output: test $seen answered after ".
              "test ", $next - 1, "\n";
    }
}
708
# Handles a TAP "Bail out!" line by aborting the entire run, including
# the bail-out reason when one was given.
sub bailout_handler {
    my( $self, $line, $type, $totals ) = @_;

    my $reason = $self->{bailout_reason};
    die "FAILED--Further testing stopped"
      . ( $reason ? ": $reason\n" : ".\n" );
}
715
716
# Prints $ML (the carriage-return overwrite prefix) followed by the
# arguments, but only when an overwrite prefix is active.
sub _print_ml {
    return unless $ML;
    print join( '', $ML, @_ );
}
720
721
# Print updates only once per second, to keep a fast test run from
# flooding the terminal with progress rewrites.
sub _print_ml_less {
    my $now = CORE::time;
    return if $Last_ML_Print == $now;

    _print_ml(@_);
    $Last_ML_Print = $now;
}
730
# Builds the trailing message fragment for the summary line, covering
# unexpectedly-passing TODO tests and skipped files/subtests.  Returns
# the empty string when there is nothing to report.
sub _bonusmsg {
    my($tot) = @_;

    my $msg = '';
    if ( $tot->{bonus} ) {
        my $plural = $tot->{bonus} > 1 ? 's' : '';
        $msg = " ($tot->{bonus} subtest$plural UNEXPECTEDLY SUCCEEDED)";
    }

    if ( $tot->{skipped} ) {
        $msg .= ", $tot->{skipped} test" . ( $tot->{skipped} != 1 ? 's' : '' );
        $msg .= " and $tot->{sub_skipped} subtest"
              . ( $tot->{sub_skipped} != 1 ? 's' : '' )
            if $tot->{sub_skipped};
        $msg .= ' skipped';
    }
    elsif ( $tot->{sub_skipped} ) {
        $msg .= ", $tot->{sub_skipped} subtest"
              . ( $tot->{sub_skipped} != 1 ? 's' : '' )
              . ' skipped';
    }

    return $msg;
}
755
756# Test program go boom.
# Test program go boom.
# _dubious_return(\%test, \%tot, $estatus, $wstatus)
# Reports a test script that exited abnormally (non-zero wait status),
# prints the "dubious" diagnostics, bumps $tot->{bad}, and returns the
# failure-record hashref for %failedtests (the caller fills in {name}).
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;

    my $failed = '??';
    my $canon  = '??';

    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n",
           $wstatus,$wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
            print "\tafter all the subtests completed successfully\n";
            $failed = 0;        # But we do not set $canon!
        }
        else {
            # Anything not yet run counts as failed, too.
            push @{$test->{failed}}, $test->{'next'}..$test->{max};
            $failed = @{$test->{failed}};
            (my $txt, $canon) = _canondetail($test->{max},$test->{skipped},'Failed',@{$test->{failed}});
            print "DIED. ",$txt;
        }
    }

    return { canon => $canon,  max => $test->{max} || '??',
             failed => $failed,
             estat => $estatus, wstat => $wstatus,
           };
}
788
789
# _create_fmts($failed_str, \%failedtests)
# Builds the format picture strings for the failure/TODO summary table:
# the literal header+rule ($fmt_top), the main row format ($fmt1), and
# the continuation-row format for overlong canon lists ($fmt2).  Column
# widths are derived from the longest test name; may adjust the global
# $Columns when the table cannot fit.
sub _create_fmts {
    my $failed_str = shift;
    my $failedtests = shift;

    # "Passed TODO" tables label the count columns differently.
    my ($type) = split /\s/,$failed_str;
    my $short = substr($type,0,4);
    my $total = $short eq 'Pass' ? 'TODOs' : 'Total';
    my $middle_str = " Stat Wstat $total $short  ";
    my $list_str = "List of $type";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    # Shrink the name column (and, as a last resort, widen $Columns)
    # until the list column has at least its header's width.
    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top =   sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n";

    # "@<<<" = left-justified field, "@>>>" = right-justified,
    # "^<<<" = fill field continued on following lines (see perlform).
    my $fmt1 =  "@" . "<" x ($max_namelen - 1)
              . "  @>> @>>>> @>>>> @>>>  "
              . "^" . "<" x ($list_len - 1) . "\n";
    my $fmt2 =  "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n";

    return($fmt_top, $fmt1, $fmt2);
}
831
# _canondetail($max, $skipped, $type, @failed_test_numbers)
# Collapses a list of test numbers into canonical ranges (e.g. 1-3, 5)
# and builds the human-readable failure/TODO detail text.  Returns
# ($text, $canon) where $canon is the space-separated range string used
# in the summary table.
sub _canondetail {
    my $max = shift;
    my $skipped = shift;
    my $type = shift;
    my @detail = @_;
    # Sort numerically and drop duplicates before range-collapsing.
    my %seen;
    @detail = sort {$a <=> $b} grep !$seen{$_}++, @detail;
    my $detail = @detail;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @detail;
    my $canon;
    my $uc_type = uc($type);
    if (@detail) {
        for (@detail, $detail[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                # Gap found (or the sentinel repeat): close the current run.
                push @canon, ($min == $last) ? $last : "$min-$last";
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "$uc_type tests @canon\n";
        $canon = join ' ', @canon;
    }
    else {
        # Only a single test number; no ranges to build.
        push @result, "$uc_type test $last\n";
        $canon = $last;
    }

    # TODO summaries stop here; failure summaries get the stats line too.
    return (join("", @result), $canon)
        if $type=~/todo/i;
    push @result, "\t$type $detail/$max tests, ";
    if ($max) {
	push @result, sprintf("%.2f",100*(1-$detail/$max)), "% okay";
    }
    else {
	push @result, "?% okay";
    }
    my $ender = 's' x ($skipped > 1);
    if ($skipped) {
        my $good = $max - $detail - $skipped;
	my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
	if ($max) {
	    my $goodper = sprintf("%.2f",100*($good/$max));
	    $skipmsg .= "$goodper%)";
        }
        else {
	    $skipmsg .= "?%)";
	}
	push @result, $skipmsg;
    }
    push @result, "\n";
    my $txt = join "", @result;
    return ($txt, $canon);
}
889
8901;
891__END__
892
893
894=head1 EXPORT
895
896C<&runtests> is exported by Test::Harness by default.
897
898C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
899exported upon request.
900
901=head1 DIAGNOSTICS
902
903=over 4
904
=item C<All tests successful.\nFiles=%d, Tests=%d, %s>
906
907If all tests are successful some statistics about the performance are
908printed.
909
910=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>
911
912For any single script that has failing subtests statistics like the
913above are printed.
914
915=item C<Test returned status %d (wstat %d)>
916
917Scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
918and C<$?> are printed in a message similar to the above.
919
920=item C<Failed 1 test, %.2f%% okay. %s>
921
922=item C<Failed %d/%d tests, %.2f%% okay. %s>
923
924If not all tests were successful, the script dies with one of the
925above messages.
926
927=item C<FAILED--Further testing stopped: %s>
928
929If a single subtest decides that further testing will not make sense,
930the script dies with this message.
931
932=back
933
934=head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS
935
936Test::Harness sets these before executing the individual tests.
937
938=over 4
939
940=item C<HARNESS_ACTIVE>
941
942This is set to a true value.  It allows the tests to determine if they
943are being executed through the harness or by any other means.
944
945=item C<HARNESS_VERSION>
946
947This is the version of Test::Harness.
948
949=back
950
951=head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS
952
953=over 4
954
955=item C<HARNESS_COLUMNS>
956
957This value will be used for the width of the terminal. If it is not
958set then it will default to C<COLUMNS>. If this is not set, it will
959default to 80. Note that users of Bourne-sh based shells will need to
960C<export COLUMNS> for this module to use that variable.
961
962=item C<HARNESS_COMPILE_TEST>
963
964When true it will make harness attempt to compile the test using
965C<perlcc> before running it.
966
967B<NOTE> This currently only works when sitting in the perl source
968directory!
969
970=item C<HARNESS_DEBUG>
971
972If true, Test::Harness will print debugging information about itself as
973it runs the tests.  This is different from C<HARNESS_VERBOSE>, which prints
974the output from the test being run.  Setting C<$Test::Harness::Debug> will
975override this, or you can use the C<-d> switch in the F<prove> utility.
976
977=item C<HARNESS_FILELEAK_IN_DIR>
978
979When set to the name of a directory, harness will check after each
980test whether new files appeared in that directory, and report them as
981
982  LEAKED FILES: scr.tmp 0 my.db
983
984If relative, directory name is with respect to the current directory at
985the moment runtests() was called.  Putting absolute path into
986C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.
987
988=item C<HARNESS_NOTTY>
989
990When set to a true value, forces it to behave as though STDOUT were
991not a console.  You may need to set this if you don't want harness to
992output more frequent progress messages using carriage returns.  Some
993consoles may not handle carriage returns properly (which results in a
994somewhat messy output).
995
996=item C<HARNESS_PERL>
997
998Usually your tests will be run by C<$^X>, the currently-executing Perl.
999However, you may want to have it run by a different executable, such as
1000a threading perl, or a different version.
1001
1002If you're using the F<prove> utility, you can use the C<--perl> switch.
1003
1004=item C<HARNESS_PERL_SWITCHES>
1005
1006Its value will be prepended to the switches used to invoke perl on
1007each test.  For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
1008run all tests with all warnings enabled.
1009
1010=item C<HARNESS_TIMER>
1011
1012Setting this to true will make the harness display the number of
1013milliseconds each test took.  You can also use F<prove>'s C<--timer>
1014switch.
1015
1016=item C<HARNESS_VERBOSE>
1017
1018If true, Test::Harness will output the verbose results of running
1019its tests.  Setting C<$Test::Harness::verbose> will override this,
1020or you can use the C<-v> switch in the F<prove> utility.
1021
1026=item C<HARNESS_STRAP_CLASS>
1027
1028Defines the Test::Harness::Straps subclass to use.  The value may either
1029be a filename or a class name.
1030
If C<HARNESS_STRAP_CLASS> is a class name, the class must be in C<@INC>
like any other class.

If C<HARNESS_STRAP_CLASS> is a filename, the F<.pm> file must return the
name of the class, instead of the canonical "1".
1036
1037=back
1038
1039=head1 EXAMPLE
1040
Here's how Test::Harness tests itself:
1042
1043  $ cd ~/src/devel/Test-Harness
1044  $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
1045    $verbose=0; runtests @ARGV;' t/*.t
1046  Using /home/schwern/src/devel/Test-Harness/blib
1047  t/base..............ok
1048  t/nonumbers.........ok
1049  t/ok................ok
1050  t/test-harness......ok
1051  All tests successful.
1052  Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
1053
1054=head1 SEE ALSO
1055
1056The included F<prove> utility for running test scripts from the command line,
1057L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
1058the underlying timing routines, and L<Devel::Cover> for test coverage
1059analysis.
1060
1061=head1 TODO
1062
1063Provide a way of running tests quietly (ie. no printing) for automated
1064validation of tests.  This will probably take the form of a version
1065of runtests() which rather than printing its output returns raw data
1066on the state of the tests.  (Partially done in Test::Harness::Straps)
1067
1068Document the format.
1069
1070Fix HARNESS_COMPILE_TEST without breaking its core usage.
1071
1072Figure a way to report test names in the failure summary.
1073
1074Rework the test summary so long test names are not truncated as badly.
1075(Partially done with new skip test styles)
1076
1077Add option for coverage analysis.
1078
1079Trap STDERR.
1080
1081Implement Straps total_results()
1082
1083Remember exit code
1084
1085Completely redo the print summary code.
1086
Straps->analyze_file() is not taint clean; we don't know if it can be.
1088
1089Fix that damned VMS nit.
1090
1091Add a test for verbose.
1092
1093Change internal list of test results to a hash.
1094
1095Fix stats display when there's an overrun.
1096
1097Fix so perls with spaces in the filename work.
1098
Keep whittling away at _run_all_tests().
1100
1101Clean up how the summary is printed.  Get rid of those damned formats.
1102
1103=head1 BUGS
1104
1105Please report any bugs or feature requests to
1106C<bug-test-harness at rt.cpan.org>, or through the web interface at
1107L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>.
1108I will be notified, and then you'll automatically be notified of progress on
1109your bug as I make changes.
1110
1111=head1 SUPPORT
1112
1113You can find documentation for this module with the F<perldoc> command.
1114
1115    perldoc Test::Harness
1116
1117You can get docs for F<prove> with
1118
1119    prove --man
1120
1121You can also look for information at:
1122
1123=over 4
1124
1125=item * AnnoCPAN: Annotated CPAN documentation
1126
1127L<http://annocpan.org/dist/Test-Harness>
1128
1129=item * CPAN Ratings
1130
1131L<http://cpanratings.perl.org/d/Test-Harness>
1132
1133=item * RT: CPAN's request tracker
1134
1135L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Harness>
1136
1137=item * Search CPAN
1138
1139L<http://search.cpan.org/dist/Test-Harness>
1140
1141=back
1142
1143=head1 SOURCE CODE
1144
1145The source code repository for Test::Harness is at
1146L<http://svn.perl.org/modules/Test-Harness>.
1147
1148=head1 AUTHORS
1149
1150Either Tim Bunce or Andreas Koenig, we don't know. What we know for
1151sure is, that it was inspired by Larry Wall's F<TEST> script that came
1152with perl distributions for ages. Numerous anonymous contributors
1153exist.  Andreas Koenig held the torch for many years, and then
1154Michael G Schwern.
1155
1156Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
1157
1158=head1 COPYRIGHT
1159
1160Copyright 2002-2006
1161by Michael G Schwern C<< <schwern at pobox.com> >>,
1162Andy Lester C<< <andy at petdance.com> >>.
1163
1164This program is free software; you can redistribute it and/or
1165modify it under the same terms as Perl itself.
1166
1167See L<http://www.perl.com/perl/misc/Artistic.html>.
1168
1169=cut
1170