3 # This is written in a peculiar style, since we're trying to avoid
4 # most of the constructs we'll be testing for. (This comment is
5 # probably obsolete on the avoidance side, though still current
6 # on the peculiarity side.)
8 # t/TEST and t/harness need to share code. The logical way to do this would be
9 # to have the common code in a file both require or use. However, t/TEST needs
10 # to still work, to generate test results, even if require isn't working, so
11 # we cannot do that. t/harness has no such restriction, so it is quite
12 # acceptable to have it require t/TEST.
14 # In which case, we need to stop t/TEST actually running tests, as all
15 # t/harness needs are its subroutines.
18 # directories with special sets of test switches
# NOTE(review): the declaration that opens this key/value list (original lines
# 19-22) falls in a gap of this excerpt; the entry below maps a test directory
# to extra perl switches -- presumably the %dir_to_switch hash that is looked
# up later in this file ($dir_to_switch{$dir}). TODO confirm against full file.
23 '../ext/File-Glob/t' => '-I.. -MTestInit', # FIXME - tests assume t/
26 # "not absolute" is the default, as it saves some fakery within TestInit
27 # which can perturb tests, and takes CPU. Working with the upstream author of
28 # any of these, to figure out how to remove them from this list, considered
# NOTE(review): the sentence above continues on a line missing from this
# excerpt, and the "my %... = (" opening of the hash holding the entries below
# is likewise not visible. Per the comment above, each entry flags a
# distribution whose tests need the non-default (absolute path) TestInit
# setup; the value 1 just marks set membership.
31 '../cpan/Archive-Tar' => 1,
32 '../cpan/AutoLoader' => 1,
34 '../cpan/Class-ISA' => 1,
35 '../cpan/Devel-PPPort' => 1,
36 '../cpan/Encode' => 1,
37 '../cpan/ExtUtils-Constant' => 1,
38 '../cpan/ExtUtils-MakeMaker' => 1,
39 '../cpan/File-Fetch' => 1,
40 '../cpan/IPC-Cmd' => 1,
41 '../cpan/IPC-SysV' => 1,
42 '../cpan/Locale-Codes' => 1,
43 '../cpan/Module-Build' => 1,
44 '../cpan/Module-Load' => 1,
45 '../cpan/Module-Load-Conditional' => 1,
46 '../cpan/Package-Constants' => 1,
47 '../cpan/Parse-CPAN-Meta' => 1,
48 '../cpan/Pod-Simple' => 1,
49 '../cpan/Test-Simple' => 1,
50 '../cpan/podlators' => 1,
52 '../dist/ExtUtils-Command' => 1,
53 '../dist/ExtUtils-Install' => 1,
54 '../dist/ExtUtils-Manifest' => 1,
55 '../dist/ExtUtils-ParseXS' => 1,
56 '../dist/Tie-File' => 1,
# NOTE(review): the assignment this parenthesised list initialises is not
# visible in this excerpt. Later code checks $temp_no_core{$run_dir} before
# appending ',NC' to the TestInit switches, so this is presumably the
# %temp_no_core hash -- distributions whose tests run without the usual
# core-test setup. TODO confirm against the full file.
60 ('../cpan/B-Debug' => 1,
61 '../cpan/Compress-Raw-Bzip2' => 1,
62 '../cpan/Compress-Raw-Zlib' => 1,
63 '../cpan/Devel-PPPort' => 1,
64 '../cpan/Getopt-Long' => 1,
65 '../cpan/IO-Compress' => 1,
66 '../cpan/MIME-Base64' => 1,
67 '../cpan/parent' => 1,
68 '../cpan/Parse-CPAN-Meta' => 1,
69 '../cpan/Pod-Simple' => 1,
70 '../cpan/podlators' => 1,
71 '../cpan/Test-Simple' => 1,
72 '../cpan/Tie-RefHash' => 1,
73 '../cpan/Unicode-Collate' => 1,
74 '../cpan/Unicode-Normalize' => 1,
77 # delete env vars that may influence the results
78 # but allow override via *_TEST env var if wanted
79 # (e.g. PERL5OPT_TEST=-d:NYTProf)
80 my @bad_env_vars = qw(
81 PERL5LIB PERLLIB PERL5OPT
82 PERL_YAML_BACKEND PERL_JSON_BACKEND
# NOTE(review): the closing of the qw() list (original lines 83-84) falls in
# a gap of this excerpt.
85 for my $envname (@bad_env_vars) {
86 my $override = $ENV{"${envname}_TEST"};
87 if (defined $override) {
# A FOO_TEST override is announced on stderr and installed in place of FOO.
88 warn "$0: $envname=$override\n";
89 $ENV{$envname} = $override;
# NOTE(review): the else branch (original lines 90-91) is missing here; the
# delete below presumably runs only when no *_TEST override was set.
92 delete $ENV{$envname};
100 # Location to put the Valgrind log.
# NOTE(review): the assignment to the Valgrind log variable itself (original
# ~101-104) falls in a gap; $Valgrind_Log is interpolated later in this file.
105 # for testing TEST only
106 #BEGIN { require '../lib/strict.pm'; "strict"->import() };
107 #BEGIN { require '../lib/warnings.pm'; "warnings"->import() };
109 # remove empty elements due to insertion of empty symbols via "''p1'" syntax
110 @ARGV = grep($_,@ARGV) if $^O eq 'VMS';
111 our $show_elapsed_time = $ENV{HARNESS_TIMER} || 0;
113 # Cheesy version of Getopt::Std. We can't replace it with that, because we
114 # can't rely on require working.
# Non-switch arguments are collected in @argv; each recognised -flag sets a
# package global ($::benchmark, $::core, ...) consulted elsewhere in the file.
117 foreach my $idx (0..$#ARGV) {
118 push( @argv, $ARGV[$idx] ), next unless $ARGV[$idx] =~ /^-(\S+)$/;
119 $::benchmark = 1 if $1 eq 'benchmark';
120 $::core = 1 if $1 eq 'core';
121 $::verbose = 1 if $1 eq 'v';
122 $::torture = 1 if $1 eq 'torture';
123 $::with_utf8 = 1 if $1 eq 'utf8';
124 $::with_utf16 = 1 if $1 eq 'utf16';
125 $::taintwarn = 1 if $1 eq 'taintwarn';
126 if ($1 =~ /^deparse(,.+)?$/) {
# NOTE(review): original line 127 (missing here) presumably sets a deparse
# flag before the options capture below; the loop's closing lines are also
# absent from this excerpt.
128 $::deparse_opts = $1;
# Run from the t/ directory when invoked from the top of the source tree.
134 chdir 't' if -f 't/TEST';
135 if (-f 'TEST' && -f 'harness' && -d '../lib') {
# NOTE(review): the body and close of this directory sanity check (original
# 136-138) are missing from this excerpt.
139 die "You need to run \"make test\" first to set things up.\n"
140 unless -e 'perl' or -e 'perl.exe' or -e 'perl.pm';
142 # check leakage for embedders
143 $ENV{PERL_DESTRUCT_LEVEL} = 2 unless exists $ENV{PERL_DESTRUCT_LEVEL};
144 # check existence of all symbols
145 $ENV{PERL_DL_NONLAZY} = 1 unless exists $ENV{PERL_DL_NONLAZY};
147 $ENV{EMXSHELL} = 'sh'; # For OS/2
# Time::HiRes is only loaded when per-test timings were requested, because
# this script must keep working even when require is broken (see file header).
149 if ($show_elapsed_time) { require Time::HiRes }
150 my %timings = (); # testname => [@et] pairs if $show_elapsed_time.
161 # Roll your own File::Find!
# _find_tests: collect every file matching \.t$ under the given directory
# onto @ARGV.
162 sub _find_tests { our @found=(); push @ARGV, _find_files('\.t$', $_[0]) }
# NOTE(review): the "sub _find_files {" opening line (original 163) falls in
# a gap of this excerpt; the lines below are its body: recurse through @dirs
# and accumulate files matching $patt in @found.
164 my($patt, @dirs) = @_;
165 for my $dir (@dirs) {
166 opendir DIR, $dir or die "Trouble opening $dir: $!";
167 foreach my $f (sort { $a cmp $b } readdir DIR) {
# NOTE(review): original 168-169 (missing) presumably skip '.'/'..' entries.
170 my $fullpath = "$dir/$f";
# NOTE(review): original 171-172 (missing) presumably test -d $fullpath
# before the recursive call below.
173 _find_files($patt, $fullpath);
174 } elsif ($f =~ /$patt/) {
175 push @found, $fullpath;
183 # Scan the text of the test program to find switches and special options
184 # we might need to apply.
# NOTE(review): the "sub _scan_test {" opening line (original 185) is missing
# from this excerpt, as are several interior lines. The fragments below read
# the test's first line, detect taint switches, and work out which -I/-M
# switches and run/return directories the test needs.
186 my($test, $type) = @_;
188 open(my $script, "<", $test) or die "Can't read $test.\n";
189 my $first_line = <$script>;
# UTF-16 encoded tests carry NUL bytes; strip them so the #! scan works.
191 $first_line =~ tr/\0//d if $::with_utf16;
# Detect -t/-T taint switches on the test's own #! line.
194 if ($first_line =~ /#!.*\bperl.*\s-\w*([tT])/) {
198 # not all tests are expected to pass with this option
206 if ($type eq 'deparse') {
207 # Look for #line directives which change the filename
209 $file_opts = $file_opts . ",-f$3$4"
210 if /^#\s*line\s+(\d+)\s+((\w+)|"([^"]+)")/;
# Extract the directory part of the test path to look up its switches.
221 $test =~ /^(.+)\/[^\/]+/;
223 my $testswitch = $dir_to_switch{$dir};
224 if (!defined $testswitch) {
# cpan/dist/ext tests are rewritten to run from inside their own
# distribution directory, with TestInit told to fix up @INC (U2T).
225 if ($test =~ s!^(\.\./(cpan|dist|ext)/[^/]+)/t!t!) {
227 $return_dir = '../../t';
229 $perl = '../../t/perl';
230 $testswitch = "-I../.. -MTestInit=U2T";
231 if ($2 eq 'cpan' || $2 eq 'dist') {
233 $testswitch = $testswitch . ',A';
235 if ($temp_no_core{$run_dir}) {
236 $testswitch = $testswitch . ',NC';
239 } elsif ($test =~ m!^\.\./lib!) {
240 $testswitch = '-I.. -MTestInit=U1'; # -T will remove . from @INC
242 $testswitch = '-I.. -MTestInit'; # -T will remove . from @INC
# -Mutf8 makes the test source be treated as UTF-8 when requested.
246 my $utf8 = ($::with_utf8 || $::with_utf16) ? "-I$lib -Mutf8" : '';
# Return the collected options as a hash; the remaining keys of the returned
# structure fall in a gap of this excerpt.
253 return_dir => $return_dir,
254 testswitch => $testswitch,
# NOTE(review): the "sub _cmd {" opening line (original ~263) is missing from
# this excerpt. _cmd builds the shell command string used to run one test,
# from the options hash produced by _scan_test.
264 my($options, $type) = @_;
266 my $test = $options->{test};
# Deparse mode: round-trip the test through B::Deparse into $test.dp, then
# run the deparsed copy.
269 if ($type eq 'deparse') {
270 my $perl = "$options->{perl} $options->{testswitch}";
271 my $lib = $options->{lib};
274 "$perl $options->{switch} -I$lib -MO=-qq,Deparse,-sv1.,".
275 "-l$::deparse_opts$options->{file} ".
277 "&& $perl $options->{switch} -I$lib $test.dp"
280 elsif ($type eq 'perl') {
281 my $perl = $options->{perl};
282 my $redir = $^O eq 'VMS' ? '2>&1' : '';
# Under PERL_VALGRIND, wrap the interpreter in valgrind and divert the
# report to $Valgrind_Log via file descriptor 3.
284 if ($ENV{PERL_VALGRIND}) {
285 my $perl_supp = $options->{return_dir} ? "$options->{return_dir}/perl.supp" : "perl.supp";
286 my $valgrind_exe = $ENV{VALGRIND} // 'valgrind';
287 my $vg_opts = $ENV{VG_OPTS}
289 . "--suppressions=$perl_supp --leak-check=yes "
290 . "--leak-resolution=high --show-reachable=yes "
291 . "--num-callers=50 --track-origins=yes";
292 $perl = "$valgrind_exe $vg_opts $perl";
293 $redir = "3>$Valgrind_Log";
294 if ($options->{run_dir}) {
295 $Valgrind_Log = "$options->{run_dir}/$Valgrind_Log";
299 my $args = "$options->{testswitch} $options->{switch} $options->{utf8}";
300 $cmd = $perl . _quote_args($args) . " $test $redir";
# NOTE(review): the fragment below looks like the _before_fork helper (its
# "sub" line falls in a gap): chdir into the test's run directory if set.
308 if ($options->{run_dir}) {
309 my $run_dir = $options->{run_dir};
310 chdir $run_dir or die "Can't chdir to '$run_dir': $!";
# NOTE(review): and this looks like _after_fork: chdir back afterwards.
319 if ($options->{return_dir}) {
320 my $return_dir = $options->{return_dir};
322 or die "Can't chdir from '$options->{run_dir}' to '$return_dir': $!";
# NOTE(review): the "sub _run_test {" opening line (original ~328) is missing
# from this excerpt. This runs one test via a pipe and returns the output
# handle for the caller to parse.
329 my ($test, $type) = @_;
331 my $options = _scan_test($test, $type);
332 # $test might have changed if we're in ext/Foo, so don't use it anymore
333 # from now on. Use $options->{test} instead.
335 _before_fork($options);
337 my $cmd = _cmd($options, $type);
# Pipe-open the test command; failure to spawn is reported but not fatal,
# so the harness can carry on with the remaining tests.
339 open(my $results, "$cmd |") or print "can't run '$cmd': $!.\n";
341 _after_fork($options);
343 # Our environment may force us to use UTF-8, but we can't be sure that
344 # anything we're reading from will be generating (well formed) UTF-8
345 # This may not be the best way - possibly we should unset ${^OPEN} up
# NOTE(review): the lines below belong to two separate helpers whose "sub"
# opening lines fall in gaps of this excerpt: first the argument-quoting loop
# (presumably _quote_args, called from _cmd above), then what looks like
# _populate_hash, used by _tests_from_manifest below. TODO confirm.
356 foreach (split(/\s+/,$args)) {
357 # In VMS protect with doublequotes because otherwise
358 # DCL will lowercase -- unless already doublequoted.
359 $_ = q(").$_.q(") if ($^O eq 'VMS') && !/^\"/ && length($_) > 0;
360 $argstring = $argstring . ' ' . $_;
# Turn a whitespace-separated word list into a set: each word maps to 1;
# undef input yields an empty list.
366 return unless defined $_[0];
367 return map {$_, 1} split /\s+/, $_[0];
# Build the list of test paths named in ../MANIFEST, skipping extensions that
# are known but were not built ($known_extensions minus $extensions).
370 sub _tests_from_manifest {
371 my ($extensions, $known_extensions) = @_;
373 my %extensions = _populate_hash($extensions);
374 my %known_extensions = _populate_hash($known_extensions);
# Anything known but not built goes in %skip.
376 foreach (keys %known_extensions) {
377 $skip{$_} = 1 unless $extensions{$_};
381 my $mani = '../MANIFEST';
382 if (open(MANI, $mani)) {
# NOTE(review): the read-loop header (original ~383) is missing from this
# excerpt; the match below runs against each MANIFEST line, capturing the
# test path and, for cpan/dist/ext entries, the extension directory.
384 if (m!^((?:cpan|dist|ext)/(\S+)/+(?:[^/\s]+\.t|test\.pl)|lib/\S+?(?:\.t|test\.pl))\s!) {
387 if (!$::core || $t =~ m!^lib/[a-z]!) {
388 if (defined $extension) {
# Strip the trailing /t/... so we are left with the extension directory.
389 $extension =~ s!/t(:?/\S+)*$!!;
390 # XXX Do I want to warn that I'm skipping these?
391 next if $skip{$extension};
392 my $flat_extension = $extension;
393 $flat_extension =~ s!-!/!g;
394 next if $skip{$flat_extension}; # Foo/Bar may live in Foo-Bar
397 push @results, $path;
398 $::path_to_name{$path} = $t;
404 warn "$0: cannot open $mani: $!\n";
# Default test selection, in deliberate order (see comments below).
410 # base first, as TEST bails out if that can't run
411 # then comp, to validate that require works
412 # then run, to validate that -M works
413 # then we know we can -MTestInit for everything else, making life simpler
414 foreach my $dir (qw(base comp run cmd io re opbasic op uni mro)) {
# NOTE(review): the loop body (original 415-417) is missing from this excerpt.
418 _find_tests('porting');
421 # Config.pm may be broken for make minitest. And this is only a refinement
422 # for skipping tests on non-default builds, so it is allowed to fail.
423 # What we want to do is make a list of extensions which we did not build.
424 my $configsh = '../config.sh';
425 my ($extensions, $known_extensions);
# NOTE(review): the guard and read loop around this open (original 426, 428)
# fall in gaps; per the comment above, reading config.sh is best-effort.
427 open FH, $configsh or die "Can't open $configsh: $!";
429 if (/^extensions=['"](.*)['"]$/) {
432 elsif (/^known_extensions=['"](.*)['"]$/) {
433 $known_extensions = $1;
436 if (!defined $known_extensions) {
437 warn "No known_extensions line found in $configsh";
439 if (!defined $extensions) {
440 warn "No extensions line found in $configsh";
443 # The "complex" constructions of list return from a subroutine, and push of
444 # a list, might fail if perl is really hosed, but they aren't needed for
445 # make minitest, and the building of extensions will likely also fail if
446 # something is that badly wrong.
447 push @ARGV, _tests_from_manifest($extensions, $known_extensions);
450 _find_tests('japh') if $::torture;
451 _find_tests('t/benchmark') if $::benchmark or $ENV{PERL_BENCHMARK};
452 _find_tests('bigmem') if $ENV{PERL_TEST_MEMORY};
# Dispatch to _testprogs: deparse mode, UTF-16 re-encoding mode, or plain run.
457 _testprogs('deparse', '', @ARGV);
459 elsif ($::with_utf16) {
# For each endianness ($e) and BOM choice ($b), rewrite every test into a
# UTF-16 encoded copy and run the copies (@UARGV) instead of the originals.
462 print STDERR "# ENDIAN $e BOM $b\n";
# $u is the encoded copy's filename; $f picks the pack template for the
# chosen byte order ("v" little-endian, "n" big-endian).
465 my $u = $a . "." . ($e ? "l" : "b") . "e" . ($b ? "b" : "");
466 my $f = $e ? "v" : "n";
# NOTE(review): several lines between the fragments here fall in gaps of
# this excerpt; U is the UTF-16 copy of the test being written below.
470 if (open(U, ">$u")) {
471 print U pack("$f", 0xFEFF) if $b;
473 print U pack("$f*", unpack("C*", $_));
480 _testprogs('perl', '', @UARGV);
486 _testprogs('perl', '', @ARGV);
# NOTE(review): the "sub _testprogs {" opening line (original ~489) is
# missing from this excerpt. This is the main driver: it runs each test and
# parses its TAP output. The heredoc banner below loses its 'EOT' terminator
# to a gap in the excerpt, so no annotations are inserted inside it.
490 my ($type, $args, @tests) = @_;
492 print <<'EOT' if ($type eq 'deparse');
493 ------------------------------------------------------------------------------
495 ------------------------------------------------------------------------------
500 foreach my $t (@tests) {
501 unless (exists $::path_to_name{$t}) {
503 $::path_to_name{$t} = $tname;
# Work out the widest display name so the ok/FAILED column lines up.
507 foreach (@::path_to_name{@tests}) {
508 s/\.\w+\z/ /; # space gives easy doubleclick to select fname
510 $maxlen = $len if $len > $maxlen;
512 # + 3 : we want three dots between the test name and the "ok"
513 my $dotdotdot = $maxlen + 3 ;
514 my $grind_ct = 0; # count of non-empty valgrind reports
515 my $total_files = @tests;
517 my $tested_files = 0;
520 my $toolnm; # valgrind, cachegrind, perf
522 while (my $test = shift @tests) {
523 my ($test_start_time, @starttimes) = 0;
524 if ($show_elapsed_time) {
525 $test_start_time = Time::HiRes::time();
526 # times() reports usage by TEST, but we want usage of each
527 # testprog it calls, so record accumulated times now,
528 # subtract them out afterwards. Ideally, we'd take times
529 # in BEGIN/END blocks (giving better visibility of self vs
530 # children of each testprog), but that would require some
531 # IPC to send results back here, or a completely different
532 # collection scheme (Storable isn't tuned for incremental use)
# Tests that cannot work under deparse are skipped up front.
538 if ($type eq 'deparse') {
539 if ($test eq "comp/redef.t") {
540 # Redefinition happens at compile time
543 elsif ($test =~ m{lib/Switch/t/}) {
544 # B::Deparse doesn't support source filtering
# Pad the display name with dots out to the common column.
548 my $te = $::path_to_name{$test} . '.'
549 x ($dotdotdot - length($::path_to_name{$test})) .' ';
551 if ($^O ne 'VMS') { # defer printing on VMS due to piping bug
# Per-test valgrind log name: basename of the test plus a suffix; 'local'
# so the _cmd/run machinery sees the per-test value.
556 (local $Valgrind_Log = "$test.valgrind-current") =~ s/^.*\///;
557 my $results = _run_test($test, $type);
# TAP parsing state: was the 1..n plan seen, and was it trailing?
563 my $trailing_leader = 0;
567 next if /^\s*$/; # skip blank lines
# NOTE(review): the dots in /^1..$/ below are unescaped, so this also
# matches e.g. "1ab"; appears long-standing/harmless for the VMS
# blank-line workaround -- confirm before "fixing".
568 if (/^1..$/ && ($^O eq 'VMS')) {
569 # VMS pipe bug inserts blank lines.
571 if ($l2 =~ /^\s*$/) {
580 if ($trailing_leader) {
581 # shouldn't be anything following a postfix 1..n
582 $failure = 'FAILED--extra output after trailing 1..n';
# Plan line 1..n, optionally with old-style " todo" test numbers.
585 if (/^1\.\.([0-9]+)( todo ([\d ]+))?/) {
# A second plan line is an error; otherwise record expected count.
587 $failure = 'FAILED--seen duplicate leader';
591 %todo = map { $_ => 1 } split / /, $3 if $3;
592 $totmax = $totmax + $max;
593 $tested_files = $tested_files + 1;
595 # 1..n appears at end of file
596 $trailing_leader = 1;
598 $failure = "FAILED--expected $max tests, saw $next";
# Parse individual "ok"/"not ok" result lines.
608 if (/^(not )?ok(?: (\d+))?[^\#]*(\s*\#.*)?/) {
609 unless ($seen_leader) {
616 my($not, $num, $extra, $istodo) = ($1, $2, $3, 0);
617 $num = $next unless $num;
621 # SKIP is essentially the same as TODO for t/TEST
622 # this still conforms to TAP:
623 # http://testanything.org/wiki/index.php/TAP_specification
624 $extra and $istodo = $extra =~ /#\s*(?:TODO|SKIP)\b/;
625 $istodo = 1 if $todo{$num};
# "not ok" without a TODO/SKIP directive is a real failure.
627 if( $not && !$istodo ) {
628 $failure = "FAILED at test $num";
# Out-of-sequence test numbers also count as failure.
633 $failure ="FAILED--expected test $next, saw test $num";
637 elsif (/^Bail out!\s*(.*)/i) { # magic words
638 die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
641 # module tests are allowed extra output,
642 # because Test::Harness allows it
643 next if $test =~ /^\W*(cpan|dist|ext|lib)\b/;
644 $failure = "FAILED--unexpected output at test $next";
# After the output loop: never having seen a plan is itself a failure.
652 if (not defined $failure) {
653 $failure = 'FAILED--no leader found' unless $seen_leader;
# Post-process the valgrind/cachegrind/perf log for this test, if any.
656 if ($ENV{PERL_VALGRIND}) {
657 $toolnm = $ENV{VALGRIND};
658 $toolnm =~ s|.*/||; # keep basename
659 my @valgrind; # gets content of file
660 if (-e $Valgrind_Log) {
661 if (open(V, $Valgrind_Log)) {
665 warn "$0: Failed to open '$Valgrind_Log': $!\n";
# cachegrind/perf produce no parseable summary; just keep the raw log,
# renamed per-test.
668 if ($ENV{VG_OPTS} =~ /(cachegrind)/ or $toolnm =~ /(perf)/) {
670 if ($toolnm eq 'perf') {
671 # append perf's subcommand, not just stat
672 my ($sub) = split /\s/, $ENV{VG_OPTS};
675 if (rename $Valgrind_Log, "$test.$toolnm") {
678 warn "$0: Failed to create '$test.$toolnm': $!\n";
# For valgrind proper, count errors and leaked blocks out of the report.
684 for my $i (0..$#valgrind) {
685 local $_ = $valgrind[$i];
686 if (/^==\d+== ERROR SUMMARY: (\d+) errors? /) {
687 $errors = $errors + $1; # there may be multiple error summaries
688 } elsif (/^==\d+== LEAK SUMMARY:/) {
689 for my $off (1 .. 4) {
690 if ($valgrind[$i+$off] =~
691 /(?:lost|reachable):\s+\d+ bytes in (\d+) blocks/) {
692 $leaks = $leaks + $1;
# Keep the renamed log only when it shows errors or leaks.
697 if ($errors or $leaks) {
698 if (rename $Valgrind_Log, "$test.valgrind") {
699 $grind_ct = $grind_ct + 1;
701 warn "$0: Failed to create '$test.valgrind': $!\n";
705 warn "No valgrind output?\n";
707 if (-e $Valgrind_Log) {
709 or warn "$0: Failed to unlink '$Valgrind_Log': $!\n";
# NOTE(review): deparse-mode cleanup of the $test.dp file presumably sits
# in the gap after this line -- confirm against the full file.
712 if ($type eq 'deparse') {
715 if (not defined $failure and $next != $max) {
716 $failure="FAILED--expected $max tests, saw $next";
719 if( !defined $failure # don't mask a test failure
722 $failure = "FAILED--non-zero wait status: $?";
# Report and book-keep a failed test; a basic-test failure under minitest
# is fatal because nothing further can be trusted.
725 if (defined $failure) {
726 print "${te}$failure\n";
727 $::bad_files = $::bad_files + 1;
728 if ($test =~ /^base/ && ! defined &DynaLoader::boot_DynaLoader) {
729 # Die if running under minitest (no DynaLoader). Otherwise
730 # keep going, as we know that Perl basically works, or we
731 # would not have been able to actually compile it all the way.
732 die "Failed a basic test ($test) under minitest -- cannot continue.\n";
734 $failed_tests{$test} = 1;
# Success path: compute elapsed wall/CPU times when requested and print ok.
738 my ($elapsed, $etms) = ("", 0);
739 if ( $show_elapsed_time ) {
740 $etms = (Time::HiRes::time() - $test_start_time) * 1000;
741 $elapsed = sprintf(" %8.0f ms", $etms);
743 my (@endtimes) = times;
744 $endtimes[$_] -= $starttimes[$_] for 0..$#endtimes;
745 splice @endtimes, 0, 2; # drop self/harness times
746 $_ *= 1000 for @endtimes; # and scale to ms
747 $timings{$test} = [$etms,@endtimes];
748 $elapsed .= sprintf(" %5.0f ms", $_) for @endtimes;
750 print "${te}ok$elapsed\n";
751 $good_files = $good_files + 1;
754 print "${te}skipped\n";
755 $tested_files = $tested_files - 1;
# Final summary: overall pass/fail banner, per-failure list, and advice.
760 if ($::bad_files == 0) {
762 print "All tests successful.\n";
763 # XXX add mention of 'perlbug -ok' ?
766 die "FAILED--no tests were run for some reason.\n";
770 my $pct = $tested_files ? sprintf("%.2f", ($tested_files - $::bad_files) / $tested_files * 100) : "0.00";
771 my $s = $::bad_files == 1 ? "" : "s";
772 warn "Failed $::bad_files test$s out of $tested_files, $pct% okay.\n";
773 for my $test ( sort keys %failed_tests ) {
# NOTE(review): the '###' lines below are heredoc content (runtime strings
# printed to the user), not comments; their delimiters fall in gaps of this
# excerpt, so no annotations are inserted among them.
777 ### Since not all tests were successful, you may want to run some of
778 ### them individually and examine any diagnostic messages they produce.
779 ### See the INSTALL document's section on "make test".
781 warn <<'SHRDLU_2' if $good_files / $total_files > 0.8;
782 ### You have a good chance to get more information by running
784 ### in the 't' directory since most (>=80%) of the tests succeeded.
786 if (eval {require Config; import Config; 1}) {
787 if ($::Config{usedl} && (my $p = $::Config{ldlibpthname})) {
789 ### You may have to set your dynamic library search path,
790 ### $p, to point to the build directory:
792 if (exists $ENV{$p} && $ENV{$p} ne '') {
794 ### setenv $p `pwd`:\$$p; cd t; ./perl harness
795 ### $p=`pwd`:\$$p; export $p; cd t; ./perl harness
796 ### export $p=`pwd`:\$$p; cd t; ./perl harness
800 ### setenv $p `pwd`; cd t; ./perl harness
801 ### $p=`pwd`; export $p; cd t; ./perl harness
802 ### export $p=`pwd`; cd t; ./perl harness
806 ### for csh-style shells, like tcsh; or for traditional/modern
807 ### Bourne-style shells, like bash, ksh, and zsh, respectively.
812 my ($user,$sys,$cuser,$csys) = times;
813 my $tot = sprintf("u=%.2f s=%.2f cu=%.2f cs=%.2f scripts=%d tests=%d",
814 $user,$sys,$cuser,$csys,$tested_files,$totmax);
# HARNESS_TIMER may name a directory: dump the collected timings there as a
# date-stamped Storable file.
817 if (-d $show_elapsed_time) {
818 # HARNESS_TIMER = <a-directory>. Save timings etc to
819 # storable file there. NB: the test cds to ./t/, so
820 # relative path must account for that, ie ../../perf
821 # points to dir next to source tree.
824 $dt[5] += 1900; $dt[4] += 1; # fix year, month
825 my $fn = "$show_elapsed_time/".join('-', @dt[5,4,3,2,1]).".ttimes";
826 Storable::store({ perf => \%timings,
827 gather_conf_platform_info(),
830 print "wrote storable file: $fn\n";
833 if ($ENV{PERL_VALGRIND}) {
834 my $s = $grind_ct == 1 ? '' : 's';
835 print "$grind_ct valgrind report$s created.\n", ;
836 if ($toolnm eq 'cachegrind') {
837 # cachegrind leaves a lot of cachegrind.out.$pid litter
838 # around the tree, find and delete them
839 unlink _find_files('cachegrind.out.\d+$',
840 qw ( ../t ../cpan ../ext ../dist/ ));
# Exit status reflects whether any test file failed.
844 exit ($::bad_files != 0);
846 # Collect platform, config data that should allow comparing
847 # performance data between different machines. With enough data,
848 # and/or clever statistical analysis, it should be possible to
849 # determine the effect of config choices, more memory, etc
851 sub gather_conf_platform_info {
852 # currently rather quick & dirty, and subject to change
853 # for both content and format.
855 my (%conf, @platform) = ();
# Keep only compiler, git, and configure-argument entries from %Config.
856 $conf{$_} = $Config::Config{$_} for
857 grep /cc|git|config_arg\d+/, keys %Config::Config;
# CPU details are read from /proc/cpuinfo where available (i.e. Linux).
858 if (-f '/proc/cpuinfo') {
859 open my $fh, '/proc/cpuinfo' or warn "$!: /proc/cpuinfo\n";
860 @platform = grep /name|cpu/, <$fh>;
861 chomp $_ for @platform;
863 unshift @platform, $^O;
# Returned structure (its opening lines fall in a gap of this excerpt):
# cpu/mem/load/host details gathered via external commands, plus a
# data-format version stamp for downstream consumers.
867 platform => {cpu => \@platform,
868 mem => [ grep s/\s+/ /,
869 grep chomp, `free` ],
870 load => [ grep chomp, `uptime` ],
872 host => (grep chomp, `hostname -f`),
873 version => '0.03', # bump for conf, platform, or data collection changes
877 # ex: set ts=8 sts=4 sw=4 noet: