-#!./perl -w
+#!./perl
# This is written in a peculiar style, since we're trying to avoid
# most of the constructs we'll be testing for. (This comment is
# In which case, we need to stop t/TEST actually running tests, as all
# t/harness needs are its subroutines.
+# Measure the elapsed wallclock time.
+my $t0 = time();
+
# If we're doing deparse tests, ignore failures for these
my $deparse_failures;
# And skip even running these
my $deparse_skips;
+my $deparse_skip_file = '../Porting/deparse-skips.txt';
+
# directories with special sets of test switches
my %dir_to_switch =
(base => '',
'../cpan/Archive-Tar' => 1,
'../cpan/AutoLoader' => 1,
'../cpan/CPAN' => 1,
- '../cpan/Devel-PPPort' => 1,
'../cpan/Encode' => 1,
- '../cpan/ExtUtils-Command' => 1,
'../cpan/ExtUtils-Constant' => 1,
'../cpan/ExtUtils-Install' => 1,
'../cpan/ExtUtils-MakeMaker' => 1,
'../cpan/File-Fetch' => 1,
'../cpan/IPC-Cmd' => 1,
'../cpan/IPC-SysV' => 1,
- '../cpan/Locale-Codes' => 1,
'../cpan/Module-Load' => 1,
'../cpan/Module-Load-Conditional' => 1,
- '../cpan/Parse-CPAN-Meta' => 1,
'../cpan/Pod-Simple' => 1,
'../cpan/Test-Simple' => 1,
'../cpan/podlators' => 1,
'../dist/Cwd' => 1,
+ '../dist/Devel-PPPort' => 1,
'../dist/ExtUtils-ParseXS' => 1,
'../dist/Tie-File' => 1,
);
-my %temp_no_core =
- ('../cpan/B-Debug' => 1,
+my %temp_no_core = (
'../cpan/Compress-Raw-Bzip2' => 1,
'../cpan/Compress-Raw-Zlib' => 1,
'../cpan/Devel-PPPort' => 1,
'../cpan/IO-Compress' => 1,
'../cpan/MIME-Base64' => 1,
'../cpan/parent' => 1,
- '../cpan/Parse-CPAN-Meta' => 1,
'../cpan/Pod-Simple' => 1,
'../cpan/podlators' => 1,
'../cpan/Test-Simple' => 1,
'../cpan/Tie-RefHash' => 1,
'../cpan/Unicode-Collate' => 1,
- '../cpan/Unicode-Normalize' => 1,
+ '../dist/Unicode-Normalize' => 1,
);
+# temporary workaround Apr 2017. These need '.' in @INC.
+# Ideally this list will eventually be empty
+
+my %temp_needs_dot = map { $_ => 1 } qw(
+ ../cpan/Filter-Util-Call
+ ../cpan/libnet
+ ../cpan/Test-Simple
+);
+
+
# delete env vars that may influence the results
# but allow override via *_TEST env var if wanted
# (e.g. PERL5OPT_TEST=-d:NYTProf)
'.svn' => 1,
);
+
+if ($::do_nothing) {
+ return 1;
+}
+
$| = 1;
# for testing TEST only
#BEGIN { require '../lib/strict.pm'; "strict"->import() };
#BEGIN { require '../lib/warnings.pm'; "warnings"->import() };
-# allow setting -options from env
-unshift @ARGV, split /;/, $ENV{TEST_OPTS} if $ENV{TEST_OPTS};
-
# remove empty elements due to insertion of empty symbols via "''p1'" syntax
@ARGV = grep($_,@ARGV) if $^O eq 'VMS';
-our $show_elapsed_time = $ENV{HARNESS_TIMER} || 0;
-{ # create timestamp for any reports written, so theyre corelatable
- my @dt = localtime;
- $dt[5] += 1900; $dt[4] += 1; # fix year, month
- $::tstamp = sprintf("%d-%.2d%.2d-%.2d%.2d-%.2d-$$", @dt[5,4,3,2,1,0]);
-}
+# String eval to avoid loading File::Glob on non-miniperl.
+# (Windows only uses this script for miniperl.)
+@ARGV = eval 'map glob, @ARGV' if $^O eq 'MSWin32';
-# wrapper template for linux perf
-our %cmdwrap = (
- tool => "perf", # path
- cmd => "stat", # others possible, none tried
- rptdir => $ENV{PWD}, # typically ./t, better if absolute
- name => "", # $::tstamp prefixed implicitly
- # reps => 1 #
- opts => "", # pass-thru to perf stat: 'opts=-x,' gives CSV
- );
+our $show_elapsed_time = $ENV{HARNESS_TIMER} || 0;
# Cheesy version of Getopt::Std. We can't replace it with that, because we
# can't rely on require working.
{
my @argv = ();
foreach my $idx (0..$#ARGV) {
- push( @argv, $ARGV[$idx] ), next unless $ARGV[$idx] =~ /^-(\S+?)(?:=(.+))?$/;
+ push( @argv, $ARGV[$idx] ), next unless $ARGV[$idx] =~ /^-(\S+)$/;
$::benchmark = 1 if $1 eq 'benchmark';
$::core = 1 if $1 eq 'core';
$::verbose = 1 if $1 eq 'v';
$::deparse_opts = $1;
_process_deparse_config();
}
- if ($1 eq 'lxperf') {
- die "perf only available on linux" unless $^O eq 'linux';
- $::perf = 1;
- next unless $2;
- # $2 is either Reps or : delimited $k=$v pairs
- my (@v) = split /:/, $2;
- $cmdwrap{reps} = $v[0] if @v == 1 and $v[0] =~ /^\d+$/;
- # parse as $k=$v to override %cmdwrap defaults
- m/(.+?)=(.+)/ and $cmdwrap{$1} = $2 for @v;
- }
}
@ARGV = @argv;
}
-if ($::do_nothing || $::do_nothing) { # set by harness b4 requiring us
- return 1;
-}
-
chdir 't' if -f 't/TEST';
if (-f 'TEST' && -f 'harness' && -d '../lib') {
@INC = '../lib';
}
-die "You need to run \"make test\" first to set things up.\n"
+die "You need to run \"make test_prep\" first to set things up.\n"
unless -e 'perl' or -e 'perl.exe' or -e 'perl.pm';
# check leakage for embedders
my %timings = (); # testname => [@et] pairs if $show_elapsed_time.
# Roll your own File::Find!
-our @found;
-sub _find_tests { @found = (); push @ARGV, _find_files('\.t$', $_[0]) }
+sub _find_tests { our @found=(); push @ARGV, _find_files('\.t$', $_[0]) }
sub _find_files {
my($patt, @dirs) = @_;
for my $dir (@dirs) {
close $script;
- my $perl = './perl';
+ my $perl = $^O eq 'MSWin32' ? '.\perl' : './perl';
my $lib = '../lib';
my $run_dir;
my $return_dir;
if ($temp_no_core{$run_dir}) {
$testswitch = $testswitch . ',NC';
}
+ if($temp_needs_dot{$run_dir}) {
+ $testswitch = $testswitch . ',DOT';
+ }
}
} elsif ($test =~ m!^\.\./lib!) {
$testswitch = '-I.. -MTestInit=U1'; # -T will remove . from @INC
my $perl_supp = $options->{return_dir} ? "$options->{return_dir}/perl.supp" : "perl.supp";
my $valgrind_exe = $ENV{VALGRIND} // 'valgrind';
if ($options->{run_dir}) {
- $Valgrind_Log = "$options->{run_dir}/$Valgrind_Log";
+ require Cwd;
+ $Valgrind_Log = Cwd::abs_path("$options->{run_dir}/$Valgrind_Log");
}
my $vg_opts = $ENV{VG_OPTS}
// "--log-file=$Valgrind_Log "
}
$perl = "$valgrind_exe $vg_opts $perl";
}
- if ($::perf) {
- my $ofile = "$cmdwrap{rptdir}/$::tstamp$cmdwrap{name}.perf";
- my $wrapper = "$cmdwrap{tool} $cmdwrap{cmd} "
- . ( defined $cmdwrap{reps} && $cmdwrap{reps} > 0
- ? "--repeat $cmdwrap{reps}" : "" )
- . " --append -o $ofile $cmdwrap{opts} -- ";
- $perl = "$wrapper $perl";
- }
my $args = "$options->{testswitch} $options->{switch} $options->{utf8}";
$cmd = $perl . _quote_args($args) . " $test $redir";
if (m!^((?:cpan|dist|ext)/(\S+)/+(?:[^/\s]+\.t|test\.pl)|lib/\S+?(?:\.t|test\.pl))\s!) {
my $t = $1;
my $extension = $2;
+
+ # XXX Generates way too many error lines currently. Skip for
+ # v5.22
+ next if $t =~ /^cpan/ && ord("A") != 65;
+
if (!$::core || $t =~ m!^lib/[a-z]!) {
if (defined $extension) {
$extension =~ s!/t(:?/\S+)*$!!;
_find_tests('bigmem') if $ENV{PERL_TEST_MEMORY};
}
}
+@ARGV= do {
+ my @order= (
+ "base",
+ "comp",
+ "run",
+ "cmd",
+ "io",
+ "re",
+ "opbasic",
+ "op",
+ "uni",
+ "mro",
+ "lib",
+ "ext",
+ "dist",
+ "cpan",
+ "perf",
+ "porting",
+ );
+ my %order= map { $order[$_] => 1+$_ } 0..$#order;
+ my $idx= 0;
+ map {
+ $_->[0]
+ } sort {
+ $a->[3] <=> $b->[3] ||
+ $a->[1] <=> $b->[1]
+ } map {
+ my $root= /(\w+)/ ? $1 : "";
+ [ $_, $idx++, $root, $order{$root}||=0 ]
+ } @ARGV;
+};
if ($::deparse) {
_testprogs('deparse', '', @ARGV);
my $grind_ct = 0; # count of non-empty valgrind reports
my $total_files = @tests;
my $good_files = 0;
- my $tested_files = 0;
- my $totmax = 0; # += $plan after <$results> consumed. includes --repeats
+ my $tested_files = 0;
+ my $totmax = 0;
my %failed_tests;
+ my @unexpected_pass; # files where deparse-skips.txt says fail but passed
my $toolnm; # valgrind, cachegrind, perf
while (my $test = shift @tests) {
my $results = _run_test($test, $type);
- my $failure = "";
- my $plan = undef; # the N in "1..N"
- my $next = 0; # ++ after each "ok M", reset on "1..N"
- my $nextreps = 0; # ++ after each "ok M"
- my $Reps = 0; # ++ when "1..N" seen
+ my $failure;
+ my $next = 0;
+ my $seen_leader = 0;
+ my $seen_ok = 0;
+ my $trailing_leader = 0;
+ my $max;
my %todo;
while (<$results>) {
next if /^\s*$/; # skip blank lines
print $_;
}
unless (/^\#/) {
+ if ($trailing_leader) {
+ # shouldn't be anything following a postfix 1..n
+ $failure = 'FAILED--extra output after trailing 1..n';
+ last;
+ }
if (/^1\.\.([0-9]+)( todo ([\d ]+))?/) {
-
- # a "1..N" plan, maybe todos also
- if (!$plan) {
- $plan = $1;
- } else {
- # allow another plan agreeing on N
- if ($1 != $plan) {
- $failure = "latest plan disagrees: $1 != $plan\n";
+ if ($seen_leader) {
+ $failure = 'FAILED--seen duplicate leader';
+ last;
+ }
+ $max = $1;
+ %todo = map { $_ => 1 } split / /, $3 if $3;
+ $totmax = $totmax + $max;
+ $tested_files = $tested_files + 1;
+ if ($seen_ok) {
+ # 1..n appears at end of file
+ $trailing_leader = 1;
+ if ($next != $max) {
+ $failure = "FAILED--expected $max tests, saw $next";
last;
}
}
- unless ($next == 0 or $next == $plan) {
- $failure = "plan seen in middle of tests: $next\n";
- last;
+ else {
+ $next = 0;
}
- $Reps = $Reps + 1;
- $next = 0; # reset for next "1..N" plan check
-
- $totmax = $totmax + $plan;
- $tested_files = $tested_files + 1;
-
- # if $Reps>1, this work is redone $Reps-1 times, trivial extra.
- # assume todos match, since plans do.
- %todo = map { $_ => 1 } split / /, $3 if $3;
- next;
+ $seen_leader = 1;
}
else {
- if (/^(not )?ok(?:\s+(\d+))?[^\#]*(\s*\#.*)?/) {
+ if (/^(not )?ok(?: (\d+))?[^\#]*(\s*\#.*)?/) {
+ unless ($seen_leader) {
+ unless ($seen_ok) {
+ $next = 0;
+ }
+ }
+ $seen_ok = 1;
$next = $next + 1;
- $nextreps = $nextreps + 1;
- my ($not, $num, $extra, $istodo) = ($1, $2, $3, 0);
+ my($not, $num, $extra, $istodo) = ($1, $2, $3, 0);
+ $num = $next unless $num;
+
+ if ($num == $next) {
- $num = $next unless $num; # tolerate un-numbered
- if ($num != $next) {
- $failure ="FAILED--expected test $next, saw test $num";
- last;
- }
- else {
# SKIP is essentially the same as TODO for t/TEST
# this still conforms to TAP:
# http://testanything.org/wiki/index.php/TAP_specification
last;
}
}
+ else {
+ $failure ="FAILED--expected test $next, saw test $num";
+ last;
+ }
}
elsif (/^Bail out!\s*(.*)/i) { # magic words
die "FAILED--Further testing stopped" . ($1 ? ": $1\n" : ".\n");
}
}
}
+ my @junk = <$results>; # dump remaining output to prevent SIGPIPE
+ # (so far happens only on os390)
close $results;
+ undef @junk;
- if (not $failure) {
- $failure = "FAILED--no plan found next:$next" unless defined $plan;
+ if (not defined $failure) {
+ $failure = 'FAILED--no leader found' unless $seen_leader;
}
_check_valgrind(\$toolnm, \$grind_ct, \$test);
if ($type eq 'deparse' && !$ENV{KEEP_DEPARSE_FILES}) {
unlink "./$test.dp";
}
- if (not $failure and $nextreps != $Reps * $plan) {
- $failure="FAILED--expected $Reps * $plan tests, saw $nextreps";
+ if (not defined $failure and $next != $max) {
+ $failure="FAILED--expected $max tests, saw $next";
}
- if (not $failure and $? ) { # don't mask a test failure
+ if( !defined $failure # don't mask a test failure
+ and $? )
+ {
$failure = "FAILED--non-zero wait status: $?";
}
# Deparse? Should it have passed or failed?
if ($type eq 'deparse' && $test =~ $deparse_failures) {
if (!$failure) {
- # Wait, it didn't fail? Great news! Tell someone!
- $failure = "FAILED--all tests passed but test should have failed";
+ # Wait, it didn't fail? Great news!
+ push @unexpected_pass, $test;
} else {
# Bah, still failing. Mask it.
print "${te}skipped\n";
}
}
- if ($failure) {
+ if (defined $failure) {
print "${te}$failure\n";
$::bad_files = $::bad_files + 1;
if ($test =~ /^base/ && ! defined &DynaLoader::boot_DynaLoader) {
$failed_tests{$test} = 1;
}
else {
- if ($plan) {
+ if ($max) {
my ($elapsed, $etms) = ("", 0);
if ( $show_elapsed_time ) {
$etms = (Time::HiRes::time() - $test_start_time) * 1000;
}
else {
die "FAILED--no tests were run for some reason.\n";
- # forex: ./perl TEST run/dtrace.t
- # 1..0 # Skip no dtrace
}
}
else {
for my $test ( sort keys %failed_tests ) {
print "\t$test\n";
}
+
+ if (@unexpected_pass) {
+ print <<EOF;
+
+The following scripts were expected to fail under -deparse (at least
+according to $deparse_skip_file), but unexpectedly succeeded:
+EOF
+ print "\t$_\n" for sort @unexpected_pass;
+ print "\n";
+ }
+
warn <<'SHRDLU_1';
### Since not all tests were successful, you may want to run some of
### them individually and examine any diagnostic messages they produce.
}
}
}
+ printf "Elapsed: %d sec\n", time() - $t0;
my ($user,$sys,$cuser,$csys) = times;
my $tot = sprintf("u=%.2f s=%.2f cu=%.2f cs=%.2f scripts=%d tests=%d",
$user,$sys,$cuser,$csys,$tested_files,$totmax);
print "$tot\n";
if ($good_files) {
if (-d $show_elapsed_time) {
- # HARNESS_TIMER = <a-directory>. Save a storable file of
- # timings data into the dir. NB: the test cds to ./t/, so
- # relative path must account for that, or better, just
- # give an abs-path.
+ # HARNESS_TIMER = <a-directory>. Save timings etc to
+ # storable file there. NB: the test cds to ./t/, so
+ # relative path must account for that, ie ../../perf
+ # points to dir next to source tree.
require Storable;
- my $fn = "$show_elapsed_time/$::tstamp.ttimes";
- Storable::store({ times => \%timings,
+ my @dt = localtime;
+ $dt[5] += 1900; $dt[4] += 1; # fix year, month
+ my $fn = "$show_elapsed_time/".join('-', @dt[5,4,3,2,1]).".ttimes";
+ Storable::store({ perf => \%timings,
gather_conf_platform_info(),
total => $tot,
}, $fn);
load => [ grep chomp, `uptime` ],
},
host => (grep chomp, `hostname -f`),
- version => '0.031', # bump for conf, platform, or data collection changes
+ version => '0.03', # bump for conf, platform, or data collection changes
);
}
warn "$0: Failed to open '$Valgrind_Log': $!\n";
}
}
- # todo: precede if clause with $ENV{VG_OPTS} &&
if ($ENV{VG_OPTS} =~ /(cachegrind)/ or $$toolnm =~ /(perf)/) {
$$toolnm = $1;
if ($$toolnm eq 'perf') {
- # TBD: need VG_TEST=--help to pass test.valgrind when VALGRIND=/usr/bin/perf
- # VG_OPTS='stat -o perf-stat --append --' is also needed to run perf this way.
+            # append perf's subcommand, not just stat
my ($sub) = split /\s/, $ENV{VG_OPTS};
$$toolnm .= "-$sub";
}
}
# Generate regexps of known bad filenames / skips from Porting/deparse-skips.txt
-my $in;
sub _process_deparse_config {
my @deparse_failures;
my @deparse_skips;
- my $f = '../Porting/deparse-skips.txt';
+ my $f = $deparse_skip_file;
my $skips;
if (!open($skips, '<', $f)) {
return;
}
+ my $in;
while(<$skips>) {
if (/__DEPARSE_FAILURES__/) {
$in = \@deparse_failures; next;
next unless $_;
push @$in, $_;
+ warn "WARNING: $f:$.: excluded file doesn't exist: $_\n" unless -f $_;
}
for my $f (@deparse_failures, @deparse_skips) {