# NOTE(review): this span is a unified-diff / patch fragment, not valid Perl:
# several lines carry leading "+" / "-" change markers and the "@@ ... @@"
# hunk headers appear to have been lost.  The target is the Perl core test
# drivers (t/harness for the _find_tests section, t/TEST for the timing
# section) -- TODO confirm against the original upstream patch.
# NOTE(review): braces are unbalanced in this view (five closing braces at
# the end with no matching visible openers) and "unless ($::core) {" appears
# twice in a row below -- presumably two separate hunks were concatenated
# without their headers; verify before applying.
# In which case, we need to stop t/TEST actually running tests, as all
# t/harness needs are its subroutines.
# NOTE(review): the patch records a wallclock start time here so elapsed
# seconds can be reported at the end of the run (see the printf below).
+# Measure the elapsed wallclock time.
+my $t0 = time();
+
# If we're doing deparse tests, ignore failures for these
my $deparse_failures;
# then comp, to validate that require works
# then run, to validate that -M works
# then we know we can -MTestInit for everything else, making life simpler
# NOTE(review): change 1 -- the "perf" directory is added to the list of
# core test directories handed to _find_tests().
- foreach my $dir (qw(base comp run cmd io re opbasic op uni mro)) {
+ foreach my $dir (qw(base comp run cmd io re opbasic op uni mro perf)) {
_find_tests($dir);
}
unless ($::core) {
push @ARGV, _tests_from_manifest($extensions, $known_extensions);
unless ($::core) {
_find_tests('japh') if $::torture;
# NOTE(review): change 2 -- drops the stray "t/" prefix; other _find_tests()
# calls here take paths relative to t/, so 't/benchmark' looks like a bug
# being fixed.  Confirm 'benchmark' exists as a directory under t/.
- _find_tests('t/benchmark') if $::benchmark or $ENV{PERL_BENCHMARK};
+ _find_tests('benchmark') if $::benchmark or $ENV{PERL_BENCHMARK};
_find_tests('bigmem') if $ENV{PERL_TEST_MEMORY};
}
}
}
}
}
# NOTE(review): change 3 -- reports wallclock elapsed time, paired with the
# "+my $t0 = time();" hunk above.  This hunk appears to target a different
# file/region (the summary code of t/TEST, next to the times() report);
# $t0 must be in scope here for the patched result to compile -- verify.
+ printf "Elapsed: %d sec\n", time() - $t0;
my ($user,$sys,$cuser,$csys) = times;
my $tot = sprintf("u=%.2f s=%.2f cu=%.2f cs=%.2f scripts=%d tests=%d",
$user,$sys,$cuser,$csys,$tested_files,$totmax);