This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
tweak perlmod.pod
[perl5.git] / Porting / bench.pl
CommitLineData
9e7973fa
DM
1#!/usr/bin/perl
2#
3# A tool for analysing the performance of the code snippets found in
4# t/perf/benchmarks or similar
5
6
7=head1 NAME
8
9bench.pl - Compare the performance of perl code snippets across multiple
10perls.
11
12=head1 SYNOPSIS
13
4a1358c2
FC
14 # Basic: run the tests in t/perf/benchmarks against two or
15 # more perls
9e7973fa 16
32dfbb33
JC
17 bench.pl [options] -- perlA[=labelA] perlB[=labelB] ...
18
19 # run the tests against same perlA 2x, with and without extra
20 # options
21
22 bench.pl [options] -- perlA=fast PerlA=slow -Mstrict -Dpsltoc
9e7973fa
DM
23
24 # Run bench.pl's own built-in sanity tests
25
26 bench.pl --action=selftest
27
28=head1 DESCRIPTION
29
30By default, F<bench.pl> will run code snippets found in
31F<t/perf/benchmarks> (or similar) under cachegrind, in order to calculate
32how many instruction reads, data writes, branches, cache misses, etc. that
33one execution of the snippet uses. It will run them against two or more
34perl executables and show how much each test has gotten better or worse.
35
36It is modelled on the F<perlbench> tool, but since it measures instruction
37reads etc., rather than timings, it is much more precise and reproducible.
e34630bf 38It is also considerably faster, and is capable of running tests in
9e7973fa
DM
39parallel (with C<-j>). Rather than displaying a single relative
40percentage per test/perl combination, it displays values for 13 different
41measurements, such as instruction reads, conditional branch misses etc.
42
43There are options to write the raw data to a file, and to read it back.
44This means that you can view the same run data in different views with
45different selection and sort options.
46
47The optional C<=label> after each perl executable is used in the display
48output.
49
50=head1 OPTIONS
51
52=over 4
53
54=item *
55
56--action=I<foo>
57
58What action to perform. The default is I<grind>, which runs the benchmarks
59using I<cachegrind> as the back end. The only other action at the moment is
60I<selftest>, which runs some basic sanity checks and produces TAP output.
61
62=item *
63
64--average
65
66Only display the overall average, rather than the results for each
67individual test.
68
69=item *
70
71--benchfile=I<foo>
72
73The path of the file which contains the benchmarks (F<t/perf/benchmarks>
74by default).
75
76=item *
77
78--bisect=I<field,minval,maxval>
79
80Run a single test against one perl and exit with a zero status if the
81named field is in the specified range; exit 1 otherwise. It will complain
82if more than one test or perl has been specified. It is intended to be
83called as part of a bisect run, to determine when something changed.
84For example,
85
4a1358c2
FC
86 bench.pl -j 8 --tests=foo --bisect=Ir,100,105 --perlargs=-Ilib \
87 ./miniperl
9e7973fa
DM
88
89might be called from bisect to find when the number of instruction reads
90for test I<foo> falls outside the range 100..105.
91
92=item *
93
df3d7b3a
DM
94--compact=I<perl>
95
96Display the results for a single perl executable in a compact form.
97Which perl to display is specified in the same manner as C<--norm>.
98
99=item *
100
9e7973fa
DM
101--debug
102
103Enable verbose debugging output.
104
105=item *
106
107--fields=I<a,b,c>
108
109Display only the specified fields; for example,
110
111 --fields=Ir,Ir_m,Ir_mm
112
113If only one field is selected, the output is in more compact form.
114
115=item *
116
117--grindargs=I<foo>
118
8a094fee
JC
119Optional command-line arguments to pass to all cachegrind invocations.
120
121This option is appended to those which bench.pl uses for its own
122purposes; so it can be used to override them (see --debug output
123below), and can also be 'abused' to add redirects into the valgrind
124command invocation.
125
126For example, this writes PERL_MEM_LOG activity to foobar.$$, because
1273>foobar.$$ redirects fd 3, then perl under PERL_MEM_LOG writes to fd 3.
128
129 $ perl Porting/bench.pl --jobs=2 --verbose --debug \
130 --tests=call::sub::amp_empty \
131 \
132 --grindargs='--cachegrind-out-file=junk.$$ 3>foobar.$$' \
133 -- \
134 perl5.24.0 perl5.24.0:+memlog:PERL_MEM_LOG=3mst
135
136for the +memlog tests, this executes as: (shown via --debug, then prettified)
137
138 Command: PERL_HASH_SEED=0 PERL_MEM_LOG=3mst
139 valgrind --tool=cachegrind --branch-sim=yes
140 --cachegrind-out-file=/dev/null --cachegrind-out-file=junk.$$
141 3>foobar.$$ perl5.24.0 - 10 2>&1
142
143The result is that a set of junk.$$ files containing raw cachegrind
144output are written, and foobar.$$ contains the expected memlog output.
145
146Notes:
147
148There's no obvious utility for those junk.$$ and foobar.$$ files, but
149you can have them anyway.
150
151The 3 in PERL_MEM_LOG=3mst is needed because the output would
152otherwise go to STDERR, and cause parse_cachegrind() to reject the
153test and die.
154
155The --grindargs redirect is needed to capture the memlog output;
156without it, the memlog output is written to fd3, around
157parse_cachegrind and effectively into /dev/null
158
159PERL_MEM_LOG is expensive when used.
160
161call::sub::amp_empty
162&foo function call with no args or body
163
164 perl5.24.0 perl5.24.0+memlog
165 ---------- -----------------
166 Ir 394.0 543477.5
167 Dr 161.0 146814.1
168 Dw 72.0 122304.6
169 COND 58.0 66796.4
170 IND 5.0 5537.7
171
172COND_m 0.0 6743.1
173 IND_m 5.0 1490.2
174
175 Ir_m1 0.0 683.7
176 Dr_m1 0.0 65.9
177 Dw_m1 0.0 8.5
178
179 Ir_mm 0.0 11.6
180 Dr_mm 0.0 10.6
181 Dw_mm 0.0 4.7
182
9e7973fa
DM
183
184=item *
185
186--help
187
188Display basic usage information.
189
190=item *
191
192-j I<N>
193--jobs=I<N>
194
195Run I<N> jobs in parallel (default 1). This determines how many cachegrind
196processes will be running at a time, and should generally be set to the number
197of CPUs available.
198
199=item *
200
201--norm=I<foo>
202
203Specify which perl column in the output to treat as the 100% norm.
204It may be a column number (0..N-1) or a perl executable name or label.
205It defaults to the leftmost column.
206
207=item *
208
209--perlargs=I<foo>
210
32dfbb33
JC
211Optional command-line arguments to pass to each perl-under-test
212(perlA, perlB in synopsis) For example, C<--perlargs=-Ilib>.
9e7973fa
DM
213
214=item *
215
216--raw
217
218Display raw data counts rather than percentages in the outputs. This
219allows you to see the exact number of instruction reads, branch misses etc.
220for each test/perl combination. It also causes the C<AVERAGE> display
221per field to be calculated based on the average of each test's count
222rather than average of each percentage. This means that tests with very
223high counts will dominate.
224
225=item *
226
65f0b1c9 227--sort=I<field:perl>
9e7973fa
DM
228
229Order the tests in the output based on the value of I<field> in the
230column I<perl>. The I<perl> value is as per C<--norm>. For example
231
4a1358c2
FC
232 bench.pl --sort=Dw:perl-5.20.0 \
233 perl-5.16.0 perl-5.18.0 perl-5.20.0
9e7973fa
DM
234
235=item *
236
237-r I<file>
238--read=I<file>
239
240Read in saved data from a previous C<--write> run from the specified file.
241
242Requires C<JSON::PP> to be available.
243
244=item *
245
246--tests=I<FOO>
247
248Specify a subset of tests to run (or in the case of C<--read>, to display).
249It may be either a comma-separated list of test names, or a regular
250expression. For example
251
252 --tests=expr::assign::scalar_lex,expr::assign::2list_lex
253 --tests=/^expr::/
254
255=item *
256
257--verbose
258
259Display progress information.
260
261=item *
262
263-w I<file>
264--write=I<file>
265
266Save the raw data to the specified file. It can be read back later with
267C<--read>.
268
269Requires C<JSON::PP> to be available.
270
271=back
272
273=cut
274
275
276
277use 5.010000;
278use warnings;
279use strict;
d54523c4 280use Getopt::Long qw(:config no_auto_abbrev require_order);
9e7973fa
DM
281use IPC::Open2 ();
282use IO::Select;
c2d21e7a 283use IO::File;
9e7973fa
DM
284use POSIX ":sys_wait_h";
285
286# The version of the file format used to save data. We refuse to process
287# the file if the integer component differs.
288
289my $FORMAT_VERSION = 1.0;
290
291# The fields we know about
292
293my %VALID_FIELDS = map { $_ => 1 }
294 qw(Ir Ir_m1 Ir_mm Dr Dr_m1 Dr_mm Dw Dw_m1 Dw_mm COND COND_m IND IND_m);
295
# Print the usage/help text and terminate (via die, so it goes to STDERR
# and the script exits non-zero).
sub usage {
    die <<EOF;
usage: $0 [options] -- perl[=label] ...
  --action=foo       What action to perform [default: grind].
  --average          Only display average, not individual test results.
  --benchfile=foo    File containing the benchmarks;
                       [default: t/perf/benchmarks].
  --bisect=f,min,max run a single test against one perl and exit with a
                       zero status if the named field is in the specified
                       range; exit 1 otherwise.
  --compact=perl     Display the results of a single perl in compact form.
                       Which perl specified like --norm
  --debug            Enable verbose debugging output.
  --fields=a,b,c     Display only the specified fields (e.g. Ir,Ir_m,Ir_mm).
  --grindargs=foo    Optional command-line args to pass to cachegrind.
  --help             Display this help.
  -j|--jobs=N        Run N jobs in parallel [default 1].
  --norm=perl        Which perl column to treat as 100%; may be a column
                       number (0..N-1) or a perl executable name or label;
                       [default: 0].
  --perlargs=foo     Optional command-line args to pass to each perl to run.
  --raw              Display raw data counts rather than percentages.
  --sort=field:perl  Sort the tests based on the value of 'field' in the
                       column 'perl'. The perl value is as per --norm.
  -r|--read=file     Read in previously saved data from the specified file.
  --tests=FOO        Select only the specified tests from the benchmarks file;
                       FOO may be either of the form 'foo,bar' or '/regex/';
                       [default: all tests].
  --verbose          Display progress information.
  -w|--write=file    Save the raw data to the specified file.

--action is one of:
    grind      run the code under cachegrind
    selftest   perform a selftest; produce TAP output

The command line ends with one or more specified perl executables,
which will be searched for in the current \$PATH. Each binary name may
have an optional =LABEL appended, which will be used rather than the
executable name in output. E.g.

    perl-5.20.1=PRE-BUGFIX  perl-5.20.1-new=POST-BUGFIX
EOF
}
339
# Global option settings, with their defaults; filled in by GetOptions below
# and consulted throughout the rest of the script.
my %OPTS = (
    action    => 'grind',
    average   => 0,
    benchfile => 't/perf/benchmarks',
    bisect    => undef,
    compact   => undef,
    debug     => 0,
    grindargs => '',
    fields    => undef,
    jobs      => 1,
    norm      => 0,
    perlargs  => '',
    raw       => 0,
    read      => undef,
    sort      => undef,
    tests     => undef,
    verbose   => 0,
    write     => undef,
);


# process command-line args and call top-level action

{
    GetOptions(
        'action=s'    => \$OPTS{action},
        'average'     => \$OPTS{average},
        'benchfile=s' => \$OPTS{benchfile},
        'bisect=s'    => \$OPTS{bisect},
        'compact=s'   => \$OPTS{compact},
        'debug'       => \$OPTS{debug},
        'grindargs=s' => \$OPTS{grindargs},
        'help'        => \$OPTS{help},
        'fields=s'    => \$OPTS{fields},
        'jobs|j=i'    => \$OPTS{jobs},
        'norm=s'      => \$OPTS{norm},
        'perlargs=s'  => \$OPTS{perlargs},
        'raw'         => \$OPTS{raw},
        'read|r=s'    => \$OPTS{read},
        'sort=s'      => \$OPTS{sort},
        'tests=s'     => \$OPTS{tests},
        'verbose'     => \$OPTS{verbose},
        'write|w=s'   => \$OPTS{write},
    ) or usage;

    usage if $OPTS{help};


    if (defined $OPTS{read} and defined $OPTS{write}) {
        die "Error: can't specify both --read and --write options\n";
    }

    if (defined $OPTS{read} or defined $OPTS{write}) {
        # fail early if it's not present
        require JSON::PP;
    }

    # Convert a --fields=a,b,c list into a lookup hash, validating each name.
    if (defined $OPTS{fields}) {
        my @f = split /,/, $OPTS{fields};
        for (@f) {
            die "Error: --fields: unknown field '$_'\n"
                unless $VALID_FIELDS{$_};
        }
        my %f = map { $_ => 1 } @f;
        $OPTS{fields} = \%f;
    }

    my %valid_actions = qw(grind 1 selftest 1);
    unless ($valid_actions{$OPTS{action}}) {
        die "Error: unrecognised action '$OPTS{action}'\n"
          . "must be one of: " . join(', ', sort keys %valid_actions)."\n";
    }

    if (defined $OPTS{sort}) {
        my @s = split /:/, $OPTS{sort};
        if (@s != 2) {
            die "Error: --sort argument should be of the form field:perl: "
              . "'$OPTS{sort}'\n";
        }
        my ($field, $perl) = @s;
        # Bug fix: the field name in this message was missing its closing quote.
        die "Error: --sort: unknown field '$field'\n"
            unless $VALID_FIELDS{$field};
        # the 'perl' value will be validated later, after we have processed
        # the perls
        $OPTS{'sort-field'} = $field;
        $OPTS{'sort-perl'}  = $perl;
    }

    # Sanity-check the number of perl executables given for each action.
    if ($OPTS{action} eq 'selftest') {
        if (@ARGV) {
            # Bug fix: this message used to say "--read" (copy-paste error).
            die "Error: no perl executables may be specified with --action=selftest\n";
        }
    }
    elsif (defined $OPTS{bisect}) {
        die "Error: exactly one perl executable must be specified for bisect\n"
            unless @ARGV == 1;
        die "Error: Can't specify both --bisect and --read\n"
            if defined $OPTS{read};
        die "Error: Can't specify both --bisect and --write\n"
            if defined $OPTS{write};
    }
    elsif (defined $OPTS{read}) {
        if (@ARGV) {
            die "Error: no perl executables may be specified with --read\n";
        }
    }
    elsif ($OPTS{raw}) {
        unless (@ARGV) {
            die "Error: at least one perl executable must be specified\n";
        }
    }
    else {
        unless (@ARGV >= 2) {
            die "Error: at least two perl executables must be specified\n";
        }
    }

    if ($OPTS{action} eq 'grind') {
        do_grind(\@ARGV);
    }
    elsif ($OPTS{action} eq 'selftest') {
        do_selftest();
    }
}
exit 0;
465
466
467# Given a hash ref keyed by test names, filter it by deleting unwanted
468# tests, based on $OPTS{tests}.
469
# Given a hash ref keyed by test names, delete from it (in place) every
# test excluded by $OPTS{tests}. $OPTS{tests} may be either a /regex/ or
# a comma-separated list of exact test names. No return value; the hash
# is modified directly.
# (Fix: removed the unused local 'my @tests'.)
sub filter_tests {
    my ($tests) = @_;

    my $opt = $OPTS{tests};
    return unless defined $opt;

    if ($opt =~ m{^/}) {
        # --tests=/regex/: keep only the test names matching the pattern
        $opt =~ s{^/(.+)/$}{$1}
            or die "Error: --tests regex must be of the form /.../\n";
        for (keys %$tests) {
            delete $tests->{$_} unless /$opt/;
        }
    }
    else {
        # --tests=foo,bar: every listed name must exist; keep only those
        my %t;
        for (split /,/, $opt) {
            die "Error: no such test found: '$_'\n"
              . ($OPTS{verbose} ? " have: @{[ sort keys %$tests ]}\n" : "")
                unless exists $tests->{$_};
            $t{$_} = 1;
        }
        for (keys %$tests) {
            delete $tests->{$_} unless exists $t{$_};
        }
    }
}
498
499
500# Read in the test file, and filter out any tests excluded by $OPTS{tests}
957d8930
DM
501# return a hash ref { testname => { test }, ... }
502# and an array ref of the original test names order,
9e7973fa
DM
503
# Load the benchmarks file (a Perl file returning a [ name => spec, ... ]
# array ref), apply the --tests filter, and return:
#   (\%tests, \@orig_order)
# where @orig_order preserves the test order as written in the file.
sub read_tests_file {
    my ($file) = @_;

    my $spec_list = do $file;
    unless ($spec_list) {
        die "Error: can't parse '$file': $@\n" if $@;
        die "Error: can't read '$file': $!\n";
    }

    # The list alternates name, spec, name, spec, ...; the even indices
    # are the test names, in file order.
    my @orig_order = map { $spec_list->[$_] }
                     grep { !($_ % 2) } 0 .. $#$spec_list;

    my $tests = { @$spec_list };
    filter_tests($tests);
    return $tests, \@orig_order;
}
522
523
524# Process the perl/column argument of options like --norm and --sort.
525# Return the index of the matching perl.
526
# Process the perl/column argument of options like --norm and --sort.
# $perl is either a column index or a perl executable name or label;
# $perls is [ [ exe, label, ... ], ... ]; $who names the option for
# error messages. Returns the index of the matching perl, or dies.
sub select_a_perl {
    my ($perl, $perls, $who) = @_;

    # An all-digits argument selects by column index. (Generalized from
    # /^[0-9]$/, which only matched a single digit and so made columns
    # 10 and above unselectable.)
    if ($perl =~ /^[0-9]+$/) {
        die "Error: $who value $perl outside range 0.." . $#$perls . "\n"
            unless $perl < @$perls;
        return $perl;
    }
    else {
        # Otherwise match against each perl's executable name or label;
        # the match must be unique.
        my @perl = grep    $perls->[$_][0] eq $perl
                        || $perls->[$_][1] eq $perl,
                        0..$#$perls;
        die "Error: $who: unrecognised perl '$perl'\n"
            unless @perl;
        die "Error: $who: ambiguous perl '$perl'\n"
            if @perl > 1;
        return $perl[0];
    }
}
546
547
44b18357 548# Validate the list of perl=label (+ cmdline options) on the command line.
92683b8b
DM
549# Return a list of [ exe, label, cmdline-options ] tuples, i.e.
550# 'perl-under-test's (PUTs)
9e7973fa 551
# Validate the list of perl=label (+ cmdline options) on the command line.
# Walks the arguments in reverse so that options following a perl name can
# be accumulated and attached to that perl. Each candidate is executed
# (via qx) to check it really is a runnable perl; anything that isn't is
# treated as an argument for the next perl found.
# Returns a list of [ exe, label, \%env, @putargs ] tuples, i.e.
# 'perl-under-test's (PUTs), in the original command-line order.
sub process_puts {
    my @res_puts; # returned, each item is [ perlexe, label, \%env, @putargs ]
    my %seen;     # labels already used; each label must be unique
    my @putargs;  # collect not-perls into args per PUT

    for my $p (reverse @_) {
        push @putargs, $p and next if $p =~ /^-/; # not-perl, don't send to qx//

        # perl=label:ENV=val,... ; label defaults to the executable name
        my ($perl, $label, $env) = split /[=:,]/, $p, 3;
        $label //= $perl;
        # a label of "+foo" is shorthand for "<exe>+foo"
        $label = $perl.$label if $label =~ /^\+/;
        die "$label cannot be used on 2 different perls under test\n" if $seen{$label}++;

        # optional per-PUT environment settings, e.g. PERL_MEM_LOG=3mst
        my %env;
        if ($env) {
            %env = split /[=,]/, $env;
        }
        # smoke-test the candidate: a real perl prints "ok"
        my $r = qx($perl -e 'print qq(ok\n)' 2>&1);
        if ($r eq "ok\n") {
            # it ran: record it, along with any args collected since the
            # previous (i.e. following, in command-line order) perl
            push @res_puts, [ $perl, $label, \%env, reverse @putargs ];
            @putargs = ();
            warn "Added Perl-Under-Test: [ @{[@{$res_puts[-1]}]} ]\n"
                if $OPTS{verbose};
        } else {
            warn "perl-under-test args: @putargs + a not-perl: $p $r\n"
                if $OPTS{verbose};
            push @putargs, $p; # not-perl
        }
    }
    # reverse again to restore command-line order
    return reverse @res_puts;
}
583
584
8fbd1c2c 585
9e7973fa
DM
586# Return a string containing perl test code wrapped in a loop
587# that runs $ARGV[0] times
588
# Return a string containing perl test code wrapped in a loop
# that runs $ARGV[0] times.
sub make_perl_prog {
    my ($test, $desc, $setup, $code) = @_;

    # Build the script line by line; the result is byte-identical to the
    # traditional heredoc form. srand(0) keeps any randomness reproducible.
    return join "\n",
        "# $desc",
        "package $test;",
        "BEGIN { srand(0) }",
        "$setup;",
        "for my \$__loop__ (1..\$ARGV[0]) {",
        "    $code;",
        "}",
        "";
}
602
603
604# Parse the output from cachegrind. Return a hash ref.
605# See do_selftest() for examples of the output format.
606
# Parse the output from cachegrind. Return a hash ref keyed by field name
# (Ir, Dr, Dw, COND, IND and their miss variants) with raw integer counts.
# $id identifies the job (for error messages); $perl is unused here but
# kept for interface consistency with the caller.
# See do_selftest() for examples of the output format.
sub parse_cachegrind {
    my ($output, $id, $perl) = @_;

    my %res;

    my @lines = split /\n/, $output;
    for (@lines) {
        # every line must carry a "==pid==" or "--pid--" prefix; anything
        # else means the test wrote to stderr and the run is unusable
        unless (s/(==\d+==)|(--\d+--) //) {
            die "Error: while executing $id:\n"
              . "unexpected code or cachegrind output:\n$_\n";
        }
        if (/I refs:\s+([\d,]+)/) {
            $res{Ir} = $1;
        }
        elsif (/I1 misses:\s+([\d,]+)/) {
            $res{Ir_m1} = $1;
        }
        elsif (/LLi misses:\s+([\d,]+)/) {
            $res{Ir_mm} = $1;
        }
        elsif (/D refs:\s+.*?([\d,]+) rd .*?([\d,]+) wr/) {
            @res{qw(Dr Dw)} = ($1,$2);
        }
        elsif (/D1 misses:\s+.*?([\d,]+) rd .*?([\d,]+) wr/) {
            @res{qw(Dr_m1 Dw_m1)} = ($1,$2);
        }
        elsif (/LLd misses:\s+.*?([\d,]+) rd .*?([\d,]+) wr/) {
            @res{qw(Dr_mm Dw_mm)} = ($1,$2);
        }
        elsif (/Branches:\s+.*?([\d,]+) cond .*?([\d,]+) ind/) {
            @res{qw(COND IND)} = ($1,$2);
        }
        elsif (/Mispredicts:\s+.*?([\d,]+) cond .*?([\d,]+) ind/) {
            @res{qw(COND_m IND_m)} = ($1,$2);
        }
    }

    # every known field must have been seen; strip the thousands commas
    for my $field (keys %VALID_FIELDS) {
        die "Error: can't parse '$field' field from cachegrind output:\n$output"
            unless exists $res{$field};
        $res{$field} =~ s/,//g;
    }

    return \%res;
}
652
653
654# Handle the 'grind' action
655
# Handle the 'grind' action: obtain results either by reading a saved
# --write file or by running every test under cachegrind, then dispatch
# to --bisect checking, --write saving, or one of the display routines.
# (Fixes: added the missing space in the "unsupported version ... in file"
# message, and removed stray leading spaces from two "can't open" messages.)
sub do_grind {
    my ($perl_args) = @_; # the residue of @ARGV after option processing

    my ($loop_counts, $perls, $results, $tests, $order);
    my ($bisect_field, $bisect_min, $bisect_max);

    if (defined $OPTS{bisect}) {
        ($bisect_field, $bisect_min, $bisect_max) = split /,/, $OPTS{bisect}, 3;
        die "Error: --bisect option must be of form 'field,integer,integer'\n"
            unless
                   defined $bisect_max
                and $bisect_min =~ /^[0-9]+$/
                and $bisect_max =~ /^[0-9]+$/;

        die "Error: unrecognised field '$bisect_field' in --bisect option\n"
            unless $VALID_FIELDS{$bisect_field};

        die "Error: --bisect min ($bisect_min) must be <= max ($bisect_max)\n"
            if $bisect_min > $bisect_max;
    }

    if (defined $OPTS{read}) {
        # Load a previous run's raw data rather than running cachegrind.
        open my $in, '<:encoding(UTF-8)', $OPTS{read}
            or die "Error: can't open $OPTS{read} for reading: $!\n";
        my $data = do { local $/; <$in> };
        close $in;

        my $hash = JSON::PP::decode_json($data);
        # refuse files written by a newer (incompatible) format version
        if (int($FORMAT_VERSION) < int($hash->{version})) {
            die "Error: unsupported version $hash->{version} in file "
              . "'$OPTS{read}' (too new)\n";
        }
        ($loop_counts, $perls, $results, $tests, $order) =
            @$hash{qw(loop_counts perls results tests order)};

        filter_tests($results);
        filter_tests($tests);

        # older save files have no 'order' key; fall back to sorted names
        if (!$order) {
            $order = [ sort keys %$tests ];
        }
    }
    else {
        # How many times to execute the loop for the two trials. The lower
        # value is intended to do the loop enough times that branch
        # prediction has taken hold; the higher loop allows us to see the
        # branch misses after that
        $loop_counts = [10, 20];

        ($tests, $order) = read_tests_file($OPTS{benchfile});
        die "Error: only a single test may be specified with --bisect\n"
            if defined $OPTS{bisect} and keys %$tests != 1;

        $perls = [ process_puts(@$perl_args) ];

        $results = grind_run($tests, $order, $perls, $loop_counts);
    }

    # now that we have a list of perls, use it to process the
    # 'perl' component of the --norm and --sort args

    $OPTS{norm} = select_a_perl($OPTS{norm}, $perls, "--norm");
    if (defined $OPTS{'sort-perl'}) {
        $OPTS{'sort-perl'} =
                select_a_perl($OPTS{'sort-perl'}, $perls, "--sort");
    }

    if (defined $OPTS{'compact'}) {
        $OPTS{'compact'} =
                select_a_perl($OPTS{'compact'}, $perls, "--compact");
    }

    if (defined $OPTS{write}) {
        # Save the raw data for a later --read run; no display this time.
        my $json = JSON::PP::encode_json({
            version     => $FORMAT_VERSION,
            loop_counts => $loop_counts,
            perls       => $perls,
            results     => $results,
            tests       => $tests,
            order       => $order,
        });

        open my $out, '>:encoding(UTF-8)', $OPTS{write}
            or die "Error: can't open $OPTS{write} for writing: $!\n";
        print $out $json or die "Error: writing to file '$OPTS{write}': $!\n";
        close $out       or die "Error: closing file '$OPTS{write}': $!\n";
    }
    else {
        my ($processed, $averages) =
            grind_process($results, $perls, $loop_counts);

        if (defined $OPTS{bisect}) {
            # exit 0 iff the (single) result's field is within range
            my @r = values %$results;
            die "Panic: expected exactly one test result in bisect\n"
                if @r != 1;
            @r = values %{$r[0]};
            die "Panic: expected exactly one perl result in bisect\n"
                if @r != 1;
            my $c = $r[0]{$bisect_field};
            die "Panic: no result in bisect for field '$bisect_field'\n"
                unless defined $c;
            exit 0 if $bisect_min <= $c and $c <= $bisect_max;
            exit 1;
        }
        elsif (defined $OPTS{compact}) {
            grind_print_compact($processed, $averages, $OPTS{compact},
                                $perls, $tests, $order);
        }
        else {
            grind_print($processed, $averages, $perls, $tests, $order);
        }
    }
}
769
770
771# Run cachegrind for every test/perl combo.
772# It may run several processes in parallel when -j is specified.
773# Return a hash ref suitable for input to grind_process()
774
# Run cachegrind for every test/perl combo.
# It may run several processes in parallel when -j is specified.
# Return a hash ref suitable for input to grind_process():
#   $results{test}{perl_label}[active][loopix] = parse_cachegrind() hash
sub grind_run {
    my ($tests, $order, $perls, $counts) = @_;

    # Build a list of all the jobs to run

    my @jobs;

    for my $test (grep $tests->{$_}, @$order) {

        # Create two test progs: one with an empty loop and one with code.
        # Note that the empty loop is actually '{1;}' rather than '{}';
        # this causes the loop to have a single nextstate rather than a
        # stub op, so more closely matches the active loop; e.g.:
        #   {1;}    => nextstate;                   unstack
        #   {$x=1;} => nextstate; const; gvsv; sassign; unstack
        my @prog = (
            make_perl_prog($test, @{$tests->{$test}}{qw(desc setup)}, '1'),
            make_perl_prog($test, @{$tests->{$test}}{qw(desc setup code)}),
        );

        for my $p (@$perls) {
            my ($perl, $label, $env, @putargs) = @$p;

            # Run both the empty loop and the active loop
            # $counts->[0] and $counts->[1] times.

            for my $i (0,1) {
                for my $j (0,1) {
                    # per-PUT environment variables, prepended to the command
                    my $envstr = '';
                    if (ref $env) {
                        $envstr .= "$_=$env->{$_} " for sort keys %$env;
                    }
                    # PERL_HASH_SEED=0 keeps hash layout deterministic;
                    # cachegrind's own output file is discarded - we only
                    # want the summary it prints on stderr (hence 2>&1)
                    my $cmd = "PERL_HASH_SEED=0 $envstr"
                            . "valgrind --tool=cachegrind --branch-sim=yes "
                            . "--cachegrind-out-file=/dev/null "
                            . "$OPTS{grindargs} "
                            . "$perl $OPTS{perlargs} @putargs - $counts->[$j] 2>&1";
                    # for debugging and error messages
                    my $id = "$test/$label "
                        . ($i ? "active" : "empty") . "/"
                        . ($j ? "long" : "short") . " loop";

                    push @jobs, {
                        test   => $test,
                        perl   => $perl,
                        plabel => $label,
                        cmd    => $cmd,
                        prog   => $prog[$i],
                        active => $i,
                        loopix => $j,
                        id     => $id,
                    };
                }
            }
        }
    }

    # Execute each cachegrind and store the results in %results.

    local $SIG{PIPE} = 'IGNORE';

    my $max_jobs = $OPTS{jobs};
    my $running  = 0; # count of executing jobs
    my %pids;         # map pids to jobs
    my %fds;          # map (stringified) output fhs to jobs
    my %results;
    my $select = IO::Select->new();

    while (@jobs or $running) {

        if ($OPTS{debug}) {
            printf "Main loop: pending=%d running=%d\n",
                scalar(@jobs), $running;
        }

        # Start new jobs

        while (@jobs && $running < $max_jobs) {
            my $job = shift @jobs;
            my ($id, $cmd) = @$job{qw(id cmd)};

            my ($in, $out, $pid);
            warn "Starting $id\n" if $OPTS{verbose};
            eval { $pid = IPC::Open2::open2($out, $in, $cmd); 1; }
                or die "Error: while starting cachegrind subprocess"
                      ." for $id:\n$@";
            $running++;
            $pids{$pid}    = $job;
            $fds{"$out"}   = $job;   # keyed by stringified filehandle ref
            $job->{out_fd} = $out;
            $job->{output} = '';
            $job->{pid}    = $pid;

            # non-blocking reads so one slow job can't stall the others
            $out->blocking(0);
            $select->add($out);

            if ($OPTS{debug}) {
                print "Started pid $pid for $id\n";
            }

            # Note:
            # In principle we should write to $in in the main select loop,
            # since it may block. In reality,
            # a) the code we write to the perl process's stdin is likely
            #    to be less than the OS's pipe buffer size;
            # b) by the time the perl process has read in all its stdin,
            #    the only output it should have generated is a few lines
            #    of cachegrind output preamble.
            # If these assumptions change, then perform the following print
            # in the select loop instead.

            print $in $job->{prog};
            close $in;
        }

        # Get output of running jobs

        if ($OPTS{debug}) {
            printf "Select: waiting on (%s)\n",
                join ', ', sort { $a <=> $b } map $fds{$_}{pid},
                            $select->handles;
        }

        my @ready = $select->can_read;

        if ($OPTS{debug}) {
            printf "Select: pids (%s) ready\n",
                join ', ', sort { $a <=> $b } map $fds{$_}{pid}, @ready;
        }

        unless (@ready) {
            die "Panic: select returned no file handles\n";
        }

        for my $fd (@ready) {
            my $j = $fds{"$fd"};
            # append to whatever output has accumulated so far
            my $r = sysread $fd, $j->{output}, 8192, length($j->{output});
            unless (defined $r) {
                die "Panic: Read from process running $j->{id} gave:\n$!";
            }
            next if $r;   # more output may follow; only $r == 0 is EOF

            # EOF

            if ($OPTS{debug}) {
                print "Got eof for pid $fds{$fd}{pid} ($j->{id})\n";
            }

            $select->remove($j->{out_fd});
            close($j->{out_fd})
                or die "Panic: closing output fh on $j->{id} gave:\n$!\n";
            $running--;
            delete $fds{"$j->{out_fd}"};
            my $output = $j->{output};

            if ($OPTS{debug}) {
                my $p = $j->{prog};
                $p =~ s/^/    : /mg;
                my $o = $output;
                $o =~ s/^/    : /mg;

                print "\n$j->{id}/\nCommand: $j->{cmd}\n"
                    . "Input:\n$p"
                    . "Output\n$o";
            }

            $results{$j->{test}}{$j->{plabel}}[$j->{active}][$j->{loopix}]
                    = parse_cachegrind($output, $j->{id}, $j->{perl});
        }

        # Reap finished jobs

        while (1) {
            my $kid = waitpid(-1, WNOHANG);
            my $ret = $?;
            last if $kid <= 0;

            unless (exists $pids{$kid}) {
                die "Panic: reaped unexpected child $kid";
            }
            my $j = $pids{$kid};
            if ($ret) {
                die sprintf("Error: $j->{id} gave return status 0x%04x\n", $ret)
                  . "with the following output\n:$j->{output}\n";
            }
            delete $pids{$kid};
        }
    }

    return \%results;
}
966
967
968
969
970# grind_process(): process the data that has been extracted from
971# cachgegrind's output.
972#
973# $res is of the form ->{benchmark_name}{perl_name}[active][count]{field_name},
974# where active is 0 or 1 indicating an empty or active loop,
975# count is 0 or 1 indicating a short or long loop. E.g.
976#
977# $res->{'expr::assign::scalar_lex'}{perl-5.21.1}[0][10]{Dw_mm}
978#
979# The $res data structure is modified in-place by this sub.
980#
981# $perls is [ [ perl-exe, perl-label], .... ].
982#
983# $counts is [ N, M ] indicating the counts for the short and long loops.
984#
985#
986# return \%output, \%averages, where
987#
988# $output{benchmark_name}{perl_name}{field_name} = N
989# $averages{perl_name}{field_name} = M
990#
991# where N is the raw count ($OPTS{raw}), or count_perl0/count_perlI otherwise;
992# M is the average raw count over all tests ($OPTS{raw}), or
993# 1/(sum(count_perlI/count_perl0)/num_tests) otherwise.
994
# Process the data extracted from cachegrind's output (see the block
# comment above for the shapes of $res, $perls, $counts and the
# two returned hash refs).
sub grind_process {
    my ($res, $perls, $counts) = @_;

    # Process the four results for each test/perf combo:
    # Convert
    #     $res->{benchmark_name}{perl_name}[active][count]{field_name} = n
    # to
    #     $res->{benchmark_name}{perl_name}{field_name} = averaged_n
    #
    # $r[0][1] - $r[0][0] is the time to do ($counts->[1]-$counts->[0])
    #         empty loops, eliminating startup time
    # $r[1][1] - $r[1][0] is the time to do ($counts->[1]-$counts->[0])
    #         active loops, eliminating startup time
    # (the two startup times may be different because different code
    # is being compiled); the difference of the two results above
    # divided by the count difference is the time to execute the
    # active code once, eliminating both startup and loop overhead.

    for my $tests (values %$res) {
        for my $r (values %$tests) {
            my $r2;
            for (keys %{$r->[0][0]}) {
                my $n = ( ($r->[1][1]{$_} - $r->[1][0]{$_})
                        - ($r->[0][1]{$_} - $r->[0][0]{$_})
                        ) / ($counts->[1] - $counts->[0]);
                $r2->{$_} = $n;
            }
            # $r aliases the hash value, so this rewrites $res in place
            $r = $r2;
        }
    }

    my %totals;   # $totals{perl}{field}: accumulator for the averages
    my %counts;   # $counts{perl}{field}: how many tests contributed
    my %data;     # the per-test output values

    my $perl_norm = $perls->[$OPTS{norm}][1]; # the label of the reference perl

    for my $test_name (keys %$res) {
        my $res1 = $res->{$test_name};
        my $res2_norm = $res1->{$perl_norm};
        for my $perl (keys %$res1) {
            my $res2 = $res1->{$perl};
            for my $field (keys %$res2) {
                # $p: the reference perl's count; $q: this perl's count
                my ($p, $q) = ($res2_norm->{$field}, $res2->{$field});

                if ($OPTS{raw}) {
                    # Avoid annoying '-0.0' displays. Ideally this number
                    # should never be negative, but fluctuations in
                    # startup etc can theoretically make this happen
                    $q = 0 if ($q <= 0 && $q > -0.1);
                    $totals{$perl}{$field} += $q;
                    $counts{$perl}{$field}++;
                    $data{$test_name}{$perl}{$field} = $q;
                    next;
                }

                # $p and $q are notionally integer counts, but
                # due to variations in startup etc, it's possible for a
                # count which is supposedly zero to be calculated as a
                # small positive or negative value.
                # In this case, set it to zero. Further below we
                # special-case zeros to avoid division by zero errors etc.

                $p = 0.0 if $p < 0.01;
                $q = 0.0 if $q < 0.01;

                if ($p == 0.0 && $q == 0.0) {
                    # Both perls gave a count of zero, so no change:
                    # treat as 100%
                    $totals{$perl}{$field} += 1;
                    $counts{$perl}{$field}++;
                    $data{$test_name}{$perl}{$field} = 1;
                }
                elsif ($p == 0.0 || $q == 0.0) {
                    # If either count is zero, there were too few events
                    # to give a meaningful ratio (and we will end up with
                    # division by zero if we try). Mark the result undef,
                    # indicating that it shouldn't be displayed; and skip
                    # adding to the average
                    $data{$test_name}{$perl}{$field} = undef;
                }
                else {
                    # For averages, we record q/p rather than p/q.
                    # Consider a test where perl_norm took 1000 cycles
                    # and perlN took 800 cycles. For the individual
                    # results we display p/q, or 1.25; i.e. a quarter
                    # quicker. For the averages, we instead sum all
                    # the 0.8's, which gives the total cycles required to
                    # execute all tests, with all tests given equal
                    # weight. Later we reciprocate the final result,
                    # i.e. 1/(sum(qi/pi)/n)

                    $totals{$perl}{$field} += $q/$p;
                    $counts{$perl}{$field}++;
                    $data{$test_name}{$perl}{$field} = $p/$q;
                }
            }
        }
    }

    # Calculate averages based on %totals and %counts accumulated earlier.

    my %averages;
    for my $perl (keys %totals) {
        my $t = $totals{$perl};
        for my $field (keys %$t) {
            $averages{$perl}{$field} = $OPTS{raw}
                ? $t->{$field} / $counts{$perl}{$field}
                # reciprocal - see comments above
                : $counts{$perl}{$field} / $t->{$field};
        }
    }

    return \%data, \%averages;
}
1110
1111
9e7973fa 1112
df3d7b3a 1113# print a standard blurb at the start of the grind display
9e7973fa 1114
df3d7b3a
DM
sub grind_blurb {
    my ($perls) = @_;

    # Legend explaining the abbreviated field names that appear in the
    # result tables which follow.
    my $key = <<EOF;
Key:
    Ir   Instruction read
    Dr   Data read
    Dw   Data write
    COND conditional branches
    IND  indirect branches
    _m   branch predict miss
    _m1  level 1 cache miss
    _mm  last cache (e.g. L3) miss
    -    indeterminate percentage (e.g. 1/0)

EOF
    print $key;

    # With --raw we show absolute per-iteration counts; otherwise counts
    # are shown relative to the reference ("norm") perl.
    my $norm_label = $perls->[$OPTS{norm}][1];
    print $OPTS{raw}
        ? "The numbers represent raw counts per loop iteration.\n"
        : <<EOF;
The numbers represent relative counts per loop iteration, compared to
$norm_label at 100.0%.
Higher is better: for example, using half as many instructions gives 200%,
while using twice as many gives 50%.
EOF
}
1144
1145
1146# return a sorted list of the test names, plus 'AVERAGE'
9e7973fa 1147
df3d7b3a
DM
# Return a sorted list of the test names, plus 'AVERAGE'.

sub sorted_test_names {
    my ($results, $order, $perls) = @_;

    # With --average, only the 'AVERAGE' pseudo-test is displayed, so
    # leave the list of real test names empty.
    my @names;
    if (!$OPTS{average}) {
        if (defined $OPTS{'sort-field'}) {
            # Sort the tests numerically on the chosen field, as measured
            # under the chosen perl.
            my $field  = $OPTS{'sort-field'};
            my $perlix = $OPTS{'sort-perl'};
            my $label  = $perls->[$perlix][1];
            @names = sort {
                         $results->{$a}{$label}{$field}
                             <=> $results->{$b}{$label}{$field}
                     } keys %$results;
        }
        else {
            # Keep the order in which the tests appeared in the benchmark
            # file, skipping any that produced no results.
            @names = grep { $results->{$_} } @$order;
        }
    }

    # No point in displaying average for only one test.
    push @names, 'AVERAGE' unless @names == 1;
    return @names;
}
1172
1173
1174# grind_print(): display the tabulated results of all the cachegrinds.
1175#
1176# Arguments are of the form:
1177# $results->{benchmark_name}{perl_name}{field_name} = N
1178# $averages->{perl_name}{field_name} = M
1179# $perls = [ [ perl-exe, perl-label ], ... ]
1180# $tests->{test_name}{desc => ..., ...}
1181
sub grind_print {
    my ($results, $averages, $perls, $tests, $order) = @_;

    # The labels (second element of each [exe, label] pair) are both the
    # keys used in %$results / %$averages and the column headings.
    # NOTE(review): the previous version also built an unused @perl_names
    # array and an unused %perl_labels hash; both removed as dead code.
    my @perl_labels = map $_->[1], @$perls;

    my $field_label_width = 6;

    # Calculate the width to display for each column: at least wide enough
    # for its label, and wider when raw counts (bigger numbers) are shown.
    my $min_width = $OPTS{raw} ? 8 : 6;
    my @widths = map { length($_) < $min_width ? $min_width : length($_) }
                        @perl_labels;

    # Print standard header.
    grind_blurb($perls);

    my @test_names = sorted_test_names($results, $order, $perls);

    # If only a single field is to be displayed, use a more compact
    # format with only a single line of output per test.

    my $one_field = defined $OPTS{fields} && keys(%{$OPTS{fields}}) == 1;

    # Print the two-row column header: perl labels, then a row of dashes.
    # (Closure so the two call sites below stay in sync; captures
    # $field_label_width by reference, so later updates are seen.)
    my $print_header = sub {
        for my $i (0, 1) {
            print " " x $field_label_width;
            for (0..$#widths) {
                printf " %*s", $widths[$_],
                    $i ? ('-' x $widths[$_]) : $perl_labels[$_];
            }
            print "\n";
        }
    };

    if ($one_field) {
        print "Results for field " . (keys(%{$OPTS{fields}}))[0] . ".\n";

        # The first column will now contain test names rather than
        # field names; calculate the max width.

        $field_label_width = 0;
        for (@test_names) {
            $field_label_width = length if length > $field_label_width;
        }

        # Print the perl executables header (once, up front).
        print "\n";
        $print_header->();
    }

    # Dump the results for each test.

    for my $test_name (@test_names) {
        my $doing_ave = ($test_name eq 'AVERAGE');
        my $res1 = $doing_ave ? $averages : $results->{$test_name};

        unless ($one_field) {
            print "\n$test_name";
            print "\n$tests->{$test_name}{desc}" unless $doing_ave;
            print "\n\n";

            # Print the perl executables header (once per test).
            $print_header->();
        }

        # 'N' is a pseudo-field meaning "emit a blank separator line".
        for my $field (qw(Ir Dr Dw COND IND
                          N
                          COND_m IND_m
                          N
                          Ir_m1 Dr_m1 Dw_m1
                          N
                          Ir_mm Dr_mm Dw_mm
                        ))
        {
            next if $OPTS{fields} and ! exists $OPTS{fields}{$field};

            if ($field eq 'N') {
                print "\n";
                next;
            }

            # In one-field mode the row label is the test name; otherwise
            # it is the field name.
            if ($one_field) {
                printf "%-*s", $field_label_width, $test_name;
            }
            else {
                printf "%*s", $field_label_width, $field;
            }

            for my $i (0..$#widths) {
                my $res2 = $res1->{$perl_labels[$i]};
                my $p = $res2->{$field};
                if (!defined $p) {
                    # undef means there were too few events for a
                    # meaningful ratio; display as '-'.
                    printf " %*s", $widths[$i], '-';
                }
                elsif ($OPTS{raw}) {
                    printf " %*.1f", $widths[$i], $p;
                }
                else {
                    printf " %*.2f", $widths[$i], $p * 100;
                }
            }
            print "\n";
        }
    }
}
1292
1293
df3d7b3a
DM
1294
1295# grind_print_compact(): like grind_print(), but display a single perl
1296# in a compact form. Has an additional arg, $which_perl, which specifies
1297# which perl to display.
1298#
1299# Arguments are of the form:
1300# $results->{benchmark_name}{perl_name}{field_name} = N
1301# $averages->{perl_name}{field_name} = M
1302# $perls = [ [ perl-exe, perl-label ], ... ]
1303# $tests->{test_name}{desc => ..., ...}
1304
sub grind_print_compact {
    my ($results, $averages, $which_perl, $perls, $tests, $order) = @_;

    # the width to display for each column.
    my $col_width = $OPTS{raw} ? 7 : 6;

    # Print standard header.
    grind_blurb($perls);

    print "\nResults for $perls->[$which_perl][1]\n\n";

    my @test_names = sorted_test_names($results, $order, $perls);

    # Which fields to display, and in what order; optionally restricted
    # by the --fields option.
    my @fields = qw( Ir Dr Dw
                     COND IND
                     COND_m IND_m
                     Ir_m1 Dr_m1 Dw_m1
                     Ir_mm Dr_mm Dw_mm
                   );
    @fields = grep { exists $OPTS{fields}{$_} } @fields if $OPTS{fields};

    # Column headings, then a row of dashes.
    printf " %*s", $col_width, $_ for @fields;
    print "\n";
    printf " %*s", $col_width, '------' for @fields;
    print "\n";

    # One output row per test (plus 'AVERAGE'); results are keyed by the
    # chosen perl's label.
    my $label = $perls->[$which_perl][1];

    for my $test_name (@test_names) {
        my $row = $test_name eq 'AVERAGE'
                      ? $averages->{$label}
                      : $results->{$test_name}{$label};

        for my $field (@fields) {
            my $val = $row->{$field};
            if (!defined $val) {
                # undef marks an indeterminate ratio; display as '-'.
                printf " %*s", $col_width, '-';
            }
            elsif ($OPTS{raw}) {
                printf " %*.1f", $col_width, $val;
            }
            else {
                printf " %*.2f", $col_width, $val * 100;
            }

        }

        print " $test_name\n";
    }
}
1358
1359
9e7973fa
DM
1360# do_selftest(): check that we can parse known cachegrind()
1361# output formats. If the output of cachegrind changes, add a *new*
1362# test here; keep the old tests to make sure we continue to parse
1363# old cachegrinds
1364
sub do_selftest {

    # Each test is a triple: description, literal cachegrind output,
    # expected parse results for every field.
    my @tests = (
        'standard',
        <<'EOF',
==32350== Cachegrind, a cache and branch-prediction profiler
==32350== Copyright (C) 2002-2013, and GNU GPL'd, by Nicholas Nethercote et al.
==32350== Using Valgrind-3.9.0 and LibVEX; rerun with -h for copyright info
==32350== Command: perl5211o /tmp/uiS2gjdqe5 1
==32350==
--32350-- warning: L3 cache found, using its data for the LL simulation.
==32350==
==32350== I   refs:      1,124,055
==32350== I1  misses:        5,573
==32350== LLi misses:        3,338
==32350== I1  miss rate:      0.49%
==32350== LLi miss rate:      0.29%
==32350==
==32350== D   refs:        404,275  (259,191 rd   + 145,084 wr)
==32350== D1  misses:        9,608  (  6,098 rd   +   3,510 wr)
==32350== LLd misses:        5,794  (  2,781 rd   +   3,013 wr)
==32350== D1  miss rate:       2.3% (    2.3%     +     2.4%  )
==32350== LLd miss rate:       1.4% (    1.0%     +     2.0%  )
==32350==
==32350== LL refs:          15,181  ( 11,671 rd   +   3,510 wr)
==32350== LL misses:         9,132  (  6,119 rd   +   3,013 wr)
==32350== LL miss rate:        0.5% (    0.4%     +     2.0%  )
==32350==
==32350== Branches:        202,372  (197,050 cond +   5,322 ind)
==32350== Mispredicts:      19,153  ( 17,742 cond +   1,411 ind)
==32350== Mispred rate:        9.4% (      9.0%   +      26.5% )
EOF
        {
            COND   => 197050,
            COND_m => 17742,
            Dr     => 259191,
            Dr_m1  => 6098,
            Dr_mm  => 2781,
            Dw     => 145084,
            Dw_m1  => 3510,
            Dw_mm  => 3013,
            IND    => 5322,
            IND_m  => 1411,
            Ir     => 1124055,
            Ir_m1  => 5573,
            Ir_mm  => 3338,
        },
    );

    # Load the test harness from t/ (when run from the top-level build
    # dir) or from . (when run from within t/).
    for ('t', '.') {
        last if require "$_/test.pl";
    }

    # One 'is' per field per test triple.
    plan(@tests / 3 * keys %VALID_FIELDS);

    while (@tests) {
        my ($desc, $output, $expected) = splice @tests, 0, 3;
        my $parsed = parse_cachegrind($output);
        for my $field (sort keys %VALID_FIELDS) {
            is($parsed->{$field}, $expected->{$field}, "$desc, $field");
        }
    }
}