sub O_ACCMODE () { O_RDONLY | O_RDWR | O_WRONLY }
-$VERSION = "1.00";
+$VERSION = "1.01";
my $DEFAULT_MEMORY_SIZE = 1<<21; # 2 megabytes
my $DEFAULT_AUTODEFER_THRESHHOLD = 3; # 3 records
my $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD = 65536; # 16 disk blocksful
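# These defaults map onto Tie::File's documented tie-time options.  A hedged
# sketch of overriding them -- the file name and the particular values are
# hypothetical, nothing here is required by the module:
use Tie::File;
tie my @lines, 'Tie::File', 'data.txt',
    memory    => 20_000_000,   # raise the 2 MB cache ceiling
    dw_size   => 10_000_000,   # cap the deferred-write buffer separately
    autodefer => 0             # disable the autodefer heuristic
  or die "Cannot tie data.txt: $!";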
if ($pos < 0) {
$pos += $oldsize;
if ($pos < 0) {
- croak "Modification of non-creatable array value attempted, subscript $oldpos";
+ croak "Modification of non-creatable array value attempted, " .
+ "subscript $oldpos";
}
}
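# Illustration of the negative-subscript handling above (the file name is
# hypothetical): -1 counts back from the last record, exactly as with a
# native Perl array, but an index that still falls before record 0 after
# the adjustment is a fatal error.
use Tie::File;
tie my @lines, 'Tie::File', 'data.txt' or die "Cannot tie data.txt: $!";
print $lines[-1], "\n";                  # the last record in the file
$lines[-1] = "replacement record";       # rewrites that record in place
eval { $lines[ -(@lines + 1) ] = "x" };  # index before the start of the array
print $@;  # Modification of non-creatable array value attempted, subscript ...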
} elsif ($dpos == $spos) {
return;
}
-
+
while (! defined ($len) || $len > 0) {
my $readsize = ! defined($len) ? $blocksize
             : $len > $blocksize ? $blocksize
             : $len;
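# The ternary above is a guarded min(): read a full block unless fewer than
# $blocksize bytes remain, with an undefined $len meaning "no limit, keep
# copying to end of file" (hence the loop condition).  Equivalent standalone
# helper, shown for clarity only -- not part of Tie::File:
sub _readsize_for {
    my ($blocksize, $len) = @_;
    return $blocksize unless defined $len;          # no limit: whole blocks
    return $len > $blocksize ? $blocksize : $len;   # min($len, $blocksize)
}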
my $fh = $self->{fh};
local *OFF = $self->{offsets};
-
+
$self->_seek(-1); # tricky -- see comment at _seek
# Tels says that inlining read_record() would make this loop
my $fh = $self->{fh};
$op = LOCK_EX unless defined $op;
my $locked = flock $fh, $op;
-
+
if ($locked && ($op & (LOCK_EX | LOCK_SH))) {
# If you're locking the file, then presumably it's because
# there might have been a write access by another process.
# If it's still undefined, there is no such record, so return 'undef'
return unless defined $o;
}
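# Usage sketch for the locking code above: the documented way to lock the
# underlying file is the flock method on the tied object (the file name and
# surrounding logic are illustrative; LOCK_EX is the default mode).
use Fcntl ':flock';                 # LOCK_SH, LOCK_EX, LOCK_UN
use Tie::File;
my $o = tie my @lines, 'Tie::File', 'shared.log' or die "Cannot tie: $!";
$o->flock(LOCK_EX);                 # taking the lock assumes another process
                                    # may have written in the meantime, as
                                    # the comment above explains
push @lines, "appended under an exclusive lock";
$o->flock(LOCK_UN);                 # release with ordinary flock semantics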
-
+
$self->{offsets}[$n];
}
}
if (! defined $offset && $self->{eof}) {
$good = 0;
- _ci_warn("The offset table was marked complete, but it is missing element $.");
+ _ci_warn("The offset table was marked complete, but it is missing " .
+ "element $.");
}
}
if (@{$self->{offsets}} > $.+1) {
# Total size of deferbuffer should not exceed the specified limit
if ($deferred_s > $self->{dw_size}) {
- _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit of $self->{dw_size}");
+ _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit " .
+ "of $self->{dw_size}");
$good = 0;
}
# Total size of cached data should not exceed the specified limit
if ($deferred_s + $cached > $self->{memory}) {
my $total = $deferred_s + $cached;
- _ci_warn("total stored data size is $total which exceeds the limit of $self->{memory}");
+ _ci_warn("total stored data size is $total which exceeds the limit " .
+ "of $self->{memory}");
$good = 0;
}
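# The dw_size and memory ceilings checked above are the same limits a caller
# sets at tie time, and they interact with the documented deferred-write
# methods.  A hedged sketch; the file name and sizes are illustrative:
use Tie::File;
my $o = tie my @lines, 'Tie::File', 'big.txt',
            memory  => 10_000_000,       # overall cache + buffer ceiling
            dw_size =>  2_000_000        # deferred-write buffer ceiling
          or die "Cannot tie big.txt: $!";
$o->defer;                               # queue writes in the deferred buffer
$lines[$_] = "record $_" for 0 .. 999;   # buffered; flushed automatically if
                                         # the buffer would outgrow dw_size
$o->flush;                               # write everything out now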
=item *
There is a large memory overhead for each record offset and for each
-cache entry: about 310 bytes per cached data record, and about 21 bytes per offset table entry.
+cache entry: about 310 bytes per cached data record, and about 21 bytes
+per offset table entry.
The per-record overhead will limit the maximum number of records you
can access per file. Note that I<accessing> the length of the array
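As a rough, back-of-the-envelope illustration of those figures (the record
and cache counts below are arbitrary example values, and the per-item costs
are the approximations quoted above, not guarantees):

    # ~21 bytes per offset table entry, ~310 bytes per cached record,
    # plus the record data itself
    my $records        = 1_000_000;
    my $cached_records = 10_000;
    my $avg_record_len = 80;

    my $offset_bytes = 21 * $records;                             # about 20 MB
    my $cache_bytes  = (310 + $avg_record_len) * $cached_records; # about 3.7 MB

    printf "offsets ~%.1f MB, cache ~%.1f MB\n",
           $offset_bytes / 2**20, $cache_bytes / 2**20;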
dist/storable/storable.pm Verbatim line length including indents exceeds 79 by 4
dist/thread-queue/lib/thread/queue.pm Verbatim line length including indents exceeds 79 by 4
dist/threads/lib/threads.pm Verbatim line length including indents exceeds 79 by 3
-dist/tie-file/lib/tie/file.pm Verbatim line length including indents exceeds 79 by 3
ext/devel-peek/peek.pm ? Should you be using L<...> instead of 2
ext/dynaloader/dynaloader.pm Verbatim line length including indents exceeds 79 by 1
ext/file-find/lib/file/find.pm Verbatim line length including indents exceeds 79 by 1