a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
87c6202a 5/*
741df71a
IZ
6 Here are some notes on configuring Perl's malloc. (For non-perl
7 usage see below.)
87c6202a
IZ
8
9 There are two macros which serve as bulk disablers of advanced
10 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
11 default). Look in the list of default values below to understand
12 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
13 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
14 returns it to the state as of Perl 5.000.
15
16 Note that some of the settings below may be ignored in the code based
17 on values of other macros. The PERL_CORE symbol is only defined when
18 perl itself is being compiled (so malloc can make some assumptions
19 about perl's facilities being available to it).
20
21 Each config option has a short description, followed by its name,
22 default value, and a comment about the default (if applicable). Some
23 options take a precise value, while the others are just boolean.
24 The boolean ones are listed first.
25
26 # Enable code for an emergency memory pool in $^M. See perlvar.pod
27 # for a description of $^M.
28 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
29
30 # Enable code for printing memory statistics.
31 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
32
33 # Move allocation info for small buckets into separate areas.
34 # Memory optimization (especially for small allocations of less
 35 # than 64 bytes). Since perl usually makes a large number
36 # of small allocations, this is usually a win.
37 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
38
39 # Add one page to big powers of two when calculating bucket size.
40 # This is targeted at big allocations, as are common in image
41 # processing.
42 TWO_POT_OPTIMIZE !PLAIN_MALLOC
43
44 # Use intermediate bucket sizes between powers-of-two. This is
45 # generally a memory optimization, and a (small) speed pessimization.
46 BUCKETS_ROOT2 !NO_FANCY_MALLOC
47
48 # Do not check small deallocations for bad free(). Memory
49 # and speed optimization, error reporting pessimization.
50 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
51
52 # Use table lookup to decide in which bucket a given allocation will go.
53 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
54
38ac2dc8
DD
55 # Use a perl-defined sbrk() instead of the (presumably broken or
56 # missing) system-supplied sbrk().
57 USE_PERL_SBRK undef
58
59 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
60 # only used with broken sbrk()s.
87c6202a
IZ
61 PERL_SBRK_VIA_MALLOC undef
62
38ac2dc8
DD
63 # Which allocator to use if PERL_SBRK_VIA_MALLOC
64 SYSTEM_ALLOC(a) malloc(a)
65
9ee81ef6 66 # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
5bbd1ef5
IZ
67 SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
68
87c6202a
IZ
69 # Disable memory overwrite checking with DEBUGGING. Memory and speed
70 # optimization, error reporting pessimization.
71 NO_RCHECK undef
72
73 # Enable memory overwrite checking with DEBUGGING. Memory and speed
74 # pessimization, error reporting optimization
75 RCHECK (DEBUGGING && !NO_RCHECK)
76
77 # Failed allocations bigger than this size croak (if
78 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
79 # perlvar.pod for a description of $^M.
80 BIG_SIZE (1<<16) # 64K
81
82 # Starting from this power of two, add an extra page to the
83 # size of the bucket. This enables optimized allocations of sizes
84 # close to powers of 2. Note that the value is indexed at 0.
85 FIRST_BIG_POW2 15 # 32K, 16K is used too often
86
87 # Estimate of minimal memory footprint. malloc uses this value to
88 # request reasonably large blocks of memory from the system.
89 FIRST_SBRK (48*1024)
90
91 # Round up sbrk()s to multiples of this.
92 MIN_SBRK 2048
93
94 # Round up sbrk()s to multiples of this percent of footprint.
95 MIN_SBRK_FRAC 3
96
97 # Add this much memory to big powers of two to get the bucket size.
98 PERL_PAGESIZE 4096
99
100 # This many sbrk() discontinuities should be tolerated even
101 # from the start without deciding that sbrk() is usually
102 # discontinuous.
103 SBRK_ALLOW_FAILURES 3
104
105 # This many continuous sbrk()s compensate for one discontinuous one.
106 SBRK_FAILURE_PRICE 50
107
28ac10b1
IZ
108 # Some configurations may ask for 12-byte-or-so allocations which
109 # require 8-byte alignment (?!). In such a situation one needs to
 110 # define this to disable the 12-byte bucket (this will increase the memory footprint)
111 STRICT_ALIGNMENT undef
112
87c6202a
IZ
113 This implementation assumes that calling PerlIO_printf() does not
114 result in any memory allocation calls (used during a panic).
115
116 */
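/*
 * Illustration only -- these compiler invocations are hypothetical and not
 * part of the Perl build system; they merely show how the bulk disablers
 * above would typically be supplied:
 *
 *	cc -c -DPLAIN_MALLOC malloc.c		(5.000-era behaviour)
 *	cc -c -DNO_FANCY_MALLOC malloc.c	(5.004-era behaviour)
 *	cc -c -DDEBUGGING malloc.c		(RCHECK overwrite checking on,
 *						 unless NO_RCHECK is also defined)
 */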
117
741df71a
IZ
118/*
119 If used outside of the Perl environment, it may be useful to redefine
120 the following macros (listed below with defaults):
121
122 # Type of address returned by allocation functions
123 Malloc_t void *
124
125 # Type of size argument for allocation functions
126 MEM_SIZE unsigned long
127
128 # Maximal value in LONG
129 LONG_MAX 0x7FFFFFFF
130
131 # Unsigned integer type big enough to keep a pointer
132 UV unsigned long
133
134 # Type of pointer with 1-byte granularity
135 caddr_t char *
136
137 # Type returned by free()
138 Free_t void
139
5bbd1ef5
IZ
140 # Very fatal condition reporting function (cannot call any other function)
141 fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2)
142
741df71a
IZ
143 # Fatal error reporting function
144 croak(format, arg) warn(idem) + exit(1)
145
146 # Error reporting function
147 warn(format, arg) fprintf(stderr, idem)
148
149 # Locking/unlocking for MT operation
64f996d1
GS
150 MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex)
151 MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex)
741df71a
IZ
152
153 # Locking/unlocking mutex for MT operation
154 MUTEX_LOCK(l) void
155 MUTEX_UNLOCK(l) void
156 */
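/*
 * A minimal sketch of such a non-Perl configuration, using only the macro
 * names listed above; the values are illustrative defaults, not a supported
 * build mode:
 *
 *	#define Malloc_t	void *
 *	#define MEM_SIZE	unsigned long
 *	#define Free_t		void
 *	#define warn(fmt, arg)	fprintf(stderr, (fmt), (arg))
 *	#define croak(fmt, arg)	(warn((fmt), (arg)), exit(1))
 *	#define MUTEX_LOCK(l)
 *	#define MUTEX_UNLOCK(l)
 */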
157
e8bc2b5c
GS
158#ifndef NO_FANCY_MALLOC
159# ifndef SMALL_BUCKET_VIA_TABLE
160# define SMALL_BUCKET_VIA_TABLE
161# endif
162# ifndef BUCKETS_ROOT2
163# define BUCKETS_ROOT2
164# endif
165# ifndef IGNORE_SMALL_BAD_FREE
166# define IGNORE_SMALL_BAD_FREE
167# endif
3562ef9b
IZ
168#endif
169
e8bc2b5c
GS
170#ifndef PLAIN_MALLOC /* Bulk enable features */
171# ifndef PACK_MALLOC
172# define PACK_MALLOC
173# endif
174# ifndef TWO_POT_OPTIMIZE
175# define TWO_POT_OPTIMIZE
176# endif
d720c441
IZ
177# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
178# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
179# endif
180# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
181# define DEBUGGING_MSTATS
182# endif
183#endif
184
185#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
186#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
187
61ae2fbf 188#if !(defined(I286) || defined(atarist) || defined(__MINT__))
e8bc2b5c
GS
189 /* take 2k unless the block is bigger than that */
190# define LOG_OF_MIN_ARENA 11
191#else
192 /* take 16k unless the block is bigger than that
193 (80286s like large segments!), probably good on the atari too */
194# define LOG_OF_MIN_ARENA 14
195#endif
196
8d063cd8 197#ifndef lint
1944739a
IZ
198# if defined(DEBUGGING) && !defined(NO_RCHECK)
199# define RCHECK
200# endif
e8bc2b5c
GS
201# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
202# undef IGNORE_SMALL_BAD_FREE
203# endif
8d063cd8
LW
204/*
205 * malloc.c (Caltech) 2/21/82
206 * Chris Kingsley, kingsley@cit-20.
207 *
208 * This is a very fast storage allocator. It allocates blocks of a small
209 * number of different sizes, and keeps free lists of each size. Blocks that
210 * don't exactly fit are passed up to the next larger size. In this
211 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 212 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 213 * This is designed for use in a program that uses vast quantities of memory,
741df71a
IZ
214 * but bombs when it runs out.
215 *
216 * Modifications Copyright Ilya Zakharevich 1996-98.
217 *
218 * Still very quick, but much more thrifty. (Std config is 10% slower
219 * than it was, and takes 67% of old heap size for typical usage.)
220 *
221 * Allocations of small blocks are now table-driven to many different
222 * buckets. Sizes of really big buckets are increased to accommodate
223 * common size=power-of-2 blocks. Running-out-of-memory is made into
224 * an exception. Deeply configurable and thread-safe.
225 *
8d063cd8
LW
226 */
227
d720c441
IZ
228#ifdef PERL_CORE
229# include "EXTERN.h"
230# include "perl.h"
231#else
232# ifdef PERL_FOR_X2P
233# include "../EXTERN.h"
234# include "../perl.h"
235# else
236# include <stdlib.h>
237# include <stdio.h>
238# include <memory.h>
239# define _(arg) arg
240# ifndef Malloc_t
241# define Malloc_t void *
242# endif
243# ifndef MEM_SIZE
244# define MEM_SIZE unsigned long
245# endif
246# ifndef LONG_MAX
247# define LONG_MAX 0x7FFFFFFF
248# endif
249# ifndef UV
250# define UV unsigned long
251# endif
252# ifndef caddr_t
253# define caddr_t char *
254# endif
255# ifndef Free_t
256# define Free_t void
257# endif
258# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
259# define PerlEnv_getenv getenv
260# define PerlIO_printf fprintf
261# define PerlIO_stderr() stderr
262# endif
e8bc2b5c 263# ifndef croak /* make depend */
741df71a 264# define croak(mess, arg) (warn((mess), (arg)), exit(1))
d720c441
IZ
265# endif
266# ifndef warn
741df71a 267# define warn(mess, arg) fprintf(stderr, (mess), (arg))
e8bc2b5c
GS
268# endif
269# ifdef DEBUG_m
270# undef DEBUG_m
271# endif
272# define DEBUG_m(a)
273# ifdef DEBUGGING
274# undef DEBUGGING
275# endif
276#endif
277
278#ifndef MUTEX_LOCK
279# define MUTEX_LOCK(l)
280#endif
281
282#ifndef MUTEX_UNLOCK
283# define MUTEX_UNLOCK(l)
284#endif
285
741df71a 286#ifndef MALLOC_LOCK
64f996d1 287# define MALLOC_LOCK MUTEX_LOCK(&PL_malloc_mutex)
741df71a
IZ
288#endif
289
290#ifndef MALLOC_UNLOCK
64f996d1 291# define MALLOC_UNLOCK MUTEX_UNLOCK(&PL_malloc_mutex)
741df71a
IZ
292#endif
293
5bbd1ef5
IZ
294# ifndef fatalcroak /* make depend */
295# define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2))
296# endif
297
760ac839 298#ifdef DEBUGGING
e8bc2b5c 299# undef DEBUG_m
4a33f861 300# define DEBUG_m(a) if (PL_debug & 128) a
760ac839
LW
301#endif
302
e9397286
GS
303/*
304 * Layout of memory:
305 * ~~~~~~~~~~~~~~~~
306 * The memory is broken into "blocks" which occupy multiples of 2K (and
307 * generally speaking, have size "close" to a power of 2). The addresses
308 * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf
309 * is an array of linked lists.) (Addresses of used blocks are not known.)
310 *
311 * Moreover, since the algorithm may try to "bite" smaller blocks out
312 * of unused bigger ones, there are also regions of "irregular" size,
313 * managed separately, by a linked list chunk_chain.
314 *
315 * The third type of storage is the sbrk()ed-but-not-yet-used space, its
316 * end and size are kept in last_sbrk_top and sbrked_remains.
317 *
318 * Growing blocks "in place":
319 * ~~~~~~~~~~~~~~~~~~~~~~~~~
320 * The address of the block with the greatest address is kept in last_op
321 * (if not known, last_op is 0). If it is known that the memory above
322 * last_op is not continuous, or contains a chunk from chunk_chain,
323 * last_op is set to 0.
324 *
325 * The chunk with address last_op may be grown by expanding into
326 * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
327 * memory.
328 *
329 * Management of last_op:
330 * ~~~~~~~~~~~~~~~~~~~~~
331 *
332 * free() never changes the boundaries of blocks, so is not relevant.
333 *
334 * The only way realloc() may change the boundaries of blocks is if it
335 * grows a block "in place". However, in the case of success such a
336 * chunk is automatically last_op, and it remains last_op. In the case
337 * of failure getpages_adjacent() clears last_op.
338 *
339 * malloc() may change blocks by calling morecore() only.
340 *
341 * morecore() may create new blocks by:
342 * a) biting pieces from chunk_chain (cannot create one above last_op);
343 * b) biting a piece from an unused block (if block was last_op, this
344 * may create a chunk from chain above last_op, thus last_op is
345 * invalidated in such a case).
346 * c) biting a piece off the sbrk()ed-but-not-yet-used space. This creates
347 * a block which is last_op.
348 * d) Allocating new pages by calling getpages();
349 *
350 * getpages() creates a new block. It marks last_op at the bottom of
351 * the chunk of memory it returns.
352 *
353 * Active pages footprint:
354 * ~~~~~~~~~~~~~~~~~~~~~~
355 * Note that we do not need to traverse the lists in nextf[i], just take
356 * the first element of this list. However, we *need* to traverse the
357 * list in chunk_chain, but most of the time it should be a very short one,
358 * so we do not step on a lot of pages we are not going to use.
359 *
360 * Flaws:
361 * ~~~~~
362 * get_from_bigger_buckets(): forgets to increment price => quite
363 * aggressive.
364 */
365
135863df
AB
366/* I don't much care whether these are defined in sys/types.h--LAW */
367
368#define u_char unsigned char
369#define u_int unsigned int
e8bc2b5c
GS
370
371#ifdef HAS_QUAD
372# define u_bigint UV /* Needs to eat *void. */
373#else /* needed? */
374# define u_bigint unsigned long /* Needs to eat *void. */
375#endif
376
135863df 377#define u_short unsigned short
8d063cd8 378
cf5c4ad8 379/* 286 and atarist like big chunks, which gives too much overhead. */
61ae2fbf 380#if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC)
e8bc2b5c 381# undef PACK_MALLOC
cf5c4ad8 382#endif
383
8d063cd8 384/*
cf5c4ad8 385 * The description below is applicable if PACK_MALLOC is not defined.
386 *
8d063cd8
LW
387 * The overhead on a block is at least 4 bytes. When free, this space
388 * contains a pointer to the next free block, and the bottom two bits must
389 * be zero. When in use, the first byte is set to MAGIC, and the second
390 * byte is the size index. The remaining bytes are for alignment.
391 * If range checking is enabled and the size of the block fits
392 * in two bytes, then the top two bytes hold the size of the requested block
393 * plus the range checking words, and the header word MINUS ONE.
394 */
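/* For illustration (a typical 32-bit build with RCHECK): ovu_magic and
 * ovu_index take one byte each, ovu_size two bytes and ovu_rmagic four,
 * so sizeof(union overhead) is 8; with RSLOP (another u_int) added,
 * M_OVERHEAD below comes to 12 bytes per range-checked block. */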
395union overhead {
396 union overhead *ov_next; /* when free */
85e6fe83 397#if MEM_ALIGNBYTES > 4
c623bd54 398 double strut; /* alignment problems */
a687059c 399#endif
8d063cd8
LW
400 struct {
401 u_char ovu_magic; /* magic number */
402 u_char ovu_index; /* bucket # */
403#ifdef RCHECK
404 u_short ovu_size; /* actual block size */
405 u_int ovu_rmagic; /* range magic number */
406#endif
407 } ovu;
408#define ov_magic ovu.ovu_magic
409#define ov_index ovu.ovu_index
410#define ov_size ovu.ovu_size
411#define ov_rmagic ovu.ovu_rmagic
412};
413
760ac839 414#ifdef DEBUGGING
d720c441 415static void botch _((char *diag, char *s));
a0d0e21e
LW
416#endif
417static void morecore _((int bucket));
418static int findbucket _((union overhead *freep, int srchlen));
28ac10b1 419static void add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip);
a0d0e21e 420
8d063cd8
LW
421#define MAGIC 0xff /* magic # on accounting info */
422#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
423#define RMAGIC_C 0x55 /* magic # on range info */
424
8d063cd8 425#ifdef RCHECK
c2a5c2d2
IZ
426# define RSLOP sizeof (u_int)
427# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 428# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 429# else
e8bc2b5c 430# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 431# endif
8d063cd8 432#else
c2a5c2d2 433# define RSLOP 0
8d063cd8
LW
434#endif
435
e8bc2b5c
GS
436#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
437# undef BUCKETS_ROOT2
438#endif
439
440#ifdef BUCKETS_ROOT2
441# define BUCKET_TABLE_SHIFT 2
442# define BUCKET_POW2_SHIFT 1
443# define BUCKETS_PER_POW2 2
444#else
445# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
446# define BUCKET_POW2_SHIFT 0
447# define BUCKETS_PER_POW2 1
448#endif
449
274c7500
IZ
450#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
451/* Figure out the alignment of void*. */
452struct aligner {
453 char c;
454 void *p;
455};
456# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
457#else
458# define ALIGN_SMALL MEM_ALIGNBYTES
459#endif
460
461#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
462
e8bc2b5c
GS
463#ifdef BUCKETS_ROOT2
464# define MAX_BUCKET_BY_TABLE 13
465static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
466 {
467 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
468 };
469# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
470# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
471 ? buck_size[i] \
472 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
473 - MEM_OVERHEAD(i) \
474 + POW2_OPTIMIZE_SURPLUS(i)))
475#else
476# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
477# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
478#endif
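/* Worked example, assuming the default configuration (PACK_MALLOC, no
 * BUCKETS_ROOT2, no RCHECK): BUCKET_POW2_SHIFT is 0, so bucket i holds
 * chunks of BUCKET_SIZE(i) == 2^i bytes.  For i <= MAX_PACKED there is no
 * per-chunk overhead and BUCKET_SIZE_REAL(i) == 2^i; for bigger buckets
 * BUCKET_SIZE_REAL(i) == 2^i - M_OVERHEAD, plus an extra PERL_PAGESIZE for
 * buckets at or above FIRST_BIG_POW2 when TWO_POT_OPTIMIZE is in effect. */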
479
480
cf5c4ad8 481#ifdef PACK_MALLOC
e8bc2b5c
GS
482/* In this case it is assumed that if we do sbrk() in 2K units, we
483 * will get 2K aligned arenas (at least after some initial
484 * alignment). The bucket number of the given subblock is on the start
485 * of 2K arena which contains the subblock. Several following bytes
486 * contain the magic numbers for the subblocks in the block.
cf5c4ad8 487 *
488 * Sizes of chunks are powers of 2 for chunks in buckets <=
489 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
490 * get alignment right).
491 *
e8bc2b5c
GS
492 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
493 * starts of all the chunks in a 2K arena are in different
494 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
495 * boundary of 2K block, this means that sizeof(union
496 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
497 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
498 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
499 * we special-case allocation of the first of the 16 128-byte-long chunks.
cf5c4ad8 500 *
501 * Note that with the above assumption we automatically have enough
502 * room for MAGIC at the start of the 2K block. Note also that we
e8bc2b5c
GS
503 * overlay union overhead over the chunk, thus the start of small chunks
504 * is immediately overwritten after freeing. */
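/* Concrete illustration (assuming LOG_OF_MIN_ARENA == 11, i.e. 2K arenas):
 * given a pointer p into a packed chunk, TWOK_MASKED(p) rounds it down to
 * the start of the enclosing 2K arena, whose first byte (OV_INDEX) holds
 * the bucket number shared by every chunk in that arena, and TWOK_SHIFT(p)
 * is p's offset within the arena, from which OV_MAGIC() below locates the
 * per-chunk magic byte. */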
505# define MAX_PACKED_POW2 6
506# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
507# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
508# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
509# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
510# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8 511# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
512# define OV_INDEX(block) (*OV_INDEXp(block))
513# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
514 (TWOK_SHIFT(block)>> \
515 (bucket>>BUCKET_POW2_SHIFT)) + \
516 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
517 /* A bucket can have a shift smaller than its size; we need to
 518 shift its magic number so it will not overwrite the index: */
519# ifdef BUCKETS_ROOT2
520# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
521# else
522# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
523# endif
cf5c4ad8 524# define CHUNK_SHIFT 0
525
e8bc2b5c
GS
526/* Number of blocks in a 2K arena for the bucket of a given ordinal. */
527#ifdef IGNORE_SMALL_BAD_FREE
528#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
529# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
530 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
531 : n_blks[bucket] )
532#else
533# define N_BLKS(bucket) n_blks[bucket]
534#endif
535
536static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
537 {
538# if BUCKETS_PER_POW2==1
539 0, 0,
540 (MIN_BUC_POW2==2 ? 384 : 0),
541 224, 120, 62, 31, 16, 8, 4, 2
542# else
543 0, 0, 0, 0,
544 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
545 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
546# endif
547 };
548
549/* Shift of the first block of the bucket with the given ordinal inside a 2K arena. */
550#ifdef IGNORE_SMALL_BAD_FREE
551# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
552 ? ((1<<LOG_OF_MIN_ARENA) \
553 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
554 : blk_shift[bucket])
555#else
556# define BLK_SHIFT(bucket) blk_shift[bucket]
557#endif
558
559static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
560 {
561# if BUCKETS_PER_POW2==1
562 0, 0,
563 (MIN_BUC_POW2==2 ? 512 : 0),
564 256, 128, 64, 64, /* 8 to 64 */
565 16*sizeof(union overhead),
566 8*sizeof(union overhead),
567 4*sizeof(union overhead),
568 2*sizeof(union overhead),
569# else
570 0, 0, 0, 0,
571 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
572 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
573 16*sizeof(union overhead), 16*sizeof(union overhead),
574 8*sizeof(union overhead), 8*sizeof(union overhead),
575 4*sizeof(union overhead), 4*sizeof(union overhead),
576 2*sizeof(union overhead), 2*sizeof(union overhead),
577# endif
578 };
cf5c4ad8 579
5bbd1ef5
IZ
580# define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */
581# define WANTED_ALIGNMENT 0x800 /* 2k boundaries */
582
cf5c4ad8 583#else /* !PACK_MALLOC */
584
585# define OV_MAGIC(block,bucket) (block)->ov_magic
586# define OV_INDEX(block) (block)->ov_index
587# define CHUNK_SHIFT 1
e8bc2b5c 588# define MAX_PACKED -1
5bbd1ef5
IZ
589# define NEEDED_ALIGNMENT MEM_ALIGNBYTES
590# define WANTED_ALIGNMENT 0x400 /* 1k boundaries */
591
cf5c4ad8 592#endif /* !PACK_MALLOC */
593
e8bc2b5c
GS
594#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
595
596#ifdef PACK_MALLOC
597# define MEM_OVERHEAD(bucket) \
598 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
599# ifdef SMALL_BUCKET_VIA_TABLE
600# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
601# define START_SHIFT MAX_PACKED_POW2
602# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
603# define SIZE_TABLE_MAX 80
604# else
605# define SIZE_TABLE_MAX 64
606# endif
607static char bucket_of[] =
608 {
609# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
610 /* 0 to 15 in 4-byte increments. */
611 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
612 6, /* 8 */
274c7500 613 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c
GS
614 9, 9, 10, 10, /* 24, 32 */
615 11, 11, 11, 11, /* 48 */
616 12, 12, 12, 12, /* 64 */
617 13, 13, 13, 13, /* 80 */
618 13, 13, 13, 13 /* 80 */
619# else /* !BUCKETS_ROOT2 */
620 /* 0 to 15 in 4-byte increments. */
621 (sizeof(void*) > 4 ? 3 : 2),
622 3,
623 4, 4,
624 5, 5, 5, 5,
625 6, 6, 6, 6,
626 6, 6, 6, 6
627# endif /* !BUCKETS_ROOT2 */
628 };
629# else /* !SMALL_BUCKET_VIA_TABLE */
630# define START_SHIFTS_BUCKET MIN_BUCKET
631# define START_SHIFT (MIN_BUC_POW2 - 1)
632# endif /* !SMALL_BUCKET_VIA_TABLE */
633#else /* !PACK_MALLOC */
634# define MEM_OVERHEAD(bucket) M_OVERHEAD
635# ifdef SMALL_BUCKET_VIA_TABLE
636# undef SMALL_BUCKET_VIA_TABLE
637# endif
638# define START_SHIFTS_BUCKET MIN_BUCKET
639# define START_SHIFT (MIN_BUC_POW2 - 1)
640#endif /* !PACK_MALLOC */
cf5c4ad8 641
8d063cd8 642/*
55497cff 643 * Big allocations are often of the size 2^n bytes. To make them a
644 * little bit better, make blocks of size 2^n+pagesize for big n.
645 */
646
647#ifdef TWO_POT_OPTIMIZE
648
5f05dabc 649# ifndef PERL_PAGESIZE
650# define PERL_PAGESIZE 4096
651# endif
e8bc2b5c
GS
652# ifndef FIRST_BIG_POW2
653# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 654# endif
e8bc2b5c 655# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 656/* If this value or more, check against bigger blocks. */
657# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
658/* If less than this value, goes into 2^n-overhead-block. */
659# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
660
e8bc2b5c
GS
661# define POW2_OPTIMIZE_ADJUST(nbytes) \
662 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
663# define POW2_OPTIMIZE_SURPLUS(bucket) \
664 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
665
666#else /* !TWO_POT_OPTIMIZE */
667# define POW2_OPTIMIZE_ADJUST(nbytes)
668# define POW2_OPTIMIZE_SURPLUS(bucket) 0
669#endif /* !TWO_POT_OPTIMIZE */
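/* Worked example, with the default FIRST_BIG_POW2 == 15 and PERL_PAGESIZE
 * == 4096: a malloc(32768) request satisfies nbytes >= FIRST_BIG_BOUND, so
 * POW2_OPTIMIZE_ADJUST() subtracts one page before the bucket is chosen;
 * the request then lands in the 32K bucket, whose arena is grown by
 * POW2_OPTIMIZE_SURPLUS() == PERL_PAGESIZE, instead of wasting a whole 64K
 * block on it. */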
670
671#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
672# define BARK_64K_LIMIT(what,nbytes,size) \
673 if (nbytes > 0xffff) { \
674 PerlIO_printf(PerlIO_stderr(), \
675 "%s too large: %lx\n", what, size); \
676 my_exit(1); \
677 }
678#else /* !HAS_64K_LIMIT || !PERL_CORE */
679# define BARK_64K_LIMIT(what,nbytes,size)
680#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 681
e8bc2b5c
GS
682#ifndef MIN_SBRK
683# define MIN_SBRK 2048
684#endif
685
686#ifndef FIRST_SBRK
d720c441 687# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
688#endif
689
690/* Minimal sbrk in percents of what is already alloced. */
691#ifndef MIN_SBRK_FRAC
692# define MIN_SBRK_FRAC 3
693#endif
694
695#ifndef SBRK_ALLOW_FAILURES
696# define SBRK_ALLOW_FAILURES 3
697#endif
55497cff 698
e8bc2b5c
GS
699#ifndef SBRK_FAILURE_PRICE
700# define SBRK_FAILURE_PRICE 50
55497cff 701#endif
702
e8bc2b5c
GS
703#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
704
705# ifndef BIG_SIZE
706# define BIG_SIZE (1<<16) /* 64K */
707# endif
708
3541dd58 709#ifdef I_MACH_CTHREADS
772fe5b3
HM
710# undef MUTEX_LOCK
711# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
712# undef MUTEX_UNLOCK
713# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58
HM
714#endif
715
55497cff 716static char *emergency_buffer;
717static MEM_SIZE emergency_buffer_size;
df0003d4 718static Malloc_t emergency_sbrk(MEM_SIZE size);
55497cff 719
52082926 720static Malloc_t
df0003d4 721emergency_sbrk(MEM_SIZE size)
55497cff 722{
28ac10b1
IZ
723 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
724
55497cff 725 if (size >= BIG_SIZE) {
726 /* Give the possibility to recover: */
741df71a 727 MALLOC_UNLOCK;
1b979e0a 728 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff 729 }
730
28ac10b1
IZ
731 if (emergency_buffer_size >= rsize) {
732 char *old = emergency_buffer;
733
734 emergency_buffer_size -= rsize;
735 emergency_buffer += rsize;
736 return old;
737 } else {
18f739ee 738 dTHR;
55497cff 739 /* First offense, give a possibility to recover by dieing. */
740 /* No malloc involved here: */
4a33f861 741 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff 742 SV *sv;
743 char *pv;
28ac10b1 744 int have = 0;
2d8e6c8d 745 STRLEN n_a;
55497cff 746
28ac10b1
IZ
747 if (emergency_buffer_size) {
748 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
749 emergency_buffer_size = 0;
750 emergency_buffer = Nullch;
751 have = 1;
752 }
4a33f861 753 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 754 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1
IZ
755 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
756 if (have)
757 goto do_croak;
55497cff 758 return (char *)-1; /* Now die die die... */
28ac10b1 759 }
55497cff 760 /* Got it, now detach SvPV: */
2d8e6c8d 761 pv = SvPV(sv, n_a);
55497cff 762 /* Check alignment: */
5bbd1ef5 763 if (((UV)(pv - sizeof(union overhead))) & (NEEDED_ALIGNMENT - 1)) {
55497cff 764 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 765 return (char *)-1; /* die die die */
55497cff 766 }
767
28ac10b1
IZ
768 emergency_buffer = pv - sizeof(union overhead);
769 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 770 SvPOK_off(sv);
28ac10b1
IZ
771 SvPVX(sv) = Nullch;
772 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 773 }
28ac10b1 774 do_croak:
741df71a 775 MALLOC_UNLOCK;
28ac10b1 776 croak("Out of memory during request for %i bytes", size);
55497cff 777}
778
e8bc2b5c 779#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 780# define emergency_sbrk(size) -1
e8bc2b5c 781#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 782
783/*
e8bc2b5c 784 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
785 * smallest allocatable block is 8 bytes. The overhead information
786 * precedes the data area returned to the user.
787 */
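/* For example, with BUCKETS_PER_POW2 == 1 this gives NBUCKETS == 33, and
 * nextf[10] heads the free list of 1K (2^10-byte) chunks. */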
e8bc2b5c 788#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 789static union overhead *nextf[NBUCKETS];
cf5c4ad8 790
791#ifdef USE_PERL_SBRK
792#define sbrk(a) Perl_sbrk(a)
52082926 793Malloc_t Perl_sbrk _((int size));
8ac85365
NIS
794#else
795#ifdef DONT_DECLARE_STD
796#ifdef I_UNISTD
797#include <unistd.h>
798#endif
cf5c4ad8 799#else
52082926 800extern Malloc_t sbrk(int);
8ac85365 801#endif
cf5c4ad8 802#endif
8d063cd8 803
c07a80fd 804#ifdef DEBUGGING_MSTATS
8d063cd8
LW
805/*
806 * nmalloc[i] is the difference between the number of mallocs and frees
807 * for a given block size.
808 */
809static u_int nmalloc[NBUCKETS];
5f05dabc 810static u_int sbrk_slack;
811static u_int start_slack;
8d063cd8
LW
812#endif
813
e8bc2b5c
GS
814static u_int goodsbrk;
815
760ac839 816#ifdef DEBUGGING
3541dd58
HM
817#undef ASSERT
818#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
ee0007ab 819static void
d720c441 820botch(char *diag, char *s)
8d063cd8 821{
d720c441 822 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 823 PerlProc_abort();
8d063cd8
LW
824}
825#else
3541dd58 826#define ASSERT(p, diag)
8d063cd8
LW
827#endif
828
2304df62 829Malloc_t
86058a2d 830Perl_malloc(register size_t nbytes)
8d063cd8
LW
831{
832 register union overhead *p;
e8bc2b5c 833 register int bucket;
ee0007ab 834 register MEM_SIZE shiftr;
8d063cd8 835
c2a5c2d2 836#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 837 MEM_SIZE size = nbytes;
45d8adaa
LW
838#endif
839
e8bc2b5c 840 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa
LW
841#ifdef DEBUGGING
842 if ((long)nbytes < 0)
d720c441 843 croak("%s", "panic: malloc");
45d8adaa 844#endif
45d8adaa 845
741df71a 846 MALLOC_LOCK;
8d063cd8
LW
847 /*
848 * Convert amount of memory requested into
849 * closest block size stored in hash buckets
850 * which satisfies request. Account for
851 * space used per block for accounting.
852 */
cf5c4ad8 853#ifdef PACK_MALLOC
e8bc2b5c
GS
854# ifdef SMALL_BUCKET_VIA_TABLE
855 if (nbytes == 0)
856 bucket = MIN_BUCKET;
857 else if (nbytes <= SIZE_TABLE_MAX) {
858 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
859 } else
860# else
043bf814
RB
861 if (nbytes == 0)
862 nbytes = 1;
e8bc2b5c
GS
863 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
864 else
865# endif
55497cff 866#endif
e8bc2b5c
GS
867 {
868 POW2_OPTIMIZE_ADJUST(nbytes);
869 nbytes += M_OVERHEAD;
870 nbytes = (nbytes + 3) &~ 3;
871 do_shifts:
872 shiftr = (nbytes - 1) >> START_SHIFT;
873 bucket = START_SHIFTS_BUCKET;
874 /* apart from this loop, this is O(1) */
875 while (shiftr >>= 1)
876 bucket += BUCKETS_PER_POW2;
cf5c4ad8 877 }
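	/* Worked example (assuming PACK_MALLOC with SMALL_BUCKET_VIA_TABLE,
	   no RCHECK, and an 8-byte union overhead): a request for 100 bytes
	   is too big for bucket_of[], so it takes the branch above;
	   100 + M_OVERHEAD rounds to 108, shiftr = 107 >> START_SHIFT == 1,
	   the while loop shifts it to 0 without advancing the bucket, and
	   the request lands in bucket 7, the 128-byte bucket. */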
8d063cd8
LW
878 /*
879 * If nothing in hash bucket right now,
880 * request more memory from the system.
881 */
882 if (nextf[bucket] == NULL)
883 morecore(bucket);
e8bc2b5c 884 if ((p = nextf[bucket]) == NULL) {
741df71a 885 MALLOC_UNLOCK;
55497cff 886#ifdef PERL_CORE
4a33f861 887 if (!PL_nomemok) {
760ac839 888 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
79072805 889 my_exit(1);
ee0007ab 890 }
45d8adaa 891#else
8d063cd8 892 return (NULL);
45d8adaa
LW
893#endif
894 }
895
e8bc2b5c
GS
896 DEBUG_m(PerlIO_printf(Perl_debug_log,
897 "0x%lx: (%05lu) malloc %ld bytes\n",
4a33f861 898 (unsigned long)(p+1), (unsigned long)(PL_an++),
e8bc2b5c 899 (long)size));
45d8adaa 900
8d063cd8 901 /* remove from linked list */
802004fa
DD
902#if defined(RCHECK)
903 if (((UV)p) & (MEM_ALIGNBYTES - 1))
760ac839 904 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
a0d0e21e 905 (unsigned long)*((int*)p),(unsigned long)p);
bf38876a
LW
906#endif
907 nextf[bucket] = p->ov_next;
e8bc2b5c
GS
908#ifdef IGNORE_SMALL_BAD_FREE
909 if (bucket >= FIRST_BUCKET_WITH_CHECK)
910#endif
911 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8 912#ifndef PACK_MALLOC
913 OV_INDEX(p) = bucket;
914#endif
8d063cd8
LW
915#ifdef RCHECK
916 /*
917 * Record allocated size of block and
918 * bound space with magic numbers.
919 */
8d063cd8 920 p->ov_rmagic = RMAGIC;
e8bc2b5c
GS
921 if (bucket <= MAX_SHORT_BUCKET) {
922 int i;
923
924 nbytes = size + M_OVERHEAD;
925 p->ov_size = nbytes - 1;
926 if ((i = nbytes & 3)) {
927 i = 4 - i;
928 while (i--)
929 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
930 }
931 nbytes = (nbytes + 3) &~ 3;
932 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
933 }
8d063cd8 934#endif
741df71a 935 MALLOC_UNLOCK;
cf5c4ad8 936 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8
LW
937}
938
e8bc2b5c
GS
939static char *last_sbrk_top;
940static char *last_op; /* This arena can be easily extended. */
941static int sbrked_remains;
942static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
943
944#ifdef DEBUGGING_MSTATS
945static int sbrks;
946#endif
947
948struct chunk_chain_s {
949 struct chunk_chain_s *next;
950 MEM_SIZE size;
951};
952static struct chunk_chain_s *chunk_chain;
953static int n_chunks;
954static char max_bucket;
955
956/* Cut off a piece of one of the chunks in the chain. Prefer the chunk leaving the smallest remainder. */
957static void *
958get_from_chain(MEM_SIZE size)
959{
960 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
961 struct chunk_chain_s **oldgoodp = NULL;
962 long min_remain = LONG_MAX;
963
964 while (elt) {
965 if (elt->size >= size) {
966 long remains = elt->size - size;
967 if (remains >= 0 && remains < min_remain) {
968 oldgoodp = oldp;
969 min_remain = remains;
970 }
971 if (remains == 0) {
972 break;
973 }
974 }
975 oldp = &( elt->next );
976 elt = elt->next;
977 }
978 if (!oldgoodp) return NULL;
979 if (min_remain) {
980 void *ret = *oldgoodp;
981 struct chunk_chain_s *next = (*oldgoodp)->next;
982
983 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
984 (*oldgoodp)->size = min_remain;
985 (*oldgoodp)->next = next;
986 return ret;
987 } else {
988 void *ret = *oldgoodp;
989 *oldgoodp = (*oldgoodp)->next;
990 n_chunks--;
991 return ret;
992 }
993}
994
995static void
996add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
997{
998 struct chunk_chain_s *next = chunk_chain;
999 char *cp = (char*)p;
1000
1001 cp += chip;
1002 chunk_chain = (struct chunk_chain_s *)cp;
1003 chunk_chain->size = size - chip;
1004 chunk_chain->next = next;
1005 n_chunks++;
1006}
1007
1008static void *
1009get_from_bigger_buckets(int bucket, MEM_SIZE size)
1010{
1011 int price = 1;
1012 static int bucketprice[NBUCKETS];
1013 while (bucket <= max_bucket) {
1014 /* We postpone stealing from bigger buckets until we want it
1015 often enough. */
1016 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1017 /* Steal it! */
1018 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1019 bucketprice[bucket] = 0;
1020 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1021 last_op = NULL; /* Disable optimization */
1022 }
1023 nextf[bucket] = nextf[bucket]->ov_next;
1024#ifdef DEBUGGING_MSTATS
1025 nmalloc[bucket]--;
1026 start_slack -= M_OVERHEAD;
1027#endif
1028 add_to_chain(ret, (BUCKET_SIZE(bucket) +
1029 POW2_OPTIMIZE_SURPLUS(bucket)),
1030 size);
1031 return ret;
1032 }
1033 bucket++;
1034 }
1035 return NULL;
1036}
1037
fa423c5b
IZ
1038static union overhead *
1039getpages(int needed, int *nblksp, int bucket)
1040{
1041 /* Need to do (possibly expensive) system call. Try to
1042 optimize it for rare calling. */
1043 MEM_SIZE require = needed - sbrked_remains;
1044 char *cp;
1045 union overhead *ovp;
1046 int slack = 0;
1047
1048 if (sbrk_good > 0) {
1049 if (!last_sbrk_top && require < FIRST_SBRK)
1050 require = FIRST_SBRK;
1051 else if (require < MIN_SBRK) require = MIN_SBRK;
1052
1053 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
1054 require = goodsbrk * MIN_SBRK_FRAC / 100;
1055 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1056 } else {
1057 require = needed;
1058 last_sbrk_top = 0;
1059 sbrked_remains = 0;
1060 }
1061
1062 DEBUG_m(PerlIO_printf(Perl_debug_log,
1063 "sbrk(%ld) for %ld-byte-long arena\n",
1064 (long)require, (long) needed));
1065 cp = (char *)sbrk(require);
1066#ifdef DEBUGGING_MSTATS
1067 sbrks++;
1068#endif
1069 if (cp == last_sbrk_top) {
1070 /* Common case, anything is fine. */
1071 sbrk_good++;
1072 ovp = (union overhead *) (cp - sbrked_remains);
e9397286 1073 last_op = cp - sbrked_remains;
fa423c5b
IZ
1074 sbrked_remains = require - (needed - sbrked_remains);
1075 } else if (cp == (char *)-1) { /* no more room! */
1076 ovp = (union overhead *)emergency_sbrk(needed);
1077 if (ovp == (union overhead *)-1)
1078 return 0;
e9397286
GS
1079 if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */
1080 last_op = 0;
1081 }
fa423c5b
IZ
1082 return ovp;
1083 } else { /* Non-continuous or first sbrk(). */
1084 long add = sbrked_remains;
1085 char *newcp;
1086
1087 if (sbrked_remains) { /* Put rest into chain, we
1088 cannot use it right now. */
1089 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1090 sbrked_remains, 0);
1091 }
1092
1093 /* Second, check alignment. */
1094 slack = 0;
1095
61ae2fbf 1096#if !defined(atarist) && !defined(__MINT__) /* on the atari we don't have to worry about this */
fa423c5b 1097# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
5bbd1ef5
IZ
1098 /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
1099 improve performance of memory access. */
1100 if ((UV)cp & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
1101 slack = WANTED_ALIGNMENT - ((UV)cp & (WANTED_ALIGNMENT - 1));
fa423c5b
IZ
1102 add += slack;
1103 }
1104# endif
61ae2fbf 1105#endif /* !atarist && !MINT */
fa423c5b
IZ
1106
1107 if (add) {
1108 DEBUG_m(PerlIO_printf(Perl_debug_log,
1109 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
1110 (long)add, (long) slack,
1111 (long) sbrked_remains));
1112 newcp = (char *)sbrk(add);
1113#if defined(DEBUGGING_MSTATS)
1114 sbrks++;
1115 sbrk_slack += add;
1116#endif
1117 if (newcp != cp + require) {
1118 /* Too bad: even rounding sbrk() is not continuous.*/
1119 DEBUG_m(PerlIO_printf(Perl_debug_log,
1120 "failed to fix bad sbrk()\n"));
1121#ifdef PACK_MALLOC
1122 if (slack) {
741df71a 1123 MALLOC_UNLOCK;
5bbd1ef5 1124 fatalcroak("panic: Off-page sbrk\n");
fa423c5b
IZ
1125 }
1126#endif
1127 if (sbrked_remains) {
1128 /* Try again. */
1129#if defined(DEBUGGING_MSTATS)
1130 sbrk_slack += require;
1131#endif
1132 require = needed;
1133 DEBUG_m(PerlIO_printf(Perl_debug_log,
1134 "straight sbrk(%ld)\n",
1135 (long)require));
1136 cp = (char *)sbrk(require);
1137#ifdef DEBUGGING_MSTATS
1138 sbrks++;
1139#endif
1140 if (cp == (char *)-1)
1141 return 0;
1142 }
1143 sbrk_good = -1; /* Disable optimization!
1144 Continue with not-aligned... */
1145 } else {
1146 cp += slack;
1147 require += sbrked_remains;
1148 }
1149 }
1150
1151 if (last_sbrk_top) {
1152 sbrk_good -= SBRK_FAILURE_PRICE;
1153 }
1154
1155 ovp = (union overhead *) cp;
1156 /*
1157 * Round up to minimum allocation size boundary
1158 * and deduct from block count to reflect.
1159 */
1160
5bbd1ef5
IZ
1161# if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
1162 if ((UV)ovp & (NEEDED_ALIGNMENT - 1))
1163 fatalcroak("Misalignment of sbrk()\n");
1164 else
1165# endif
fa423c5b 1166#ifndef I286 /* Again, this should always be ok on an 80286 */
5bbd1ef5 1167 if ((UV)ovp & (MEM_ALIGNBYTES - 1)) {
fa423c5b
IZ
1168 DEBUG_m(PerlIO_printf(Perl_debug_log,
1169 "fixing sbrk(): %d bytes off machine alignement\n",
5bbd1ef5
IZ
1170 (int)((UV)ovp & (MEM_ALIGNBYTES - 1))));
1171 ovp = (union overhead *)(((UV)ovp + MEM_ALIGNBYTES) &
1172 ~(MEM_ALIGNBYTES - 1));
fa423c5b
IZ
1173 (*nblksp)--;
1174# if defined(DEBUGGING_MSTATS)
1175 /* This is only approx. if TWO_POT_OPTIMIZE: */
5bbd1ef5 1176 sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
fa423c5b
IZ
1177# endif
1178 }
1179#endif
5bbd1ef5 1180 ; /* Finish `else' */
fa423c5b 1181 sbrked_remains = require - needed;
e9397286 1182 last_op = cp;
fa423c5b
IZ
1183 }
1184 last_sbrk_top = cp + require;
fa423c5b
IZ
1185#ifdef DEBUGGING_MSTATS
1186 goodsbrk += require;
1187#endif
1188 return ovp;
1189}
1190
1191static int
1192getpages_adjacent(int require)
1193{
1194 if (require <= sbrked_remains) {
1195 sbrked_remains -= require;
1196 } else {
1197 char *cp;
1198
1199 require -= sbrked_remains;
1200 /* We do not try to optimize sbrks here; we need this particular place. */
1201 cp = (char*) sbrk(require);
1202#ifdef DEBUGGING_MSTATS
1203 sbrks++;
1204 goodsbrk += require;
1205#endif
1206 if (cp == last_sbrk_top) {
1207 sbrked_remains = 0;
1208 last_sbrk_top = cp + require;
1209 } else {
28ac10b1
IZ
1210 if (cp == (char*)-1) { /* Out of memory */
1211#ifdef DEBUGGING_MSTATS
1212 goodsbrk -= require;
1213#endif
1214 return 0;
1215 }
fa423c5b
IZ
1216 /* Report the failure: */
1217 if (sbrked_remains)
1218 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1219 sbrked_remains, 0);
1220 add_to_chain((void*)cp, require, 0);
1221 sbrk_good -= SBRK_FAILURE_PRICE;
1222 sbrked_remains = 0;
1223 last_sbrk_top = 0;
1224 last_op = 0;
1225 return 0;
1226 }
1227 }
1228
1229 return 1;
1230}
1231
8d063cd8
LW
1232/*
1233 * Allocate more memory to the indicated bucket.
1234 */
a0d0e21e 1235static void
8ac85365 1236morecore(register int bucket)
8d063cd8 1237{
72aaf631 1238 register union overhead *ovp;
8d063cd8 1239 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1240 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1241 register MEM_SIZE siz, needed;
8d063cd8
LW
1242
1243 if (nextf[bucket])
1244 return;
e8bc2b5c 1245 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
741df71a 1246 MALLOC_UNLOCK;
d720c441 1247 croak("%s", "Out of memory during ridiculously large request");
55497cff 1248 }
d720c441 1249 if (bucket > max_bucket)
e8bc2b5c 1250 max_bucket = bucket;
d720c441 1251
e8bc2b5c
GS
1252 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1253 ? LOG_OF_MIN_ARENA
1254 : (bucket >> BUCKET_POW2_SHIFT) );
1255 /* This may be overwritten later: */
1256 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1257 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1258 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1259 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1260 nextf[rnu << BUCKET_POW2_SHIFT]
1261 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1262#ifdef DEBUGGING_MSTATS
1263 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1264 start_slack -= M_OVERHEAD;
1265#endif
1266 DEBUG_m(PerlIO_printf(Perl_debug_log,
1267 "stealing %ld bytes from %ld arena\n",
1268 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1269 } else if (chunk_chain
1270 && (ovp = (union overhead*) get_from_chain(needed))) {
1271 DEBUG_m(PerlIO_printf(Perl_debug_log,
1272 "stealing %ld bytes from chain\n",
1273 (long) needed));
d720c441
IZ
1274 } else if ( (ovp = (union overhead*)
1275 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1276 needed)) ) {
e8bc2b5c
GS
1277 DEBUG_m(PerlIO_printf(Perl_debug_log,
1278 "stealing %ld bytes from bigger buckets\n",
1279 (long) needed));
1280 } else if (needed <= sbrked_remains) {
1281 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1282 sbrked_remains -= needed;
1283 last_op = (char*)ovp;
fa423c5b
IZ
1284 } else
1285 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1286
fa423c5b
IZ
1287 if (!ovp)
1288 return;
e8bc2b5c 1289
8d063cd8
LW
1290 /*
1291 * Add new memory allocated to that on
1292 * free list for this hash bucket.
1293 */
e8bc2b5c 1294 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1295#ifdef PACK_MALLOC
72aaf631 1296 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c
GS
1297 if (bucket <= MAX_PACKED) {
1298 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1299 nblks = N_BLKS(bucket);
cf5c4ad8 1300# ifdef DEBUGGING_MSTATS
e8bc2b5c 1301 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1302# endif
e8bc2b5c
GS
1303 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1304 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1305 siz -= sizeof(union overhead);
72aaf631 1306 } else ovp++; /* One chunk per block. */
e8bc2b5c 1307#endif /* PACK_MALLOC */
72aaf631 1308 nextf[bucket] = ovp;
5f05dabc 1309#ifdef DEBUGGING_MSTATS
1310 nmalloc[bucket] += nblks;
e8bc2b5c
GS
1311 if (bucket > MAX_PACKED) {
1312 start_slack += M_OVERHEAD * nblks;
1313 }
5f05dabc 1314#endif
8d063cd8 1315 while (--nblks > 0) {
72aaf631
MB
1316 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1317 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1318 }
8595d6f1 1319 /* Not all sbrks return zeroed memory.*/
72aaf631 1320 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1321#ifdef PACK_MALLOC
e8bc2b5c
GS
1322 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1323 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1324 nextf[7*BUCKETS_PER_POW2] =
1325 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1326 - sizeof(union overhead));
1327 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8 1328 }
1329#endif /* !PACK_MALLOC */
8d063cd8
LW
1330}
1331
94b6baf5 1332Free_t
86058a2d 1333Perl_mfree(void *mp)
8d063cd8 1334{
ee0007ab 1335 register MEM_SIZE size;
72aaf631 1336 register union overhead *ovp;
352d5a3a 1337 char *cp = (char*)mp;
cf5c4ad8 1338#ifdef PACK_MALLOC
1339 u_char bucket;
1340#endif
8d063cd8 1341
e8bc2b5c
GS
1342 DEBUG_m(PerlIO_printf(Perl_debug_log,
1343 "0x%lx: (%05lu) free\n",
4a33f861 1344 (unsigned long)cp, (unsigned long)(PL_an++)));
45d8adaa 1345
cf5c4ad8 1346 if (cp == NULL)
1347 return;
72aaf631 1348 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1349 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1350#ifdef PACK_MALLOC
72aaf631 1351 bucket = OV_INDEX(ovp);
cf5c4ad8 1352#endif
e8bc2b5c
GS
1353#ifdef IGNORE_SMALL_BAD_FREE
1354 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1355 && (OV_MAGIC(ovp, bucket) != MAGIC))
1356#else
1357 if (OV_MAGIC(ovp, bucket) != MAGIC)
1358#endif
1359 {
68dc0745 1360 static int bad_free_warn = -1;
cf5c4ad8 1361 if (bad_free_warn == -1) {
5fd9e9a4 1362 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8 1363 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1364 }
1365 if (!bad_free_warn)
1366 return;
8990e307 1367#ifdef RCHECK
a687059c 1368 warn("%s free() ignored",
72aaf631 1369 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
8990e307 1370#else
d720c441 1371 warn("%s", "Bad free() ignored");
8990e307 1372#endif
8d063cd8 1373 return; /* sanity */
e8bc2b5c 1374 }
741df71a 1375 MALLOC_LOCK;
8d063cd8 1376#ifdef RCHECK
3541dd58 1377 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c
GS
1378 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1379 int i;
1380 MEM_SIZE nbytes = ovp->ov_size + 1;
1381
1382 if ((i = nbytes & 3)) {
1383 i = 4 - i;
1384 while (i--) {
3541dd58 1385 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1386 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1387 }
1388 }
1389 nbytes = (nbytes + 3) &~ 3;
3541dd58 1390 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1391 }
72aaf631 1392 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1393#endif
3541dd58 1394 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631
MB
1395 size = OV_INDEX(ovp);
1396 ovp->ov_next = nextf[size];
1397 nextf[size] = ovp;
741df71a 1398 MALLOC_UNLOCK;
8d063cd8
LW
1399}
1400
1401/*
1402 * When a program attempts "storage compaction" as mentioned in the
1403 * old malloc man page, it realloc's an already freed block. Usually
1404 * this is the last block it freed; occasionally it might be farther
1405 * back. We have to search all the free lists for the block in order
1406 * to determine its bucket: 1st we make one pass thru the lists
1407 * checking only the first block in each; if that fails we search
378cc40b 1408 * ``reall_srchlen'' blocks in each list for a match (the value
8d063cd8
LW
1409 * is a #define below). If that fails we just copy
1410 * however many bytes was given to realloc() and hope it's not huge.
1411 */
22c35a8c 1412#define reall_srchlen 4 /* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1413
2304df62 1414Malloc_t
86058a2d 1415Perl_realloc(void *mp, size_t nbytes)
8d063cd8 1416{
ee0007ab 1417 register MEM_SIZE onb;
72aaf631 1418 union overhead *ovp;
d720c441
IZ
1419 char *res;
1420 int prev_bucket;
e8bc2b5c
GS
1421 register int bucket;
1422 int was_alloced = 0, incr;
352d5a3a 1423 char *cp = (char*)mp;
8d063cd8 1424
e8bc2b5c 1425#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1426 MEM_SIZE size = nbytes;
45d8adaa 1427
45d8adaa 1428 if ((long)nbytes < 0)
d720c441 1429 croak("%s", "panic: realloc");
45d8adaa 1430#endif
e8bc2b5c
GS
1431
1432 BARK_64K_LIMIT("Reallocation",nbytes,size);
1433 if (!cp)
86058a2d 1434 return Perl_malloc(nbytes);
45d8adaa 1435
741df71a 1436 MALLOC_LOCK;
72aaf631 1437 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c
GS
1438 - sizeof (union overhead) * CHUNK_SHIFT);
1439 bucket = OV_INDEX(ovp);
1440#ifdef IGNORE_SMALL_BAD_FREE
1441 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1442 || (OV_MAGIC(ovp, bucket) == MAGIC))
1443#else
1444 if (OV_MAGIC(ovp, bucket) == MAGIC)
1445#endif
1446 {
55497cff 1447 was_alloced = 1;
8d063cd8
LW
1448 } else {
1449 /*
1450 * Already free, doing "compaction".
1451 *
1452 * Search for the old block of memory on the
1453 * free list. First, check the most common
1454 * case (last element free'd), then (this failing)
378cc40b 1455 * the last ``reall_srchlen'' items free'd.
8d063cd8
LW
1456 * If all lookups fail, then assume the size of
1457 * the memory block being realloc'd is the
1458 * smallest possible.
1459 */
e8bc2b5c
GS
1460 if ((bucket = findbucket(ovp, 1)) < 0 &&
1461 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1462 bucket = 0;
8d063cd8 1463 }
e8bc2b5c 1464 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1465 /*
1466 * avoid the copy if same size block.
e8bc2b5c
GS
1467 * We are not aggressive with boundary cases. Note that it might
1468 * (for a small number of cases) give false negative if
55497cff 1469 * both new size and old one are in the bucket for
e8bc2b5c
GS
1470 * FIRST_BIG_POW2, but the new one is near the lower end.
1471 *
1472 * So far we do not try to go to a 1.5 times smaller bucket.
55497cff 1473 */
e8bc2b5c
GS
1474 if (nbytes > onb) incr = 1;
1475 else {
1476#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1477 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1478 nbytes > ( (onb >> 1) - M_OVERHEAD )
1479# ifdef TWO_POT_OPTIMIZE
1480 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1481# endif
1482 )
1483#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1484 prev_bucket = ( (bucket > MAX_PACKED + 1)
1485 ? bucket - BUCKETS_PER_POW2
1486 : bucket - 1);
1487 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1488#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1489 incr = 0;
1490 else incr = -1;
1491 }
1492 if (!was_alloced
2ce36478 1493#ifdef STRESS_REALLOC
e8bc2b5c 1494 || 1 /* always do it the hard way */
2ce36478 1495#endif
e8bc2b5c
GS
1496 ) goto hard_way;
1497 else if (incr == 0) {
852c2e52 1498 inplace_label:
a687059c
LW
1499#ifdef RCHECK
1500 /*
1501 * Record new allocated size of block and
1502 * bound space with magic numbers.
1503 */
72aaf631 1504 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c
GS
1505 int i, nb = ovp->ov_size + 1;
1506
1507 if ((i = nb & 3)) {
1508 i = 4 - i;
1509 while (i--) {
3541dd58 1510 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1511 }
1512 }
1513 nb = (nb + 3) &~ 3;
3541dd58 1514 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c
LW
1515 /*
1516 * Convert amount of memory requested into
1517 * closest block size stored in hash buckets
1518 * which satisfies request. Account for
1519 * space used per block for accounting.
1520 */
cf5c4ad8 1521 nbytes += M_OVERHEAD;
72aaf631 1522 ovp->ov_size = nbytes - 1;
e8bc2b5c
GS
1523 if ((i = nbytes & 3)) {
1524 i = 4 - i;
1525 while (i--)
1526 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1527 = RMAGIC_C;
1528 }
1529 nbytes = (nbytes + 3) &~ 3;
72aaf631 1530 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c
LW
1531 }
1532#endif
45d8adaa 1533 res = cp;
741df71a 1534 MALLOC_UNLOCK;
42ac124e
IZ
1535 DEBUG_m(PerlIO_printf(Perl_debug_log,
1536 "0x%lx: (%05lu) realloc %ld bytes inplace\n",
1537 (unsigned long)res,(unsigned long)(PL_an++),
1538 (long)size));
e8bc2b5c
GS
1539 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1540 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1541 MEM_SIZE require, newarena = nbytes, pow;
1542 int shiftr;
1543
1544 POW2_OPTIMIZE_ADJUST(newarena);
1545 newarena = newarena + M_OVERHEAD;
1546 /* newarena = (newarena + 3) &~ 3; */
1547 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1548 pow = LOG_OF_MIN_ARENA + 1;
1549 /* apart from this loop, this is O(1) */
1550 while (shiftr >>= 1)
1551 pow++;
1552 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1553 require = newarena - onb - M_OVERHEAD;
1554
fa423c5b 1555 if (getpages_adjacent(require)) {
e8bc2b5c 1556#ifdef DEBUGGING_MSTATS
fa423c5b
IZ
1557 nmalloc[bucket]--;
1558 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1559#endif
fa423c5b
IZ
1560 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
1561 goto inplace_label;
1562 } else
1563 goto hard_way;
e8bc2b5c
GS
1564 } else {
1565 hard_way:
741df71a 1566 MALLOC_UNLOCK;
42ac124e
IZ
1567 DEBUG_m(PerlIO_printf(Perl_debug_log,
1568 "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
1569 (unsigned long)cp,(unsigned long)(PL_an++),
1570 (long)size));
86058a2d 1571 if ((res = (char*)Perl_malloc(nbytes)) == NULL)
e8bc2b5c
GS
1572 return (NULL);
1573 if (cp != res) /* common optimization */
1574 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1575 if (was_alloced)
86058a2d 1576 Perl_mfree(cp);
45d8adaa 1577 }
2304df62 1578 return ((Malloc_t)res);
8d063cd8
LW
1579}
1580
1581/*
1582 * Search ``srchlen'' elements of each free list for a block whose
1583 * header starts at ``freep''. If srchlen is -1 search the whole list.
1584 * Return bucket number, or -1 if not found.
1585 */
ee0007ab 1586static int
8ac85365 1587findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1588{
1589 register union overhead *p;
1590 register int i, j;
1591
1592 for (i = 0; i < NBUCKETS; i++) {
1593 j = 0;
1594 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1595 if (p == freep)
1596 return (i);
1597 j++;
1598 }
1599 }
1600 return (-1);
1601}
1602
cf5c4ad8 1603Malloc_t
86058a2d 1604Perl_calloc(register size_t elements, register size_t size)
cf5c4ad8 1605{
1606 long sz = elements * size;
86058a2d 1607 Malloc_t p = Perl_malloc(sz);
cf5c4ad8 1608
1609 if (p) {
1610 memset((void*)p, 0, sz);
1611 }
1612 return p;
1613}
1614
e8bc2b5c
GS
1615MEM_SIZE
1616malloced_size(void *p)
1617{
8d6dde3e
IZ
1618 union overhead *ovp = (union overhead *)
1619 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1620 int bucket = OV_INDEX(ovp);
1621#ifdef RCHECK
1622 /* The caller wants to have complete control over the chunk, so
 1623 disable the memory checking inside the chunk. */
1624 if (bucket <= MAX_SHORT_BUCKET) {
1625 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1626 ovp->ov_size = size + M_OVERHEAD - 1;
1627 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1628 }
1629#endif
e8bc2b5c
GS
1630 return BUCKET_SIZE_REAL(bucket);
1631}
1632
c07a80fd 1633#ifdef DEBUGGING_MSTATS
e8bc2b5c
GS
1634
1635# ifdef BUCKETS_ROOT2
1636# define MIN_EVEN_REPORT 6
1637# else
1638# define MIN_EVEN_REPORT MIN_BUCKET
1639# endif
8d063cd8
LW
1640/*
1641 * mstats - print out statistics about malloc
1642 *
1643 * Prints two lines of numbers, one showing the length of the free list
1644 * for each size category, the second showing the number of mallocs -
1645 * frees for each size category.
1646 */
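/* Usage sketch (hypothetical call site): with DEBUGGING_MSTATS compiled in,
 * a caller may print the current statistics at any point, e.g.
 *	dump_mstats("after startup");
 */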
ee0007ab 1647void
8ac85365 1648dump_mstats(char *s)
8d063cd8
LW
1649{
1650 register int i, j;
1651 register union overhead *p;
e8bc2b5c 1652 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
c07a80fd 1653 u_int nfree[NBUCKETS];
e8bc2b5c
GS
1654 int total_chain = 0;
1655 struct chunk_chain_s* nextchain = chunk_chain;
8d063cd8 1656
e8bc2b5c 1657 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8
LW
1658 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1659 ;
c07a80fd 1660 nfree[i] = j;
e8bc2b5c
GS
1661 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1662 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1663 if (nmalloc[i]) {
1664 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1665 topbucket = i;
1666 }
c07a80fd 1667 }
1668 if (s)
e8bc2b5c 1669 PerlIO_printf(PerlIO_stderr(),
d720c441 1670 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
e8bc2b5c 1671 s,
d720c441
IZ
1672 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1673 (long)BUCKET_SIZE(MIN_BUCKET),
1674 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
5f05dabc 1675 PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
e8bc2b5c
GS
1676 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1677 PerlIO_printf(PerlIO_stderr(),
1678 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1679 ? " %5d"
1680 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1681 nfree[i]);
1682 }
1683#ifdef BUCKETS_ROOT2
1684 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1685 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1686 PerlIO_printf(PerlIO_stderr(),
1687 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1688 ? " %5d"
1689 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1690 nfree[i]);
8d063cd8 1691 }
e8bc2b5c 1692#endif
5f05dabc 1693 PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
e8bc2b5c
GS
1694 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1695 PerlIO_printf(PerlIO_stderr(),
1696 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1697 ? " %5d"
1698 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1699 nmalloc[i] - nfree[i]);
c07a80fd 1700 }
e8bc2b5c
GS
1701#ifdef BUCKETS_ROOT2
1702 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1703 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1704 PerlIO_printf(PerlIO_stderr(),
1705 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1706 ? " %5d"
1707 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1708 nmalloc[i] - nfree[i]);
1709 }
1710#endif
1711 while (nextchain) {
1712 total_chain += nextchain->size;
1713 nextchain = nextchain->next;
1714 }
1715 PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1716 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1717 start_slack, total_chain, sbrked_remains);
c07a80fd 1718}
1719#else
1720void
8ac85365 1721dump_mstats(char *s)
c07a80fd 1722{
8d063cd8
LW
1723}
1724#endif
a687059c 1725#endif /* lint */
cf5c4ad8 1726
1727
1728#ifdef USE_PERL_SBRK
1729
2c92fcc0 1730# if defined(__MACHTEN_PPC__) || defined(__NeXT__)
38ac2dc8
DD
1731# define PERL_SBRK_VIA_MALLOC
1732/*
1733 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1734 * While this is adequate, it may slow down access to longer data
1735 * types by forcing multiple memory accesses. It also causes
1736 * complaints when RCHECK is in force. So we allocate six bytes
1737 * more than we need to, and return an address rounded up to an
1738 * eight-byte boundary.
1739 *
1740 * 980701 Dominic Dunlop <domo@computer.org>
1741 */
5bbd1ef5 1742# define SYSTEM_ALLOC_ALIGNMENT 2
38ac2dc8
DD
1743# endif
1744
760ac839 1745# ifdef PERL_SBRK_VIA_MALLOC
cf5c4ad8 1746
1747/* It may seem schizophrenic to use perl's malloc and let it call system */
 1748/* malloc; the reason is only that the 3.2 version of the OS had */
 1749/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
 1750/* end to the cores. */
1751
38ac2dc8
DD
1752# ifndef SYSTEM_ALLOC
1753# define SYSTEM_ALLOC(a) malloc(a)
1754# endif
5bbd1ef5
IZ
1755# ifndef SYSTEM_ALLOC_ALIGNMENT
1756# define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
1757# endif
cf5c4ad8 1758
760ac839 1759# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1760
1761static IV Perl_sbrk_oldchunk;
1762static long Perl_sbrk_oldsize;
1763
760ac839
LW
1764# define PERLSBRK_32_K (1<<15)
1765# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1766
b63effbb 1767Malloc_t
df0003d4 1768Perl_sbrk(int size)
cf5c4ad8 1769{
1770 IV got;
1771 int small, reqsize;
1772
1773 if (!size) return 0;
55497cff 1774#ifdef PERL_CORE
cf5c4ad8 1775 reqsize = size; /* just for the DEBUG_m statement */
1776#endif
57569e04
HM
1777#ifdef PACK_MALLOC
1778 size = (size + 0x7ff) & ~0x7ff;
1779#endif
cf5c4ad8 1780 if (size <= Perl_sbrk_oldsize) {
1781 got = Perl_sbrk_oldchunk;
1782 Perl_sbrk_oldchunk += size;
1783 Perl_sbrk_oldsize -= size;
1784 } else {
1785 if (size >= PERLSBRK_32_K) {
1786 small = 0;
1787 } else {
cf5c4ad8 1788 size = PERLSBRK_64_K;
1789 small = 1;
1790 }
5bbd1ef5
IZ
1791# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
1792 size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
1793# endif
cf5c4ad8 1794 got = (IV)SYSTEM_ALLOC(size);
5bbd1ef5 1795# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
5a7d6335 1796 got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
5bbd1ef5 1797# endif
cf5c4ad8 1798 if (small) {
1799 /* Chunk is small, register the rest for future allocs. */
1800 Perl_sbrk_oldchunk = got + reqsize;
1801 Perl_sbrk_oldsize = size - reqsize;
1802 }
1803 }
1804
fb73857a 1805 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1806 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1807
1808 return (void *)got;
1809}
1810
1811#endif /* ! defined USE_PERL_SBRK */