This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
Configure regen to pick up the new installation directories
[perl5.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
87c6202a 5/*
741df71a
IZ
6 Here are some notes on configuring Perl's malloc. (For non-perl
7 usage see below.)
87c6202a
IZ
8
9 There are two macros which serve as bulk disablers of advanced
10 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
11 default). Look in the list of default values below to understand
12 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
13 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
14 returns it to the state as of Perl 5.000.
15
16 Note that some of the settings below may be ignored in the code based
17 on values of other macros. The PERL_CORE symbol is only defined when
18 perl itself is being compiled (so malloc can make some assumptions
19 about perl's facilities being available to it).
20
21 Each config option has a short description, followed by its name,
22 default value, and a comment about the default (if applicable). Some
23 options take a precise value, while the others are just boolean.
24 The boolean ones are listed first.
25
26 # Enable code for an emergency memory pool in $^M. See perlvar.pod
27 # for a description of $^M.
28 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
29
30 # Enable code for printing memory statistics.
31 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
32
33 # Move allocation info for small buckets into separate areas.
34 # Memory optimization (especially for small allocations, of the
35 # less than 64 bytes). Since perl usually makes a large number
36 # of small allocations, this is usually a win.
37 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
38
39 # Add one page to big powers of two when calculating bucket size.
40 # This is targeted at big allocations, as are common in image
41 # processing.
42 TWO_POT_OPTIMIZE !PLAIN_MALLOC
43
44 # Use intermediate bucket sizes between powers-of-two. This is
45 # generally a memory optimization, and a (small) speed pessimization.
46 BUCKETS_ROOT2 !NO_FANCY_MALLOC
47
48 # Do not check small deallocations for bad free(). Memory
49 # and speed optimization, error reporting pessimization.
50 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
51
52 # Use table lookup to decide in which bucket a given allocation will go.
53 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
54
38ac2dc8
DD
55 # Use a perl-defined sbrk() instead of the (presumably broken or
56 # missing) system-supplied sbrk().
57 USE_PERL_SBRK undef
58
59 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
60 # only used with broken sbrk()s.
87c6202a
IZ
61 PERL_SBRK_VIA_MALLOC undef
62
38ac2dc8
DD
63 # Which allocator to use if PERL_SBRK_VIA_MALLOC
64 SYSTEM_ALLOC(a) malloc(a)
65
9ee81ef6 66 # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
5bbd1ef5
IZ
67 SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
68
87c6202a
IZ
69 # Disable memory overwrite checking with DEBUGGING. Memory and speed
70 # optimization, error reporting pessimization.
71 NO_RCHECK undef
72
73 # Enable memory overwrite checking with DEBUGGING. Memory and speed
74 # pessimization, error reporting optimization
75 RCHECK (DEBUGGING && !NO_RCHECK)
76
77 # Failed allocations bigger than this size croak (if
78 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
79 # perlvar.pod for a description of $^M.
80 BIG_SIZE (1<<16) # 64K
81
82 # Starting from this power of two, add an extra page to the
83 # size of the bucket. This enables optimized allocations of sizes
84 # close to powers of 2. Note that the value is indexed at 0.
85 FIRST_BIG_POW2 15 # 32K, 16K is used too often
86
87 # Estimate of minimal memory footprint. malloc uses this value to
88 # request the most reasonable largest blocks of memory from the system.
89 FIRST_SBRK (48*1024)
90
91 # Round up sbrk()s to multiples of this.
92 MIN_SBRK 2048
93
94 # Round up sbrk()s to multiples of this percent of footprint.
95 MIN_SBRK_FRAC 3
96
97 # Add this much memory to big powers of two to get the bucket size.
98 PERL_PAGESIZE 4096
99
100 # This many sbrk() discontinuities should be tolerated even
101 # from the start without deciding that sbrk() is usually
102 # discontinuous.
103 SBRK_ALLOW_FAILURES 3
104
105 # This many continuous sbrk()s compensate for one discontinuous one.
106 SBRK_FAILURE_PRICE 50
107
28ac10b1
IZ
108 # Some configurations may ask for 12-byte-or-so allocations which
109 # require 8-byte alignment (?!). In such situation one needs to
110 # define this to disable 12-byte bucket (will increase memory footprint)
111 STRICT_ALIGNMENT undef
112
87c6202a
IZ
113 This implementation assumes that calling PerlIO_printf() does not
114 result in any memory allocation calls (used during a panic).
115
116 */
117
741df71a
IZ
118/*
119 If used outside of Perl environment, it may be useful to redefine
120 the following macros (listed below with defaults):
121
122 # Type of address returned by allocation functions
123 Malloc_t void *
124
125 # Type of size argument for allocation functions
126 MEM_SIZE unsigned long
127
128 # Maximal value in LONG
129 LONG_MAX 0x7FFFFFFF
130
131 # Unsigned integer type big enough to keep a pointer
132 UV unsigned long
133
134 # Type of pointer with 1-byte granularity
135 caddr_t char *
136
137 # Type returned by free()
138 Free_t void
139
5bbd1ef5
IZ
140 # Very fatal condition reporting function (cannot call any )
141 fatalcroak(arg) write(2,arg,strlen(arg)) + exit(2)
142
741df71a
IZ
143 # Fatal error reporting function
144 croak(format, arg) warn(idem) + exit(1)
145
146 # Error reporting function
147 warn(format, arg) fprintf(stderr, idem)
148
149 # Locking/unlocking for MT operation
cea2e8a9
GS
150 MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
151 MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a
IZ
152
153 # Locking/unlocking mutex for MT operation
154 MUTEX_LOCK(l) void
155 MUTEX_UNLOCK(l) void
156 */
157
e8bc2b5c
GS
158#ifndef NO_FANCY_MALLOC
159# ifndef SMALL_BUCKET_VIA_TABLE
160# define SMALL_BUCKET_VIA_TABLE
161# endif
162# ifndef BUCKETS_ROOT2
163# define BUCKETS_ROOT2
164# endif
165# ifndef IGNORE_SMALL_BAD_FREE
166# define IGNORE_SMALL_BAD_FREE
167# endif
3562ef9b
IZ
168#endif
169
e8bc2b5c
GS
170#ifndef PLAIN_MALLOC /* Bulk enable features */
171# ifndef PACK_MALLOC
172# define PACK_MALLOC
173# endif
174# ifndef TWO_POT_OPTIMIZE
175# define TWO_POT_OPTIMIZE
176# endif
d720c441
IZ
177# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
178# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
179# endif
180# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
181# define DEBUGGING_MSTATS
182# endif
183#endif
184
185#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
186#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
187
61ae2fbf 188#if !(defined(I286) || defined(atarist) || defined(__MINT__))
e8bc2b5c
GS
189 /* take 2k unless the block is bigger than that */
190# define LOG_OF_MIN_ARENA 11
191#else
192 /* take 16k unless the block is bigger than that
193 (80286s like large segments!), probably good on the atari too */
194# define LOG_OF_MIN_ARENA 14
195#endif
196
8d063cd8 197#ifndef lint
1944739a
IZ
198# if defined(DEBUGGING) && !defined(NO_RCHECK)
199# define RCHECK
200# endif
e8bc2b5c
GS
201# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
202# undef IGNORE_SMALL_BAD_FREE
203# endif
8d063cd8
LW
204/*
205 * malloc.c (Caltech) 2/21/82
206 * Chris Kingsley, kingsley@cit-20.
207 *
208 * This is a very fast storage allocator. It allocates blocks of a small
209 * number of different sizes, and keeps free lists of each size. Blocks that
210 * don't exactly fit are passed up to the next larger size. In this
211 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 212 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 213 * This is designed for use in a program that uses vast quantities of memory,
741df71a
IZ
214 * but bombs when it runs out.
215 *
4eb8286e 216 * Modifications Copyright Ilya Zakharevich 1996-99.
741df71a
IZ
217 *
218 * Still very quick, but much more thrifty. (Std config is 10% slower
219 * than it was, and takes 67% of old heap size for typical usage.)
220 *
221 * Allocations of small blocks are now table-driven to many different
222 * buckets. Sizes of really big buckets are increased to accomodata
223 * common size=power-of-2 blocks. Running-out-of-memory is made into
224 * an exception. Deeply configurable and thread-safe.
225 *
8d063cd8
LW
226 */
227
d720c441
IZ
228#ifdef PERL_CORE
229# include "EXTERN.h"
4ad56ec9 230# define PERL_IN_MALLOC_C
d720c441 231# include "perl.h"
cea2e8a9
GS
232# if defined(PERL_IMPLICIT_CONTEXT)
233# define croak Perl_croak_nocontext
234# define warn Perl_warn_nocontext
235# endif
d720c441
IZ
236#else
237# ifdef PERL_FOR_X2P
238# include "../EXTERN.h"
239# include "../perl.h"
240# else
241# include <stdlib.h>
242# include <stdio.h>
243# include <memory.h>
244# define _(arg) arg
245# ifndef Malloc_t
246# define Malloc_t void *
247# endif
248# ifndef MEM_SIZE
249# define MEM_SIZE unsigned long
250# endif
251# ifndef LONG_MAX
252# define LONG_MAX 0x7FFFFFFF
253# endif
254# ifndef UV
255# define UV unsigned long
256# endif
257# ifndef caddr_t
258# define caddr_t char *
259# endif
260# ifndef Free_t
261# define Free_t void
262# endif
263# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
264# define PerlEnv_getenv getenv
265# define PerlIO_printf fprintf
266# define PerlIO_stderr() stderr
267# endif
e8bc2b5c 268# ifndef croak /* make depend */
741df71a 269# define croak(mess, arg) (warn((mess), (arg)), exit(1))
d720c441
IZ
270# endif
271# ifndef warn
741df71a 272# define warn(mess, arg) fprintf(stderr, (mess), (arg))
e8bc2b5c
GS
273# endif
274# ifdef DEBUG_m
275# undef DEBUG_m
276# endif
277# define DEBUG_m(a)
278# ifdef DEBUGGING
279# undef DEBUGGING
280# endif
cea2e8a9
GS
281# ifndef pTHX
282# define pTHX void
283# define pTHX_
284# define dTHX extern int Perl___notused
285# define WITH_THX(s) s
286# endif
c5be433b
GS
287# ifndef PERL_GET_INTERP
288# define PERL_GET_INTERP PL_curinterp
289# endif
4ad56ec9
IZ
290# ifndef Perl_malloc
291# define Perl_malloc malloc
292# endif
293# ifndef Perl_mfree
294# define Perl_mfree free
295# endif
296# ifndef Perl_realloc
297# define Perl_realloc realloc
298# endif
299# ifndef Perl_calloc
300# define Perl_calloc calloc
301# endif
302# ifndef Perl_strdup
303# define Perl_strdup strdup
304# endif
e8bc2b5c
GS
305#endif
306
307#ifndef MUTEX_LOCK
308# define MUTEX_LOCK(l)
309#endif
310
311#ifndef MUTEX_UNLOCK
312# define MUTEX_UNLOCK(l)
313#endif
314
741df71a 315#ifndef MALLOC_LOCK
cea2e8a9 316# define MALLOC_LOCK MUTEX_LOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a
IZ
317#endif
318
319#ifndef MALLOC_UNLOCK
cea2e8a9 320# define MALLOC_UNLOCK MUTEX_UNLOCK_NOCONTEXT(&PL_malloc_mutex)
741df71a
IZ
321#endif
322
5bbd1ef5
IZ
323# ifndef fatalcroak /* make depend */
324# define fatalcroak(mess) (write(2, (mess), strlen(mess)), exit(2))
325# endif
326
760ac839 327#ifdef DEBUGGING
e8bc2b5c 328# undef DEBUG_m
0b250b9e
GS
329# define DEBUG_m(a) \
330 STMT_START { \
331 if (PERL_GET_INTERP) { dTHX; if (PL_debug & 128) { a; } } \
332 } STMT_END
760ac839
LW
333#endif
334
e9397286
GS
335/*
336 * Layout of memory:
337 * ~~~~~~~~~~~~~~~~
338 * The memory is broken into "blocks" which occupy multiples of 2K (and
339 * generally speaking, have size "close" to a power of 2). The addresses
340 * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf
341 * is an array of linked lists.) (Addresses of used blocks are not known.)
342 *
4ad56ec9 343 * Moreover, since the algorithm may try to "bite" smaller blocks out
e9397286
GS
344 * of unused bigger ones, there are also regions of "irregular" size,
345 * managed separately, by a linked list chunk_chain.
346 *
347 * The third type of storage is the sbrk()ed-but-not-yet-used space, its
348 * end and size are kept in last_sbrk_top and sbrked_remains.
349 *
350 * Growing blocks "in place":
351 * ~~~~~~~~~~~~~~~~~~~~~~~~~
352 * The address of the block with the greatest address is kept in last_op
353 * (if not known, last_op is 0). If it is known that the memory above
354 * last_op is not continuous, or contains a chunk from chunk_chain,
355 * last_op is set to 0.
356 *
357 * The chunk with address last_op may be grown by expanding into
358 * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
359 * memory.
360 *
361 * Management of last_op:
362 * ~~~~~~~~~~~~~~~~~~~~~
363 *
364 * free() never changes the boundaries of blocks, so is not relevant.
365 *
366 * The only way realloc() may change the boundaries of blocks is if it
367 * grows a block "in place". However, in the case of success such a
368 * chunk is automatically last_op, and it remains last_op. In the case
369 * of failure getpages_adjacent() clears last_op.
370 *
371 * malloc() may change blocks by calling morecore() only.
372 *
373 * morecore() may create new blocks by:
374 * a) biting pieces from chunk_chain (cannot create one above last_op);
375 * b) biting a piece from an unused block (if block was last_op, this
376 * may create a chunk from chain above last_op, thus last_op is
377 * invalidated in such a case).
378 * c) biting of sbrk()ed-but-not-yet-used space. This creates
379 * a block which is last_op.
380 * d) Allocating new pages by calling getpages();
381 *
382 * getpages() creates a new block. It marks last_op at the bottom of
383 * the chunk of memory it returns.
384 *
385 * Active pages footprint:
386 * ~~~~~~~~~~~~~~~~~~~~~~
387 * Note that we do not need to traverse the lists in nextf[i], just take
388 * the first element of this list. However, we *need* to traverse the
389 * list in chunk_chain, but most the time it should be a very short one,
390 * so we do not step on a lot of pages we are not going to use.
391 *
392 * Flaws:
393 * ~~~~~
394 * get_from_bigger_buckets(): forget to increment price => Quite
395 * aggressive.
396 */
397
135863df
AB
398/* I don't much care whether these are defined in sys/types.h--LAW */
399
400#define u_char unsigned char
401#define u_int unsigned int
56431972
RB
402/*
403 * I removed the definition of u_bigint which appeared to be u_bigint = UV
404 * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT
405 * where I have used PTR2UV. RMB
406 */
135863df 407#define u_short unsigned short
8d063cd8 408
cf5c4ad8 409/* 286 and atarist like big chunks, which gives too much overhead. */
61ae2fbf 410#if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC)
e8bc2b5c 411# undef PACK_MALLOC
cf5c4ad8 412#endif
413
8d063cd8 414/*
cf5c4ad8 415 * The description below is applicable if PACK_MALLOC is not defined.
416 *
8d063cd8
LW
417 * The overhead on a block is at least 4 bytes. When free, this space
418 * contains a pointer to the next free block, and the bottom two bits must
419 * be zero. When in use, the first byte is set to MAGIC, and the second
420 * byte is the size index. The remaining bytes are for alignment.
421 * If range checking is enabled and the size of the block fits
422 * in two bytes, then the top two bytes hold the size of the requested block
423 * plus the range checking words, and the header word MINUS ONE.
424 */
425union overhead {
426 union overhead *ov_next; /* when free */
85e6fe83 427#if MEM_ALIGNBYTES > 4
c623bd54 428 double strut; /* alignment problems */
a687059c 429#endif
8d063cd8
LW
430 struct {
431 u_char ovu_magic; /* magic number */
432 u_char ovu_index; /* bucket # */
433#ifdef RCHECK
434 u_short ovu_size; /* actual block size */
435 u_int ovu_rmagic; /* range magic number */
436#endif
437 } ovu;
438#define ov_magic ovu.ovu_magic
439#define ov_index ovu.ovu_index
440#define ov_size ovu.ovu_size
441#define ov_rmagic ovu.ovu_rmagic
442};
443
444#define MAGIC 0xff /* magic # on accounting info */
445#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
446#define RMAGIC_C 0x55 /* magic # on range info */
447
8d063cd8 448#ifdef RCHECK
c2a5c2d2
IZ
449# define RSLOP sizeof (u_int)
450# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 451# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 452# else
e8bc2b5c 453# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 454# endif
8d063cd8 455#else
c2a5c2d2 456# define RSLOP 0
8d063cd8
LW
457#endif
458
e8bc2b5c
GS
459#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
460# undef BUCKETS_ROOT2
461#endif
462
463#ifdef BUCKETS_ROOT2
464# define BUCKET_TABLE_SHIFT 2
465# define BUCKET_POW2_SHIFT 1
466# define BUCKETS_PER_POW2 2
467#else
468# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
469# define BUCKET_POW2_SHIFT 0
470# define BUCKETS_PER_POW2 1
471#endif
472
274c7500
IZ
473#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
474/* Figure out the alignment of void*. */
475struct aligner {
476 char c;
477 void *p;
478};
479# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
480#else
481# define ALIGN_SMALL MEM_ALIGNBYTES
482#endif
483
484#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
485
e8bc2b5c
GS
486#ifdef BUCKETS_ROOT2
487# define MAX_BUCKET_BY_TABLE 13
488static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
489 {
490 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
491 };
492# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
493# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
494 ? buck_size[i] \
495 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
496 - MEM_OVERHEAD(i) \
497 + POW2_OPTIMIZE_SURPLUS(i)))
498#else
499# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
500# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
501#endif
502
503
cf5c4ad8 504#ifdef PACK_MALLOC
4ad56ec9
IZ
505/* In this case there are several possible layout of arenas depending
506 * on the size. Arenas are of sizes multiple to 2K, 2K-aligned, and
507 * have a size close to a power of 2.
508 *
509 * Arenas of the size >= 4K keep one chunk only. Arenas of size 2K
510 * may keep one chunk or multiple chunks. Here are the possible
511 * layouts of arenas:
512 *
513 * # One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11
514 *
515 * INDEX MAGIC1 UNUSED CHUNK1
516 *
517 * # Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7
518 *
519 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ...
520 *
521 * # Multichunk with sanity checking and size 2^k-ALIGN, k=7
522 *
523 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ...
524 *
525 * # Multichunk with sanity checking and size up to 80
526 *
527 * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ...
528 *
529 * # No sanity check (usually up to 48=byte-long buckets)
530 * INDEX UNUSED CHUNK1 CHUNK2 ...
531 *
532 * Above INDEX and MAGIC are one-byte-long. Sizes of UNUSED are
533 * appropriate to keep algorithms simple and memory aligned. INDEX
534 * encodes the size of the chunk, while MAGICn encodes state (used,
535 * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn. MAGIC
536 * is used for sanity checking purposes only. SOMETHING is 0 or 4K
537 * (to make size of big CHUNK accomodate allocations for powers of two
538 * better).
539 *
540 * [There is no need to alignment between chunks, since C rules ensure
541 * that structs which need 2^k alignment have sizeof which is
542 * divisible by 2^k. Thus as far as the last chunk is aligned at the
543 * end of the arena, and 2K-alignment does not contradict things,
544 * everything is going to be OK for sizes of chunks 2^n and 2^n +
545 * 2^k. Say, 80-bit buckets will be 16-bit aligned, and as far as we
546 * put allocations for requests in 65..80 range, all is fine.
547 *
548 * Note, however, that standard malloc() puts more strict
549 * requirements than the above C rules. Moreover, our algorithms of
550 * realloc() may break this idyll, but we suppose that realloc() does
551 * need not change alignment.]
552 *
553 * Is very important to make calculation of the offset of MAGICm as
554 * quick as possible, since it is done on each malloc()/free(). In
555 * fact it is so quick that it has quite little effect on the speed of
556 * doing malloc()/free(). [By default] We forego such calculations
557 * for small chunks, but only to save extra 3% of memory, not because
558 * of speed considerations.
559 *
560 * Here is the algorithm [which is the same for all the allocations
561 * schemes above], see OV_MAGIC(block,bucket). Let OFFSETm be the
562 * offset of the CHUNKm from the start of ARENA. Then offset of
563 * MAGICm is (OFFSET1 >> SHIFT) + ADDOFFSET. Here SHIFT and ADDOFFSET
564 * are numbers which depend on the size of the chunks only.
565 *
566 * Let as check some sanity conditions. Numbers OFFSETm>>SHIFT are
567 * different for all the chunks in the arena if 2^SHIFT is not greater
568 * than size of the chunks in the arena. MAGIC1 will not overwrite
569 * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT. MAGIClast
570 * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) +
571 * ADDOFFSET.
572 *
573 * Make SHIFT the maximal possible (there is no point in making it
574 * smaller). Since OFFSETlast is 2K - CHUNKSIZE, above restrictions
575 * give restrictions on OFFSET1 and on ADDOFFSET.
576 *
577 * In particular, for chunks of size 2^k with k>=6 we can put
578 * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have
579 * OFFSET1==chunksize. For chunks of size 80 OFFSET1 of 2K%80=48 is
580 * large enough to have ADDOFFSET between 1 and 16 (similarly for 96,
581 * when ADDOFFSET should be 1). In particular, keeping MAGICs for
582 * these sizes gives no additional size penalty.
583 *
584 * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >=
585 * ADDOFSET + 2^(11-k). Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k)
586 * chunks per arena. This is smaller than 2^(11-k) - 1 which are
587 * needed if no MAGIC is kept. [In fact, having a negative ADDOFFSET
588 * would allow for slightly more buckets per arena for k=2,3.]
589 *
590 * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span
591 * the area up to 2^(11-k)+ADDOFFSET. For k=4 this give optimal
592 * ADDOFFSET as -7..0. For k=3 ADDOFFSET can go up to 4 (with tiny
593 * savings for negative ADDOFFSET). For k=5 ADDOFFSET can go -1..16
594 * (with no savings for negative values).
cf5c4ad8 595 *
4ad56ec9
IZ
596 * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6
597 * leads to tiny pessimizations in case of sizes 4, 8, 12, 24, and
598 * leads to no contradictions except for size=80 (or 96.)
cf5c4ad8 599 *
4ad56ec9
IZ
600 * However, it also makes sense to keep no magic for sizes 48 or less.
601 * This is what we do. In this case one needs ADDOFFSET>=1 also for
602 * chunksizes 12, 24, and 48, unless one gets one less chunk per
603 * arena.
604 *
605 * The algo of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until
606 * chunksize of 64, then makes it 1.
cf5c4ad8 607 *
4ad56ec9
IZ
608 * This allows for an additional optimization: the above scheme leads
609 * to giant overheads for sizes 128 or more (one whole chunk needs to
610 * be sacrifised to keep INDEX). Instead we use chunks not of size
611 * 2^k, but of size 2^k-ALIGN. If we pack these chunks at the end of
612 * the arena, then the beginnings are still in different 2^k-long
613 * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN=8.
614 * Thus for k>7 the above algo of calculating the offset of the magic
615 * will still give different answers for different chunks. And to
616 * avoid the overrun of MAGIC1 into INDEX, one needs ADDOFFSET of >=1.
617 * In the case k=7 we just move the first chunk an extra ALIGN
618 * backward inside the ARENA (this is done once per arena lifetime,
619 * thus is not a big overhead). */
e8bc2b5c
GS
620# define MAX_PACKED_POW2 6
621# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
622# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
623# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
56431972
RB
624# define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK)
625# define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK)
626# define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block)))
cf5c4ad8 627# define OV_INDEX(block) (*OV_INDEXp(block))
628# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
629 (TWOK_SHIFT(block)>> \
630 (bucket>>BUCKET_POW2_SHIFT)) + \
631 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
632 /* A bucket can have a shift smaller than it size, we need to
633 shift its magic number so it will not overwrite index: */
634# ifdef BUCKETS_ROOT2
635# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
636# else
637# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
638# endif
cf5c4ad8 639# define CHUNK_SHIFT 0
640
e8bc2b5c
GS
641/* Number of active buckets of given ordinal. */
642#ifdef IGNORE_SMALL_BAD_FREE
643#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
644# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
645 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
646 : n_blks[bucket] )
647#else
648# define N_BLKS(bucket) n_blks[bucket]
649#endif
650
651static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
652 {
653# if BUCKETS_PER_POW2==1
654 0, 0,
655 (MIN_BUC_POW2==2 ? 384 : 0),
656 224, 120, 62, 31, 16, 8, 4, 2
657# else
658 0, 0, 0, 0,
659 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
660 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
661# endif
662 };
663
664/* Shift of the first bucket with the given ordinal inside 2K chunk. */
665#ifdef IGNORE_SMALL_BAD_FREE
666# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
667 ? ((1<<LOG_OF_MIN_ARENA) \
668 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
669 : blk_shift[bucket])
670#else
671# define BLK_SHIFT(bucket) blk_shift[bucket]
672#endif
673
674static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
675 {
676# if BUCKETS_PER_POW2==1
677 0, 0,
678 (MIN_BUC_POW2==2 ? 512 : 0),
679 256, 128, 64, 64, /* 8 to 64 */
680 16*sizeof(union overhead),
681 8*sizeof(union overhead),
682 4*sizeof(union overhead),
683 2*sizeof(union overhead),
684# else
685 0, 0, 0, 0,
686 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
687 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
688 16*sizeof(union overhead), 16*sizeof(union overhead),
689 8*sizeof(union overhead), 8*sizeof(union overhead),
690 4*sizeof(union overhead), 4*sizeof(union overhead),
691 2*sizeof(union overhead), 2*sizeof(union overhead),
692# endif
693 };
cf5c4ad8 694
5bbd1ef5
IZ
695# define NEEDED_ALIGNMENT 0x800 /* 2k boundaries */
696# define WANTED_ALIGNMENT 0x800 /* 2k boundaries */
697
cf5c4ad8 698#else /* !PACK_MALLOC */
699
700# define OV_MAGIC(block,bucket) (block)->ov_magic
701# define OV_INDEX(block) (block)->ov_index
702# define CHUNK_SHIFT 1
e8bc2b5c 703# define MAX_PACKED -1
5bbd1ef5
IZ
704# define NEEDED_ALIGNMENT MEM_ALIGNBYTES
705# define WANTED_ALIGNMENT 0x400 /* 1k boundaries */
706
cf5c4ad8 707#endif /* !PACK_MALLOC */
708
e8bc2b5c
GS
709#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
710
711#ifdef PACK_MALLOC
712# define MEM_OVERHEAD(bucket) \
713 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
714# ifdef SMALL_BUCKET_VIA_TABLE
715# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
716# define START_SHIFT MAX_PACKED_POW2
717# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
718# define SIZE_TABLE_MAX 80
719# else
720# define SIZE_TABLE_MAX 64
721# endif
722static char bucket_of[] =
723 {
724# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
725 /* 0 to 15 in 4-byte increments. */
726 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
727 6, /* 8 */
274c7500 728 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c
GS
729 9, 9, 10, 10, /* 24, 32 */
730 11, 11, 11, 11, /* 48 */
731 12, 12, 12, 12, /* 64 */
732 13, 13, 13, 13, /* 80 */
733 13, 13, 13, 13 /* 80 */
734# else /* !BUCKETS_ROOT2 */
735 /* 0 to 15 in 4-byte increments. */
736 (sizeof(void*) > 4 ? 3 : 2),
737 3,
738 4, 4,
739 5, 5, 5, 5,
740 6, 6, 6, 6,
741 6, 6, 6, 6
742# endif /* !BUCKETS_ROOT2 */
743 };
744# else /* !SMALL_BUCKET_VIA_TABLE */
745# define START_SHIFTS_BUCKET MIN_BUCKET
746# define START_SHIFT (MIN_BUC_POW2 - 1)
747# endif /* !SMALL_BUCKET_VIA_TABLE */
748#else /* !PACK_MALLOC */
749# define MEM_OVERHEAD(bucket) M_OVERHEAD
750# ifdef SMALL_BUCKET_VIA_TABLE
751# undef SMALL_BUCKET_VIA_TABLE
752# endif
753# define START_SHIFTS_BUCKET MIN_BUCKET
754# define START_SHIFT (MIN_BUC_POW2 - 1)
755#endif /* !PACK_MALLOC */
cf5c4ad8 756
8d063cd8 757/*
55497cff 758 * Big allocations are often of the size 2^n bytes. To make them a
759 * little bit better, make blocks of size 2^n+pagesize for big n.
760 */
761
762#ifdef TWO_POT_OPTIMIZE
763
5f05dabc 764# ifndef PERL_PAGESIZE
765# define PERL_PAGESIZE 4096
766# endif
e8bc2b5c
GS
767# ifndef FIRST_BIG_POW2
768# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 769# endif
e8bc2b5c 770# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 771/* If this value or more, check against bigger blocks. */
772# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
773/* If less than this value, goes into 2^n-overhead-block. */
774# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
775
e8bc2b5c
GS
776# define POW2_OPTIMIZE_ADJUST(nbytes) \
777 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
778# define POW2_OPTIMIZE_SURPLUS(bucket) \
779 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
780
781#else /* !TWO_POT_OPTIMIZE */
782# define POW2_OPTIMIZE_ADJUST(nbytes)
783# define POW2_OPTIMIZE_SURPLUS(bucket) 0
784#endif /* !TWO_POT_OPTIMIZE */
785
786#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
787# define BARK_64K_LIMIT(what,nbytes,size) \
788 if (nbytes > 0xffff) { \
789 PerlIO_printf(PerlIO_stderr(), \
790 "%s too large: %lx\n", what, size); \
791 my_exit(1); \
792 }
793#else /* !HAS_64K_LIMIT || !PERL_CORE */
794# define BARK_64K_LIMIT(what,nbytes,size)
795#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 796
e8bc2b5c
GS
797#ifndef MIN_SBRK
798# define MIN_SBRK 2048
799#endif
800
801#ifndef FIRST_SBRK
d720c441 802# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
803#endif
804
805/* Minimal sbrk in percents of what is already alloced. */
806#ifndef MIN_SBRK_FRAC
807# define MIN_SBRK_FRAC 3
808#endif
809
810#ifndef SBRK_ALLOW_FAILURES
811# define SBRK_ALLOW_FAILURES 3
812#endif
55497cff 813
e8bc2b5c
GS
814#ifndef SBRK_FAILURE_PRICE
815# define SBRK_FAILURE_PRICE 50
55497cff 816#endif
817
e8bc2b5c
GS
818#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
819
820# ifndef BIG_SIZE
821# define BIG_SIZE (1<<16) /* 64K */
822# endif
823
3541dd58 824#ifdef I_MACH_CTHREADS
772fe5b3
HM
825# undef MUTEX_LOCK
826# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
827# undef MUTEX_UNLOCK
828# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58
HM
829#endif
830
55497cff 831static char *emergency_buffer;
832static MEM_SIZE emergency_buffer_size;
833
cea2e8a9
GS
834static int findbucket (union overhead *freep, int srchlen);
835static void morecore (register int bucket);
836# if defined(DEBUGGING)
837static void botch (char *diag, char *s);
838# endif
839static void add_to_chain (void *p, MEM_SIZE size, MEM_SIZE chip);
840static Malloc_t emergency_sbrk (MEM_SIZE size);
841static void* get_from_chain (MEM_SIZE size);
842static void* get_from_bigger_buckets(int bucket, MEM_SIZE size);
843static union overhead *getpages (int needed, int *nblksp, int bucket);
844static int getpages_adjacent(int require);
845
846static Malloc_t
847emergency_sbrk(MEM_SIZE size)
55497cff 848{
28ac10b1
IZ
849 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
850
55497cff 851 if (size >= BIG_SIZE) {
852 /* Give the possibility to recover: */
741df71a 853 MALLOC_UNLOCK;
1b979e0a 854 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff 855 }
856
28ac10b1
IZ
857 if (emergency_buffer_size >= rsize) {
858 char *old = emergency_buffer;
859
860 emergency_buffer_size -= rsize;
861 emergency_buffer += rsize;
862 return old;
863 } else {
cea2e8a9 864 dTHX;
55497cff 865 /* First offense, give a possibility to recover by dieing. */
866 /* No malloc involved here: */
4a33f861 867 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff 868 SV *sv;
869 char *pv;
28ac10b1 870 int have = 0;
2d8e6c8d 871 STRLEN n_a;
55497cff 872
28ac10b1
IZ
873 if (emergency_buffer_size) {
874 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
875 emergency_buffer_size = 0;
876 emergency_buffer = Nullch;
877 have = 1;
878 }
4a33f861 879 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 880 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1
IZ
881 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
882 if (have)
883 goto do_croak;
55497cff 884 return (char *)-1; /* Now die die die... */
28ac10b1 885 }
55497cff 886 /* Got it, now detach SvPV: */
2d8e6c8d 887 pv = SvPV(sv, n_a);
55497cff 888 /* Check alignment: */
56431972 889 if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) {
55497cff 890 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 891 return (char *)-1; /* die die die */
55497cff 892 }
893
28ac10b1
IZ
894 emergency_buffer = pv - sizeof(union overhead);
895 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 896 SvPOK_off(sv);
28ac10b1
IZ
897 SvPVX(sv) = Nullch;
898 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 899 }
28ac10b1 900 do_croak:
741df71a 901 MALLOC_UNLOCK;
28ac10b1 902 croak("Out of memory during request for %i bytes", size);
ce70748c
GS
903 /* NOTREACHED */
904 return Nullch;
55497cff 905}
906
e8bc2b5c 907#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 908# define emergency_sbrk(size) -1
e8bc2b5c 909#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 910
911/*
e8bc2b5c 912 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
913 * smallest allocatable block is 8 bytes. The overhead information
914 * precedes the data area returned to the user.
915 */
e8bc2b5c 916#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 917static union overhead *nextf[NBUCKETS];
cf5c4ad8 918
919#ifdef USE_PERL_SBRK
920#define sbrk(a) Perl_sbrk(a)
20ce7b12 921Malloc_t Perl_sbrk (int size);
8ac85365
NIS
922#else
923#ifdef DONT_DECLARE_STD
924#ifdef I_UNISTD
925#include <unistd.h>
926#endif
cf5c4ad8 927#else
52082926 928extern Malloc_t sbrk(int);
8ac85365 929#endif
cf5c4ad8 930#endif
8d063cd8 931
c07a80fd 932#ifdef DEBUGGING_MSTATS
8d063cd8
LW
933/*
934 * nmalloc[i] is the difference between the number of mallocs and frees
935 * for a given block size.
936 */
937static u_int nmalloc[NBUCKETS];
5f05dabc 938static u_int sbrk_slack;
939static u_int start_slack;
8d063cd8
LW
940#endif
941
e8bc2b5c
GS
942static u_int goodsbrk;
943
760ac839 944#ifdef DEBUGGING
3541dd58
HM
945#undef ASSERT
946#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
cea2e8a9
GS
947static void
948botch(char *diag, char *s)
8d063cd8 949{
32e30700 950 dTHXo;
d720c441 951 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 952 PerlProc_abort();
8d063cd8
LW
953}
954#else
3541dd58 955#define ASSERT(p, diag)
8d063cd8
LW
956#endif
957
2304df62 958Malloc_t
86058a2d 959Perl_malloc(register size_t nbytes)
8d063cd8
LW
960{
961 register union overhead *p;
e8bc2b5c 962 register int bucket;
ee0007ab 963 register MEM_SIZE shiftr;
8d063cd8 964
c2a5c2d2 965#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 966 MEM_SIZE size = nbytes;
45d8adaa
LW
967#endif
968
e8bc2b5c 969 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa
LW
970#ifdef DEBUGGING
971 if ((long)nbytes < 0)
cea2e8a9 972 croak("%s", "panic: malloc");
45d8adaa 973#endif
45d8adaa 974
8d063cd8
LW
975 /*
976 * Convert amount of memory requested into
977 * closest block size stored in hash buckets
978 * which satisfies request. Account for
979 * space used per block for accounting.
980 */
cf5c4ad8 981#ifdef PACK_MALLOC
e8bc2b5c
GS
982# ifdef SMALL_BUCKET_VIA_TABLE
983 if (nbytes == 0)
984 bucket = MIN_BUCKET;
985 else if (nbytes <= SIZE_TABLE_MAX) {
986 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
987 } else
988# else
043bf814
RB
989 if (nbytes == 0)
990 nbytes = 1;
e8bc2b5c
GS
991 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
992 else
993# endif
55497cff 994#endif
e8bc2b5c
GS
995 {
996 POW2_OPTIMIZE_ADJUST(nbytes);
997 nbytes += M_OVERHEAD;
998 nbytes = (nbytes + 3) &~ 3;
999 do_shifts:
1000 shiftr = (nbytes - 1) >> START_SHIFT;
1001 bucket = START_SHIFTS_BUCKET;
1002 /* apart from this loop, this is O(1) */
1003 while (shiftr >>= 1)
1004 bucket += BUCKETS_PER_POW2;
cf5c4ad8 1005 }
4ad56ec9 1006 MALLOC_LOCK;
8d063cd8
LW
1007 /*
1008 * If nothing in hash bucket right now,
1009 * request more memory from the system.
1010 */
1011 if (nextf[bucket] == NULL)
1012 morecore(bucket);
e8bc2b5c 1013 if ((p = nextf[bucket]) == NULL) {
741df71a 1014 MALLOC_UNLOCK;
55497cff 1015#ifdef PERL_CORE
0b250b9e
GS
1016 {
1017 dTHX;
1018 if (!PL_nomemok) {
1019 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
1020 my_exit(1);
1021 }
ee0007ab 1022 }
45d8adaa 1023#endif
4ad56ec9 1024 return (NULL);
45d8adaa
LW
1025 }
1026
e8bc2b5c
GS
1027 DEBUG_m(PerlIO_printf(Perl_debug_log,
1028 "0x%lx: (%05lu) malloc %ld bytes\n",
4a33f861 1029 (unsigned long)(p+1), (unsigned long)(PL_an++),
e8bc2b5c 1030 (long)size));
45d8adaa 1031
8d063cd8 1032 /* remove from linked list */
802004fa 1033#if defined(RCHECK)
32e30700
GS
1034 if ((PTR2UV(p)) & (MEM_ALIGNBYTES - 1)) {
1035 dTHXo;
760ac839 1036 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
a0d0e21e 1037 (unsigned long)*((int*)p),(unsigned long)p);
32e30700 1038 }
bf38876a
LW
1039#endif
1040 nextf[bucket] = p->ov_next;
4ad56ec9
IZ
1041
1042 MALLOC_UNLOCK;
1043
e8bc2b5c
GS
1044#ifdef IGNORE_SMALL_BAD_FREE
1045 if (bucket >= FIRST_BUCKET_WITH_CHECK)
1046#endif
1047 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8 1048#ifndef PACK_MALLOC
1049 OV_INDEX(p) = bucket;
1050#endif
8d063cd8
LW
1051#ifdef RCHECK
1052 /*
1053 * Record allocated size of block and
1054 * bound space with magic numbers.
1055 */
8d063cd8 1056 p->ov_rmagic = RMAGIC;
e8bc2b5c
GS
1057 if (bucket <= MAX_SHORT_BUCKET) {
1058 int i;
1059
1060 nbytes = size + M_OVERHEAD;
1061 p->ov_size = nbytes - 1;
1062 if ((i = nbytes & 3)) {
1063 i = 4 - i;
1064 while (i--)
1065 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
1066 }
1067 nbytes = (nbytes + 3) &~ 3;
1068 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
1069 }
8d063cd8 1070#endif
cf5c4ad8 1071 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8
LW
1072}
1073
e8bc2b5c
GS
1074static char *last_sbrk_top;
1075static char *last_op; /* This arena can be easily extended. */
1076static int sbrked_remains;
1077static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
1078
1079#ifdef DEBUGGING_MSTATS
1080static int sbrks;
1081#endif
1082
1083struct chunk_chain_s {
1084 struct chunk_chain_s *next;
1085 MEM_SIZE size;
1086};
1087static struct chunk_chain_s *chunk_chain;
1088static int n_chunks;
1089static char max_bucket;
1090
1091/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
cea2e8a9
GS
1092static void *
1093get_from_chain(MEM_SIZE size)
e8bc2b5c
GS
1094{
1095 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
1096 struct chunk_chain_s **oldgoodp = NULL;
1097 long min_remain = LONG_MAX;
1098
1099 while (elt) {
1100 if (elt->size >= size) {
1101 long remains = elt->size - size;
1102 if (remains >= 0 && remains < min_remain) {
1103 oldgoodp = oldp;
1104 min_remain = remains;
1105 }
1106 if (remains == 0) {
1107 break;
1108 }
1109 }
1110 oldp = &( elt->next );
1111 elt = elt->next;
1112 }
1113 if (!oldgoodp) return NULL;
1114 if (min_remain) {
1115 void *ret = *oldgoodp;
1116 struct chunk_chain_s *next = (*oldgoodp)->next;
1117
1118 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
1119 (*oldgoodp)->size = min_remain;
1120 (*oldgoodp)->next = next;
1121 return ret;
1122 } else {
1123 void *ret = *oldgoodp;
1124 *oldgoodp = (*oldgoodp)->next;
1125 n_chunks--;
1126 return ret;
1127 }
1128}
1129
cea2e8a9
GS
1130static void
1131add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
e8bc2b5c
GS
1132{
1133 struct chunk_chain_s *next = chunk_chain;
1134 char *cp = (char*)p;
1135
1136 cp += chip;
1137 chunk_chain = (struct chunk_chain_s *)cp;
1138 chunk_chain->size = size - chip;
1139 chunk_chain->next = next;
1140 n_chunks++;
1141}
1142
cea2e8a9
GS
1143static void *
1144get_from_bigger_buckets(int bucket, MEM_SIZE size)
e8bc2b5c
GS
1145{
1146 int price = 1;
1147 static int bucketprice[NBUCKETS];
1148 while (bucket <= max_bucket) {
1149 /* We postpone stealing from bigger buckets until we want it
1150 often enough. */
1151 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1152 /* Steal it! */
1153 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1154 bucketprice[bucket] = 0;
1155 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1156 last_op = NULL; /* Disable optimization */
1157 }
1158 nextf[bucket] = nextf[bucket]->ov_next;
1159#ifdef DEBUGGING_MSTATS
1160 nmalloc[bucket]--;
1161 start_slack -= M_OVERHEAD;
1162#endif
1163 add_to_chain(ret, (BUCKET_SIZE(bucket) +
1164 POW2_OPTIMIZE_SURPLUS(bucket)),
1165 size);
1166 return ret;
1167 }
1168 bucket++;
1169 }
1170 return NULL;
1171}
1172
cea2e8a9
GS
1173static union overhead *
1174getpages(int needed, int *nblksp, int bucket)
fa423c5b
IZ
1175{
1176 /* Need to do (possibly expensive) system call. Try to
1177 optimize it for rare calling. */
1178 MEM_SIZE require = needed - sbrked_remains;
1179 char *cp;
1180 union overhead *ovp;
1181 int slack = 0;
1182
1183 if (sbrk_good > 0) {
1184 if (!last_sbrk_top && require < FIRST_SBRK)
1185 require = FIRST_SBRK;
1186 else if (require < MIN_SBRK) require = MIN_SBRK;
1187
1188 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
1189 require = goodsbrk * MIN_SBRK_FRAC / 100;
1190 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1191 } else {
1192 require = needed;
1193 last_sbrk_top = 0;
1194 sbrked_remains = 0;
1195 }
1196
1197 DEBUG_m(PerlIO_printf(Perl_debug_log,
1198 "sbrk(%ld) for %ld-byte-long arena\n",
1199 (long)require, (long) needed));
1200 cp = (char *)sbrk(require);
1201#ifdef DEBUGGING_MSTATS
1202 sbrks++;
1203#endif
1204 if (cp == last_sbrk_top) {
1205 /* Common case, anything is fine. */
1206 sbrk_good++;
1207 ovp = (union overhead *) (cp - sbrked_remains);
e9397286 1208 last_op = cp - sbrked_remains;
fa423c5b
IZ
1209 sbrked_remains = require - (needed - sbrked_remains);
1210 } else if (cp == (char *)-1) { /* no more room! */
1211 ovp = (union overhead *)emergency_sbrk(needed);
1212 if (ovp == (union overhead *)-1)
1213 return 0;
e9397286
GS
1214 if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */
1215 last_op = 0;
1216 }
fa423c5b
IZ
1217 return ovp;
1218 } else { /* Non-continuous or first sbrk(). */
1219 long add = sbrked_remains;
1220 char *newcp;
1221
1222 if (sbrked_remains) { /* Put rest into chain, we
1223 cannot use it right now. */
1224 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1225 sbrked_remains, 0);
1226 }
1227
1228 /* Second, check alignment. */
1229 slack = 0;
1230
61ae2fbf 1231#if !defined(atarist) && !defined(__MINT__) /* on the atari we dont have to worry about this */
fa423c5b 1232# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
5bbd1ef5
IZ
1233 /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
1234 improve performance of memory access. */
56431972
RB
1235 if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
1236 slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1));
fa423c5b
IZ
1237 add += slack;
1238 }
1239# endif
61ae2fbf 1240#endif /* !atarist && !MINT */
fa423c5b
IZ
1241
1242 if (add) {
1243 DEBUG_m(PerlIO_printf(Perl_debug_log,
1244 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
1245 (long)add, (long) slack,
1246 (long) sbrked_remains));
1247 newcp = (char *)sbrk(add);
1248#if defined(DEBUGGING_MSTATS)
1249 sbrks++;
1250 sbrk_slack += add;
1251#endif
1252 if (newcp != cp + require) {
1253 /* Too bad: even rounding sbrk() is not continuous.*/
1254 DEBUG_m(PerlIO_printf(Perl_debug_log,
1255 "failed to fix bad sbrk()\n"));
1256#ifdef PACK_MALLOC
1257 if (slack) {
741df71a 1258 MALLOC_UNLOCK;
5bbd1ef5 1259 fatalcroak("panic: Off-page sbrk\n");
fa423c5b
IZ
1260 }
1261#endif
1262 if (sbrked_remains) {
1263 /* Try again. */
1264#if defined(DEBUGGING_MSTATS)
1265 sbrk_slack += require;
1266#endif
1267 require = needed;
1268 DEBUG_m(PerlIO_printf(Perl_debug_log,
1269 "straight sbrk(%ld)\n",
1270 (long)require));
1271 cp = (char *)sbrk(require);
1272#ifdef DEBUGGING_MSTATS
1273 sbrks++;
1274#endif
1275 if (cp == (char *)-1)
1276 return 0;
1277 }
1278 sbrk_good = -1; /* Disable optimization!
1279 Continue with not-aligned... */
1280 } else {
1281 cp += slack;
1282 require += sbrked_remains;
1283 }
1284 }
1285
1286 if (last_sbrk_top) {
1287 sbrk_good -= SBRK_FAILURE_PRICE;
1288 }
1289
1290 ovp = (union overhead *) cp;
1291 /*
1292 * Round up to minimum allocation size boundary
1293 * and deduct from block count to reflect.
1294 */
1295
5bbd1ef5 1296# if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
56431972 1297 if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1))
5bbd1ef5
IZ
1298 fatalcroak("Misalignment of sbrk()\n");
1299 else
1300# endif
fa423c5b 1301#ifndef I286 /* Again, this should always be ok on an 80286 */
56431972 1302 if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) {
fa423c5b
IZ
1303 DEBUG_m(PerlIO_printf(Perl_debug_log,
1304 "fixing sbrk(): %d bytes off machine alignement\n",
56431972
RB
1305 (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1))));
1306 ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES) &
5bbd1ef5 1307 (MEM_ALIGNBYTES - 1));
fa423c5b
IZ
1308 (*nblksp)--;
1309# if defined(DEBUGGING_MSTATS)
1310 /* This is only approx. if TWO_POT_OPTIMIZE: */
5bbd1ef5 1311 sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
fa423c5b
IZ
1312# endif
1313 }
1314#endif
5bbd1ef5 1315 ; /* Finish `else' */
fa423c5b 1316 sbrked_remains = require - needed;
e9397286 1317 last_op = cp;
fa423c5b
IZ
1318 }
1319 last_sbrk_top = cp + require;
fa423c5b
IZ
1320#ifdef DEBUGGING_MSTATS
1321 goodsbrk += require;
1322#endif
1323 return ovp;
1324}
1325
cea2e8a9
GS
1326static int
1327getpages_adjacent(int require)
fa423c5b
IZ
1328{
1329 if (require <= sbrked_remains) {
1330 sbrked_remains -= require;
1331 } else {
1332 char *cp;
1333
1334 require -= sbrked_remains;
1335 /* We do not try to optimize sbrks here, we go for place. */
1336 cp = (char*) sbrk(require);
1337#ifdef DEBUGGING_MSTATS
1338 sbrks++;
1339 goodsbrk += require;
1340#endif
1341 if (cp == last_sbrk_top) {
1342 sbrked_remains = 0;
1343 last_sbrk_top = cp + require;
1344 } else {
28ac10b1
IZ
1345 if (cp == (char*)-1) { /* Out of memory */
1346#ifdef DEBUGGING_MSTATS
1347 goodsbrk -= require;
1348#endif
1349 return 0;
1350 }
fa423c5b
IZ
1351 /* Report the failure: */
1352 if (sbrked_remains)
1353 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1354 sbrked_remains, 0);
1355 add_to_chain((void*)cp, require, 0);
1356 sbrk_good -= SBRK_FAILURE_PRICE;
1357 sbrked_remains = 0;
1358 last_sbrk_top = 0;
1359 last_op = 0;
1360 return 0;
1361 }
1362 }
1363
1364 return 1;
1365}
1366
8d063cd8
LW
1367/*
1368 * Allocate more memory to the indicated bucket.
1369 */
cea2e8a9
GS
1370static void
1371morecore(register int bucket)
8d063cd8 1372{
72aaf631 1373 register union overhead *ovp;
8d063cd8 1374 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1375 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1376 register MEM_SIZE siz, needed;
8d063cd8
LW
1377
1378 if (nextf[bucket])
1379 return;
e8bc2b5c 1380 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
741df71a 1381 MALLOC_UNLOCK;
d720c441 1382 croak("%s", "Out of memory during ridiculously large request");
55497cff 1383 }
d720c441 1384 if (bucket > max_bucket)
e8bc2b5c 1385 max_bucket = bucket;
d720c441 1386
e8bc2b5c
GS
1387 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1388 ? LOG_OF_MIN_ARENA
1389 : (bucket >> BUCKET_POW2_SHIFT) );
1390 /* This may be overwritten later: */
1391 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1392 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1393 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1394 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1395 nextf[rnu << BUCKET_POW2_SHIFT]
1396 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1397#ifdef DEBUGGING_MSTATS
1398 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1399 start_slack -= M_OVERHEAD;
1400#endif
1401 DEBUG_m(PerlIO_printf(Perl_debug_log,
1402 "stealing %ld bytes from %ld arena\n",
1403 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1404 } else if (chunk_chain
1405 && (ovp = (union overhead*) get_from_chain(needed))) {
1406 DEBUG_m(PerlIO_printf(Perl_debug_log,
1407 "stealing %ld bytes from chain\n",
1408 (long) needed));
d720c441
IZ
1409 } else if ( (ovp = (union overhead*)
1410 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1411 needed)) ) {
e8bc2b5c
GS
1412 DEBUG_m(PerlIO_printf(Perl_debug_log,
1413 "stealing %ld bytes from bigger buckets\n",
1414 (long) needed));
1415 } else if (needed <= sbrked_remains) {
1416 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1417 sbrked_remains -= needed;
1418 last_op = (char*)ovp;
fa423c5b
IZ
1419 } else
1420 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1421
fa423c5b
IZ
1422 if (!ovp)
1423 return;
e8bc2b5c 1424
8d063cd8
LW
1425 /*
1426 * Add new memory allocated to that on
1427 * free list for this hash bucket.
1428 */
e8bc2b5c 1429 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1430#ifdef PACK_MALLOC
72aaf631 1431 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c
GS
1432 if (bucket <= MAX_PACKED) {
1433 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1434 nblks = N_BLKS(bucket);
cf5c4ad8 1435# ifdef DEBUGGING_MSTATS
e8bc2b5c 1436 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1437# endif
e8bc2b5c
GS
1438 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1439 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1440 siz -= sizeof(union overhead);
72aaf631 1441 } else ovp++; /* One chunk per block. */
e8bc2b5c 1442#endif /* PACK_MALLOC */
72aaf631 1443 nextf[bucket] = ovp;
5f05dabc 1444#ifdef DEBUGGING_MSTATS
1445 nmalloc[bucket] += nblks;
e8bc2b5c
GS
1446 if (bucket > MAX_PACKED) {
1447 start_slack += M_OVERHEAD * nblks;
1448 }
5f05dabc 1449#endif
8d063cd8 1450 while (--nblks > 0) {
72aaf631
MB
1451 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1452 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1453 }
8595d6f1 1454 /* Not all sbrks return zeroed memory.*/
72aaf631 1455 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1456#ifdef PACK_MALLOC
e8bc2b5c
GS
1457 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1458 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1459 nextf[7*BUCKETS_PER_POW2] =
1460 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1461 - sizeof(union overhead));
1462 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8 1463 }
1464#endif /* !PACK_MALLOC */
8d063cd8
LW
1465}
1466
94b6baf5 1467Free_t
86058a2d 1468Perl_mfree(void *mp)
cea2e8a9 1469{
ee0007ab 1470 register MEM_SIZE size;
72aaf631 1471 register union overhead *ovp;
352d5a3a 1472 char *cp = (char*)mp;
cf5c4ad8 1473#ifdef PACK_MALLOC
1474 u_char bucket;
1475#endif
8d063cd8 1476
e8bc2b5c
GS
1477 DEBUG_m(PerlIO_printf(Perl_debug_log,
1478 "0x%lx: (%05lu) free\n",
4a33f861 1479 (unsigned long)cp, (unsigned long)(PL_an++)));
45d8adaa 1480
cf5c4ad8 1481 if (cp == NULL)
1482 return;
72aaf631 1483 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1484 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1485#ifdef PACK_MALLOC
72aaf631 1486 bucket = OV_INDEX(ovp);
cf5c4ad8 1487#endif
e8bc2b5c
GS
1488#ifdef IGNORE_SMALL_BAD_FREE
1489 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1490 && (OV_MAGIC(ovp, bucket) != MAGIC))
1491#else
1492 if (OV_MAGIC(ovp, bucket) != MAGIC)
1493#endif
1494 {
68dc0745 1495 static int bad_free_warn = -1;
cf5c4ad8 1496 if (bad_free_warn == -1) {
32e30700 1497 dTHXo;
5fd9e9a4 1498 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8 1499 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1500 }
1501 if (!bad_free_warn)
1502 return;
8990e307 1503#ifdef RCHECK
a687059c 1504 warn("%s free() ignored",
72aaf631 1505 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
8990e307 1506#else
d720c441 1507 warn("%s", "Bad free() ignored");
8990e307 1508#endif
8d063cd8 1509 return; /* sanity */
e8bc2b5c 1510 }
8d063cd8 1511#ifdef RCHECK
3541dd58 1512 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c
GS
1513 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1514 int i;
1515 MEM_SIZE nbytes = ovp->ov_size + 1;
1516
1517 if ((i = nbytes & 3)) {
1518 i = 4 - i;
1519 while (i--) {
3541dd58 1520 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1521 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1522 }
1523 }
1524 nbytes = (nbytes + 3) &~ 3;
3541dd58 1525 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1526 }
72aaf631 1527 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1528#endif
3541dd58 1529 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631 1530 size = OV_INDEX(ovp);
4ad56ec9
IZ
1531
1532 MALLOC_LOCK;
72aaf631
MB
1533 ovp->ov_next = nextf[size];
1534 nextf[size] = ovp;
741df71a 1535 MALLOC_UNLOCK;
8d063cd8
LW
1536}
1537
4ad56ec9
IZ
1538/* There is no need to do any locking in realloc (with an exception of
1539 trying to grow in place if we are at the end of the chain).
1540 If somebody calls us from a different thread with the same address,
1541 we are sole anyway. */
8d063cd8 1542
2304df62 1543Malloc_t
86058a2d 1544Perl_realloc(void *mp, size_t nbytes)
cea2e8a9 1545{
ee0007ab 1546 register MEM_SIZE onb;
72aaf631 1547 union overhead *ovp;
d720c441
IZ
1548 char *res;
1549 int prev_bucket;
e8bc2b5c 1550 register int bucket;
4ad56ec9
IZ
1551 int incr; /* 1 if does not fit, -1 if "easily" fits in a
1552 smaller bucket, otherwise 0. */
352d5a3a 1553 char *cp = (char*)mp;
8d063cd8 1554
e8bc2b5c 1555#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1556 MEM_SIZE size = nbytes;
45d8adaa 1557
45d8adaa 1558 if ((long)nbytes < 0)
cea2e8a9 1559 croak("%s", "panic: realloc");
45d8adaa 1560#endif
e8bc2b5c
GS
1561
1562 BARK_64K_LIMIT("Reallocation",nbytes,size);
1563 if (!cp)
86058a2d 1564 return Perl_malloc(nbytes);
45d8adaa 1565
72aaf631 1566 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c
GS
1567 - sizeof (union overhead) * CHUNK_SHIFT);
1568 bucket = OV_INDEX(ovp);
4ad56ec9 1569
e8bc2b5c 1570#ifdef IGNORE_SMALL_BAD_FREE
4ad56ec9
IZ
1571 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1572 && (OV_MAGIC(ovp, bucket) != MAGIC))
e8bc2b5c 1573#else
4ad56ec9 1574 if (OV_MAGIC(ovp, bucket) != MAGIC)
e8bc2b5c 1575#endif
4ad56ec9
IZ
1576 {
1577 static int bad_free_warn = -1;
1578 if (bad_free_warn == -1) {
32e30700 1579 dTHXo;
4ad56ec9
IZ
1580 char *pbf = PerlEnv_getenv("PERL_BADFREE");
1581 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1582 }
1583 if (!bad_free_warn)
ce70748c 1584 return Nullch;
4ad56ec9
IZ
1585#ifdef RCHECK
1586 warn("%srealloc() %signored",
1587 (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
1588 ovp->ov_rmagic == RMAGIC - 1 ? "of freed memory " : "");
1589#else
1590 warn("%s", "Bad realloc() ignored");
1591#endif
ce70748c 1592 return Nullch; /* sanity */
4ad56ec9
IZ
1593 }
1594
e8bc2b5c 1595 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1596 /*
1597 * avoid the copy if same size block.
e8bc2b5c
GS
1598 * We are not agressive with boundary cases. Note that it might
1599 * (for a small number of cases) give false negative if
55497cff 1600 * both new size and old one are in the bucket for
e8bc2b5c
GS
1601 * FIRST_BIG_POW2, but the new one is near the lower end.
1602 *
1603 * We do not try to go to 1.5 times smaller bucket so far.
55497cff 1604 */
e8bc2b5c
GS
1605 if (nbytes > onb) incr = 1;
1606 else {
1607#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1608 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1609 nbytes > ( (onb >> 1) - M_OVERHEAD )
1610# ifdef TWO_POT_OPTIMIZE
1611 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1612# endif
1613 )
1614#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1615 prev_bucket = ( (bucket > MAX_PACKED + 1)
1616 ? bucket - BUCKETS_PER_POW2
1617 : bucket - 1);
1618 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1619#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1620 incr = 0;
1621 else incr = -1;
1622 }
2ce36478 1623#ifdef STRESS_REALLOC
4ad56ec9 1624 goto hard_way;
2ce36478 1625#endif
4ad56ec9 1626 if (incr == 0) {
852c2e52 1627 inplace_label:
a687059c
LW
1628#ifdef RCHECK
1629 /*
1630 * Record new allocated size of block and
1631 * bound space with magic numbers.
1632 */
72aaf631 1633 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c
GS
1634 int i, nb = ovp->ov_size + 1;
1635
1636 if ((i = nb & 3)) {
1637 i = 4 - i;
1638 while (i--) {
3541dd58 1639 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1640 }
1641 }
1642 nb = (nb + 3) &~ 3;
3541dd58 1643 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c
LW
1644 /*
1645 * Convert amount of memory requested into
1646 * closest block size stored in hash buckets
1647 * which satisfies request. Account for
1648 * space used per block for accounting.
1649 */
cf5c4ad8 1650 nbytes += M_OVERHEAD;
72aaf631 1651 ovp->ov_size = nbytes - 1;
e8bc2b5c
GS
1652 if ((i = nbytes & 3)) {
1653 i = 4 - i;
1654 while (i--)
1655 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1656 = RMAGIC_C;
1657 }
1658 nbytes = (nbytes + 3) &~ 3;
72aaf631 1659 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c
LW
1660 }
1661#endif
45d8adaa 1662 res = cp;
42ac124e
IZ
1663 DEBUG_m(PerlIO_printf(Perl_debug_log,
1664 "0x%lx: (%05lu) realloc %ld bytes inplace\n",
1665 (unsigned long)res,(unsigned long)(PL_an++),
1666 (long)size));
e8bc2b5c
GS
1667 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1668 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1669 MEM_SIZE require, newarena = nbytes, pow;
1670 int shiftr;
1671
1672 POW2_OPTIMIZE_ADJUST(newarena);
1673 newarena = newarena + M_OVERHEAD;
1674 /* newarena = (newarena + 3) &~ 3; */
1675 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1676 pow = LOG_OF_MIN_ARENA + 1;
1677 /* apart from this loop, this is O(1) */
1678 while (shiftr >>= 1)
1679 pow++;
1680 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1681 require = newarena - onb - M_OVERHEAD;
1682
4ad56ec9
IZ
1683 MALLOC_LOCK;
1684 if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */
1685 && getpages_adjacent(require)) {
e8bc2b5c 1686#ifdef DEBUGGING_MSTATS
fa423c5b
IZ
1687 nmalloc[bucket]--;
1688 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1689#endif
fa423c5b 1690 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
4ad56ec9 1691 MALLOC_UNLOCK;
fa423c5b 1692 goto inplace_label;
4ad56ec9
IZ
1693 } else {
1694 MALLOC_UNLOCK;
fa423c5b 1695 goto hard_way;
4ad56ec9 1696 }
e8bc2b5c
GS
1697 } else {
1698 hard_way:
42ac124e
IZ
1699 DEBUG_m(PerlIO_printf(Perl_debug_log,
1700 "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
1701 (unsigned long)cp,(unsigned long)(PL_an++),
1702 (long)size));
86058a2d 1703 if ((res = (char*)Perl_malloc(nbytes)) == NULL)
e8bc2b5c
GS
1704 return (NULL);
1705 if (cp != res) /* common optimization */
1706 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
4ad56ec9 1707 Perl_mfree(cp);
45d8adaa 1708 }
2304df62 1709 return ((Malloc_t)res);
8d063cd8
LW
1710}
1711
1712/*
1713 * Search ``srchlen'' elements of each free list for a block whose
1714 * header starts at ``freep''. If srchlen is -1 search the whole list.
1715 * Return bucket number, or -1 if not found.
1716 */
ee0007ab 1717static int
8ac85365 1718findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1719{
1720 register union overhead *p;
1721 register int i, j;
1722
1723 for (i = 0; i < NBUCKETS; i++) {
1724 j = 0;
1725 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1726 if (p == freep)
1727 return (i);
1728 j++;
1729 }
1730 }
1731 return (-1);
1732}
1733
cf5c4ad8 1734Malloc_t
86058a2d 1735Perl_calloc(register size_t elements, register size_t size)
cf5c4ad8 1736{
1737 long sz = elements * size;
86058a2d 1738 Malloc_t p = Perl_malloc(sz);
cf5c4ad8 1739
1740 if (p) {
1741 memset((void*)p, 0, sz);
1742 }
1743 return p;
1744}
1745
4ad56ec9
IZ
1746char *
1747Perl_strdup(const char *s)
1748{
1749 MEM_SIZE l = strlen(s);
1750 char *s1 = (char *)Perl_malloc(l);
1751
1752 Copy(s, s1, (MEM_SIZE)l, char);
1753 return s1;
1754}
1755
1756#ifdef PERL_CORE
1757int
1758Perl_putenv(char *a)
1759{
1760 /* Sometimes system's putenv conflicts with my_setenv() - this is system
1761 malloc vs Perl's free(). */
1762 dTHX;
1763 char *var;
1764 char *val = a;
1765 MEM_SIZE l;
1766 char buf[80];
1767
1768 while (*val && *val != '=')
1769 val++;
1770 if (!*val)
1771 return -1;
1772 l = val - a;
1773 if (l < sizeof(buf))
1774 var = buf;
1775 else
1776 var = Perl_malloc(l + 1);
1777 Copy(a, var, l, char);
1778 val++;
1779 my_setenv(var,val);
1780 if (var != buf)
1781 Perl_mfree(var);
1782 return 0;
1783}
1784# endif
1785
e8bc2b5c 1786MEM_SIZE
cea2e8a9 1787Perl_malloced_size(void *p)
e8bc2b5c 1788{
8d6dde3e
IZ
1789 union overhead *ovp = (union overhead *)
1790 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1791 int bucket = OV_INDEX(ovp);
1792#ifdef RCHECK
1793 /* The caller wants to have a complete control over the chunk,
1794 disable the memory checking inside the chunk. */
1795 if (bucket <= MAX_SHORT_BUCKET) {
1796 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1797 ovp->ov_size = size + M_OVERHEAD - 1;
1798 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1799 }
1800#endif
e8bc2b5c
GS
1801 return BUCKET_SIZE_REAL(bucket);
1802}
1803
e8bc2b5c
GS
1804# ifdef BUCKETS_ROOT2
1805# define MIN_EVEN_REPORT 6
1806# else
1807# define MIN_EVEN_REPORT MIN_BUCKET
1808# endif
8d063cd8
LW
1809/*
1810 * mstats - print out statistics about malloc
1811 *
1812 * Prints two lines of numbers, one showing the length of the free list
1813 * for each size category, the second showing the number of mallocs -
1814 * frees for each size category.
1815 */
ee0007ab 1816void
864dbfa3 1817Perl_dump_mstats(pTHX_ char *s)
8d063cd8 1818{
df31f264 1819#ifdef DEBUGGING_MSTATS
8d063cd8
LW
1820 register int i, j;
1821 register union overhead *p;
e8bc2b5c 1822 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
c07a80fd 1823 u_int nfree[NBUCKETS];
e8bc2b5c 1824 int total_chain = 0;
4ad56ec9 1825 struct chunk_chain_s* nextchain;
8d063cd8 1826
4ad56ec9 1827 MALLOC_LOCK;
e8bc2b5c 1828 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8
LW
1829 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1830 ;
c07a80fd 1831 nfree[i] = j;
e8bc2b5c
GS
1832 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1833 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1834 if (nmalloc[i]) {
1835 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1836 topbucket = i;
1837 }
c07a80fd 1838 }
4ad56ec9
IZ
1839 nextchain = chunk_chain;
1840 while (nextchain) {
1841 total_chain += nextchain->size;
1842 nextchain = nextchain->next;
1843 }
1844 MALLOC_UNLOCK;
c07a80fd 1845 if (s)
bf49b057 1846 PerlIO_printf(Perl_error_log,
d720c441 1847 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
e8bc2b5c 1848 s,
d720c441
IZ
1849 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1850 (long)BUCKET_SIZE(MIN_BUCKET),
1851 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
bf49b057 1852 PerlIO_printf(Perl_error_log, "%8d free:", totfree);
e8bc2b5c 1853 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
bf49b057 1854 PerlIO_printf(Perl_error_log,
e8bc2b5c
GS
1855 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1856 ? " %5d"
1857 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1858 nfree[i]);
1859 }
1860#ifdef BUCKETS_ROOT2
bf49b057 1861 PerlIO_printf(Perl_error_log, "\n\t ");
e8bc2b5c 1862 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
bf49b057 1863 PerlIO_printf(Perl_error_log,
e8bc2b5c
GS
1864 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1865 ? " %5d"
1866 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1867 nfree[i]);
8d063cd8 1868 }
e8bc2b5c 1869#endif
bf49b057 1870 PerlIO_printf(Perl_error_log, "\n%8d used:", total - totfree);
e8bc2b5c 1871 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
bf49b057 1872 PerlIO_printf(Perl_error_log,
e8bc2b5c
GS
1873 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1874 ? " %5d"
1875 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1876 nmalloc[i] - nfree[i]);
c07a80fd 1877 }
e8bc2b5c 1878#ifdef BUCKETS_ROOT2
bf49b057 1879 PerlIO_printf(Perl_error_log, "\n\t ");
e8bc2b5c 1880 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
bf49b057 1881 PerlIO_printf(Perl_error_log,
e8bc2b5c
GS
1882 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1883 ? " %5d"
1884 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1885 nmalloc[i] - nfree[i]);
1886 }
1887#endif
bf49b057 1888 PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
e8bc2b5c
GS
1889 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1890 start_slack, total_chain, sbrked_remains);
df31f264 1891#endif /* DEBUGGING_MSTATS */
c07a80fd 1892}
a687059c 1893#endif /* lint */
cf5c4ad8 1894
cf5c4ad8 1895#ifdef USE_PERL_SBRK
1896
8f1f23e8 1897# if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__)
38ac2dc8
DD
1898# define PERL_SBRK_VIA_MALLOC
1899/*
1900 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1901 * While this is adequate, it may slow down access to longer data
1902 * types by forcing multiple memory accesses. It also causes
1903 * complaints when RCHECK is in force. So we allocate six bytes
1904 * more than we need to, and return an address rounded up to an
1905 * eight-byte boundary.
1906 *
1907 * 980701 Dominic Dunlop <domo@computer.org>
1908 */
5bbd1ef5 1909# define SYSTEM_ALLOC_ALIGNMENT 2
38ac2dc8
DD
1910# endif
1911
760ac839 1912# ifdef PERL_SBRK_VIA_MALLOC
cf5c4ad8 1913
1914/* it may seem schizophrenic to use perl's malloc and let it call system */
1915/* malloc, the reason for that is only the 3.2 version of the OS that had */
1916/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1917/* end to the cores */
1918
38ac2dc8
DD
1919# ifndef SYSTEM_ALLOC
1920# define SYSTEM_ALLOC(a) malloc(a)
1921# endif
5bbd1ef5
IZ
1922# ifndef SYSTEM_ALLOC_ALIGNMENT
1923# define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
1924# endif
cf5c4ad8 1925
760ac839 1926# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1927
1928static IV Perl_sbrk_oldchunk;
1929static long Perl_sbrk_oldsize;
1930
760ac839
LW
1931# define PERLSBRK_32_K (1<<15)
1932# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1933
b63effbb 1934Malloc_t
df0003d4 1935Perl_sbrk(int size)
cf5c4ad8 1936{
1937 IV got;
1938 int small, reqsize;
1939
1940 if (!size) return 0;
55497cff 1941#ifdef PERL_CORE
cf5c4ad8 1942 reqsize = size; /* just for the DEBUG_m statement */
1943#endif
57569e04
HM
1944#ifdef PACK_MALLOC
1945 size = (size + 0x7ff) & ~0x7ff;
1946#endif
cf5c4ad8 1947 if (size <= Perl_sbrk_oldsize) {
1948 got = Perl_sbrk_oldchunk;
1949 Perl_sbrk_oldchunk += size;
1950 Perl_sbrk_oldsize -= size;
1951 } else {
1952 if (size >= PERLSBRK_32_K) {
1953 small = 0;
1954 } else {
cf5c4ad8 1955 size = PERLSBRK_64_K;
1956 small = 1;
1957 }
5bbd1ef5
IZ
1958# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
1959 size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
1960# endif
cf5c4ad8 1961 got = (IV)SYSTEM_ALLOC(size);
5bbd1ef5 1962# if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
5a7d6335 1963 got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
5bbd1ef5 1964# endif
cf5c4ad8 1965 if (small) {
1966 /* Chunk is small, register the rest for future allocs. */
1967 Perl_sbrk_oldchunk = got + reqsize;
1968 Perl_sbrk_oldsize = size - reqsize;
1969 }
1970 }
1971
fb73857a 1972 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1973 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1974
1975 return (void *)got;
1976}
1977
1978#endif /* ! defined USE_PERL_SBRK */