a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
87c6202a 5/*
741df71a
IZ
6 Here are some notes on configuring Perl's malloc. (For non-perl
7 usage see below.)
87c6202a
IZ
8
9 There are two macros which serve as bulk disablers of advanced
10 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
11 default). Look in the list of default values below to understand
12 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
13 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
14 returns it to the state as of Perl 5.000.
15
16 Note that some of the settings below may be ignored in the code based
17 on values of other macros. The PERL_CORE symbol is only defined when
18 perl itself is being compiled (so malloc can make some assumptions
19 about perl's facilities being available to it).
20
21 Each config option has a short description, followed by its name,
22 default value, and a comment about the default (if applicable). Some
23 options take a precise value, while the others are just boolean.
24 The boolean ones are listed first.
25
26 # Enable code for an emergency memory pool in $^M. See perlvar.pod
27 # for a description of $^M.
28 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
29
30 # Enable code for printing memory statistics.
31 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
32
33 # Move allocation info for small buckets into separate areas.
 34 # Memory optimization (especially for small allocations of less
 35 # than 64 bytes). Since perl usually makes a large number
36 # of small allocations, this is usually a win.
37 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
38
39 # Add one page to big powers of two when calculating bucket size.
40 # This is targeted at big allocations, as are common in image
41 # processing.
42 TWO_POT_OPTIMIZE !PLAIN_MALLOC
43
44 # Use intermediate bucket sizes between powers-of-two. This is
45 # generally a memory optimization, and a (small) speed pessimization.
46 BUCKETS_ROOT2 !NO_FANCY_MALLOC
47
48 # Do not check small deallocations for bad free(). Memory
49 # and speed optimization, error reporting pessimization.
50 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
51
52 # Use table lookup to decide in which bucket a given allocation will go.
53 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
54
38ac2dc8
DD
55 # Use a perl-defined sbrk() instead of the (presumably broken or
56 # missing) system-supplied sbrk().
57 USE_PERL_SBRK undef
58
59 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
60 # only used with broken sbrk()s.
87c6202a
IZ
61 PERL_SBRK_VIA_MALLOC undef
62
38ac2dc8
DD
63 # Which allocator to use if PERL_SBRK_VIA_MALLOC
64 SYSTEM_ALLOC(a) malloc(a)
65
87c6202a
IZ
66 # Disable memory overwrite checking with DEBUGGING. Memory and speed
67 # optimization, error reporting pessimization.
68 NO_RCHECK undef
69
70 # Enable memory overwrite checking with DEBUGGING. Memory and speed
71 # pessimization, error reporting optimization
72 RCHECK (DEBUGGING && !NO_RCHECK)
73
74 # Failed allocations bigger than this size croak (if
75 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
76 # perlvar.pod for a description of $^M.
77 BIG_SIZE (1<<16) # 64K
78
79 # Starting from this power of two, add an extra page to the
80 # size of the bucket. This enables optimized allocations of sizes
81 # close to powers of 2. Note that the value is indexed at 0.
82 FIRST_BIG_POW2 15 # 32K, 16K is used too often
83
84 # Estimate of minimal memory footprint. malloc uses this value to
 85 # request a reasonably large first block of memory from the system.
86 FIRST_SBRK (48*1024)
87
88 # Round up sbrk()s to multiples of this.
89 MIN_SBRK 2048
90
91 # Round up sbrk()s to multiples of this percent of footprint.
92 MIN_SBRK_FRAC 3
93
94 # Add this much memory to big powers of two to get the bucket size.
95 PERL_PAGESIZE 4096
96
97 # This many sbrk() discontinuities should be tolerated even
98 # from the start without deciding that sbrk() is usually
99 # discontinuous.
100 SBRK_ALLOW_FAILURES 3
101
102 # This many continuous sbrk()s compensate for one discontinuous one.
103 SBRK_FAILURE_PRICE 50
104
28ac10b1
IZ
105 # Some configurations may ask for 12-byte-or-so allocations which
 106 # require 8-byte alignment (?!). In such a situation one needs to
 107 # define this to disable the 12-byte bucket (increasing the memory footprint).
108 STRICT_ALIGNMENT undef
109
87c6202a
IZ
110 This implementation assumes that calling PerlIO_printf() does not
111 result in any memory allocation calls (used during a panic).
112
113 */
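/*
   Example (an illustrative sketch, not part of the original notes): the two
   bulk disablers above are normally supplied on the compiler command line
   when building by hand, e.g.

       cc -DNO_FANCY_MALLOC ...                   5.004-era behaviour
       cc -DNO_FANCY_MALLOC -DPLAIN_MALLOC ...    5.000-era behaviour

   The value-taking options (MIN_SBRK, FIRST_SBRK, PERL_PAGESIZE, ...) are
   all guarded by #ifndef below, so they can be overridden the same way.
 */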
114
741df71a
IZ
115/*
116 If used outside of Perl environment, it may be useful to redefine
117 the following macros (listed below with defaults):
118
119 # Type of address returned by allocation functions
120 Malloc_t void *
121
122 # Type of size argument for allocation functions
123 MEM_SIZE unsigned long
124
125 # Maximal value in LONG
126 LONG_MAX 0x7FFFFFFF
127
128 # Unsigned integer type big enough to keep a pointer
129 UV unsigned long
130
131 # Type of pointer with 1-byte granularity
132 caddr_t char *
133
134 # Type returned by free()
135 Free_t void
136
137 # Fatal error reporting function
138 croak(format, arg) warn(idem) + exit(1)
139
140 # Error reporting function
141 warn(format, arg) fprintf(stderr, idem)
142
143 # Locking/unlocking for MT operation
144 MALLOC_LOCK MUTEX_LOCK(PL_malloc_mutex)
145 MALLOC_UNLOCK MUTEX_UNLOCK(PL_malloc_mutex)
146
147 # Locking/unlocking mutex for MT operation
148 MUTEX_LOCK(l) void
149 MUTEX_UNLOCK(l) void
150 */
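/*
   A minimal sketch (assumptions, not from the original sources) of what an
   embedding, non-perl build might predefine before compiling this file:

       #define Malloc_t  void *
       #define MEM_SIZE  unsigned long
       #define Free_t    void
       #define croak(fmt, arg)  (warn((fmt), (arg)), exit(1))
       #define warn(fmt, arg)   fprintf(stderr, (fmt), (arg))

   Anything left undefined falls back to the defaults set up in the
   !PERL_CORE branch further below.
 */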
151
e8bc2b5c
GS
152#ifndef NO_FANCY_MALLOC
153# ifndef SMALL_BUCKET_VIA_TABLE
154# define SMALL_BUCKET_VIA_TABLE
155# endif
156# ifndef BUCKETS_ROOT2
157# define BUCKETS_ROOT2
158# endif
159# ifndef IGNORE_SMALL_BAD_FREE
160# define IGNORE_SMALL_BAD_FREE
161# endif
3562ef9b
IZ
162#endif
163
e8bc2b5c
GS
164#ifndef PLAIN_MALLOC /* Bulk enable features */
165# ifndef PACK_MALLOC
166# define PACK_MALLOC
167# endif
168# ifndef TWO_POT_OPTIMIZE
169# define TWO_POT_OPTIMIZE
170# endif
d720c441
IZ
171# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
172# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
173# endif
174# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
175# define DEBUGGING_MSTATS
176# endif
177#endif
178
179#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
180#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
181
182#if !(defined(I286) || defined(atarist))
183 /* take 2k unless the block is bigger than that */
184# define LOG_OF_MIN_ARENA 11
185#else
186 /* take 16k unless the block is bigger than that
187 (80286s like large segments!), probably good on the atari too */
188# define LOG_OF_MIN_ARENA 14
189#endif
190
8d063cd8 191#ifndef lint
1944739a
IZ
192# if defined(DEBUGGING) && !defined(NO_RCHECK)
193# define RCHECK
194# endif
e8bc2b5c
GS
195# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
196# undef IGNORE_SMALL_BAD_FREE
197# endif
8d063cd8
LW
198/*
199 * malloc.c (Caltech) 2/21/82
200 * Chris Kingsley, kingsley@cit-20.
201 *
202 * This is a very fast storage allocator. It allocates blocks of a small
203 * number of different sizes, and keeps free lists of each size. Blocks that
204 * don't exactly fit are passed up to the next larger size. In this
205 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 206 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8 207 * This is designed for use in a program that uses vast quantities of memory,
741df71a
IZ
208 * but bombs when it runs out.
209 *
210 * Modifications Copyright Ilya Zakharevich 1996-98.
211 *
212 * Still very quick, but much more thrifty. (Std config is 10% slower
213 * than it was, and takes 67% of old heap size for typical usage.)
214 *
215 * Allocations of small blocks are now table-driven to many different
 216 * buckets. Sizes of really big buckets are increased to accommodate
217 * common size=power-of-2 blocks. Running-out-of-memory is made into
218 * an exception. Deeply configurable and thread-safe.
219 *
8d063cd8
LW
220 */
221
d720c441
IZ
222#ifdef PERL_CORE
223# include "EXTERN.h"
224# include "perl.h"
225#else
226# ifdef PERL_FOR_X2P
227# include "../EXTERN.h"
228# include "../perl.h"
229# else
230# include <stdlib.h>
231# include <stdio.h>
232# include <memory.h>
233# define _(arg) arg
234# ifndef Malloc_t
235# define Malloc_t void *
236# endif
237# ifndef MEM_SIZE
238# define MEM_SIZE unsigned long
239# endif
240# ifndef LONG_MAX
241# define LONG_MAX 0x7FFFFFFF
242# endif
243# ifndef UV
244# define UV unsigned long
245# endif
246# ifndef caddr_t
247# define caddr_t char *
248# endif
249# ifndef Free_t
250# define Free_t void
251# endif
252# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
253# define PerlEnv_getenv getenv
254# define PerlIO_printf fprintf
255# define PerlIO_stderr() stderr
256# endif
e8bc2b5c 257# ifndef croak /* make depend */
741df71a 258# define croak(mess, arg) (warn((mess), (arg)), exit(1))
d720c441
IZ
259# endif
260# ifndef warn
741df71a 261# define warn(mess, arg) fprintf(stderr, (mess), (arg))
e8bc2b5c
GS
262# endif
263# ifdef DEBUG_m
264# undef DEBUG_m
265# endif
266# define DEBUG_m(a)
267# ifdef DEBUGGING
268# undef DEBUGGING
269# endif
270#endif
271
272#ifndef MUTEX_LOCK
273# define MUTEX_LOCK(l)
274#endif
275
276#ifndef MUTEX_UNLOCK
277# define MUTEX_UNLOCK(l)
278#endif
279
741df71a
IZ
280#ifndef MALLOC_LOCK
281# define MALLOC_LOCK MUTEX_LOCK(PL_malloc_mutex)
282#endif
283
284#ifndef MALLOC_UNLOCK
285# define MALLOC_UNLOCK MUTEX_UNLOCK(PL_malloc_mutex)
286#endif
287
760ac839 288#ifdef DEBUGGING
e8bc2b5c 289# undef DEBUG_m
4a33f861 290# define DEBUG_m(a) if (PL_debug & 128) a
760ac839
LW
291#endif
292
e9397286
GS
293/*
294 * Layout of memory:
295 * ~~~~~~~~~~~~~~~~
296 * The memory is broken into "blocks" which occupy multiples of 2K (and
297 * generally speaking, have size "close" to a power of 2). The addresses
298 * of such *unused* blocks are kept in nextf[i] with big enough i. (nextf
299 * is an array of linked lists.) (Addresses of used blocks are not known.)
300 *
 301 * Moreover, since the algorithm may try to "bite" smaller blocks out
302 * of unused bigger ones, there are also regions of "irregular" size,
303 * managed separately, by a linked list chunk_chain.
304 *
 305 * The third type of storage is the sbrk()ed-but-not-yet-used space; its
306 * end and size are kept in last_sbrk_top and sbrked_remains.
307 *
308 * Growing blocks "in place":
309 * ~~~~~~~~~~~~~~~~~~~~~~~~~
310 * The address of the block with the greatest address is kept in last_op
311 * (if not known, last_op is 0). If it is known that the memory above
312 * last_op is not continuous, or contains a chunk from chunk_chain,
313 * last_op is set to 0.
314 *
315 * The chunk with address last_op may be grown by expanding into
316 * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
317 * memory.
318 *
319 * Management of last_op:
320 * ~~~~~~~~~~~~~~~~~~~~~
321 *
322 * free() never changes the boundaries of blocks, so is not relevant.
323 *
324 * The only way realloc() may change the boundaries of blocks is if it
325 * grows a block "in place". However, in the case of success such a
326 * chunk is automatically last_op, and it remains last_op. In the case
327 * of failure getpages_adjacent() clears last_op.
328 *
329 * malloc() may change blocks by calling morecore() only.
330 *
331 * morecore() may create new blocks by:
332 * a) biting pieces from chunk_chain (cannot create one above last_op);
333 * b) biting a piece from an unused block (if block was last_op, this
334 * may create a chunk from chain above last_op, thus last_op is
335 * invalidated in such a case).
 336 * c) biting off sbrk()ed-but-not-yet-used space. This creates
337 * a block which is last_op.
338 * d) Allocating new pages by calling getpages();
339 *
340 * getpages() creates a new block. It marks last_op at the bottom of
341 * the chunk of memory it returns.
342 *
343 * Active pages footprint:
344 * ~~~~~~~~~~~~~~~~~~~~~~
345 * Note that we do not need to traverse the lists in nextf[i], just take
346 * the first element of this list. However, we *need* to traverse the
 347 * list in chunk_chain, but most of the time it should be a very short one,
348 * so we do not step on a lot of pages we are not going to use.
349 *
350 * Flaws:
351 * ~~~~~
 352 * get_from_bigger_buckets(): forgets to increment price => quite
 353 * aggressive.
354 */
355
135863df
AB
356/* I don't much care whether these are defined in sys/types.h--LAW */
357
358#define u_char unsigned char
359#define u_int unsigned int
e8bc2b5c
GS
360
361#ifdef HAS_QUAD
362# define u_bigint UV /* Needs to eat *void. */
363#else /* needed? */
364# define u_bigint unsigned long /* Needs to eat *void. */
365#endif
366
135863df 367#define u_short unsigned short
8d063cd8 368
cf5c4ad8 369/* 286 and atarist like big chunks, which gives too much overhead. */
370#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 371# undef PACK_MALLOC
cf5c4ad8 372#endif
373
8d063cd8 374/*
cf5c4ad8 375 * The description below is applicable if PACK_MALLOC is not defined.
376 *
8d063cd8
LW
377 * The overhead on a block is at least 4 bytes. When free, this space
378 * contains a pointer to the next free block, and the bottom two bits must
379 * be zero. When in use, the first byte is set to MAGIC, and the second
380 * byte is the size index. The remaining bytes are for alignment.
381 * If range checking is enabled and the size of the block fits
382 * in two bytes, then the top two bytes hold the size of the requested block
383 * plus the range checking words, and the header word MINUS ONE.
384 */
385union overhead {
386 union overhead *ov_next; /* when free */
85e6fe83 387#if MEM_ALIGNBYTES > 4
c623bd54 388 double strut; /* alignment problems */
a687059c 389#endif
8d063cd8
LW
390 struct {
391 u_char ovu_magic; /* magic number */
392 u_char ovu_index; /* bucket # */
393#ifdef RCHECK
394 u_short ovu_size; /* actual block size */
395 u_int ovu_rmagic; /* range magic number */
396#endif
397 } ovu;
398#define ov_magic ovu.ovu_magic
399#define ov_index ovu.ovu_index
400#define ov_size ovu.ovu_size
401#define ov_rmagic ovu.ovu_rmagic
402};
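/*
 * Illustrative sketch (the !PACK_MALLOC case, where CHUNK_SHIFT is 1): a
 * pointer p returned by this malloc() is preceded by its overhead union, so
 * the allocator recovers the bookkeeping as
 *
 *	union overhead *ovp = (union overhead *)
 *		((caddr_t)p - sizeof(union overhead) * CHUNK_SHIFT);
 *
 * and then trusts it only if OV_MAGIC(ovp, OV_INDEX(ovp)) == MAGIC; free()
 * and realloc() below do exactly this.
 */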
403
760ac839 404#ifdef DEBUGGING
d720c441 405static void botch _((char *diag, char *s));
a0d0e21e
LW
406#endif
407static void morecore _((int bucket));
408static int findbucket _((union overhead *freep, int srchlen));
28ac10b1 409static void add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip);
a0d0e21e 410
8d063cd8
LW
411#define MAGIC 0xff /* magic # on accounting info */
412#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
413#define RMAGIC_C 0x55 /* magic # on range info */
414
8d063cd8 415#ifdef RCHECK
c2a5c2d2
IZ
416# define RSLOP sizeof (u_int)
417# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 418# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 419# else
e8bc2b5c 420# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 421# endif
8d063cd8 422#else
c2a5c2d2 423# define RSLOP 0
8d063cd8
LW
424#endif
425
e8bc2b5c
GS
426#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
427# undef BUCKETS_ROOT2
428#endif
429
430#ifdef BUCKETS_ROOT2
431# define BUCKET_TABLE_SHIFT 2
432# define BUCKET_POW2_SHIFT 1
433# define BUCKETS_PER_POW2 2
434#else
435# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
436# define BUCKET_POW2_SHIFT 0
437# define BUCKETS_PER_POW2 1
438#endif
439
274c7500
IZ
440#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
441/* Figure out the alignment of void*. */
442struct aligner {
443 char c;
444 void *p;
445};
446# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
447#else
448# define ALIGN_SMALL MEM_ALIGNBYTES
449#endif
450
451#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
452
e8bc2b5c
GS
453#ifdef BUCKETS_ROOT2
454# define MAX_BUCKET_BY_TABLE 13
455static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
456 {
457 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
458 };
459# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
460# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
461 ? buck_size[i] \
462 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
463 - MEM_OVERHEAD(i) \
464 + POW2_OPTIMIZE_SURPLUS(i)))
465#else
466# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
467# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
468#endif
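/*
 * Worked example (the BUCKETS_ROOT2 case): even-numbered buckets are the
 * powers of two and odd-numbered ones come from buck_size[], giving the
 * intermediate sizes:
 *
 *	BUCKET_SIZE(8)  = 1<<4 = 16	BUCKET_SIZE(9)  = buck_size[9]  = 24
 *	BUCKET_SIZE(10) = 1<<5 = 32	BUCKET_SIZE(11) = buck_size[11] = 48
 *	BUCKET_SIZE(12) = 1<<6 = 64	BUCKET_SIZE(13) = buck_size[13] = 80
 *
 * BUCKET_SIZE_REAL() is the part usable by the caller: the raw table value
 * for i <= MAX_BUCKET_BY_TABLE, otherwise the bucket size minus
 * MEM_OVERHEAD(i) plus any POW2_OPTIMIZE_SURPLUS(i).
 */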
469
470
cf5c4ad8 471#ifdef PACK_MALLOC
e8bc2b5c
GS
472/* In this case it is assumed that if we do sbrk() in 2K units, we
473 * will get 2K aligned arenas (at least after some initial
 474 * alignment). The bucket number of a given subblock is stored at the start
 475 * of the 2K arena which contains the subblock. Several following bytes
476 * contain the magic numbers for the subblocks in the block.
cf5c4ad8 477 *
478 * Sizes of chunks are powers of 2 for chunks in buckets <=
479 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
480 * get alignment right).
481 *
e8bc2b5c
GS
 482 * Consider an arena for 2^n with n>MAX_PACKED_POW2. We suppose that
483 * starts of all the chunks in a 2K arena are in different
484 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
485 * boundary of 2K block, this means that sizeof(union
486 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
487 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
488 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
489 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8 490 *
491 * Note that with the above assumption we automatically have enough
492 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c
GS
493 * overlay union overhead over the chunk, thus the start of small chunks
494 * is immediately overwritten after freeing. */
495# define MAX_PACKED_POW2 6
496# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
497# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
498# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
499# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
500# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8 501# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
502# define OV_INDEX(block) (*OV_INDEXp(block))
503# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
504 (TWOK_SHIFT(block)>> \
505 (bucket>>BUCKET_POW2_SHIFT)) + \
506 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
 507 /* A bucket can have a shift smaller than its size; we need to
 508 shift its magic number so it will not overwrite the index: */
509# ifdef BUCKETS_ROOT2
510# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
511# else
512# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
513# endif
cf5c4ad8 514# define CHUNK_SHIFT 0
515
e8bc2b5c
GS
 516/* Number of blocks in a 2K arena for the bucket of a given ordinal. */
517#ifdef IGNORE_SMALL_BAD_FREE
518#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
519# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
520 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
521 : n_blks[bucket] )
522#else
523# define N_BLKS(bucket) n_blks[bucket]
524#endif
525
526static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
527 {
528# if BUCKETS_PER_POW2==1
529 0, 0,
530 (MIN_BUC_POW2==2 ? 384 : 0),
531 224, 120, 62, 31, 16, 8, 4, 2
532# else
533 0, 0, 0, 0,
534 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
535 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
536# endif
537 };
538
 539/* Offset of the first block of the given bucket inside its 2K arena. */
540#ifdef IGNORE_SMALL_BAD_FREE
541# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
542 ? ((1<<LOG_OF_MIN_ARENA) \
543 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
544 : blk_shift[bucket])
545#else
546# define BLK_SHIFT(bucket) blk_shift[bucket]
547#endif
548
549static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
550 {
551# if BUCKETS_PER_POW2==1
552 0, 0,
553 (MIN_BUC_POW2==2 ? 512 : 0),
554 256, 128, 64, 64, /* 8 to 64 */
555 16*sizeof(union overhead),
556 8*sizeof(union overhead),
557 4*sizeof(union overhead),
558 2*sizeof(union overhead),
559# else
560 0, 0, 0, 0,
561 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
562 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
563 16*sizeof(union overhead), 16*sizeof(union overhead),
564 8*sizeof(union overhead), 8*sizeof(union overhead),
565 4*sizeof(union overhead), 4*sizeof(union overhead),
566 2*sizeof(union overhead), 2*sizeof(union overhead),
567# endif
568 };
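/*
 * Worked example reading the two tables above (the case where the bad-free
 * check applies, i.e. when the IGNORE_SMALL_BAD_FREE shortcut is not used):
 * 32-byte chunks live in bucket 10, so a 2K arena holds n_blks[10] == 62 of
 * them starting at offset blk_shift[10] == 64; 64 + 62*32 == 2048, so the
 * arena is filled exactly, and the first 64 bytes are left for the index
 * byte and the per-chunk magic bytes that OV_INDEX()/OV_MAGIC() read.
 */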
cf5c4ad8 569
cf5c4ad8 570#else /* !PACK_MALLOC */
571
572# define OV_MAGIC(block,bucket) (block)->ov_magic
573# define OV_INDEX(block) (block)->ov_index
574# define CHUNK_SHIFT 1
e8bc2b5c 575# define MAX_PACKED -1
cf5c4ad8 576#endif /* !PACK_MALLOC */
577
e8bc2b5c
GS
578#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
579
580#ifdef PACK_MALLOC
581# define MEM_OVERHEAD(bucket) \
582 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
583# ifdef SMALL_BUCKET_VIA_TABLE
584# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
585# define START_SHIFT MAX_PACKED_POW2
586# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
587# define SIZE_TABLE_MAX 80
588# else
589# define SIZE_TABLE_MAX 64
590# endif
591static char bucket_of[] =
592 {
593# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
594 /* 0 to 15 in 4-byte increments. */
595 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
596 6, /* 8 */
274c7500 597 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c
GS
598 9, 9, 10, 10, /* 24, 32 */
599 11, 11, 11, 11, /* 48 */
600 12, 12, 12, 12, /* 64 */
601 13, 13, 13, 13, /* 80 */
602 13, 13, 13, 13 /* 80 */
603# else /* !BUCKETS_ROOT2 */
604 /* 0 to 15 in 4-byte increments. */
605 (sizeof(void*) > 4 ? 3 : 2),
606 3,
607 4, 4,
608 5, 5, 5, 5,
609 6, 6, 6, 6,
610 6, 6, 6, 6
611# endif /* !BUCKETS_ROOT2 */
612 };
613# else /* !SMALL_BUCKET_VIA_TABLE */
614# define START_SHIFTS_BUCKET MIN_BUCKET
615# define START_SHIFT (MIN_BUC_POW2 - 1)
616# endif /* !SMALL_BUCKET_VIA_TABLE */
617#else /* !PACK_MALLOC */
618# define MEM_OVERHEAD(bucket) M_OVERHEAD
619# ifdef SMALL_BUCKET_VIA_TABLE
620# undef SMALL_BUCKET_VIA_TABLE
621# endif
622# define START_SHIFTS_BUCKET MIN_BUCKET
623# define START_SHIFT (MIN_BUC_POW2 - 1)
624#endif /* !PACK_MALLOC */
cf5c4ad8 625
8d063cd8 626/*
55497cff 627 * Big allocations are often of the size 2^n bytes. To serve them a
 628 * little better, make blocks of size 2^n+pagesize for big n.
629 */
630
631#ifdef TWO_POT_OPTIMIZE
632
5f05dabc 633# ifndef PERL_PAGESIZE
634# define PERL_PAGESIZE 4096
635# endif
e8bc2b5c
GS
636# ifndef FIRST_BIG_POW2
637# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 638# endif
e8bc2b5c 639# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff 640/* If this value or more, check against bigger blocks. */
641# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
642/* If less than this value, goes into 2^n-overhead-block. */
643# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
644
e8bc2b5c
GS
645# define POW2_OPTIMIZE_ADJUST(nbytes) \
646 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
647# define POW2_OPTIMIZE_SURPLUS(bucket) \
648 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
649
650#else /* !TWO_POT_OPTIMIZE */
651# define POW2_OPTIMIZE_ADJUST(nbytes)
652# define POW2_OPTIMIZE_SURPLUS(bucket) 0
653#endif /* !TWO_POT_OPTIMIZE */
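/*
 * Worked example (defaults above, overhead much smaller than a page): a
 * request for 32768 bytes is >= FIRST_BIG_BOUND, so POW2_OPTIMIZE_ADJUST()
 * shaves PERL_PAGESIZE off it and the request is bucketed as roughly
 * 28K + overhead, i.e. it lands in the 32K bucket rather than the next
 * power-of-two one.  Since that bucket's arena carries a
 * POW2_OPTIMIZE_SURPLUS() of one extra page, the full 32768-byte payload
 * still fits.
 */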
654
655#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
656# define BARK_64K_LIMIT(what,nbytes,size) \
657 if (nbytes > 0xffff) { \
658 PerlIO_printf(PerlIO_stderr(), \
659 "%s too large: %lx\n", what, size); \
660 my_exit(1); \
661 }
662#else /* !HAS_64K_LIMIT || !PERL_CORE */
663# define BARK_64K_LIMIT(what,nbytes,size)
664#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 665
e8bc2b5c
GS
666#ifndef MIN_SBRK
667# define MIN_SBRK 2048
668#endif
669
670#ifndef FIRST_SBRK
d720c441 671# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
672#endif
673
674/* Minimal sbrk in percents of what is already alloced. */
675#ifndef MIN_SBRK_FRAC
676# define MIN_SBRK_FRAC 3
677#endif
678
679#ifndef SBRK_ALLOW_FAILURES
680# define SBRK_ALLOW_FAILURES 3
681#endif
55497cff 682
e8bc2b5c
GS
683#ifndef SBRK_FAILURE_PRICE
684# define SBRK_FAILURE_PRICE 50
55497cff 685#endif
686
e8bc2b5c
GS
687#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
688
689# ifndef BIG_SIZE
690# define BIG_SIZE (1<<16) /* 64K */
691# endif
692
3541dd58 693#ifdef I_MACH_CTHREADS
772fe5b3
HM
694# undef MUTEX_LOCK
695# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
696# undef MUTEX_UNLOCK
697# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58
HM
698#endif
699
55497cff 700static char *emergency_buffer;
701static MEM_SIZE emergency_buffer_size;
df0003d4 702static Malloc_t emergency_sbrk(MEM_SIZE size);
55497cff 703
52082926 704static Malloc_t
df0003d4 705emergency_sbrk(MEM_SIZE size)
55497cff 706{
28ac10b1
IZ
707 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
708
55497cff 709 if (size >= BIG_SIZE) {
710 /* Give the possibility to recover: */
741df71a 711 MALLOC_UNLOCK;
1b979e0a 712 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff 713 }
714
28ac10b1
IZ
715 if (emergency_buffer_size >= rsize) {
716 char *old = emergency_buffer;
717
718 emergency_buffer_size -= rsize;
719 emergency_buffer += rsize;
720 return old;
721 } else {
18f739ee 722 dTHR;
55497cff 723 /* First offense: give a possibility to recover by dying. */
724 /* No malloc involved here: */
4a33f861 725 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff 726 SV *sv;
727 char *pv;
28ac10b1 728 int have = 0;
55497cff 729
28ac10b1
IZ
730 if (emergency_buffer_size) {
731 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
732 emergency_buffer_size = 0;
733 emergency_buffer = Nullch;
734 have = 1;
735 }
4a33f861 736 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 737 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1
IZ
738 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
739 if (have)
740 goto do_croak;
55497cff 741 return (char *)-1; /* Now die die die... */
28ac10b1 742 }
55497cff 743 /* Got it, now detach SvPV: */
6b88bc9c 744 pv = SvPV(sv, PL_na);
55497cff 745 /* Check alignment: */
28ac10b1 746 if (((UV)(pv - sizeof(union overhead))) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
55497cff 747 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 748 return (char *)-1; /* die die die */
55497cff 749 }
750
28ac10b1
IZ
751 emergency_buffer = pv - sizeof(union overhead);
752 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 753 SvPOK_off(sv);
28ac10b1
IZ
754 SvPVX(sv) = Nullch;
755 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 756 }
28ac10b1 757 do_croak:
741df71a 758 MALLOC_UNLOCK;
28ac10b1 759 croak("Out of memory during request for %i bytes", size);
55497cff 760}
761
e8bc2b5c 762#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 763# define emergency_sbrk(size) -1
e8bc2b5c 764#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
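/*
 * Usage note (a sketch; see the perlvar.pod description of $^M referenced
 * above): the emergency pool exists only if the program reserved it in
 * advance, e.g. from Perl code
 *
 *	$^M = 'a' x (1 << 16);		# set aside a 64K reserve
 *
 * On the first failed sbrk(), emergency_sbrk() detaches that scalar's string
 * buffer for later use and croak()s -- a trappable die rather than an abort --
 * so the program has a chance to clean up; subsequent failures are then
 * served out of the detached reserve.
 */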
55497cff 765
766/*
e8bc2b5c 767 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
768 * smallest allocatable block is 8 bytes. The overhead information
769 * precedes the data area returned to the user.
770 */
e8bc2b5c 771#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 772static union overhead *nextf[NBUCKETS];
cf5c4ad8 773
774#ifdef USE_PERL_SBRK
775#define sbrk(a) Perl_sbrk(a)
52082926 776Malloc_t Perl_sbrk _((int size));
8ac85365
NIS
777#else
778#ifdef DONT_DECLARE_STD
779#ifdef I_UNISTD
780#include <unistd.h>
781#endif
cf5c4ad8 782#else
52082926 783extern Malloc_t sbrk(int);
8ac85365 784#endif
cf5c4ad8 785#endif
8d063cd8 786
c07a80fd 787#ifdef DEBUGGING_MSTATS
8d063cd8
LW
788/*
789 * nmalloc[i] is the difference between the number of mallocs and frees
790 * for a given block size.
791 */
792static u_int nmalloc[NBUCKETS];
5f05dabc 793static u_int sbrk_slack;
794static u_int start_slack;
8d063cd8
LW
795#endif
796
e8bc2b5c
GS
797static u_int goodsbrk;
798
760ac839 799#ifdef DEBUGGING
3541dd58
HM
800#undef ASSERT
801#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
ee0007ab 802static void
d720c441 803botch(char *diag, char *s)
8d063cd8 804{
d720c441 805 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 806 PerlProc_abort();
8d063cd8
LW
807}
808#else
3541dd58 809#define ASSERT(p, diag)
8d063cd8
LW
810#endif
811
2304df62 812Malloc_t
8ac85365 813malloc(register size_t nbytes)
8d063cd8
LW
814{
815 register union overhead *p;
e8bc2b5c 816 register int bucket;
ee0007ab 817 register MEM_SIZE shiftr;
8d063cd8 818
c2a5c2d2 819#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 820 MEM_SIZE size = nbytes;
45d8adaa
LW
821#endif
822
e8bc2b5c 823 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa
LW
824#ifdef DEBUGGING
825 if ((long)nbytes < 0)
d720c441 826 croak("%s", "panic: malloc");
45d8adaa 827#endif
45d8adaa 828
741df71a 829 MALLOC_LOCK;
8d063cd8
LW
830 /*
831 * Convert amount of memory requested into
832 * closest block size stored in hash buckets
833 * which satisfies request. Account for
834 * space used per block for accounting.
835 */
cf5c4ad8 836#ifdef PACK_MALLOC
e8bc2b5c
GS
837# ifdef SMALL_BUCKET_VIA_TABLE
838 if (nbytes == 0)
839 bucket = MIN_BUCKET;
840 else if (nbytes <= SIZE_TABLE_MAX) {
841 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
842 } else
843# else
043bf814
RB
844 if (nbytes == 0)
845 nbytes = 1;
e8bc2b5c
GS
846 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
847 else
848# endif
55497cff 849#endif
e8bc2b5c
GS
850 {
851 POW2_OPTIMIZE_ADJUST(nbytes);
852 nbytes += M_OVERHEAD;
853 nbytes = (nbytes + 3) &~ 3;
854 do_shifts:
855 shiftr = (nbytes - 1) >> START_SHIFT;
856 bucket = START_SHIFTS_BUCKET;
857 /* apart from this loop, this is O(1) */
858 while (shiftr >>= 1)
859 bucket += BUCKETS_PER_POW2;
cf5c4ad8 860 }
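	/* Worked example of the shift loop above (a sketch assuming the
	 * default PACK_MALLOC + SMALL_BUCKET_VIA_TABLE setup, an 8-byte
	 * union overhead and no RCHECK): a request for 100 bytes is too big
	 * for the lookup table, becomes 100 + M_OVERHEAD = 108 after the
	 * adjustment, so shiftr = 107 >> START_SHIFT = 1 and the loop leaves
	 * bucket at START_SHIFTS_BUCKET -- the 128-byte bucket. */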
8d063cd8
LW
861 /*
862 * If nothing in hash bucket right now,
863 * request more memory from the system.
864 */
865 if (nextf[bucket] == NULL)
866 morecore(bucket);
e8bc2b5c 867 if ((p = nextf[bucket]) == NULL) {
741df71a 868 MALLOC_UNLOCK;
55497cff 869#ifdef PERL_CORE
4a33f861 870 if (!PL_nomemok) {
760ac839 871 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
79072805 872 my_exit(1);
ee0007ab 873 }
45d8adaa 874#else
8d063cd8 875 return (NULL);
45d8adaa
LW
876#endif
877 }
878
e8bc2b5c
GS
879 DEBUG_m(PerlIO_printf(Perl_debug_log,
880 "0x%lx: (%05lu) malloc %ld bytes\n",
4a33f861 881 (unsigned long)(p+1), (unsigned long)(PL_an++),
e8bc2b5c 882 (long)size));
45d8adaa 883
8d063cd8 884 /* remove from linked list */
802004fa
DD
885#if defined(RCHECK)
886 if (((UV)p) & (MEM_ALIGNBYTES - 1))
760ac839 887 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
a0d0e21e 888 (unsigned long)*((int*)p),(unsigned long)p);
bf38876a
LW
889#endif
890 nextf[bucket] = p->ov_next;
e8bc2b5c
GS
891#ifdef IGNORE_SMALL_BAD_FREE
892 if (bucket >= FIRST_BUCKET_WITH_CHECK)
893#endif
894 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8 895#ifndef PACK_MALLOC
896 OV_INDEX(p) = bucket;
897#endif
8d063cd8
LW
898#ifdef RCHECK
899 /*
900 * Record allocated size of block and
901 * bound space with magic numbers.
902 */
8d063cd8 903 p->ov_rmagic = RMAGIC;
e8bc2b5c
GS
904 if (bucket <= MAX_SHORT_BUCKET) {
905 int i;
906
907 nbytes = size + M_OVERHEAD;
908 p->ov_size = nbytes - 1;
909 if ((i = nbytes & 3)) {
910 i = 4 - i;
911 while (i--)
912 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
913 }
914 nbytes = (nbytes + 3) &~ 3;
915 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
916 }
8d063cd8 917#endif
741df71a 918 MALLOC_UNLOCK;
cf5c4ad8 919 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8
LW
920}
921
e8bc2b5c
GS
922static char *last_sbrk_top;
923static char *last_op; /* This arena can be easily extended. */
924static int sbrked_remains;
925static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
926
927#ifdef DEBUGGING_MSTATS
928static int sbrks;
929#endif
930
931struct chunk_chain_s {
932 struct chunk_chain_s *next;
933 MEM_SIZE size;
934};
935static struct chunk_chain_s *chunk_chain;
936static int n_chunks;
937static char max_bucket;
938
 939/* Cut off a piece of one of the chunks in the chain. Prefer the smallest chunk that fits. */
940static void *
941get_from_chain(MEM_SIZE size)
942{
943 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
944 struct chunk_chain_s **oldgoodp = NULL;
945 long min_remain = LONG_MAX;
946
947 while (elt) {
948 if (elt->size >= size) {
949 long remains = elt->size - size;
950 if (remains >= 0 && remains < min_remain) {
951 oldgoodp = oldp;
952 min_remain = remains;
953 }
954 if (remains == 0) {
955 break;
956 }
957 }
958 oldp = &( elt->next );
959 elt = elt->next;
960 }
961 if (!oldgoodp) return NULL;
962 if (min_remain) {
963 void *ret = *oldgoodp;
964 struct chunk_chain_s *next = (*oldgoodp)->next;
965
966 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
967 (*oldgoodp)->size = min_remain;
968 (*oldgoodp)->next = next;
969 return ret;
970 } else {
971 void *ret = *oldgoodp;
972 *oldgoodp = (*oldgoodp)->next;
973 n_chunks--;
974 return ret;
975 }
976}
977
978static void
979add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
980{
981 struct chunk_chain_s *next = chunk_chain;
982 char *cp = (char*)p;
983
984 cp += chip;
985 chunk_chain = (struct chunk_chain_s *)cp;
986 chunk_chain->size = size - chip;
987 chunk_chain->next = next;
988 n_chunks++;
989}
990
991static void *
992get_from_bigger_buckets(int bucket, MEM_SIZE size)
993{
994 int price = 1;
995 static int bucketprice[NBUCKETS];
996 while (bucket <= max_bucket) {
997 /* We postpone stealing from bigger buckets until we want it
998 often enough. */
999 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1000 /* Steal it! */
1001 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1002 bucketprice[bucket] = 0;
1003 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1004 last_op = NULL; /* Disable optimization */
1005 }
1006 nextf[bucket] = nextf[bucket]->ov_next;
1007#ifdef DEBUGGING_MSTATS
1008 nmalloc[bucket]--;
1009 start_slack -= M_OVERHEAD;
1010#endif
1011 add_to_chain(ret, (BUCKET_SIZE(bucket) +
1012 POW2_OPTIMIZE_SURPLUS(bucket)),
1013 size);
1014 return ret;
1015 }
1016 bucket++;
1017 }
1018 return NULL;
1019}
1020
fa423c5b
IZ
1021static union overhead *
1022getpages(int needed, int *nblksp, int bucket)
1023{
 1024 /* Need to do a (possibly expensive) system call. Try to
 1025 optimize for calling it rarely. */
1026 MEM_SIZE require = needed - sbrked_remains;
1027 char *cp;
1028 union overhead *ovp;
1029 int slack = 0;
1030
1031 if (sbrk_good > 0) {
1032 if (!last_sbrk_top && require < FIRST_SBRK)
1033 require = FIRST_SBRK;
1034 else if (require < MIN_SBRK) require = MIN_SBRK;
1035
1036 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
1037 require = goodsbrk * MIN_SBRK_FRAC / 100;
1038 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1039 } else {
1040 require = needed;
1041 last_sbrk_top = 0;
1042 sbrked_remains = 0;
1043 }
1044
1045 DEBUG_m(PerlIO_printf(Perl_debug_log,
1046 "sbrk(%ld) for %ld-byte-long arena\n",
1047 (long)require, (long) needed));
1048 cp = (char *)sbrk(require);
1049#ifdef DEBUGGING_MSTATS
1050 sbrks++;
1051#endif
1052 if (cp == last_sbrk_top) {
1053 /* Common case, anything is fine. */
1054 sbrk_good++;
1055 ovp = (union overhead *) (cp - sbrked_remains);
e9397286 1056 last_op = cp - sbrked_remains;
fa423c5b
IZ
1057 sbrked_remains = require - (needed - sbrked_remains);
1058 } else if (cp == (char *)-1) { /* no more room! */
1059 ovp = (union overhead *)emergency_sbrk(needed);
1060 if (ovp == (union overhead *)-1)
1061 return 0;
e9397286
GS
1062 if (((char*)ovp) > last_op) { /* Cannot happen with current emergency_sbrk() */
1063 last_op = 0;
1064 }
fa423c5b
IZ
1065 return ovp;
1066 } else { /* Non-continuous or first sbrk(). */
1067 long add = sbrked_remains;
1068 char *newcp;
1069
1070 if (sbrked_remains) { /* Put rest into chain, we
1071 cannot use it right now. */
1072 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1073 sbrked_remains, 0);
1074 }
1075
1076 /* Second, check alignment. */
1077 slack = 0;
1078
 1079#ifndef atarist /* on the atari we don't have to worry about this */
1080# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
1081
1082 /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
1083 if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
1084 slack = (0x800 >> CHUNK_SHIFT)
1085 - ((UV)cp & (0x7FF >> CHUNK_SHIFT));
1086 add += slack;
1087 }
1088# endif
1089#endif /* atarist */
1090
1091 if (add) {
1092 DEBUG_m(PerlIO_printf(Perl_debug_log,
1093 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
1094 (long)add, (long) slack,
1095 (long) sbrked_remains));
1096 newcp = (char *)sbrk(add);
1097#if defined(DEBUGGING_MSTATS)
1098 sbrks++;
1099 sbrk_slack += add;
1100#endif
1101 if (newcp != cp + require) {
1102 /* Too bad: even rounding sbrk() is not continuous.*/
1103 DEBUG_m(PerlIO_printf(Perl_debug_log,
1104 "failed to fix bad sbrk()\n"));
1105#ifdef PACK_MALLOC
1106 if (slack) {
741df71a 1107 MALLOC_UNLOCK;
fa423c5b
IZ
1108 croak("%s", "panic: Off-page sbrk");
1109 }
1110#endif
1111 if (sbrked_remains) {
1112 /* Try again. */
1113#if defined(DEBUGGING_MSTATS)
1114 sbrk_slack += require;
1115#endif
1116 require = needed;
1117 DEBUG_m(PerlIO_printf(Perl_debug_log,
1118 "straight sbrk(%ld)\n",
1119 (long)require));
1120 cp = (char *)sbrk(require);
1121#ifdef DEBUGGING_MSTATS
1122 sbrks++;
1123#endif
1124 if (cp == (char *)-1)
1125 return 0;
1126 }
1127 sbrk_good = -1; /* Disable optimization!
1128 Continue with not-aligned... */
1129 } else {
1130 cp += slack;
1131 require += sbrked_remains;
1132 }
1133 }
1134
1135 if (last_sbrk_top) {
1136 sbrk_good -= SBRK_FAILURE_PRICE;
1137 }
1138
1139 ovp = (union overhead *) cp;
1140 /*
1141 * Round up to minimum allocation size boundary
1142 * and deduct from block count to reflect.
1143 */
1144
1145#ifndef I286 /* Again, this should always be ok on an 80286 */
1146 if ((UV)ovp & 7) {
1147 ovp = (union overhead *)(((UV)ovp + 8) & ~7);
1148 DEBUG_m(PerlIO_printf(Perl_debug_log,
1149 "fixing sbrk(): %d bytes off machine alignement\n",
1150 (int)((UV)ovp & 7)));
1151 (*nblksp)--;
1152# if defined(DEBUGGING_MSTATS)
1153 /* This is only approx. if TWO_POT_OPTIMIZE: */
1154 sbrk_slack += (1 << bucket);
1155# endif
1156 }
1157#endif
1158 sbrked_remains = require - needed;
e9397286 1159 last_op = cp;
fa423c5b
IZ
1160 }
1161 last_sbrk_top = cp + require;
fa423c5b
IZ
1162#ifdef DEBUGGING_MSTATS
1163 goodsbrk += require;
1164#endif
1165 return ovp;
1166}
1167
1168static int
1169getpages_adjacent(int require)
1170{
1171 if (require <= sbrked_remains) {
1172 sbrked_remains -= require;
1173 } else {
1174 char *cp;
1175
1176 require -= sbrked_remains;
 1177 /* We do not try to optimize sbrks here; we just go for the space. */
1178 cp = (char*) sbrk(require);
1179#ifdef DEBUGGING_MSTATS
1180 sbrks++;
1181 goodsbrk += require;
1182#endif
1183 if (cp == last_sbrk_top) {
1184 sbrked_remains = 0;
1185 last_sbrk_top = cp + require;
1186 } else {
28ac10b1
IZ
1187 if (cp == (char*)-1) { /* Out of memory */
1188#ifdef DEBUGGING_MSTATS
1189 goodsbrk -= require;
1190#endif
1191 return 0;
1192 }
fa423c5b
IZ
1193 /* Report the failure: */
1194 if (sbrked_remains)
1195 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1196 sbrked_remains, 0);
1197 add_to_chain((void*)cp, require, 0);
1198 sbrk_good -= SBRK_FAILURE_PRICE;
1199 sbrked_remains = 0;
1200 last_sbrk_top = 0;
1201 last_op = 0;
1202 return 0;
1203 }
1204 }
1205
1206 return 1;
1207}
1208
8d063cd8
LW
1209/*
1210 * Allocate more memory to the indicated bucket.
1211 */
a0d0e21e 1212static void
8ac85365 1213morecore(register int bucket)
8d063cd8 1214{
72aaf631 1215 register union overhead *ovp;
8d063cd8 1216 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1217 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1218 register MEM_SIZE siz, needed;
8d063cd8
LW
1219
1220 if (nextf[bucket])
1221 return;
e8bc2b5c 1222 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
741df71a 1223 MALLOC_UNLOCK;
d720c441 1224 croak("%s", "Out of memory during ridiculously large request");
55497cff 1225 }
d720c441 1226 if (bucket > max_bucket)
e8bc2b5c 1227 max_bucket = bucket;
d720c441 1228
e8bc2b5c
GS
1229 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1230 ? LOG_OF_MIN_ARENA
1231 : (bucket >> BUCKET_POW2_SHIFT) );
1232 /* This may be overwritten later: */
1233 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1234 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1235 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1236 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1237 nextf[rnu << BUCKET_POW2_SHIFT]
1238 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1239#ifdef DEBUGGING_MSTATS
1240 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1241 start_slack -= M_OVERHEAD;
1242#endif
1243 DEBUG_m(PerlIO_printf(Perl_debug_log,
1244 "stealing %ld bytes from %ld arena\n",
1245 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1246 } else if (chunk_chain
1247 && (ovp = (union overhead*) get_from_chain(needed))) {
1248 DEBUG_m(PerlIO_printf(Perl_debug_log,
1249 "stealing %ld bytes from chain\n",
1250 (long) needed));
d720c441
IZ
1251 } else if ( (ovp = (union overhead*)
1252 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1253 needed)) ) {
e8bc2b5c
GS
1254 DEBUG_m(PerlIO_printf(Perl_debug_log,
1255 "stealing %ld bytes from bigger buckets\n",
1256 (long) needed));
1257 } else if (needed <= sbrked_remains) {
1258 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1259 sbrked_remains -= needed;
1260 last_op = (char*)ovp;
fa423c5b
IZ
1261 } else
1262 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1263
fa423c5b
IZ
1264 if (!ovp)
1265 return;
e8bc2b5c 1266
8d063cd8
LW
1267 /*
1268 * Add new memory allocated to that on
1269 * free list for this hash bucket.
1270 */
e8bc2b5c 1271 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1272#ifdef PACK_MALLOC
72aaf631 1273 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c
GS
1274 if (bucket <= MAX_PACKED) {
1275 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1276 nblks = N_BLKS(bucket);
cf5c4ad8 1277# ifdef DEBUGGING_MSTATS
e8bc2b5c 1278 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1279# endif
e8bc2b5c
GS
1280 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1281 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1282 siz -= sizeof(union overhead);
72aaf631 1283 } else ovp++; /* One chunk per block. */
e8bc2b5c 1284#endif /* PACK_MALLOC */
72aaf631 1285 nextf[bucket] = ovp;
5f05dabc 1286#ifdef DEBUGGING_MSTATS
1287 nmalloc[bucket] += nblks;
e8bc2b5c
GS
1288 if (bucket > MAX_PACKED) {
1289 start_slack += M_OVERHEAD * nblks;
1290 }
5f05dabc 1291#endif
8d063cd8 1292 while (--nblks > 0) {
72aaf631
MB
1293 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1294 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1295 }
8595d6f1 1296 /* Not all sbrks return zeroed memory.*/
72aaf631 1297 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1298#ifdef PACK_MALLOC
e8bc2b5c
GS
1299 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1300 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1301 nextf[7*BUCKETS_PER_POW2] =
1302 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1303 - sizeof(union overhead));
1304 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8 1305 }
1306#endif /* !PACK_MALLOC */
8d063cd8
LW
1307}
1308
94b6baf5 1309Free_t
8ac85365 1310free(void *mp)
8d063cd8 1311{
ee0007ab 1312 register MEM_SIZE size;
72aaf631 1313 register union overhead *ovp;
352d5a3a 1314 char *cp = (char*)mp;
cf5c4ad8 1315#ifdef PACK_MALLOC
1316 u_char bucket;
1317#endif
8d063cd8 1318
e8bc2b5c
GS
1319 DEBUG_m(PerlIO_printf(Perl_debug_log,
1320 "0x%lx: (%05lu) free\n",
4a33f861 1321 (unsigned long)cp, (unsigned long)(PL_an++)));
45d8adaa 1322
cf5c4ad8 1323 if (cp == NULL)
1324 return;
72aaf631 1325 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1326 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1327#ifdef PACK_MALLOC
72aaf631 1328 bucket = OV_INDEX(ovp);
cf5c4ad8 1329#endif
e8bc2b5c
GS
1330#ifdef IGNORE_SMALL_BAD_FREE
1331 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1332 && (OV_MAGIC(ovp, bucket) != MAGIC))
1333#else
1334 if (OV_MAGIC(ovp, bucket) != MAGIC)
1335#endif
1336 {
68dc0745 1337 static int bad_free_warn = -1;
cf5c4ad8 1338 if (bad_free_warn == -1) {
5fd9e9a4 1339 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8 1340 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1341 }
1342 if (!bad_free_warn)
1343 return;
8990e307 1344#ifdef RCHECK
a687059c 1345 warn("%s free() ignored",
72aaf631 1346 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
8990e307 1347#else
d720c441 1348 warn("%s", "Bad free() ignored");
8990e307 1349#endif
8d063cd8 1350 return; /* sanity */
e8bc2b5c 1351 }
741df71a 1352 MALLOC_LOCK;
8d063cd8 1353#ifdef RCHECK
3541dd58 1354 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c
GS
1355 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1356 int i;
1357 MEM_SIZE nbytes = ovp->ov_size + 1;
1358
1359 if ((i = nbytes & 3)) {
1360 i = 4 - i;
1361 while (i--) {
3541dd58 1362 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1363 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1364 }
1365 }
1366 nbytes = (nbytes + 3) &~ 3;
3541dd58 1367 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1368 }
72aaf631 1369 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1370#endif
3541dd58 1371 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631
MB
1372 size = OV_INDEX(ovp);
1373 ovp->ov_next = nextf[size];
1374 nextf[size] = ovp;
741df71a 1375 MALLOC_UNLOCK;
8d063cd8
LW
1376}
1377
1378/*
1379 * When a program attempts "storage compaction" as mentioned in the
1380 * old malloc man page, it realloc's an already freed block. Usually
1381 * this is the last block it freed; occasionally it might be farther
1382 * back. We have to search all the free lists for the block in order
1383 * to determine its bucket: 1st we make one pass thru the lists
1384 * checking only the first block in each; if that fails we search
378cc40b 1385 * ``reall_srchlen'' blocks in each list for a match (the variable
8d063cd8
LW
1386 * is extern so the caller can modify it). If that fails we just copy
1387 * however many bytes was given to realloc() and hope it's not huge.
1388 */
22c35a8c 1389#define reall_srchlen 4 /* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1390
2304df62 1391Malloc_t
8ac85365 1392realloc(void *mp, size_t nbytes)
8d063cd8 1393{
ee0007ab 1394 register MEM_SIZE onb;
72aaf631 1395 union overhead *ovp;
d720c441
IZ
1396 char *res;
1397 int prev_bucket;
e8bc2b5c
GS
1398 register int bucket;
1399 int was_alloced = 0, incr;
352d5a3a 1400 char *cp = (char*)mp;
8d063cd8 1401
e8bc2b5c 1402#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1403 MEM_SIZE size = nbytes;
45d8adaa 1404
45d8adaa 1405 if ((long)nbytes < 0)
d720c441 1406 croak("%s", "panic: realloc");
45d8adaa 1407#endif
e8bc2b5c
GS
1408
1409 BARK_64K_LIMIT("Reallocation",nbytes,size);
1410 if (!cp)
1411 return malloc(nbytes);
45d8adaa 1412
741df71a 1413 MALLOC_LOCK;
72aaf631 1414 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c
GS
1415 - sizeof (union overhead) * CHUNK_SHIFT);
1416 bucket = OV_INDEX(ovp);
1417#ifdef IGNORE_SMALL_BAD_FREE
1418 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1419 || (OV_MAGIC(ovp, bucket) == MAGIC))
1420#else
1421 if (OV_MAGIC(ovp, bucket) == MAGIC)
1422#endif
1423 {
55497cff 1424 was_alloced = 1;
8d063cd8
LW
1425 } else {
1426 /*
1427 * Already free, doing "compaction".
1428 *
1429 * Search for the old block of memory on the
1430 * free list. First, check the most common
1431 * case (last element free'd), then (this failing)
378cc40b 1432 * the last ``reall_srchlen'' items free'd.
8d063cd8
LW
1433 * If all lookups fail, then assume the size of
1434 * the memory block being realloc'd is the
1435 * smallest possible.
1436 */
e8bc2b5c
GS
1437 if ((bucket = findbucket(ovp, 1)) < 0 &&
1438 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1439 bucket = 0;
8d063cd8 1440 }
e8bc2b5c 1441 onb = BUCKET_SIZE_REAL(bucket);
55497cff 1442 /*
1443 * avoid the copy if same size block.
e8bc2b5c
GS
 1444 * We are not aggressive with boundary cases. Note that it might
 1445 * (for a small number of cases) give a false negative if
55497cff 1446 * both new size and old one are in the bucket for
e8bc2b5c
GS
1447 * FIRST_BIG_POW2, but the new one is near the lower end.
1448 *
1449 * We do not try to go to 1.5 times smaller bucket so far.
55497cff 1450 */
e8bc2b5c
GS
1451 if (nbytes > onb) incr = 1;
1452 else {
1453#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1454 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1455 nbytes > ( (onb >> 1) - M_OVERHEAD )
1456# ifdef TWO_POT_OPTIMIZE
1457 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1458# endif
1459 )
1460#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1461 prev_bucket = ( (bucket > MAX_PACKED + 1)
1462 ? bucket - BUCKETS_PER_POW2
1463 : bucket - 1);
1464 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1465#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1466 incr = 0;
1467 else incr = -1;
1468 }
1469 if (!was_alloced
2ce36478 1470#ifdef STRESS_REALLOC
e8bc2b5c 1471 || 1 /* always do it the hard way */
2ce36478 1472#endif
e8bc2b5c
GS
1473 ) goto hard_way;
1474 else if (incr == 0) {
852c2e52 1475 inplace_label:
a687059c
LW
1476#ifdef RCHECK
1477 /*
1478 * Record new allocated size of block and
1479 * bound space with magic numbers.
1480 */
72aaf631 1481 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c
GS
1482 int i, nb = ovp->ov_size + 1;
1483
1484 if ((i = nb & 3)) {
1485 i = 4 - i;
1486 while (i--) {
3541dd58 1487 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1488 }
1489 }
1490 nb = (nb + 3) &~ 3;
3541dd58 1491 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c
LW
1492 /*
1493 * Convert amount of memory requested into
1494 * closest block size stored in hash buckets
1495 * which satisfies request. Account for
1496 * space used per block for accounting.
1497 */
cf5c4ad8 1498 nbytes += M_OVERHEAD;
72aaf631 1499 ovp->ov_size = nbytes - 1;
e8bc2b5c
GS
1500 if ((i = nbytes & 3)) {
1501 i = 4 - i;
1502 while (i--)
1503 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1504 = RMAGIC_C;
1505 }
1506 nbytes = (nbytes + 3) &~ 3;
72aaf631 1507 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c
LW
1508 }
1509#endif
45d8adaa 1510 res = cp;
741df71a 1511 MALLOC_UNLOCK;
42ac124e
IZ
1512 DEBUG_m(PerlIO_printf(Perl_debug_log,
1513 "0x%lx: (%05lu) realloc %ld bytes inplace\n",
1514 (unsigned long)res,(unsigned long)(PL_an++),
1515 (long)size));
e8bc2b5c
GS
1516 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1517 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1518 MEM_SIZE require, newarena = nbytes, pow;
1519 int shiftr;
1520
1521 POW2_OPTIMIZE_ADJUST(newarena);
1522 newarena = newarena + M_OVERHEAD;
1523 /* newarena = (newarena + 3) &~ 3; */
1524 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1525 pow = LOG_OF_MIN_ARENA + 1;
1526 /* apart from this loop, this is O(1) */
1527 while (shiftr >>= 1)
1528 pow++;
1529 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1530 require = newarena - onb - M_OVERHEAD;
1531
fa423c5b 1532 if (getpages_adjacent(require)) {
e8bc2b5c 1533#ifdef DEBUGGING_MSTATS
fa423c5b
IZ
1534 nmalloc[bucket]--;
1535 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1536#endif
fa423c5b
IZ
1537 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
1538 goto inplace_label;
1539 } else
1540 goto hard_way;
e8bc2b5c
GS
1541 } else {
1542 hard_way:
741df71a 1543 MALLOC_UNLOCK;
42ac124e
IZ
1544 DEBUG_m(PerlIO_printf(Perl_debug_log,
1545 "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
1546 (unsigned long)cp,(unsigned long)(PL_an++),
1547 (long)size));
e8bc2b5c
GS
1548 if ((res = (char*)malloc(nbytes)) == NULL)
1549 return (NULL);
1550 if (cp != res) /* common optimization */
1551 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1552 if (was_alloced)
1553 free(cp);
45d8adaa 1554 }
2304df62 1555 return ((Malloc_t)res);
8d063cd8
LW
1556}
1557
1558/*
1559 * Search ``srchlen'' elements of each free list for a block whose
1560 * header starts at ``freep''. If srchlen is -1 search the whole list.
1561 * Return bucket number, or -1 if not found.
1562 */
ee0007ab 1563static int
8ac85365 1564findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1565{
1566 register union overhead *p;
1567 register int i, j;
1568
1569 for (i = 0; i < NBUCKETS; i++) {
1570 j = 0;
1571 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1572 if (p == freep)
1573 return (i);
1574 j++;
1575 }
1576 }
1577 return (-1);
1578}
1579
cf5c4ad8 1580Malloc_t
8ac85365 1581calloc(register size_t elements, register size_t size)
cf5c4ad8 1582{
1583 long sz = elements * size;
1584 Malloc_t p = malloc(sz);
1585
1586 if (p) {
1587 memset((void*)p, 0, sz);
1588 }
1589 return p;
1590}
1591
e8bc2b5c
GS
1592MEM_SIZE
1593malloced_size(void *p)
1594{
8d6dde3e
IZ
1595 union overhead *ovp = (union overhead *)
1596 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1597 int bucket = OV_INDEX(ovp);
1598#ifdef RCHECK
1599 /* The caller wants to have a complete control over the chunk,
1600 disable the memory checking inside the chunk. */
1601 if (bucket <= MAX_SHORT_BUCKET) {
1602 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1603 ovp->ov_size = size + M_OVERHEAD - 1;
1604 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1605 }
1606#endif
e8bc2b5c
GS
1607 return BUCKET_SIZE_REAL(bucket);
1608}
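/*
 * Usage sketch: the value reported is the usable size of the whole bucket,
 * which may exceed what was originally asked for -- e.g. a chunk obtained
 * via malloc(100) comes from the 128-byte bucket (see malloc() above), so
 * malloced_size() reports 128 minus the per-chunk overhead.
 */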
1609
c07a80fd 1610#ifdef DEBUGGING_MSTATS
e8bc2b5c
GS
1611
1612# ifdef BUCKETS_ROOT2
1613# define MIN_EVEN_REPORT 6
1614# else
1615# define MIN_EVEN_REPORT MIN_BUCKET
1616# endif
8d063cd8
LW
1617/*
1618 * mstats - print out statistics about malloc
1619 *
1620 * Prints two lines of numbers, one showing the length of the free list
1621 * for each size category, the second showing the number of mallocs -
1622 * frees for each size category.
1623 */
ee0007ab 1624void
8ac85365 1625dump_mstats(char *s)
8d063cd8
LW
1626{
1627 register int i, j;
1628 register union overhead *p;
e8bc2b5c 1629 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
c07a80fd 1630 u_int nfree[NBUCKETS];
e8bc2b5c
GS
1631 int total_chain = 0;
1632 struct chunk_chain_s* nextchain = chunk_chain;
8d063cd8 1633
e8bc2b5c 1634 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8
LW
1635 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1636 ;
c07a80fd 1637 nfree[i] = j;
e8bc2b5c
GS
1638 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1639 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1640 if (nmalloc[i]) {
1641 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1642 topbucket = i;
1643 }
c07a80fd 1644 }
1645 if (s)
e8bc2b5c 1646 PerlIO_printf(PerlIO_stderr(),
d720c441 1647 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
e8bc2b5c 1648 s,
d720c441
IZ
1649 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1650 (long)BUCKET_SIZE(MIN_BUCKET),
1651 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
5f05dabc 1652 PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
e8bc2b5c
GS
1653 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1654 PerlIO_printf(PerlIO_stderr(),
1655 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1656 ? " %5d"
1657 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1658 nfree[i]);
1659 }
1660#ifdef BUCKETS_ROOT2
1661 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1662 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1663 PerlIO_printf(PerlIO_stderr(),
1664 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1665 ? " %5d"
1666 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1667 nfree[i]);
8d063cd8 1668 }
e8bc2b5c 1669#endif
5f05dabc 1670 PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
e8bc2b5c
GS
1671 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1672 PerlIO_printf(PerlIO_stderr(),
1673 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1674 ? " %5d"
1675 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1676 nmalloc[i] - nfree[i]);
c07a80fd 1677 }
e8bc2b5c
GS
1678#ifdef BUCKETS_ROOT2
1679 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1680 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1681 PerlIO_printf(PerlIO_stderr(),
1682 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1683 ? " %5d"
1684 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1685 nmalloc[i] - nfree[i]);
1686 }
1687#endif
1688 while (nextchain) {
1689 total_chain += nextchain->size;
1690 nextchain = nextchain->next;
1691 }
1692 PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1693 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1694 start_slack, total_chain, sbrked_remains);
c07a80fd 1695}
1696#else
1697void
8ac85365 1698dump_mstats(char *s)
c07a80fd 1699{
8d063cd8
LW
1700}
1701#endif
a687059c 1702#endif /* lint */
cf5c4ad8 1703
1704
1705#ifdef USE_PERL_SBRK
1706
2c92fcc0 1707# if defined(__MACHTEN_PPC__) || defined(__NeXT__)
38ac2dc8
DD
1708# define PERL_SBRK_VIA_MALLOC
1709/*
1710 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1711 * While this is adequate, it may slow down access to longer data
1712 * types by forcing multiple memory accesses. It also causes
1713 * complaints when RCHECK is in force. So we allocate six bytes
1714 * more than we need to, and return an address rounded up to an
1715 * eight-byte boundary.
1716 *
1717 * 980701 Dominic Dunlop <domo@computer.org>
1718 */
1719# define SYSTEM_ALLOC(a) ((void *)(((unsigned)malloc((a)+6)+6)&~7))
1720# endif
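/*
 * Worked example of the rounding above: should malloc() return an address
 * ending in ...2 (only two-byte aligned), adding 6 gives ...8 and the & ~7
 * keeps it there, so the caller gets an eight-byte-aligned pointer at most
 * six bytes past the true start of the buffer -- hence the six extra bytes
 * requested.
 */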
1721
760ac839 1722# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1723# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
38ac2dc8
DD
1724# undef malloc /* Expose names that */
1725# undef calloc /* HIDEMYMALLOC hides */
1726# undef realloc
1727# undef free
760ac839 1728# else
72e5b9db 1729# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1730# endif
cf5c4ad8 1731
 1732/* It may seem schizophrenic to use perl's malloc and let it call system */
 1733/* malloc; the reason is that only the 3.2 version of the OS had */
 1734/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
 1735/* end to the cores. */
1736
38ac2dc8
DD
1737# ifndef SYSTEM_ALLOC
1738# define SYSTEM_ALLOC(a) malloc(a)
1739# endif
cf5c4ad8 1740
760ac839 1741# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8 1742
1743static IV Perl_sbrk_oldchunk;
1744static long Perl_sbrk_oldsize;
1745
760ac839
LW
1746# define PERLSBRK_32_K (1<<15)
1747# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1748
b63effbb 1749Malloc_t
df0003d4 1750Perl_sbrk(int size)
cf5c4ad8 1751{
1752 IV got;
1753 int small, reqsize;
1754
1755 if (!size) return 0;
55497cff 1756#ifdef PERL_CORE
cf5c4ad8 1757 reqsize = size; /* just for the DEBUG_m statement */
1758#endif
57569e04
HM
1759#ifdef PACK_MALLOC
1760 size = (size + 0x7ff) & ~0x7ff;
1761#endif
cf5c4ad8 1762 if (size <= Perl_sbrk_oldsize) {
1763 got = Perl_sbrk_oldchunk;
1764 Perl_sbrk_oldchunk += size;
1765 Perl_sbrk_oldsize -= size;
1766 } else {
1767 if (size >= PERLSBRK_32_K) {
1768 small = 0;
1769 } else {
cf5c4ad8 1770 size = PERLSBRK_64_K;
1771 small = 1;
1772 }
1773 got = (IV)SYSTEM_ALLOC(size);
57569e04
HM
1774#ifdef PACK_MALLOC
1775 got = (got + 0x7ff) & ~0x7ff;
1776#endif
cf5c4ad8 1777 if (small) {
1778 /* Chunk is small, register the rest for future allocs. */
1779 Perl_sbrk_oldchunk = got + reqsize;
1780 Perl_sbrk_oldsize = size - reqsize;
1781 }
1782 }
1783
fb73857a 1784 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1785 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8 1786
1787 return (void *)got;
1788}
1789
1790#endif /* ! defined USE_PERL_SBRK */