This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
Renamed malloc.c ASSERT() macro to P_ASSERT() to avoid a name clash with an ASSERT() macro defined elsewhere in the tree.
[perl5.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
87c6202a
IZ
5/*
6 Here are some notes on configuring Perl's malloc.
7
8 There are two macros which serve as bulk disablers of advanced
9 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
10 default). Look in the list of default values below to understand
11 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
12 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
13 returns it to the state as of Perl 5.000.
14
15 Note that some of the settings below may be ignored in the code based
16 on values of other macros. The PERL_CORE symbol is only defined when
17 perl itself is being compiled (so malloc can make some assumptions
18 about perl's facilities being available to it).
19
20 Each config option has a short description, followed by its name,
21 default value, and a comment about the default (if applicable). Some
22 options take a precise value, while the others are just boolean.
23 The boolean ones are listed first.
24
25 # Enable code for an emergency memory pool in $^M. See perlvar.pod
26 # for a description of $^M.
27 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
28
29 # Enable code for printing memory statistics.
30 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
31
32 # Move allocation info for small buckets into separate areas.
33 # Memory optimization (especially for small allocations, of the
34 # less than 64 bytes). Since perl usually makes a large number
35 # of small allocations, this is usually a win.
36 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
37
38 # Add one page to big powers of two when calculating bucket size.
39 # This is targeted at big allocations, as are common in image
40 # processing.
41 TWO_POT_OPTIMIZE !PLAIN_MALLOC
42
43 # Use intermediate bucket sizes between powers-of-two. This is
44 # generally a memory optimization, and a (small) speed pessimization.
45 BUCKETS_ROOT2 !NO_FANCY_MALLOC
46
47 # Do not check small deallocations for bad free(). Memory
48 # and speed optimization, error reporting pessimization.
49 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
50
51 # Use table lookup to decide in which bucket a given allocation will go.
52 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
53
38ac2dc8
DD
54 # Use a perl-defined sbrk() instead of the (presumably broken or
55 # missing) system-supplied sbrk().
56 USE_PERL_SBRK undef
57
58 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
59 # only used with broken sbrk()s.
87c6202a
IZ
60 PERL_SBRK_VIA_MALLOC undef
61
38ac2dc8
DD
62 # Which allocator to use if PERL_SBRK_VIA_MALLOC
63 SYSTEM_ALLOC(a) malloc(a)
64
87c6202a
IZ
65 # Disable memory overwrite checking with DEBUGGING. Memory and speed
66 # optimization, error reporting pessimization.
67 NO_RCHECK undef
68
69 # Enable memory overwrite checking with DEBUGGING. Memory and speed
70 # pessimization, error reporting optimization
71 RCHECK (DEBUGGING && !NO_RCHECK)
72
73 # Failed allocations bigger than this size croak (if
74 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
75 # perlvar.pod for a description of $^M.
76 BIG_SIZE (1<<16) # 64K
77
78 # Starting from this power of two, add an extra page to the
79 # size of the bucket. This enables optimized allocations of sizes
80 # close to powers of 2. Note that the value is indexed at 0.
81 FIRST_BIG_POW2 15 # 32K, 16K is used too often
82
83 # Estimate of minimal memory footprint. malloc uses this value to
84 # request the most reasonable largest blocks of memory from the system.
85 FIRST_SBRK (48*1024)
86
87 # Round up sbrk()s to multiples of this.
88 MIN_SBRK 2048
89
90 # Round up sbrk()s to multiples of this percent of footprint.
91 MIN_SBRK_FRAC 3
92
93 # Add this much memory to big powers of two to get the bucket size.
94 PERL_PAGESIZE 4096
95
96 # This many sbrk() discontinuities should be tolerated even
97 # from the start without deciding that sbrk() is usually
98 # discontinuous.
99 SBRK_ALLOW_FAILURES 3
100
101 # This many continuous sbrk()s compensate for one discontinuous one.
102 SBRK_FAILURE_PRICE 50
103
28ac10b1
IZ
104 # Some configurations may ask for 12-byte-or-so allocations which
105 # require 8-byte alignment (?!). In such situation one needs to
106 # define this to disable 12-byte bucket (will increase memory footprint)
107 STRICT_ALIGNMENT undef
108
87c6202a
IZ
109 This implementation assumes that calling PerlIO_printf() does not
110 result in any memory allocation calls (used during a panic).
111
112 */
113
e8bc2b5c
GS
114#ifndef NO_FANCY_MALLOC
115# ifndef SMALL_BUCKET_VIA_TABLE
116# define SMALL_BUCKET_VIA_TABLE
117# endif
118# ifndef BUCKETS_ROOT2
119# define BUCKETS_ROOT2
120# endif
121# ifndef IGNORE_SMALL_BAD_FREE
122# define IGNORE_SMALL_BAD_FREE
123# endif
3562ef9b
IZ
124#endif
125
e8bc2b5c
GS
126#ifndef PLAIN_MALLOC /* Bulk enable features */
127# ifndef PACK_MALLOC
128# define PACK_MALLOC
129# endif
130# ifndef TWO_POT_OPTIMIZE
131# define TWO_POT_OPTIMIZE
132# endif
d720c441
IZ
133# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
134# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
135# endif
136# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
137# define DEBUGGING_MSTATS
138# endif
139#endif
140
141#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
142#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
143
144#if !(defined(I286) || defined(atarist))
145 /* take 2k unless the block is bigger than that */
146# define LOG_OF_MIN_ARENA 11
147#else
148 /* take 16k unless the block is bigger than that
149 (80286s like large segments!), probably good on the atari too */
150# define LOG_OF_MIN_ARENA 14
151#endif
152
8d063cd8 153#ifndef lint
1944739a
IZ
154# if defined(DEBUGGING) && !defined(NO_RCHECK)
155# define RCHECK
156# endif
e8bc2b5c
GS
157# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
158# undef IGNORE_SMALL_BAD_FREE
159# endif
8d063cd8
LW
160/*
161 * malloc.c (Caltech) 2/21/82
162 * Chris Kingsley, kingsley@cit-20.
163 *
164 * This is a very fast storage allocator. It allocates blocks of a small
165 * number of different sizes, and keeps free lists of each size. Blocks that
166 * don't exactly fit are passed up to the next larger size. In this
167 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 168 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8
LW
169 * This is designed for use in a program that uses vast quantities of memory,
170 * but bombs when it runs out.
171 */
172
d720c441
IZ
173#ifdef PERL_CORE
174# include "EXTERN.h"
175# include "perl.h"
176#else
177# ifdef PERL_FOR_X2P
178# include "../EXTERN.h"
179# include "../perl.h"
180# else
181# include <stdlib.h>
182# include <stdio.h>
183# include <memory.h>
184# define _(arg) arg
185# ifndef Malloc_t
186# define Malloc_t void *
187# endif
188# ifndef MEM_SIZE
189# define MEM_SIZE unsigned long
190# endif
191# ifndef LONG_MAX
192# define LONG_MAX 0x7FFFFFFF
193# endif
194# ifndef UV
195# define UV unsigned long
196# endif
197# ifndef caddr_t
198# define caddr_t char *
199# endif
200# ifndef Free_t
201# define Free_t void
202# endif
203# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
204# define PerlEnv_getenv getenv
205# define PerlIO_printf fprintf
206# define PerlIO_stderr() stderr
207# endif
e8bc2b5c 208# ifndef croak /* make depend */
d720c441
IZ
209# define croak(mess, arg) warn((mess), (arg)); exit(1);
210# endif
211# ifndef warn
212# define warn(mess, arg) fprintf(stderr, (mess), (arg));
e8bc2b5c
GS
213# endif
214# ifdef DEBUG_m
215# undef DEBUG_m
216# endif
217# define DEBUG_m(a)
218# ifdef DEBUGGING
219# undef DEBUGGING
220# endif
221#endif
222
223#ifndef MUTEX_LOCK
224# define MUTEX_LOCK(l)
225#endif
226
227#ifndef MUTEX_UNLOCK
228# define MUTEX_UNLOCK(l)
229#endif
230
760ac839 231#ifdef DEBUGGING
e8bc2b5c 232# undef DEBUG_m
4a33f861 233# define DEBUG_m(a) if (PL_debug & 128) a
760ac839
LW
234#endif
235
135863df
AB
236/* I don't much care whether these are defined in sys/types.h--LAW */
237
238#define u_char unsigned char
239#define u_int unsigned int
e8bc2b5c
GS
240
241#ifdef HAS_QUAD
242# define u_bigint UV /* Needs to eat *void. */
243#else /* needed? */
244# define u_bigint unsigned long /* Needs to eat *void. */
245#endif
246
135863df 247#define u_short unsigned short
8d063cd8 248
cf5c4ad8
PP
249/* 286 and atarist like big chunks, which gives too much overhead. */
250#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 251# undef PACK_MALLOC
cf5c4ad8
PP
252#endif
253
8d063cd8 254/*
cf5c4ad8
PP
255 * The description below is applicable if PACK_MALLOC is not defined.
256 *
8d063cd8
LW
257 * The overhead on a block is at least 4 bytes. When free, this space
258 * contains a pointer to the next free block, and the bottom two bits must
259 * be zero. When in use, the first byte is set to MAGIC, and the second
260 * byte is the size index. The remaining bytes are for alignment.
261 * If range checking is enabled and the size of the block fits
262 * in two bytes, then the top two bytes hold the size of the requested block
263 * plus the range checking words, and the header word MINUS ONE.
264 */
265union overhead {
266 union overhead *ov_next; /* when free */
85e6fe83 267#if MEM_ALIGNBYTES > 4
c623bd54 268 double strut; /* alignment problems */
a687059c 269#endif
8d063cd8
LW
270 struct {
271 u_char ovu_magic; /* magic number */
272 u_char ovu_index; /* bucket # */
273#ifdef RCHECK
274 u_short ovu_size; /* actual block size */
275 u_int ovu_rmagic; /* range magic number */
276#endif
277 } ovu;
278#define ov_magic ovu.ovu_magic
279#define ov_index ovu.ovu_index
280#define ov_size ovu.ovu_size
281#define ov_rmagic ovu.ovu_rmagic
282};
283
760ac839 284#ifdef DEBUGGING
d720c441 285static void botch _((char *diag, char *s));
a0d0e21e
LW
286#endif
287static void morecore _((int bucket));
288static int findbucket _((union overhead *freep, int srchlen));
28ac10b1 289static void add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip);
a0d0e21e 290
8d063cd8
LW
291#define MAGIC 0xff /* magic # on accounting info */
292#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
293#define RMAGIC_C 0x55 /* magic # on range info */
294
8d063cd8 295#ifdef RCHECK
c2a5c2d2
IZ
296# define RSLOP sizeof (u_int)
297# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 298# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 299# else
e8bc2b5c 300# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 301# endif
8d063cd8 302#else
c2a5c2d2 303# define RSLOP 0
8d063cd8
LW
304#endif
305
e8bc2b5c
GS
306#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
307# undef BUCKETS_ROOT2
308#endif
309
310#ifdef BUCKETS_ROOT2
311# define BUCKET_TABLE_SHIFT 2
312# define BUCKET_POW2_SHIFT 1
313# define BUCKETS_PER_POW2 2
314#else
315# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
316# define BUCKET_POW2_SHIFT 0
317# define BUCKETS_PER_POW2 1
318#endif
319
274c7500
IZ
320#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
321/* Figure out the alignment of void*. */
322struct aligner {
323 char c;
324 void *p;
325};
326# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
327#else
328# define ALIGN_SMALL MEM_ALIGNBYTES
329#endif
330
331#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
332
e8bc2b5c
GS
333#ifdef BUCKETS_ROOT2
334# define MAX_BUCKET_BY_TABLE 13
335static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
336 {
337 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
338 };
339# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
340# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
341 ? buck_size[i] \
342 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
343 - MEM_OVERHEAD(i) \
344 + POW2_OPTIMIZE_SURPLUS(i)))
345#else
346# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
347# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
348#endif
349
350
cf5c4ad8 351#ifdef PACK_MALLOC
e8bc2b5c
GS
352/* In this case it is assumed that if we do sbrk() in 2K units, we
353 * will get 2K aligned arenas (at least after some initial
354 * alignment). The bucket number of the given subblock is on the start
355 * of 2K arena which contains the subblock. Several following bytes
356 * contain the magic numbers for the subblocks in the block.
cf5c4ad8
PP
357 *
358 * Sizes of chunks are powers of 2 for chunks in buckets <=
359 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
360 * get alignment right).
361 *
e8bc2b5c
GS
362 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
363 * starts of all the chunks in a 2K arena are in different
364 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
365 * boundary of 2K block, this means that sizeof(union
366 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
367 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
368 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
369 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8
PP
370 *
371 * Note that with the above assumption we automatically have enough
372 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c
GS
373 * overlay union overhead over the chunk, thus the start of small chunks
374 * is immediately overwritten after freeing. */
375# define MAX_PACKED_POW2 6
376# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
377# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
378# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
379# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
380# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8
PP
381# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
382# define OV_INDEX(block) (*OV_INDEXp(block))
383# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
384 (TWOK_SHIFT(block)>> \
385 (bucket>>BUCKET_POW2_SHIFT)) + \
386 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
387 /* A bucket can have a shift smaller than it size, we need to
388 shift its magic number so it will not overwrite index: */
389# ifdef BUCKETS_ROOT2
390# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
391# else
392# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
393# endif
cf5c4ad8
PP
394# define CHUNK_SHIFT 0
395
e8bc2b5c
GS
396/* Number of active buckets of given ordinal. */
397#ifdef IGNORE_SMALL_BAD_FREE
398#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
399# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
400 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
401 : n_blks[bucket] )
402#else
403# define N_BLKS(bucket) n_blks[bucket]
404#endif
405
406static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
407 {
408# if BUCKETS_PER_POW2==1
409 0, 0,
410 (MIN_BUC_POW2==2 ? 384 : 0),
411 224, 120, 62, 31, 16, 8, 4, 2
412# else
413 0, 0, 0, 0,
414 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
415 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
416# endif
417 };
418
419/* Shift of the first bucket with the given ordinal inside 2K chunk. */
420#ifdef IGNORE_SMALL_BAD_FREE
421# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
422 ? ((1<<LOG_OF_MIN_ARENA) \
423 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
424 : blk_shift[bucket])
425#else
426# define BLK_SHIFT(bucket) blk_shift[bucket]
427#endif
428
429static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
430 {
431# if BUCKETS_PER_POW2==1
432 0, 0,
433 (MIN_BUC_POW2==2 ? 512 : 0),
434 256, 128, 64, 64, /* 8 to 64 */
435 16*sizeof(union overhead),
436 8*sizeof(union overhead),
437 4*sizeof(union overhead),
438 2*sizeof(union overhead),
439# else
440 0, 0, 0, 0,
441 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
442 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
443 16*sizeof(union overhead), 16*sizeof(union overhead),
444 8*sizeof(union overhead), 8*sizeof(union overhead),
445 4*sizeof(union overhead), 4*sizeof(union overhead),
446 2*sizeof(union overhead), 2*sizeof(union overhead),
447# endif
448 };
cf5c4ad8 449
cf5c4ad8
PP
450#else /* !PACK_MALLOC */
451
452# define OV_MAGIC(block,bucket) (block)->ov_magic
453# define OV_INDEX(block) (block)->ov_index
454# define CHUNK_SHIFT 1
e8bc2b5c 455# define MAX_PACKED -1
cf5c4ad8
PP
456#endif /* !PACK_MALLOC */
457
e8bc2b5c
GS
458#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
459
460#ifdef PACK_MALLOC
461# define MEM_OVERHEAD(bucket) \
462 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
463# ifdef SMALL_BUCKET_VIA_TABLE
464# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
465# define START_SHIFT MAX_PACKED_POW2
466# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
467# define SIZE_TABLE_MAX 80
468# else
469# define SIZE_TABLE_MAX 64
470# endif
471static char bucket_of[] =
472 {
473# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
474 /* 0 to 15 in 4-byte increments. */
475 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
476 6, /* 8 */
274c7500 477 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c
GS
478 9, 9, 10, 10, /* 24, 32 */
479 11, 11, 11, 11, /* 48 */
480 12, 12, 12, 12, /* 64 */
481 13, 13, 13, 13, /* 80 */
482 13, 13, 13, 13 /* 80 */
483# else /* !BUCKETS_ROOT2 */
484 /* 0 to 15 in 4-byte increments. */
485 (sizeof(void*) > 4 ? 3 : 2),
486 3,
487 4, 4,
488 5, 5, 5, 5,
489 6, 6, 6, 6,
490 6, 6, 6, 6
491# endif /* !BUCKETS_ROOT2 */
492 };
493# else /* !SMALL_BUCKET_VIA_TABLE */
494# define START_SHIFTS_BUCKET MIN_BUCKET
495# define START_SHIFT (MIN_BUC_POW2 - 1)
496# endif /* !SMALL_BUCKET_VIA_TABLE */
497#else /* !PACK_MALLOC */
498# define MEM_OVERHEAD(bucket) M_OVERHEAD
499# ifdef SMALL_BUCKET_VIA_TABLE
500# undef SMALL_BUCKET_VIA_TABLE
501# endif
502# define START_SHIFTS_BUCKET MIN_BUCKET
503# define START_SHIFT (MIN_BUC_POW2 - 1)
504#endif /* !PACK_MALLOC */
cf5c4ad8 505
8d063cd8 506/*
55497cff
PP
507 * Big allocations are often of the size 2^n bytes. To make them a
508 * little bit better, make blocks of size 2^n+pagesize for big n.
509 */
510
511#ifdef TWO_POT_OPTIMIZE
512
5f05dabc
PP
513# ifndef PERL_PAGESIZE
514# define PERL_PAGESIZE 4096
515# endif
e8bc2b5c
GS
516# ifndef FIRST_BIG_POW2
517# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 518# endif
e8bc2b5c 519# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff
PP
520/* If this value or more, check against bigger blocks. */
521# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
522/* If less than this value, goes into 2^n-overhead-block. */
523# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
524
e8bc2b5c
GS
525# define POW2_OPTIMIZE_ADJUST(nbytes) \
526 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
527# define POW2_OPTIMIZE_SURPLUS(bucket) \
528 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
529
530#else /* !TWO_POT_OPTIMIZE */
531# define POW2_OPTIMIZE_ADJUST(nbytes)
532# define POW2_OPTIMIZE_SURPLUS(bucket) 0
533#endif /* !TWO_POT_OPTIMIZE */
534
535#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
536# define BARK_64K_LIMIT(what,nbytes,size) \
537 if (nbytes > 0xffff) { \
538 PerlIO_printf(PerlIO_stderr(), \
539 "%s too large: %lx\n", what, size); \
540 my_exit(1); \
541 }
542#else /* !HAS_64K_LIMIT || !PERL_CORE */
543# define BARK_64K_LIMIT(what,nbytes,size)
544#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 545
e8bc2b5c
GS
546#ifndef MIN_SBRK
547# define MIN_SBRK 2048
548#endif
549
550#ifndef FIRST_SBRK
d720c441 551# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
552#endif
553
554/* Minimal sbrk in percents of what is already alloced. */
555#ifndef MIN_SBRK_FRAC
556# define MIN_SBRK_FRAC 3
557#endif
558
559#ifndef SBRK_ALLOW_FAILURES
560# define SBRK_ALLOW_FAILURES 3
561#endif
55497cff 562
e8bc2b5c
GS
563#ifndef SBRK_FAILURE_PRICE
564# define SBRK_FAILURE_PRICE 50
55497cff
PP
565#endif
566
e8bc2b5c
GS
567#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
568
569# ifndef BIG_SIZE
570# define BIG_SIZE (1<<16) /* 64K */
571# endif
572
55497cff
PP
573static char *emergency_buffer;
574static MEM_SIZE emergency_buffer_size;
df0003d4 575static Malloc_t emergency_sbrk(MEM_SIZE size);
55497cff 576
52082926 577static Malloc_t
df0003d4 578emergency_sbrk(MEM_SIZE size)
55497cff 579{
28ac10b1
IZ
580 MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
581
55497cff
PP
582 if (size >= BIG_SIZE) {
583 /* Give the possibility to recover: */
51dc0457 584 MUTEX_UNLOCK(&PL_malloc_mutex);
1b979e0a 585 croak("Out of memory during \"large\" request for %i bytes", size);
55497cff
PP
586 }
587
28ac10b1
IZ
588 if (emergency_buffer_size >= rsize) {
589 char *old = emergency_buffer;
590
591 emergency_buffer_size -= rsize;
592 emergency_buffer += rsize;
593 return old;
594 } else {
18f739ee 595 dTHR;
55497cff
PP
596 /* First offense, give a possibility to recover by dieing. */
597 /* No malloc involved here: */
4a33f861 598 GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
55497cff
PP
599 SV *sv;
600 char *pv;
28ac10b1 601 int have = 0;
55497cff 602
28ac10b1
IZ
603 if (emergency_buffer_size) {
604 add_to_chain(emergency_buffer, emergency_buffer_size, 0);
605 emergency_buffer_size = 0;
606 emergency_buffer = Nullch;
607 have = 1;
608 }
4a33f861 609 if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
55497cff 610 if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
28ac10b1
IZ
611 || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
612 if (have)
613 goto do_croak;
55497cff 614 return (char *)-1; /* Now die die die... */
28ac10b1 615 }
55497cff 616 /* Got it, now detach SvPV: */
6b88bc9c 617 pv = SvPV(sv, PL_na);
55497cff 618 /* Check alignment: */
28ac10b1 619 if (((UV)(pv - sizeof(union overhead))) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
55497cff 620 PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
bbce6d69 621 return (char *)-1; /* die die die */
55497cff
PP
622 }
623
28ac10b1
IZ
624 emergency_buffer = pv - sizeof(union overhead);
625 emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
55497cff 626 SvPOK_off(sv);
28ac10b1
IZ
627 SvPVX(sv) = Nullch;
628 SvCUR(sv) = SvLEN(sv) = 0;
55497cff 629 }
28ac10b1
IZ
630 do_croak:
631 MUTEX_UNLOCK(&PL_malloc_mutex);
632 croak("Out of memory during request for %i bytes", size);
55497cff
PP
633}
634
e8bc2b5c 635#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 636# define emergency_sbrk(size) -1
e8bc2b5c 637#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff
PP
638
639/*
e8bc2b5c 640 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
641 * smallest allocatable block is 8 bytes. The overhead information
642 * precedes the data area returned to the user.
643 */
e8bc2b5c 644#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 645static union overhead *nextf[NBUCKETS];
cf5c4ad8
PP
646
647#ifdef USE_PERL_SBRK
648#define sbrk(a) Perl_sbrk(a)
52082926 649Malloc_t Perl_sbrk _((int size));
8ac85365
NIS
650#else
651#ifdef DONT_DECLARE_STD
652#ifdef I_UNISTD
653#include <unistd.h>
654#endif
cf5c4ad8 655#else
52082926 656extern Malloc_t sbrk(int);
8ac85365 657#endif
cf5c4ad8 658#endif
8d063cd8 659
c07a80fd 660#ifdef DEBUGGING_MSTATS
8d063cd8
LW
661/*
662 * nmalloc[i] is the difference between the number of mallocs and frees
663 * for a given block size.
664 */
665static u_int nmalloc[NBUCKETS];
5f05dabc
PP
666static u_int sbrk_slack;
667static u_int start_slack;
8d063cd8
LW
668#endif
669
e8bc2b5c
GS
670static u_int goodsbrk;
671
#ifdef DEBUGGING
/* Internal consistency check: on failure report DIAG plus the stringified
 * condition and abort.  Compiles away entirely without DEBUGGING. */
#define	P_ASSERT(p,diag)   if (!(p)) botch(diag,STRINGIFY(p));  else
static void
botch(char *diag, char *s)
{
	PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
	PerlProc_abort();
}
#else
#define	P_ASSERT(p, diag)
#endif
683
2304df62 684Malloc_t
8ac85365 685malloc(register size_t nbytes)
8d063cd8
LW
686{
687 register union overhead *p;
e8bc2b5c 688 register int bucket;
ee0007ab 689 register MEM_SIZE shiftr;
8d063cd8 690
c2a5c2d2 691#if defined(DEBUGGING) || defined(RCHECK)
ee0007ab 692 MEM_SIZE size = nbytes;
45d8adaa
LW
693#endif
694
e8bc2b5c 695 BARK_64K_LIMIT("Allocation",nbytes,nbytes);
45d8adaa
LW
696#ifdef DEBUGGING
697 if ((long)nbytes < 0)
d720c441 698 croak("%s", "panic: malloc");
45d8adaa 699#endif
45d8adaa 700
51dc0457 701 MUTEX_LOCK(&PL_malloc_mutex);
8d063cd8
LW
702 /*
703 * Convert amount of memory requested into
704 * closest block size stored in hash buckets
705 * which satisfies request. Account for
706 * space used per block for accounting.
707 */
cf5c4ad8 708#ifdef PACK_MALLOC
e8bc2b5c
GS
709# ifdef SMALL_BUCKET_VIA_TABLE
710 if (nbytes == 0)
711 bucket = MIN_BUCKET;
712 else if (nbytes <= SIZE_TABLE_MAX) {
713 bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
714 } else
715# else
043bf814
RB
716 if (nbytes == 0)
717 nbytes = 1;
e8bc2b5c
GS
718 if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
719 else
720# endif
55497cff 721#endif
e8bc2b5c
GS
722 {
723 POW2_OPTIMIZE_ADJUST(nbytes);
724 nbytes += M_OVERHEAD;
725 nbytes = (nbytes + 3) &~ 3;
726 do_shifts:
727 shiftr = (nbytes - 1) >> START_SHIFT;
728 bucket = START_SHIFTS_BUCKET;
729 /* apart from this loop, this is O(1) */
730 while (shiftr >>= 1)
731 bucket += BUCKETS_PER_POW2;
cf5c4ad8 732 }
8d063cd8
LW
733 /*
734 * If nothing in hash bucket right now,
735 * request more memory from the system.
736 */
737 if (nextf[bucket] == NULL)
738 morecore(bucket);
e8bc2b5c 739 if ((p = nextf[bucket]) == NULL) {
51dc0457 740 MUTEX_UNLOCK(&PL_malloc_mutex);
55497cff 741#ifdef PERL_CORE
4a33f861 742 if (!PL_nomemok) {
760ac839 743 PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
79072805 744 my_exit(1);
ee0007ab 745 }
45d8adaa 746#else
8d063cd8 747 return (NULL);
45d8adaa
LW
748#endif
749 }
750
e8bc2b5c
GS
751 DEBUG_m(PerlIO_printf(Perl_debug_log,
752 "0x%lx: (%05lu) malloc %ld bytes\n",
4a33f861 753 (unsigned long)(p+1), (unsigned long)(PL_an++),
e8bc2b5c 754 (long)size));
45d8adaa 755
8d063cd8 756 /* remove from linked list */
802004fa
DD
757#if defined(RCHECK)
758 if (((UV)p) & (MEM_ALIGNBYTES - 1))
760ac839 759 PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
a0d0e21e 760 (unsigned long)*((int*)p),(unsigned long)p);
bf38876a
LW
761#endif
762 nextf[bucket] = p->ov_next;
e8bc2b5c
GS
763#ifdef IGNORE_SMALL_BAD_FREE
764 if (bucket >= FIRST_BUCKET_WITH_CHECK)
765#endif
766 OV_MAGIC(p, bucket) = MAGIC;
cf5c4ad8
PP
767#ifndef PACK_MALLOC
768 OV_INDEX(p) = bucket;
769#endif
8d063cd8
LW
770#ifdef RCHECK
771 /*
772 * Record allocated size of block and
773 * bound space with magic numbers.
774 */
8d063cd8 775 p->ov_rmagic = RMAGIC;
e8bc2b5c
GS
776 if (bucket <= MAX_SHORT_BUCKET) {
777 int i;
778
779 nbytes = size + M_OVERHEAD;
780 p->ov_size = nbytes - 1;
781 if ((i = nbytes & 3)) {
782 i = 4 - i;
783 while (i--)
784 *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
785 }
786 nbytes = (nbytes + 3) &~ 3;
787 *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
788 }
8d063cd8 789#endif
51dc0457 790 MUTEX_UNLOCK(&PL_malloc_mutex);
cf5c4ad8 791 return ((Malloc_t)(p + CHUNK_SHIFT));
8d063cd8
LW
792}
793
e8bc2b5c
GS
794static char *last_sbrk_top;
795static char *last_op; /* This arena can be easily extended. */
796static int sbrked_remains;
797static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
798
799#ifdef DEBUGGING_MSTATS
800static int sbrks;
801#endif
802
803struct chunk_chain_s {
804 struct chunk_chain_s *next;
805 MEM_SIZE size;
806};
807static struct chunk_chain_s *chunk_chain;
808static int n_chunks;
809static char max_bucket;
810
811/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
812static void *
813get_from_chain(MEM_SIZE size)
814{
815 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
816 struct chunk_chain_s **oldgoodp = NULL;
817 long min_remain = LONG_MAX;
818
819 while (elt) {
820 if (elt->size >= size) {
821 long remains = elt->size - size;
822 if (remains >= 0 && remains < min_remain) {
823 oldgoodp = oldp;
824 min_remain = remains;
825 }
826 if (remains == 0) {
827 break;
828 }
829 }
830 oldp = &( elt->next );
831 elt = elt->next;
832 }
833 if (!oldgoodp) return NULL;
834 if (min_remain) {
835 void *ret = *oldgoodp;
836 struct chunk_chain_s *next = (*oldgoodp)->next;
837
838 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
839 (*oldgoodp)->size = min_remain;
840 (*oldgoodp)->next = next;
841 return ret;
842 } else {
843 void *ret = *oldgoodp;
844 *oldgoodp = (*oldgoodp)->next;
845 n_chunks--;
846 return ret;
847 }
848}
849
850static void
851add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
852{
853 struct chunk_chain_s *next = chunk_chain;
854 char *cp = (char*)p;
855
856 cp += chip;
857 chunk_chain = (struct chunk_chain_s *)cp;
858 chunk_chain->size = size - chip;
859 chunk_chain->next = next;
860 n_chunks++;
861}
862
863static void *
864get_from_bigger_buckets(int bucket, MEM_SIZE size)
865{
866 int price = 1;
867 static int bucketprice[NBUCKETS];
868 while (bucket <= max_bucket) {
869 /* We postpone stealing from bigger buckets until we want it
870 often enough. */
871 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
872 /* Steal it! */
873 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
874 bucketprice[bucket] = 0;
875 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
876 last_op = NULL; /* Disable optimization */
877 }
878 nextf[bucket] = nextf[bucket]->ov_next;
879#ifdef DEBUGGING_MSTATS
880 nmalloc[bucket]--;
881 start_slack -= M_OVERHEAD;
882#endif
883 add_to_chain(ret, (BUCKET_SIZE(bucket) +
884 POW2_OPTIMIZE_SURPLUS(bucket)),
885 size);
886 return ret;
887 }
888 bucket++;
889 }
890 return NULL;
891}
892
fa423c5b
IZ
893static union overhead *
894getpages(int needed, int *nblksp, int bucket)
895{
896 /* Need to do (possibly expensive) system call. Try to
897 optimize it for rare calling. */
898 MEM_SIZE require = needed - sbrked_remains;
899 char *cp;
900 union overhead *ovp;
901 int slack = 0;
902
903 if (sbrk_good > 0) {
904 if (!last_sbrk_top && require < FIRST_SBRK)
905 require = FIRST_SBRK;
906 else if (require < MIN_SBRK) require = MIN_SBRK;
907
908 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
909 require = goodsbrk * MIN_SBRK_FRAC / 100;
910 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
911 } else {
912 require = needed;
913 last_sbrk_top = 0;
914 sbrked_remains = 0;
915 }
916
917 DEBUG_m(PerlIO_printf(Perl_debug_log,
918 "sbrk(%ld) for %ld-byte-long arena\n",
919 (long)require, (long) needed));
920 cp = (char *)sbrk(require);
921#ifdef DEBUGGING_MSTATS
922 sbrks++;
923#endif
924 if (cp == last_sbrk_top) {
925 /* Common case, anything is fine. */
926 sbrk_good++;
927 ovp = (union overhead *) (cp - sbrked_remains);
928 sbrked_remains = require - (needed - sbrked_remains);
929 } else if (cp == (char *)-1) { /* no more room! */
930 ovp = (union overhead *)emergency_sbrk(needed);
931 if (ovp == (union overhead *)-1)
932 return 0;
933 return ovp;
934 } else { /* Non-continuous or first sbrk(). */
935 long add = sbrked_remains;
936 char *newcp;
937
938 if (sbrked_remains) { /* Put rest into chain, we
939 cannot use it right now. */
940 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
941 sbrked_remains, 0);
942 }
943
944 /* Second, check alignment. */
945 slack = 0;
946
947#ifndef atarist /* on the atari we dont have to worry about this */
948# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
949
950 /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
951 if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
952 slack = (0x800 >> CHUNK_SHIFT)
953 - ((UV)cp & (0x7FF >> CHUNK_SHIFT));
954 add += slack;
955 }
956# endif
957#endif /* atarist */
958
959 if (add) {
960 DEBUG_m(PerlIO_printf(Perl_debug_log,
961 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
962 (long)add, (long) slack,
963 (long) sbrked_remains));
964 newcp = (char *)sbrk(add);
965#if defined(DEBUGGING_MSTATS)
966 sbrks++;
967 sbrk_slack += add;
968#endif
969 if (newcp != cp + require) {
970 /* Too bad: even rounding sbrk() is not continuous.*/
971 DEBUG_m(PerlIO_printf(Perl_debug_log,
972 "failed to fix bad sbrk()\n"));
973#ifdef PACK_MALLOC
974 if (slack) {
51dc0457 975 MUTEX_UNLOCK(&PL_malloc_mutex);
fa423c5b
IZ
976 croak("%s", "panic: Off-page sbrk");
977 }
978#endif
979 if (sbrked_remains) {
980 /* Try again. */
981#if defined(DEBUGGING_MSTATS)
982 sbrk_slack += require;
983#endif
984 require = needed;
985 DEBUG_m(PerlIO_printf(Perl_debug_log,
986 "straight sbrk(%ld)\n",
987 (long)require));
988 cp = (char *)sbrk(require);
989#ifdef DEBUGGING_MSTATS
990 sbrks++;
991#endif
992 if (cp == (char *)-1)
993 return 0;
994 }
995 sbrk_good = -1; /* Disable optimization!
996 Continue with not-aligned... */
997 } else {
998 cp += slack;
999 require += sbrked_remains;
1000 }
1001 }
1002
1003 if (last_sbrk_top) {
1004 sbrk_good -= SBRK_FAILURE_PRICE;
1005 }
1006
1007 ovp = (union overhead *) cp;
1008 /*
1009 * Round up to minimum allocation size boundary
1010 * and deduct from block count to reflect.
1011 */
1012
1013#ifndef I286 /* Again, this should always be ok on an 80286 */
1014 if ((UV)ovp & 7) {
1015 ovp = (union overhead *)(((UV)ovp + 8) & ~7);
1016 DEBUG_m(PerlIO_printf(Perl_debug_log,
1017 "fixing sbrk(): %d bytes off machine alignement\n",
1018 (int)((UV)ovp & 7)));
1019 (*nblksp)--;
1020# if defined(DEBUGGING_MSTATS)
1021 /* This is only approx. if TWO_POT_OPTIMIZE: */
1022 sbrk_slack += (1 << bucket);
1023# endif
1024 }
1025#endif
1026 sbrked_remains = require - needed;
1027 }
1028 last_sbrk_top = cp + require;
1029 last_op = (char*) cp;
1030#ifdef DEBUGGING_MSTATS
1031 goodsbrk += require;
1032#endif
1033 return ovp;
1034}
1035
1036static int
1037getpages_adjacent(int require)
1038{
1039 if (require <= sbrked_remains) {
1040 sbrked_remains -= require;
1041 } else {
1042 char *cp;
1043
1044 require -= sbrked_remains;
1045 /* We do not try to optimize sbrks here, we go for place. */
1046 cp = (char*) sbrk(require);
1047#ifdef DEBUGGING_MSTATS
1048 sbrks++;
1049 goodsbrk += require;
1050#endif
1051 if (cp == last_sbrk_top) {
1052 sbrked_remains = 0;
1053 last_sbrk_top = cp + require;
1054 } else {
28ac10b1
IZ
1055 if (cp == (char*)-1) { /* Out of memory */
1056#ifdef DEBUGGING_MSTATS
1057 goodsbrk -= require;
1058#endif
1059 return 0;
1060 }
fa423c5b
IZ
1061 /* Report the failure: */
1062 if (sbrked_remains)
1063 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1064 sbrked_remains, 0);
1065 add_to_chain((void*)cp, require, 0);
1066 sbrk_good -= SBRK_FAILURE_PRICE;
1067 sbrked_remains = 0;
1068 last_sbrk_top = 0;
1069 last_op = 0;
1070 return 0;
1071 }
1072 }
1073
1074 return 1;
1075}
1076
8d063cd8
LW
/*
 * Allocate more memory to the indicated bucket.
 *
 * Refills nextf[bucket] with a fresh run of free blocks.  Memory is
 * obtained, in order of preference, by: (1) splitting a free block
 * from the whole-arena bucket, (2) the chunk chain, (3) carving a
 * piece out of a bigger bucket's free list, (4) the unused tail of
 * the last sbrk(), (5) getpages().  Returns silently (leaving
 * nextf[bucket] empty) if no memory could be obtained.
 */
static void
morecore(register int bucket)
{
	register union overhead *ovp;
	register int rnu;       /* 2^rnu bytes will be requested */
	int nblks;              /* become nblks blocks of the desired size */
	register MEM_SIZE siz, needed;

	if (nextf[bucket])
		return;
	/* A bucket this large implies a request of ~2^(wordbits) bytes. */
	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
	    MUTEX_UNLOCK(&PL_malloc_mutex);
	    croak("%s", "Out of memory during ridiculously large request");
	}
	if (bucket > max_bucket)
	    max_bucket = bucket;

	/* Small buckets share one minimum-size arena; big buckets get
	   an arena of their own power-of-two size. */
	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
		? LOG_OF_MIN_ARENA
		: (bucket >> BUCKET_POW2_SHIFT) );
	/* This may be overwritten later: */
	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
	    /* Steal a whole free arena-sized block from its own bucket.
	       The -1 + CHUNK_SHIFT adjusts for the overhead layout. */
	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
	    nextf[rnu << BUCKET_POW2_SHIFT]
		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
	    start_slack -= M_OVERHEAD;
#endif
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from %ld arena\n",
				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	} else if (chunk_chain
		   && (ovp = (union overhead*) get_from_chain(needed))) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from chain\n",
				  (long) needed));
	} else if ( (ovp = (union overhead*)
		     get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
					     needed)) ) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from bigger buckets\n",
				  (long) needed));
	} else if (needed <= sbrked_remains) {
	    /* The tail of the last sbrk() is big enough. */
	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
	    sbrked_remains -= needed;
	    last_op = (char*)ovp;
	} else
	    ovp = getpages(needed, &nblks, bucket);

	if (!ovp)
	    return;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	siz = BUCKET_SIZE(bucket);
#ifdef PACK_MALLOC
	*(u_char*)ovp = bucket;	/* Fill index. */
	if (bucket <= MAX_PACKED) {
	    /* Packed buckets: per-arena index byte, no per-chunk
	       overhead; block count comes from the packing table. */
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    nblks = N_BLKS(bucket);
#  ifdef DEBUGGING_MSTATS
	    start_slack += BLK_SHIFT(bucket);
#  endif
	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    siz -= sizeof(union overhead);
	} else ovp++;		/* One chunk per block. */
#endif /* PACK_MALLOC */
	nextf[bucket] = ovp;
#ifdef DEBUGGING_MSTATS
	nmalloc[bucket] += nblks;
	if (bucket > MAX_PACKED) {
	    start_slack += M_OVERHEAD * nblks;
	}
#endif
	/* Thread the arena into a singly-linked free list of nblks
	   chunks, each siz bytes apart. */
	while (--nblks > 0) {
		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
		ovp = (union overhead *)((caddr_t)ovp + siz);
	}
	/* Not all sbrks return zeroed memory.*/
	ovp->ov_next = (union overhead *)NULL;
#ifdef PACK_MALLOC
	/* Special case for bucket 7*BUCKETS_PER_POW2; the explanation
	   is in the (not shown here) commentary earlier in this file.
	   The first chunk is shifted back by one overhead struct. */
	if (bucket == 7*BUCKETS_PER_POW2) {
	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
	    nextf[7*BUCKETS_PER_POW2] =
		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
				   - sizeof(union overhead));
	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
	}
#endif /* PACK_MALLOC */
}
1176
/*
 * free - return a chunk to its bucket's free list.
 *
 * Validates the chunk's magic before accepting it; a failed check is
 * reported (controllable via the PERL_BADFREE environment variable)
 * and ignored rather than corrupting the free lists.  Under RCHECK
 * the head and tail guard bytes are also verified.
 */
Free_t
free(void *mp)
{
	register MEM_SIZE size;
	register union overhead *ovp;
	char *cp = (char*)mp;
#ifdef PACK_MALLOC
	u_char bucket;
#endif

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) free\n",
			      (unsigned long)cp, (unsigned long)(PL_an++)));

	if (cp == NULL)
		return;
	/* Step back over the bookkeeping header to the real chunk start.
	   CHUNK_SHIFT is 1 with PACK_MALLOC, 0 otherwise. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
#ifdef PACK_MALLOC
	bucket = OV_INDEX(ovp);
#endif
	/* NOTE(review): without PACK_MALLOC, `bucket' is not declared
	   here; OV_MAGIC presumably ignores its second argument in that
	   configuration — confirm against the macro definitions above. */
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
	    && (OV_MAGIC(ovp, bucket) != MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) != MAGIC)
#endif
	    {
		/* Bad or duplicate free.  Warn (once-initialized policy
		   from PERL_BADFREE) and ignore the pointer. */
		static int bad_free_warn = -1;
		if (bad_free_warn == -1) {
		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
		}
		if (!bad_free_warn)
		    return;
#ifdef RCHECK
		warn("%s free() ignored",
		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
#else
		warn("%s", "Bad free() ignored");
#endif
		return;				/* sanity */
	    }
	MUTEX_LOCK(&PL_malloc_mutex);
#ifdef RCHECK
	P_ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
	    int i;
	    MEM_SIZE nbytes = ovp->ov_size + 1;

	    /* The tail guard consists of RMAGIC_C filler bytes padding
	       the size up to a 4-byte boundary, then an RMAGIC word. */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--) {
		    P_ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
			   == RMAGIC_C, "chunk's tail overwrite");
		}
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    P_ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
	}
	/* Mark the chunk free so a second free() is detected above. */
	ovp->ov_rmagic = RMAGIC - 1;
#endif
	P_ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
	size = OV_INDEX(ovp);
	/* Push onto the head of the bucket's free list. */
	ovp->ov_next = nextf[size];
	nextf[size] = ovp;
	MUTEX_UNLOCK(&PL_malloc_mutex);
}
1245
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``reall_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
#define reall_srchlen  4	/* 4 should be plenty, -1 =>'s whole list */

/*
 * realloc - resize the chunk at mp to nbytes.
 *
 * Tries, in order: reuse in place (new size fits the same bucket),
 * in-place growth of the most recent allocation via
 * getpages_adjacent(), and finally malloc + copy + free.
 */
Malloc_t
realloc(void *mp, size_t nbytes)
{
	register MEM_SIZE onb;
	union overhead *ovp;
	char *res;
	int prev_bucket;
	register int bucket;
	int was_alloced = 0, incr;
	char *cp = (char*)mp;

#if defined(DEBUGGING) || !defined(PERL_CORE)
	MEM_SIZE size = nbytes;

	if ((long)nbytes < 0)
	    croak("%s", "panic: realloc");
#endif

	BARK_64K_LIMIT("Reallocation",nbytes,size);
	if (!cp)
		return malloc(nbytes);

	MUTEX_LOCK(&PL_malloc_mutex);
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
	bucket = OV_INDEX(ovp);
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket < FIRST_BUCKET_WITH_CHECK)
	    || (OV_MAGIC(ovp, bucket) == MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) == MAGIC)
#endif
	{
		was_alloced = 1;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``reall_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * smallest possible.
		 */
		if ((bucket = findbucket(ovp, 1)) < 0 &&
		    (bucket = findbucket(ovp, reall_srchlen)) < 0)
			bucket = 0;
	}
	onb = BUCKET_SIZE_REAL(bucket);
	/*
	 *  avoid the copy if same size block.
	 *  We are not agressive with boundary cases. Note that it might
	 *  (for a small number of cases) give false negative if
	 *  both new size and old one are in the bucket for
	 *  FIRST_BIG_POW2, but the new one is near the lower end.
	 *
	 *  We do not try to go to 1.5 times smaller bucket so far.
	 */
	if (nbytes > onb) incr = 1;
	else {
	    /* incr = 0: keep in place; incr = -1: shrink (hard way). */
#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
		nbytes > ( (onb >> 1) - M_OVERHEAD )
#  ifdef TWO_POT_OPTIMIZE
		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
#  endif
		)
#else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		prev_bucket = ( (bucket > MAX_PACKED + 1)
				? bucket - BUCKETS_PER_POW2
				: bucket - 1);
	     if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		 incr = 0;
	     else incr = -1;
	}
	if (!was_alloced
#ifdef STRESS_REALLOC
	    || 1 /* always do it the hard way */
#endif
	    ) goto hard_way;
	else if (incr == 0) {
	  inplace_label:
#ifdef RCHECK
		/*
		 * Record new allocated size of block and
		 * bound space with magic numbers.
		 */
		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
		       int i, nb = ovp->ov_size + 1;

		       /* Verify the old tail guard before moving it. */
		       if ((i = nb & 3)) {
			   i = 4 - i;
			   while (i--) {
			       P_ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
			   }
		       }
		       nb = (nb + 3) &~ 3;
		       P_ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
			/*
			 * Convert amount of memory requested into
			 * closest block size stored in hash buckets
			 * which satisfies request.  Account for
			 * space used per block for accounting.
			 */
			nbytes += M_OVERHEAD;
			ovp->ov_size = nbytes - 1;
			if ((i = nbytes & 3)) {
			    i = 4 - i;
			    while (i--)
				*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
				    = RMAGIC_C;
			}
			nbytes = (nbytes + 3) &~ 3;
			*((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
		}
#endif
		res = cp;
		MUTEX_UNLOCK(&PL_malloc_mutex);
		DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) realloc %ld bytes inplace\n",
			      (unsigned long)res,(unsigned long)(PL_an++),
			      (long)size));
	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
	    /* Growing the most recently allocated big chunk: try to
	       extend it in place with adjacent sbrk() memory. */
	    MEM_SIZE require, newarena = nbytes, pow;
	    int shiftr;

	    POW2_OPTIMIZE_ADJUST(newarena);
	    newarena = newarena + M_OVERHEAD;
	    /* newarena = (newarena + 3) &~ 3; */
	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
	    pow = LOG_OF_MIN_ARENA + 1;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
		pow++;
	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
	    require = newarena - onb - M_OVERHEAD;

	    if (getpages_adjacent(require)) {
#ifdef DEBUGGING_MSTATS
		nmalloc[bucket]--;
		nmalloc[pow * BUCKETS_PER_POW2]++;
#endif
		/* Retag the chunk with its new (bigger) bucket, then
		   fall into the in-place bookkeeping above. */
		*(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
		goto inplace_label;
	    } else
		goto hard_way;
	} else {
	  hard_way:
	    MUTEX_UNLOCK(&PL_malloc_mutex);
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
			      (unsigned long)cp,(unsigned long)(PL_an++),
			      (long)size));
	    if ((res = (char*)malloc(nbytes)) == NULL)
		return (NULL);
	    if (cp != res)			/* common optimization */
		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
	    if (was_alloced)
		free(cp);
	}
	return ((Malloc_t)res);
}
1425
1426/*
1427 * Search ``srchlen'' elements of each free list for a block whose
1428 * header starts at ``freep''. If srchlen is -1 search the whole list.
1429 * Return bucket number, or -1 if not found.
1430 */
ee0007ab 1431static int
8ac85365 1432findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1433{
1434 register union overhead *p;
1435 register int i, j;
1436
1437 for (i = 0; i < NBUCKETS; i++) {
1438 j = 0;
1439 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1440 if (p == freep)
1441 return (i);
1442 j++;
1443 }
1444 }
1445 return (-1);
1446}
1447
cf5c4ad8 1448Malloc_t
8ac85365 1449calloc(register size_t elements, register size_t size)
cf5c4ad8
PP
1450{
1451 long sz = elements * size;
1452 Malloc_t p = malloc(sz);
1453
1454 if (p) {
1455 memset((void*)p, 0, sz);
1456 }
1457 return p;
1458}
1459
e8bc2b5c
GS
1460MEM_SIZE
1461malloced_size(void *p)
1462{
8d6dde3e
IZ
1463 union overhead *ovp = (union overhead *)
1464 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1465 int bucket = OV_INDEX(ovp);
1466#ifdef RCHECK
1467 /* The caller wants to have a complete control over the chunk,
1468 disable the memory checking inside the chunk. */
1469 if (bucket <= MAX_SHORT_BUCKET) {
1470 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1471 ovp->ov_size = size + M_OVERHEAD - 1;
1472 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1473 }
1474#endif
e8bc2b5c
GS
1475 return BUCKET_SIZE_REAL(bucket);
1476}
1477
#ifdef DEBUGGING_MSTATS

#  ifdef BUCKETS_ROOT2
#    define MIN_EVEN_REPORT 6
#  else
#    define MIN_EVEN_REPORT MIN_BUCKET
#  endif
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
dump_mstats(char *s)
{
	register int i, j;
	register union overhead *p;
	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
	u_int nfree[NBUCKETS];
	int total_chain = 0;
	struct chunk_chain_s* nextchain = chunk_chain;

	/* Walk every free list, counting entries and tracking the
	   largest bucket ever used (split even/odd for BUCKETS_ROOT2). */
	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		nfree[i] = j;
		totfree += nfree[i] * BUCKET_SIZE_REAL(i);
		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
		if (nmalloc[i]) {
		    i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
		    topbucket = i;
		}
	}
	if (s)
	    PerlIO_printf(PerlIO_stderr(),
			  "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
			  s,
			  (long)BUCKET_SIZE_REAL(MIN_BUCKET),
			  (long)BUCKET_SIZE(MIN_BUCKET),
			  (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
	/* Free-list lengths, one column per (even) bucket; the field
	   width narrows for the bigger, rarer buckets. */
	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
	}
#ifdef BUCKETS_ROOT2
	/* Second row for the odd (intermediate root-2) buckets. */
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
	}
#endif
	/* Outstanding (malloc'd minus free'd) blocks per bucket. */
	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
	}
#ifdef BUCKETS_ROOT2
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
	}
#endif
	/* Sum the memory parked on the chunk chain for the summary. */
	while (nextchain) {
	    total_chain += nextchain->size;
	    nextchain = nextchain->next;
	}
	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
		      start_slack, total_chain, sbrked_remains);
}
#else
/* Stub so callers can link when statistics are compiled out. */
void
dump_mstats(char *s)
{
}
#endif
#endif /* lint */
cf5c4ad8
PP
1571
1572
1573#ifdef USE_PERL_SBRK
1574
2c92fcc0 1575# if defined(__MACHTEN_PPC__) || defined(__NeXT__)
38ac2dc8
DD
1576# define PERL_SBRK_VIA_MALLOC
1577/*
1578 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1579 * While this is adequate, it may slow down access to longer data
1580 * types by forcing multiple memory accesses. It also causes
1581 * complaints when RCHECK is in force. So we allocate six bytes
1582 * more than we need to, and return an address rounded up to an
1583 * eight-byte boundary.
1584 *
1585 * 980701 Dominic Dunlop <domo@computer.org>
1586 */
1587# define SYSTEM_ALLOC(a) ((void *)(((unsigned)malloc((a)+6)+6)&~7))
1588# endif
1589
760ac839 1590# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1591# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
38ac2dc8
DD
1592# undef malloc /* Expose names that */
1593# undef calloc /* HIDEMYMALLOC hides */
1594# undef realloc
1595# undef free
760ac839 1596# else
72e5b9db 1597# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1598# endif
cf5c4ad8
PP
1599
1600/* it may seem schizophrenic to use perl's malloc and let it call system */
1601/* malloc, the reason for that is only the 3.2 version of the OS that had */
1602/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1603/* end to the cores */
1604
38ac2dc8
DD
1605# ifndef SYSTEM_ALLOC
1606# define SYSTEM_ALLOC(a) malloc(a)
1607# endif
cf5c4ad8 1608
760ac839 1609# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8
PP
1610
1611static IV Perl_sbrk_oldchunk;
1612static long Perl_sbrk_oldsize;
1613
760ac839
LW
1614# define PERLSBRK_32_K (1<<15)
1615# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1616
b63effbb 1617Malloc_t
df0003d4 1618Perl_sbrk(int size)
cf5c4ad8
PP
1619{
1620 IV got;
1621 int small, reqsize;
1622
1623 if (!size) return 0;
55497cff 1624#ifdef PERL_CORE
cf5c4ad8
PP
1625 reqsize = size; /* just for the DEBUG_m statement */
1626#endif
57569e04
HM
1627#ifdef PACK_MALLOC
1628 size = (size + 0x7ff) & ~0x7ff;
1629#endif
cf5c4ad8
PP
1630 if (size <= Perl_sbrk_oldsize) {
1631 got = Perl_sbrk_oldchunk;
1632 Perl_sbrk_oldchunk += size;
1633 Perl_sbrk_oldsize -= size;
1634 } else {
1635 if (size >= PERLSBRK_32_K) {
1636 small = 0;
1637 } else {
cf5c4ad8
PP
1638 size = PERLSBRK_64_K;
1639 small = 1;
1640 }
1641 got = (IV)SYSTEM_ALLOC(size);
57569e04
HM
1642#ifdef PACK_MALLOC
1643 got = (got + 0x7ff) & ~0x7ff;
1644#endif
cf5c4ad8
PP
1645 if (small) {
1646 /* Chunk is small, register the rest for future allocs. */
1647 Perl_sbrk_oldchunk = got + reqsize;
1648 Perl_sbrk_oldsize = size - reqsize;
1649 }
1650 }
1651
fb73857a 1652 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1653 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8
PP
1654
1655 return (void *)got;
1656}
1657
1658#endif /* ! defined USE_PERL_SBRK */