This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
Re: [PATCH] Re: Not OK: perl 5.00553 on OPENSTEP-Mach 4_1
[perl5.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
87c6202a
IZ
5/*
6 Here are some notes on configuring Perl's malloc.
7
8 There are two macros which serve as bulk disablers of advanced
9 features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
10 default). Look in the list of default values below to understand
11 their exact effect. Defining NO_FANCY_MALLOC returns malloc.c to the
12 state of the malloc in Perl 5.004. Additionally defining PLAIN_MALLOC
13 returns it to the state as of Perl 5.000.
14
15 Note that some of the settings below may be ignored in the code based
16 on values of other macros. The PERL_CORE symbol is only defined when
17 perl itself is being compiled (so malloc can make some assumptions
18 about perl's facilities being available to it).
19
20 Each config option has a short description, followed by its name,
21 default value, and a comment about the default (if applicable). Some
22 options take a precise value, while the others are just boolean.
23 The boolean ones are listed first.
24
25 # Enable code for an emergency memory pool in $^M. See perlvar.pod
26 # for a description of $^M.
27 PERL_EMERGENCY_SBRK (!PLAIN_MALLOC && PERL_CORE)
28
29 # Enable code for printing memory statistics.
30 DEBUGGING_MSTATS (!PLAIN_MALLOC && PERL_CORE)
31
32 # Move allocation info for small buckets into separate areas.
33 # Memory optimization (especially for small allocations, of the
34 # less than 64 bytes). Since perl usually makes a large number
35 # of small allocations, this is usually a win.
36 PACK_MALLOC (!PLAIN_MALLOC && !RCHECK)
37
38 # Add one page to big powers of two when calculating bucket size.
39 # This is targeted at big allocations, as are common in image
40 # processing.
41 TWO_POT_OPTIMIZE !PLAIN_MALLOC
42
43 # Use intermediate bucket sizes between powers-of-two. This is
44 # generally a memory optimization, and a (small) speed pessimization.
45 BUCKETS_ROOT2 !NO_FANCY_MALLOC
46
47 # Do not check small deallocations for bad free(). Memory
48 # and speed optimization, error reporting pessimization.
49 IGNORE_SMALL_BAD_FREE (!NO_FANCY_MALLOC && !RCHECK)
50
51 # Use table lookup to decide in which bucket a given allocation will go.
52 SMALL_BUCKET_VIA_TABLE !NO_FANCY_MALLOC
53
38ac2dc8
DD
54 # Use a perl-defined sbrk() instead of the (presumably broken or
55 # missing) system-supplied sbrk().
56 USE_PERL_SBRK undef
57
58 # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
59 # only used with broken sbrk()s.
87c6202a
IZ
60 PERL_SBRK_VIA_MALLOC undef
61
38ac2dc8
DD
62 # Which allocator to use if PERL_SBRK_VIA_MALLOC
63 SYSTEM_ALLOC(a) malloc(a)
64
87c6202a
IZ
65 # Disable memory overwrite checking with DEBUGGING. Memory and speed
66 # optimization, error reporting pessimization.
67 NO_RCHECK undef
68
69 # Enable memory overwrite checking with DEBUGGING. Memory and speed
70 # pessimization, error reporting optimization
71 RCHECK (DEBUGGING && !NO_RCHECK)
72
73 # Failed allocations bigger than this size croak (if
74 # PERL_EMERGENCY_SBRK is enabled) without touching $^M. See
75 # perlvar.pod for a description of $^M.
76 BIG_SIZE (1<<16) # 64K
77
78 # Starting from this power of two, add an extra page to the
79 # size of the bucket. This enables optimized allocations of sizes
80 # close to powers of 2. Note that the value is indexed at 0.
81 FIRST_BIG_POW2 15 # 32K, 16K is used too often
82
83 # Estimate of minimal memory footprint. malloc uses this value to
84 # request the most reasonable largest blocks of memory from the system.
85 FIRST_SBRK (48*1024)
86
87 # Round up sbrk()s to multiples of this.
88 MIN_SBRK 2048
89
90 # Round up sbrk()s to multiples of this percent of footprint.
91 MIN_SBRK_FRAC 3
92
93 # Add this much memory to big powers of two to get the bucket size.
94 PERL_PAGESIZE 4096
95
96 # This many sbrk() discontinuities should be tolerated even
97 # from the start without deciding that sbrk() is usually
98 # discontinuous.
99 SBRK_ALLOW_FAILURES 3
100
101 # This many continuous sbrk()s compensate for one discontinuous one.
102 SBRK_FAILURE_PRICE 50
103
28ac10b1
IZ
104 # Some configurations may ask for 12-byte-or-so allocations which
105 # require 8-byte alignment (?!). In such situation one needs to
106 # define this to disable 12-byte bucket (will increase memory footprint)
107 STRICT_ALIGNMENT undef
108
87c6202a
IZ
109 This implementation assumes that calling PerlIO_printf() does not
110 result in any memory allocation calls (used during a panic).
111
112 */
113
e8bc2b5c
GS
114#ifndef NO_FANCY_MALLOC
115# ifndef SMALL_BUCKET_VIA_TABLE
116# define SMALL_BUCKET_VIA_TABLE
117# endif
118# ifndef BUCKETS_ROOT2
119# define BUCKETS_ROOT2
120# endif
121# ifndef IGNORE_SMALL_BAD_FREE
122# define IGNORE_SMALL_BAD_FREE
123# endif
3562ef9b
IZ
124#endif
125
e8bc2b5c
GS
126#ifndef PLAIN_MALLOC /* Bulk enable features */
127# ifndef PACK_MALLOC
128# define PACK_MALLOC
129# endif
130# ifndef TWO_POT_OPTIMIZE
131# define TWO_POT_OPTIMIZE
132# endif
d720c441
IZ
133# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
134# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
135# endif
136# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
137# define DEBUGGING_MSTATS
138# endif
139#endif
140
141#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
142#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
143
144#if !(defined(I286) || defined(atarist))
145 /* take 2k unless the block is bigger than that */
146# define LOG_OF_MIN_ARENA 11
147#else
148 /* take 16k unless the block is bigger than that
149 (80286s like large segments!), probably good on the atari too */
150# define LOG_OF_MIN_ARENA 14
151#endif
152
8d063cd8 153#ifndef lint
1944739a
IZ
154# if defined(DEBUGGING) && !defined(NO_RCHECK)
155# define RCHECK
156# endif
e8bc2b5c
GS
157# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
158# undef IGNORE_SMALL_BAD_FREE
159# endif
8d063cd8
LW
160/*
161 * malloc.c (Caltech) 2/21/82
162 * Chris Kingsley, kingsley@cit-20.
163 *
164 * This is a very fast storage allocator. It allocates blocks of a small
165 * number of different sizes, and keeps free lists of each size. Blocks that
166 * don't exactly fit are passed up to the next larger size. In this
167 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 168 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8
LW
169 * This is designed for use in a program that uses vast quantities of memory,
170 * but bombs when it runs out.
171 */
172
d720c441
IZ
173#ifdef PERL_CORE
174# include "EXTERN.h"
175# include "perl.h"
176#else
177# ifdef PERL_FOR_X2P
178# include "../EXTERN.h"
179# include "../perl.h"
180# else
181# include <stdlib.h>
182# include <stdio.h>
183# include <memory.h>
184# define _(arg) arg
185# ifndef Malloc_t
186# define Malloc_t void *
187# endif
188# ifndef MEM_SIZE
189# define MEM_SIZE unsigned long
190# endif
191# ifndef LONG_MAX
192# define LONG_MAX 0x7FFFFFFF
193# endif
194# ifndef UV
195# define UV unsigned long
196# endif
197# ifndef caddr_t
198# define caddr_t char *
199# endif
200# ifndef Free_t
201# define Free_t void
202# endif
203# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
204# define PerlEnv_getenv getenv
205# define PerlIO_printf fprintf
206# define PerlIO_stderr() stderr
207# endif
e8bc2b5c 208# ifndef croak /* make depend */
d720c441
IZ
209# define croak(mess, arg) warn((mess), (arg)); exit(1);
210# endif
211# ifndef warn
212# define warn(mess, arg) fprintf(stderr, (mess), (arg));
e8bc2b5c
GS
213# endif
214# ifdef DEBUG_m
215# undef DEBUG_m
216# endif
217# define DEBUG_m(a)
218# ifdef DEBUGGING
219# undef DEBUGGING
220# endif
221#endif
222
223#ifndef MUTEX_LOCK
224# define MUTEX_LOCK(l)
225#endif
226
227#ifndef MUTEX_UNLOCK
228# define MUTEX_UNLOCK(l)
229#endif
230
760ac839 231#ifdef DEBUGGING
e8bc2b5c 232# undef DEBUG_m
4a33f861 233# define DEBUG_m(a) if (PL_debug & 128) a
760ac839
LW
234#endif
235
135863df
AB
236/* I don't much care whether these are defined in sys/types.h--LAW */
237
238#define u_char unsigned char
239#define u_int unsigned int
e8bc2b5c
GS
240
241#ifdef HAS_QUAD
242# define u_bigint UV /* Needs to eat *void. */
243#else /* needed? */
244# define u_bigint unsigned long /* Needs to eat *void. */
245#endif
246
135863df 247#define u_short unsigned short
8d063cd8 248
cf5c4ad8
PP
249/* 286 and atarist like big chunks, which gives too much overhead. */
250#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 251# undef PACK_MALLOC
cf5c4ad8
PP
252#endif
253
8d063cd8 254/*
cf5c4ad8
PP
255 * The description below is applicable if PACK_MALLOC is not defined.
256 *
8d063cd8
LW
257 * The overhead on a block is at least 4 bytes. When free, this space
258 * contains a pointer to the next free block, and the bottom two bits must
259 * be zero. When in use, the first byte is set to MAGIC, and the second
260 * byte is the size index. The remaining bytes are for alignment.
261 * If range checking is enabled and the size of the block fits
262 * in two bytes, then the top two bytes hold the size of the requested block
263 * plus the range checking words, and the header word MINUS ONE.
264 */
265union overhead {
266 union overhead *ov_next; /* when free */
85e6fe83 267#if MEM_ALIGNBYTES > 4
c623bd54 268 double strut; /* alignment problems */
a687059c 269#endif
8d063cd8
LW
270 struct {
271 u_char ovu_magic; /* magic number */
272 u_char ovu_index; /* bucket # */
273#ifdef RCHECK
274 u_short ovu_size; /* actual block size */
275 u_int ovu_rmagic; /* range magic number */
276#endif
277 } ovu;
278#define ov_magic ovu.ovu_magic
279#define ov_index ovu.ovu_index
280#define ov_size ovu.ovu_size
281#define ov_rmagic ovu.ovu_rmagic
282};
283
760ac839 284#ifdef DEBUGGING
d720c441 285static void botch _((char *diag, char *s));
a0d0e21e
LW
286#endif
287static void morecore _((int bucket));
288static int findbucket _((union overhead *freep, int srchlen));
28ac10b1 289static void add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip);
a0d0e21e 290
8d063cd8
LW
291#define MAGIC 0xff /* magic # on accounting info */
292#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
293#define RMAGIC_C 0x55 /* magic # on range info */
294
8d063cd8 295#ifdef RCHECK
c2a5c2d2
IZ
296# define RSLOP sizeof (u_int)
297# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 298# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 299# else
e8bc2b5c 300# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 301# endif
8d063cd8 302#else
c2a5c2d2 303# define RSLOP 0
8d063cd8
LW
304#endif
305
e8bc2b5c
GS
306#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
307# undef BUCKETS_ROOT2
308#endif
309
310#ifdef BUCKETS_ROOT2
311# define BUCKET_TABLE_SHIFT 2
312# define BUCKET_POW2_SHIFT 1
313# define BUCKETS_PER_POW2 2
314#else
315# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
316# define BUCKET_POW2_SHIFT 0
317# define BUCKETS_PER_POW2 1
318#endif
319
274c7500
IZ
320#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
321/* Figure out the alignment of void*. */
322struct aligner {
323 char c;
324 void *p;
325};
326# define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
327#else
328# define ALIGN_SMALL MEM_ALIGNBYTES
329#endif
330
331#define IF_ALIGN_8(yes,no) ((ALIGN_SMALL>4) ? (yes) : (no))
332
e8bc2b5c
GS
333#ifdef BUCKETS_ROOT2
334# define MAX_BUCKET_BY_TABLE 13
335static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
336 {
337 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
338 };
339# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
340# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
341 ? buck_size[i] \
342 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
343 - MEM_OVERHEAD(i) \
344 + POW2_OPTIMIZE_SURPLUS(i)))
345#else
346# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
347# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
348#endif
349
350
cf5c4ad8 351#ifdef PACK_MALLOC
e8bc2b5c
GS
352/* In this case it is assumed that if we do sbrk() in 2K units, we
353 * will get 2K aligned arenas (at least after some initial
354 * alignment). The bucket number of the given subblock is on the start
355 * of 2K arena which contains the subblock. Several following bytes
356 * contain the magic numbers for the subblocks in the block.
cf5c4ad8
PP
357 *
358 * Sizes of chunks are powers of 2 for chunks in buckets <=
359 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
360 * get alignment right).
361 *
e8bc2b5c
GS
362 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
363 * starts of all the chunks in a 2K arena are in different
364 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
365 * boundary of 2K block, this means that sizeof(union
366 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
367 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
368 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
369 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8
PP
370 *
371 * Note that with the above assumption we automatically have enough
372 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c
GS
373 * overlay union overhead over the chunk, thus the start of small chunks
374 * is immediately overwritten after freeing. */
375# define MAX_PACKED_POW2 6
376# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
377# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
378# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
379# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
380# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8
PP
381# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
382# define OV_INDEX(block) (*OV_INDEXp(block))
383# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
384 (TWOK_SHIFT(block)>> \
385 (bucket>>BUCKET_POW2_SHIFT)) + \
386 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
387 /* A bucket can have a shift smaller than it size, we need to
388 shift its magic number so it will not overwrite index: */
389# ifdef BUCKETS_ROOT2
390# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
391# else
392# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
393# endif
cf5c4ad8
PP
394# define CHUNK_SHIFT 0
395
e8bc2b5c
GS
396/* Number of active buckets of given ordinal. */
397#ifdef IGNORE_SMALL_BAD_FREE
398#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
399# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
400 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
401 : n_blks[bucket] )
402#else
403# define N_BLKS(bucket) n_blks[bucket]
404#endif
405
406static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
407 {
408# if BUCKETS_PER_POW2==1
409 0, 0,
410 (MIN_BUC_POW2==2 ? 384 : 0),
411 224, 120, 62, 31, 16, 8, 4, 2
412# else
413 0, 0, 0, 0,
414 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
415 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
416# endif
417 };
418
419/* Shift of the first bucket with the given ordinal inside 2K chunk. */
420#ifdef IGNORE_SMALL_BAD_FREE
421# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
422 ? ((1<<LOG_OF_MIN_ARENA) \
423 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
424 : blk_shift[bucket])
425#else
426# define BLK_SHIFT(bucket) blk_shift[bucket]
427#endif
428
429static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
430 {
431# if BUCKETS_PER_POW2==1
432 0, 0,
433 (MIN_BUC_POW2==2 ? 512 : 0),
434 256, 128, 64, 64, /* 8 to 64 */
435 16*sizeof(union overhead),
436 8*sizeof(union overhead),
437 4*sizeof(union overhead),
438 2*sizeof(union overhead),
439# else
440 0, 0, 0, 0,
441 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
442 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
443 16*sizeof(union overhead), 16*sizeof(union overhead),
444 8*sizeof(union overhead), 8*sizeof(union overhead),
445 4*sizeof(union overhead), 4*sizeof(union overhead),
446 2*sizeof(union overhead), 2*sizeof(union overhead),
447# endif
448 };
cf5c4ad8 449
cf5c4ad8
PP
450#else /* !PACK_MALLOC */
451
452# define OV_MAGIC(block,bucket) (block)->ov_magic
453# define OV_INDEX(block) (block)->ov_index
454# define CHUNK_SHIFT 1
e8bc2b5c 455# define MAX_PACKED -1
cf5c4ad8
PP
456#endif /* !PACK_MALLOC */
457
e8bc2b5c
GS
458#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
459
460#ifdef PACK_MALLOC
461# define MEM_OVERHEAD(bucket) \
462 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
463# ifdef SMALL_BUCKET_VIA_TABLE
464# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
465# define START_SHIFT MAX_PACKED_POW2
466# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
467# define SIZE_TABLE_MAX 80
468# else
469# define SIZE_TABLE_MAX 64
470# endif
471static char bucket_of[] =
472 {
473# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
474 /* 0 to 15 in 4-byte increments. */
475 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
476 6, /* 8 */
274c7500 477 IF_ALIGN_8(8,7), 8, /* 16/12, 16 */
e8bc2b5c
GS
478 9, 9, 10, 10, /* 24, 32 */
479 11, 11, 11, 11, /* 48 */
480 12, 12, 12, 12, /* 64 */
481 13, 13, 13, 13, /* 80 */
482 13, 13, 13, 13 /* 80 */
483# else /* !BUCKETS_ROOT2 */
484 /* 0 to 15 in 4-byte increments. */
485 (sizeof(void*) > 4 ? 3 : 2),
486 3,
487 4, 4,
488 5, 5, 5, 5,
489 6, 6, 6, 6,
490 6, 6, 6, 6
491# endif /* !BUCKETS_ROOT2 */
492 };
493# else /* !SMALL_BUCKET_VIA_TABLE */
494# define START_SHIFTS_BUCKET MIN_BUCKET
495# define START_SHIFT (MIN_BUC_POW2 - 1)
496# endif /* !SMALL_BUCKET_VIA_TABLE */
497#else /* !PACK_MALLOC */
498# define MEM_OVERHEAD(bucket) M_OVERHEAD
499# ifdef SMALL_BUCKET_VIA_TABLE
500# undef SMALL_BUCKET_VIA_TABLE
501# endif
502# define START_SHIFTS_BUCKET MIN_BUCKET
503# define START_SHIFT (MIN_BUC_POW2 - 1)
504#endif /* !PACK_MALLOC */
cf5c4ad8 505
8d063cd8 506/*
55497cff
PP
507 * Big allocations are often of the size 2^n bytes. To make them a
508 * little bit better, make blocks of size 2^n+pagesize for big n.
509 */
510
511#ifdef TWO_POT_OPTIMIZE
512
5f05dabc
PP
513# ifndef PERL_PAGESIZE
514# define PERL_PAGESIZE 4096
515# endif
e8bc2b5c
GS
516# ifndef FIRST_BIG_POW2
517# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 518# endif
e8bc2b5c 519# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff
PP
520/* If this value or more, check against bigger blocks. */
521# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
522/* If less than this value, goes into 2^n-overhead-block. */
523# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
524
e8bc2b5c
GS
525# define POW2_OPTIMIZE_ADJUST(nbytes) \
526 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
527# define POW2_OPTIMIZE_SURPLUS(bucket) \
528 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
529
530#else /* !TWO_POT_OPTIMIZE */
531# define POW2_OPTIMIZE_ADJUST(nbytes)
532# define POW2_OPTIMIZE_SURPLUS(bucket) 0
533#endif /* !TWO_POT_OPTIMIZE */
534
535#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
536# define BARK_64K_LIMIT(what,nbytes,size) \
537 if (nbytes > 0xffff) { \
538 PerlIO_printf(PerlIO_stderr(), \
539 "%s too large: %lx\n", what, size); \
540 my_exit(1); \
541 }
542#else /* !HAS_64K_LIMIT || !PERL_CORE */
543# define BARK_64K_LIMIT(what,nbytes,size)
544#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 545
e8bc2b5c
GS
546#ifndef MIN_SBRK
547# define MIN_SBRK 2048
548#endif
549
550#ifndef FIRST_SBRK
d720c441 551# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
552#endif
553
554/* Minimal sbrk in percents of what is already alloced. */
555#ifndef MIN_SBRK_FRAC
556# define MIN_SBRK_FRAC 3
557#endif
558
559#ifndef SBRK_ALLOW_FAILURES
560# define SBRK_ALLOW_FAILURES 3
561#endif
55497cff 562
e8bc2b5c
GS
563#ifndef SBRK_FAILURE_PRICE
564# define SBRK_FAILURE_PRICE 50
55497cff
PP
565#endif
566
e8bc2b5c
GS
567#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)
568
569# ifndef BIG_SIZE
570# define BIG_SIZE (1<<16) /* 64K */
571# endif
572
3541dd58 573#ifdef I_MACH_CTHREADS
772fe5b3
HM
574# undef MUTEX_LOCK
575# define MUTEX_LOCK(m) STMT_START { if (*m) mutex_lock(*m); } STMT_END
576# undef MUTEX_UNLOCK
577# define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
3541dd58
HM
578#endif
579
55497cff
PP
580static char *emergency_buffer;
581static MEM_SIZE emergency_buffer_size;
df0003d4 582static Malloc_t emergency_sbrk(MEM_SIZE size);
55497cff 583
/* Fallback "sbrk" used when the real sbrk() fails: hand out memory from
 * the emergency pool the user pre-allocated in $^M.  Called with
 * PL_malloc_mutex held; unlocks it before croak()ing so the die can run.
 * size  - number of bytes the failed allocation needed.
 * Returns a chunk of at least `size` bytes, or (char*)-1 to signal
 * "no reserve left, die now". */
static Malloc_t
emergency_sbrk(MEM_SIZE size)
{
    /* Round the request up to a whole arena (multiple of 1<<LOG_OF_MIN_ARENA). */
    MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;

    if (size >= BIG_SIZE) {
	/* "Large" requests never touch $^M - give the possibility to recover: */
	MUTEX_UNLOCK(&PL_malloc_mutex);
	croak("Out of memory during \"large\" request for %i bytes", size);
    }

    if (emergency_buffer_size >= rsize) {
	/* Reserve already armed: carve rsize bytes off its front. */
	char *old = emergency_buffer;

	emergency_buffer_size -= rsize;
	emergency_buffer += rsize;
	return old;
    } else {
	dTHR;
	/* First offense, give a possibility to recover by dieing. */
	/* No malloc involved here: */
	GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
	SV *sv;
	char *pv;
	int have = 0;	/* true if a too-small remainder was recycled */

	if (emergency_buffer_size) {
	    /* Remainder too small for this request - return it to the
	       free-chunk chain rather than leak it. */
	    add_to_chain(emergency_buffer, emergency_buffer_size, 0);
	    emergency_buffer_size = 0;
	    emergency_buffer = Nullch;
	    have = 1;
	}
	/* $^M may be stored under the control-char name "\015" instead. */
	if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
	if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
	    || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD)) {
	    if (have)
		goto do_croak;	/* we freed something above, so croak is safe */
	    return (char *)-1;	/* Now die die die... */
	}
	/* Got it, now detach SvPV: */
	pv = SvPV(sv, PL_na);
	/* Check alignment: the buffer (minus its overhead header) must sit
	   on an arena boundary or the allocator's masking tricks break. */
	if (((UV)(pv - sizeof(union overhead))) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
	    PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
	    return (char *)-1;	/* die die die */
	}

	/* Steal the string buffer from the SV and make it our pool. */
	emergency_buffer = pv - sizeof(union overhead);
	emergency_buffer_size = malloced_size(pv) + M_OVERHEAD;
	SvPOK_off(sv);
	SvPVX(sv) = Nullch;
	SvCUR(sv) = SvLEN(sv) = 0;
    }
  do_croak:
    MUTEX_UNLOCK(&PL_malloc_mutex);
    croak("Out of memory during request for %i bytes", size);
}
641
e8bc2b5c 642#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff 643# define emergency_sbrk(size) -1
e8bc2b5c 644#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff
PP
645
646/*
e8bc2b5c 647 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
648 * smallest allocatable block is 8 bytes. The overhead information
649 * precedes the data area returned to the user.
650 */
e8bc2b5c 651#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 652static union overhead *nextf[NBUCKETS];
cf5c4ad8
PP
653
654#ifdef USE_PERL_SBRK
655#define sbrk(a) Perl_sbrk(a)
52082926 656Malloc_t Perl_sbrk _((int size));
8ac85365
NIS
657#else
658#ifdef DONT_DECLARE_STD
659#ifdef I_UNISTD
660#include <unistd.h>
661#endif
cf5c4ad8 662#else
52082926 663extern Malloc_t sbrk(int);
8ac85365 664#endif
cf5c4ad8 665#endif
8d063cd8 666
c07a80fd 667#ifdef DEBUGGING_MSTATS
8d063cd8
LW
668/*
669 * nmalloc[i] is the difference between the number of mallocs and frees
670 * for a given block size.
671 */
672static u_int nmalloc[NBUCKETS];
5f05dabc
PP
673static u_int sbrk_slack;
674static u_int start_slack;
8d063cd8
LW
675#endif
676
e8bc2b5c
GS
677static u_int goodsbrk;
678
760ac839 679#ifdef DEBUGGING
3541dd58
HM
680#undef ASSERT
681#define ASSERT(p,diag) if (!(p)) botch(diag,STRINGIFY(p)); else
ee0007ab 682static void
d720c441 683botch(char *diag, char *s)
8d063cd8 684{
d720c441 685 PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
3028581b 686 PerlProc_abort();
8d063cd8
LW
687}
688#else
3541dd58 689#define ASSERT(p, diag)
8d063cd8
LW
690#endif
691
/* Allocate nbytes of memory.  Maps the request to a power-of-two (or
 * root-2 intermediate) bucket, refills the bucket's free list from the
 * system via morecore() when empty, and pops the head block.
 * Returns a pointer to the usable region, or NULL (non-PERL_CORE builds)
 * on exhaustion; PERL_CORE builds exit via my_exit() unless PL_nomemok. */
Malloc_t
malloc(register size_t nbytes)
{
  	register union overhead *p;
  	register int bucket;
  	register MEM_SIZE shiftr;

#if defined(DEBUGGING) || defined(RCHECK)
	/* Keep the original request; nbytes is clobbered by rounding below. */
	MEM_SIZE size = nbytes;
#endif

	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
	if ((long)nbytes < 0)
	    croak("%s", "panic: malloc");
#endif

	MUTEX_LOCK(&PL_malloc_mutex);
  	/*
	 * Convert amount of memory requested into
	 * closest block size stored in hash buckets
	 * which satisfies request.  Account for
	 * space used per block for accounting.
	 */
#ifdef PACK_MALLOC
# ifdef SMALL_BUCKET_VIA_TABLE
	/* Small sizes: O(1) table lookup instead of the shift loop. */
	if (nbytes == 0)
	    bucket = MIN_BUCKET;
	else if (nbytes <= SIZE_TABLE_MAX) {
	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
	} else
# else
	if (nbytes == 0)
	    nbytes = 1;
	/* Packed buckets carry no per-block overhead; skip the adjustment. */
	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	else
# endif
#endif
	{
	    POW2_OPTIMIZE_ADJUST(nbytes);
	    nbytes += M_OVERHEAD;
	    nbytes = (nbytes + 3) &~ 3;	/* round up to 4-byte multiple */
	  do_shifts:
	    shiftr = (nbytes - 1) >> START_SHIFT;
	    bucket = START_SHIFTS_BUCKET;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		bucket += BUCKETS_PER_POW2;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
  	if (nextf[bucket] == NULL)
  		morecore(bucket);
  	if ((p = nextf[bucket]) == NULL) {
		/* morecore() failed too - out of memory. */
		MUTEX_UNLOCK(&PL_malloc_mutex);
#ifdef PERL_CORE
		if (!PL_nomemok) {
		    PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
		    my_exit(1);
		}
#else
  		return (NULL);
#endif
	}

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) malloc %ld bytes\n",
			      (unsigned long)(p+1), (unsigned long)(PL_an++),
			      (long)size));

	/* remove from linked list */
#if defined(RCHECK)
	/* Free-list corruption check: the head must be suitably aligned. */
	if (((UV)p) & (MEM_ALIGNBYTES - 1))
	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
		(unsigned long)*((int*)p),(unsigned long)p);
#endif
  	nextf[bucket] = p->ov_next;
#ifdef IGNORE_SMALL_BAD_FREE
	/* Small packed buckets store no magic; only tag checkable ones. */
	if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
	    OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
	OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
  	/*
  	 * Record allocated size of block and
  	 * bound space with magic numbers.
  	 */
  	p->ov_rmagic = RMAGIC;
  	if (bucket <= MAX_SHORT_BUCKET) {
	    int i;

	    nbytes = size + M_OVERHEAD;
	    p->ov_size = nbytes - 1;
	    /* Fill the padding up to the next 4-byte boundary with RMAGIC_C
	       so byte-level overruns past the request are detectable. */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--)
		    *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    /* Trailing full-word magic just past the (rounded) request. */
	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
	}
#endif
	MUTEX_UNLOCK(&PL_malloc_mutex);
	/* CHUNK_SHIFT skips the overhead header when it precedes the data. */
  	return ((Malloc_t)(p + CHUNK_SHIFT));
}
801
e8bc2b5c
GS
/* sbrk() bookkeeping shared by getpages() and friends. */
static char *last_sbrk_top;	/* end of the last region sbrk() returned */
static char *last_op;		/* This arena can be easily extended. */
static int sbrked_remains;	/* unused tail bytes of the last sbrk() */
static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;

#ifdef DEBUGGING_MSTATS
static int sbrks;		/* number of sbrk() calls, for mstats */
#endif

/* Free chunks that are not on any bucket free list (odd-sized leftovers)
   are kept on this singly-linked chain; the link record is overlaid on
   the chunk's own memory. */
struct chunk_chain_s {
    struct chunk_chain_s *next;
    MEM_SIZE size;
};
static struct chunk_chain_s *chunk_chain;	/* head of the chain */
static int n_chunks;				/* chain length */
static char max_bucket;				/* highest bucket ever used */
818
819/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
820static void *
821get_from_chain(MEM_SIZE size)
822{
823 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
824 struct chunk_chain_s **oldgoodp = NULL;
825 long min_remain = LONG_MAX;
826
827 while (elt) {
828 if (elt->size >= size) {
829 long remains = elt->size - size;
830 if (remains >= 0 && remains < min_remain) {
831 oldgoodp = oldp;
832 min_remain = remains;
833 }
834 if (remains == 0) {
835 break;
836 }
837 }
838 oldp = &( elt->next );
839 elt = elt->next;
840 }
841 if (!oldgoodp) return NULL;
842 if (min_remain) {
843 void *ret = *oldgoodp;
844 struct chunk_chain_s *next = (*oldgoodp)->next;
845
846 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
847 (*oldgoodp)->size = min_remain;
848 (*oldgoodp)->next = next;
849 return ret;
850 } else {
851 void *ret = *oldgoodp;
852 *oldgoodp = (*oldgoodp)->next;
853 n_chunks--;
854 return ret;
855 }
856}
857
858static void
859add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
860{
861 struct chunk_chain_s *next = chunk_chain;
862 char *cp = (char*)p;
863
864 cp += chip;
865 chunk_chain = (struct chunk_chain_s *)cp;
866 chunk_chain->size = size - chip;
867 chunk_chain->next = next;
868 n_chunks++;
869}
870
871static void *
872get_from_bigger_buckets(int bucket, MEM_SIZE size)
873{
874 int price = 1;
875 static int bucketprice[NBUCKETS];
876 while (bucket <= max_bucket) {
877 /* We postpone stealing from bigger buckets until we want it
878 often enough. */
879 if (nextf[bucket] && bucketprice[bucket]++ >= price) {
880 /* Steal it! */
881 void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
882 bucketprice[bucket] = 0;
883 if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
884 last_op = NULL; /* Disable optimization */
885 }
886 nextf[bucket] = nextf[bucket]->ov_next;
887#ifdef DEBUGGING_MSTATS
888 nmalloc[bucket]--;
889 start_slack -= M_OVERHEAD;
890#endif
891 add_to_chain(ret, (BUCKET_SIZE(bucket) +
892 POW2_OPTIMIZE_SURPLUS(bucket)),
893 size);
894 return ret;
895 }
896 bucket++;
897 }
898 return NULL;
899}
900
fa423c5b
IZ
901static union overhead *
902getpages(int needed, int *nblksp, int bucket)
903{
904 /* Need to do (possibly expensive) system call. Try to
905 optimize it for rare calling. */
906 MEM_SIZE require = needed - sbrked_remains;
907 char *cp;
908 union overhead *ovp;
909 int slack = 0;
910
911 if (sbrk_good > 0) {
912 if (!last_sbrk_top && require < FIRST_SBRK)
913 require = FIRST_SBRK;
914 else if (require < MIN_SBRK) require = MIN_SBRK;
915
916 if (require < goodsbrk * MIN_SBRK_FRAC / 100)
917 require = goodsbrk * MIN_SBRK_FRAC / 100;
918 require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
919 } else {
920 require = needed;
921 last_sbrk_top = 0;
922 sbrked_remains = 0;
923 }
924
925 DEBUG_m(PerlIO_printf(Perl_debug_log,
926 "sbrk(%ld) for %ld-byte-long arena\n",
927 (long)require, (long) needed));
928 cp = (char *)sbrk(require);
929#ifdef DEBUGGING_MSTATS
930 sbrks++;
931#endif
932 if (cp == last_sbrk_top) {
933 /* Common case, anything is fine. */
934 sbrk_good++;
935 ovp = (union overhead *) (cp - sbrked_remains);
936 sbrked_remains = require - (needed - sbrked_remains);
937 } else if (cp == (char *)-1) { /* no more room! */
938 ovp = (union overhead *)emergency_sbrk(needed);
939 if (ovp == (union overhead *)-1)
940 return 0;
941 return ovp;
942 } else { /* Non-continuous or first sbrk(). */
943 long add = sbrked_remains;
944 char *newcp;
945
946 if (sbrked_remains) { /* Put rest into chain, we
947 cannot use it right now. */
948 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
949 sbrked_remains, 0);
950 }
951
952 /* Second, check alignment. */
953 slack = 0;
954
955#ifndef atarist /* on the atari we dont have to worry about this */
956# ifndef I286 /* The sbrk(0) call on the I286 always returns the next segment */
957
958 /* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
959 if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
960 slack = (0x800 >> CHUNK_SHIFT)
961 - ((UV)cp & (0x7FF >> CHUNK_SHIFT));
962 add += slack;
963 }
964# endif
965#endif /* atarist */
966
967 if (add) {
968 DEBUG_m(PerlIO_printf(Perl_debug_log,
969 "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
970 (long)add, (long) slack,
971 (long) sbrked_remains));
972 newcp = (char *)sbrk(add);
973#if defined(DEBUGGING_MSTATS)
974 sbrks++;
975 sbrk_slack += add;
976#endif
977 if (newcp != cp + require) {
978 /* Too bad: even rounding sbrk() is not continuous.*/
979 DEBUG_m(PerlIO_printf(Perl_debug_log,
980 "failed to fix bad sbrk()\n"));
981#ifdef PACK_MALLOC
982 if (slack) {
51dc0457 983 MUTEX_UNLOCK(&PL_malloc_mutex);
fa423c5b
IZ
984 croak("%s", "panic: Off-page sbrk");
985 }
986#endif
987 if (sbrked_remains) {
988 /* Try again. */
989#if defined(DEBUGGING_MSTATS)
990 sbrk_slack += require;
991#endif
992 require = needed;
993 DEBUG_m(PerlIO_printf(Perl_debug_log,
994 "straight sbrk(%ld)\n",
995 (long)require));
996 cp = (char *)sbrk(require);
997#ifdef DEBUGGING_MSTATS
998 sbrks++;
999#endif
1000 if (cp == (char *)-1)
1001 return 0;
1002 }
1003 sbrk_good = -1; /* Disable optimization!
1004 Continue with not-aligned... */
1005 } else {
1006 cp += slack;
1007 require += sbrked_remains;
1008 }
1009 }
1010
1011 if (last_sbrk_top) {
1012 sbrk_good -= SBRK_FAILURE_PRICE;
1013 }
1014
1015 ovp = (union overhead *) cp;
1016 /*
1017 * Round up to minimum allocation size boundary
1018 * and deduct from block count to reflect.
1019 */
1020
1021#ifndef I286 /* Again, this should always be ok on an 80286 */
1022 if ((UV)ovp & 7) {
1023 ovp = (union overhead *)(((UV)ovp + 8) & ~7);
1024 DEBUG_m(PerlIO_printf(Perl_debug_log,
1025 "fixing sbrk(): %d bytes off machine alignement\n",
1026 (int)((UV)ovp & 7)));
1027 (*nblksp)--;
1028# if defined(DEBUGGING_MSTATS)
1029 /* This is only approx. if TWO_POT_OPTIMIZE: */
1030 sbrk_slack += (1 << bucket);
1031# endif
1032 }
1033#endif
1034 sbrked_remains = require - needed;
1035 }
1036 last_sbrk_top = cp + require;
1037 last_op = (char*) cp;
1038#ifdef DEBUGGING_MSTATS
1039 goodsbrk += require;
1040#endif
1041 return ovp;
1042}
1043
1044static int
1045getpages_adjacent(int require)
1046{
1047 if (require <= sbrked_remains) {
1048 sbrked_remains -= require;
1049 } else {
1050 char *cp;
1051
1052 require -= sbrked_remains;
1053 /* We do not try to optimize sbrks here, we go for place. */
1054 cp = (char*) sbrk(require);
1055#ifdef DEBUGGING_MSTATS
1056 sbrks++;
1057 goodsbrk += require;
1058#endif
1059 if (cp == last_sbrk_top) {
1060 sbrked_remains = 0;
1061 last_sbrk_top = cp + require;
1062 } else {
28ac10b1
IZ
1063 if (cp == (char*)-1) { /* Out of memory */
1064#ifdef DEBUGGING_MSTATS
1065 goodsbrk -= require;
1066#endif
1067 return 0;
1068 }
fa423c5b
IZ
1069 /* Report the failure: */
1070 if (sbrked_remains)
1071 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1072 sbrked_remains, 0);
1073 add_to_chain((void*)cp, require, 0);
1074 sbrk_good -= SBRK_FAILURE_PRICE;
1075 sbrked_remains = 0;
1076 last_sbrk_top = 0;
1077 last_op = 0;
1078 return 0;
1079 }
1080 }
1081
1082 return 1;
1083}
1084
8d063cd8
LW
1085/*
1086 * Allocate more memory to the indicated bucket.
1087 */
a0d0e21e 1088static void
8ac85365 1089morecore(register int bucket)
8d063cd8 1090{
72aaf631 1091 register union overhead *ovp;
8d063cd8 1092 register int rnu; /* 2^rnu bytes will be requested */
fa423c5b 1093 int nblks; /* become nblks blocks of the desired size */
bbce6d69 1094 register MEM_SIZE siz, needed;
8d063cd8
LW
1095
1096 if (nextf[bucket])
1097 return;
e8bc2b5c 1098 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
51dc0457 1099 MUTEX_UNLOCK(&PL_malloc_mutex);
d720c441 1100 croak("%s", "Out of memory during ridiculously large request");
55497cff 1101 }
d720c441 1102 if (bucket > max_bucket)
e8bc2b5c 1103 max_bucket = bucket;
d720c441 1104
e8bc2b5c
GS
1105 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1106 ? LOG_OF_MIN_ARENA
1107 : (bucket >> BUCKET_POW2_SHIFT) );
1108 /* This may be overwritten later: */
1109 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1110 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1111 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1112 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1113 nextf[rnu << BUCKET_POW2_SHIFT]
1114 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1115#ifdef DEBUGGING_MSTATS
1116 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1117 start_slack -= M_OVERHEAD;
1118#endif
1119 DEBUG_m(PerlIO_printf(Perl_debug_log,
1120 "stealing %ld bytes from %ld arena\n",
1121 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1122 } else if (chunk_chain
1123 && (ovp = (union overhead*) get_from_chain(needed))) {
1124 DEBUG_m(PerlIO_printf(Perl_debug_log,
1125 "stealing %ld bytes from chain\n",
1126 (long) needed));
d720c441
IZ
1127 } else if ( (ovp = (union overhead*)
1128 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1129 needed)) ) {
e8bc2b5c
GS
1130 DEBUG_m(PerlIO_printf(Perl_debug_log,
1131 "stealing %ld bytes from bigger buckets\n",
1132 (long) needed));
1133 } else if (needed <= sbrked_remains) {
1134 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1135 sbrked_remains -= needed;
1136 last_op = (char*)ovp;
fa423c5b
IZ
1137 } else
1138 ovp = getpages(needed, &nblks, bucket);
e8bc2b5c 1139
fa423c5b
IZ
1140 if (!ovp)
1141 return;
e8bc2b5c 1142
8d063cd8
LW
1143 /*
1144 * Add new memory allocated to that on
1145 * free list for this hash bucket.
1146 */
e8bc2b5c 1147 siz = BUCKET_SIZE(bucket);
cf5c4ad8 1148#ifdef PACK_MALLOC
72aaf631 1149 *(u_char*)ovp = bucket; /* Fill index. */
e8bc2b5c
GS
1150 if (bucket <= MAX_PACKED) {
1151 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1152 nblks = N_BLKS(bucket);
cf5c4ad8 1153# ifdef DEBUGGING_MSTATS
e8bc2b5c 1154 start_slack += BLK_SHIFT(bucket);
cf5c4ad8 1155# endif
e8bc2b5c
GS
1156 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1157 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
cf5c4ad8 1158 siz -= sizeof(union overhead);
72aaf631 1159 } else ovp++; /* One chunk per block. */
e8bc2b5c 1160#endif /* PACK_MALLOC */
72aaf631 1161 nextf[bucket] = ovp;
5f05dabc
PP
1162#ifdef DEBUGGING_MSTATS
1163 nmalloc[bucket] += nblks;
e8bc2b5c
GS
1164 if (bucket > MAX_PACKED) {
1165 start_slack += M_OVERHEAD * nblks;
1166 }
5f05dabc 1167#endif
8d063cd8 1168 while (--nblks > 0) {
72aaf631
MB
1169 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1170 ovp = (union overhead *)((caddr_t)ovp + siz);
8d063cd8 1171 }
8595d6f1 1172 /* Not all sbrks return zeroed memory.*/
72aaf631 1173 ovp->ov_next = (union overhead *)NULL;
cf5c4ad8 1174#ifdef PACK_MALLOC
e8bc2b5c
GS
1175 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1176 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1177 nextf[7*BUCKETS_PER_POW2] =
1178 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1179 - sizeof(union overhead));
1180 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
cf5c4ad8
PP
1181 }
1182#endif /* !PACK_MALLOC */
8d063cd8
LW
1183}
1184
94b6baf5 1185Free_t
8ac85365 1186free(void *mp)
8d063cd8 1187{
ee0007ab 1188 register MEM_SIZE size;
72aaf631 1189 register union overhead *ovp;
352d5a3a 1190 char *cp = (char*)mp;
cf5c4ad8
PP
1191#ifdef PACK_MALLOC
1192 u_char bucket;
1193#endif
8d063cd8 1194
e8bc2b5c
GS
1195 DEBUG_m(PerlIO_printf(Perl_debug_log,
1196 "0x%lx: (%05lu) free\n",
4a33f861 1197 (unsigned long)cp, (unsigned long)(PL_an++)));
45d8adaa 1198
cf5c4ad8
PP
1199 if (cp == NULL)
1200 return;
72aaf631 1201 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c 1202 - sizeof (union overhead) * CHUNK_SHIFT);
cf5c4ad8 1203#ifdef PACK_MALLOC
72aaf631 1204 bucket = OV_INDEX(ovp);
cf5c4ad8 1205#endif
e8bc2b5c
GS
1206#ifdef IGNORE_SMALL_BAD_FREE
1207 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1208 && (OV_MAGIC(ovp, bucket) != MAGIC))
1209#else
1210 if (OV_MAGIC(ovp, bucket) != MAGIC)
1211#endif
1212 {
68dc0745 1213 static int bad_free_warn = -1;
cf5c4ad8 1214 if (bad_free_warn == -1) {
5fd9e9a4 1215 char *pbf = PerlEnv_getenv("PERL_BADFREE");
cf5c4ad8
PP
1216 bad_free_warn = (pbf) ? atoi(pbf) : 1;
1217 }
1218 if (!bad_free_warn)
1219 return;
8990e307 1220#ifdef RCHECK
a687059c 1221 warn("%s free() ignored",
72aaf631 1222 ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
8990e307 1223#else
d720c441 1224 warn("%s", "Bad free() ignored");
8990e307 1225#endif
8d063cd8 1226 return; /* sanity */
e8bc2b5c 1227 }
51dc0457 1228 MUTEX_LOCK(&PL_malloc_mutex);
8d063cd8 1229#ifdef RCHECK
3541dd58 1230 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
e8bc2b5c
GS
1231 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1232 int i;
1233 MEM_SIZE nbytes = ovp->ov_size + 1;
1234
1235 if ((i = nbytes & 3)) {
1236 i = 4 - i;
1237 while (i--) {
3541dd58 1238 ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
d720c441 1239 == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1240 }
1241 }
1242 nbytes = (nbytes + 3) &~ 3;
3541dd58 1243 ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
e8bc2b5c 1244 }
72aaf631 1245 ovp->ov_rmagic = RMAGIC - 1;
8d063cd8 1246#endif
3541dd58 1247 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
72aaf631
MB
1248 size = OV_INDEX(ovp);
1249 ovp->ov_next = nextf[size];
1250 nextf[size] = ovp;
51dc0457 1251 MUTEX_UNLOCK(&PL_malloc_mutex);
8d063cd8
LW
1252}
1253
1254/*
1255 * When a program attempts "storage compaction" as mentioned in the
1256 * old malloc man page, it realloc's an already freed block. Usually
1257 * this is the last block it freed; occasionally it might be farther
1258 * back. We have to search all the free lists for the block in order
1259 * to determine its bucket: 1st we make one pass thru the lists
1260 * checking only the first block in each; if that fails we search
378cc40b 1261 * ``reall_srchlen'' blocks in each list for a match (the variable
8d063cd8
LW
1262 * is extern so the caller can modify it). If that fails we just copy
 1263 * however many bytes were given to realloc() and hope it's not huge.
1264 */
22c35a8c 1265#define reall_srchlen 4 /* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1266
2304df62 1267Malloc_t
8ac85365 1268realloc(void *mp, size_t nbytes)
8d063cd8 1269{
ee0007ab 1270 register MEM_SIZE onb;
72aaf631 1271 union overhead *ovp;
d720c441
IZ
1272 char *res;
1273 int prev_bucket;
e8bc2b5c
GS
1274 register int bucket;
1275 int was_alloced = 0, incr;
352d5a3a 1276 char *cp = (char*)mp;
8d063cd8 1277
e8bc2b5c 1278#if defined(DEBUGGING) || !defined(PERL_CORE)
ee0007ab 1279 MEM_SIZE size = nbytes;
45d8adaa 1280
45d8adaa 1281 if ((long)nbytes < 0)
d720c441 1282 croak("%s", "panic: realloc");
45d8adaa 1283#endif
e8bc2b5c
GS
1284
1285 BARK_64K_LIMIT("Reallocation",nbytes,size);
1286 if (!cp)
1287 return malloc(nbytes);
45d8adaa 1288
51dc0457 1289 MUTEX_LOCK(&PL_malloc_mutex);
72aaf631 1290 ovp = (union overhead *)((caddr_t)cp
e8bc2b5c
GS
1291 - sizeof (union overhead) * CHUNK_SHIFT);
1292 bucket = OV_INDEX(ovp);
1293#ifdef IGNORE_SMALL_BAD_FREE
1294 if ((bucket < FIRST_BUCKET_WITH_CHECK)
1295 || (OV_MAGIC(ovp, bucket) == MAGIC))
1296#else
1297 if (OV_MAGIC(ovp, bucket) == MAGIC)
1298#endif
1299 {
55497cff 1300 was_alloced = 1;
8d063cd8
LW
1301 } else {
1302 /*
1303 * Already free, doing "compaction".
1304 *
1305 * Search for the old block of memory on the
1306 * free list. First, check the most common
1307 * case (last element free'd), then (this failing)
378cc40b 1308 * the last ``reall_srchlen'' items free'd.
8d063cd8
LW
1309 * If all lookups fail, then assume the size of
1310 * the memory block being realloc'd is the
1311 * smallest possible.
1312 */
e8bc2b5c
GS
1313 if ((bucket = findbucket(ovp, 1)) < 0 &&
1314 (bucket = findbucket(ovp, reall_srchlen)) < 0)
1315 bucket = 0;
8d063cd8 1316 }
e8bc2b5c 1317 onb = BUCKET_SIZE_REAL(bucket);
55497cff
PP
1318 /*
1319 * avoid the copy if same size block.
e8bc2b5c
GS
1320 * We are not agressive with boundary cases. Note that it might
1321 * (for a small number of cases) give false negative if
55497cff 1322 * both new size and old one are in the bucket for
e8bc2b5c
GS
1323 * FIRST_BIG_POW2, but the new one is near the lower end.
1324 *
1325 * We do not try to go to 1.5 times smaller bucket so far.
55497cff 1326 */
e8bc2b5c
GS
1327 if (nbytes > onb) incr = 1;
1328 else {
1329#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1330 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1331 nbytes > ( (onb >> 1) - M_OVERHEAD )
1332# ifdef TWO_POT_OPTIMIZE
1333 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1334# endif
1335 )
1336#else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1337 prev_bucket = ( (bucket > MAX_PACKED + 1)
1338 ? bucket - BUCKETS_PER_POW2
1339 : bucket - 1);
1340 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1341#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1342 incr = 0;
1343 else incr = -1;
1344 }
1345 if (!was_alloced
2ce36478 1346#ifdef STRESS_REALLOC
e8bc2b5c 1347 || 1 /* always do it the hard way */
2ce36478 1348#endif
e8bc2b5c
GS
1349 ) goto hard_way;
1350 else if (incr == 0) {
852c2e52 1351 inplace_label:
a687059c
LW
1352#ifdef RCHECK
1353 /*
1354 * Record new allocated size of block and
1355 * bound space with magic numbers.
1356 */
72aaf631 1357 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
e8bc2b5c
GS
1358 int i, nb = ovp->ov_size + 1;
1359
1360 if ((i = nb & 3)) {
1361 i = 4 - i;
1362 while (i--) {
3541dd58 1363 ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
e8bc2b5c
GS
1364 }
1365 }
1366 nb = (nb + 3) &~ 3;
3541dd58 1367 ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
a687059c
LW
1368 /*
1369 * Convert amount of memory requested into
1370 * closest block size stored in hash buckets
1371 * which satisfies request. Account for
1372 * space used per block for accounting.
1373 */
cf5c4ad8 1374 nbytes += M_OVERHEAD;
72aaf631 1375 ovp->ov_size = nbytes - 1;
e8bc2b5c
GS
1376 if ((i = nbytes & 3)) {
1377 i = 4 - i;
1378 while (i--)
1379 *((char *)((caddr_t)ovp + nbytes - RSLOP + i))
1380 = RMAGIC_C;
1381 }
1382 nbytes = (nbytes + 3) &~ 3;
72aaf631 1383 *((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
a687059c
LW
1384 }
1385#endif
45d8adaa 1386 res = cp;
51dc0457 1387 MUTEX_UNLOCK(&PL_malloc_mutex);
42ac124e
IZ
1388 DEBUG_m(PerlIO_printf(Perl_debug_log,
1389 "0x%lx: (%05lu) realloc %ld bytes inplace\n",
1390 (unsigned long)res,(unsigned long)(PL_an++),
1391 (long)size));
e8bc2b5c
GS
1392 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
1393 && (onb > (1 << LOG_OF_MIN_ARENA))) {
1394 MEM_SIZE require, newarena = nbytes, pow;
1395 int shiftr;
1396
1397 POW2_OPTIMIZE_ADJUST(newarena);
1398 newarena = newarena + M_OVERHEAD;
1399 /* newarena = (newarena + 3) &~ 3; */
1400 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
1401 pow = LOG_OF_MIN_ARENA + 1;
1402 /* apart from this loop, this is O(1) */
1403 while (shiftr >>= 1)
1404 pow++;
1405 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
1406 require = newarena - onb - M_OVERHEAD;
1407
fa423c5b 1408 if (getpages_adjacent(require)) {
e8bc2b5c 1409#ifdef DEBUGGING_MSTATS
fa423c5b
IZ
1410 nmalloc[bucket]--;
1411 nmalloc[pow * BUCKETS_PER_POW2]++;
e8bc2b5c 1412#endif
fa423c5b
IZ
1413 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
1414 goto inplace_label;
1415 } else
1416 goto hard_way;
e8bc2b5c
GS
1417 } else {
1418 hard_way:
51dc0457 1419 MUTEX_UNLOCK(&PL_malloc_mutex);
42ac124e
IZ
1420 DEBUG_m(PerlIO_printf(Perl_debug_log,
1421 "0x%lx: (%05lu) realloc %ld bytes the hard way\n",
1422 (unsigned long)cp,(unsigned long)(PL_an++),
1423 (long)size));
e8bc2b5c
GS
1424 if ((res = (char*)malloc(nbytes)) == NULL)
1425 return (NULL);
1426 if (cp != res) /* common optimization */
1427 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
1428 if (was_alloced)
1429 free(cp);
45d8adaa 1430 }
2304df62 1431 return ((Malloc_t)res);
8d063cd8
LW
1432}
1433
1434/*
1435 * Search ``srchlen'' elements of each free list for a block whose
1436 * header starts at ``freep''. If srchlen is -1 search the whole list.
1437 * Return bucket number, or -1 if not found.
1438 */
ee0007ab 1439static int
8ac85365 1440findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1441{
1442 register union overhead *p;
1443 register int i, j;
1444
1445 for (i = 0; i < NBUCKETS; i++) {
1446 j = 0;
1447 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1448 if (p == freep)
1449 return (i);
1450 j++;
1451 }
1452 }
1453 return (-1);
1454}
1455
cf5c4ad8 1456Malloc_t
8ac85365 1457calloc(register size_t elements, register size_t size)
cf5c4ad8
PP
1458{
1459 long sz = elements * size;
1460 Malloc_t p = malloc(sz);
1461
1462 if (p) {
1463 memset((void*)p, 0, sz);
1464 }
1465 return p;
1466}
1467
e8bc2b5c
GS
1468MEM_SIZE
1469malloced_size(void *p)
1470{
8d6dde3e
IZ
1471 union overhead *ovp = (union overhead *)
1472 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
1473 int bucket = OV_INDEX(ovp);
1474#ifdef RCHECK
1475 /* The caller wants to have a complete control over the chunk,
1476 disable the memory checking inside the chunk. */
1477 if (bucket <= MAX_SHORT_BUCKET) {
1478 MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
1479 ovp->ov_size = size + M_OVERHEAD - 1;
1480 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RSLOP)) = RMAGIC;
1481 }
1482#endif
e8bc2b5c
GS
1483 return BUCKET_SIZE_REAL(bucket);
1484}
1485
c07a80fd 1486#ifdef DEBUGGING_MSTATS
e8bc2b5c
GS
1487
1488# ifdef BUCKETS_ROOT2
1489# define MIN_EVEN_REPORT 6
1490# else
1491# define MIN_EVEN_REPORT MIN_BUCKET
1492# endif
8d063cd8
LW
1493/*
1494 * mstats - print out statistics about malloc
1495 *
1496 * Prints two lines of numbers, one showing the length of the free list
1497 * for each size category, the second showing the number of mallocs -
1498 * frees for each size category.
1499 */
ee0007ab 1500void
8ac85365 1501dump_mstats(char *s)
8d063cd8
LW
1502{
1503 register int i, j;
1504 register union overhead *p;
e8bc2b5c 1505 int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
c07a80fd 1506 u_int nfree[NBUCKETS];
e8bc2b5c
GS
1507 int total_chain = 0;
1508 struct chunk_chain_s* nextchain = chunk_chain;
8d063cd8 1509
e8bc2b5c 1510 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
8d063cd8
LW
1511 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
1512 ;
c07a80fd 1513 nfree[i] = j;
e8bc2b5c
GS
1514 totfree += nfree[i] * BUCKET_SIZE_REAL(i);
1515 total += nmalloc[i] * BUCKET_SIZE_REAL(i);
1516 if (nmalloc[i]) {
1517 i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
1518 topbucket = i;
1519 }
c07a80fd
PP
1520 }
1521 if (s)
e8bc2b5c 1522 PerlIO_printf(PerlIO_stderr(),
d720c441 1523 "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
e8bc2b5c 1524 s,
d720c441
IZ
1525 (long)BUCKET_SIZE_REAL(MIN_BUCKET),
1526 (long)BUCKET_SIZE(MIN_BUCKET),
1527 (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
5f05dabc 1528 PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
e8bc2b5c
GS
1529 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1530 PerlIO_printf(PerlIO_stderr(),
1531 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1532 ? " %5d"
1533 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1534 nfree[i]);
1535 }
1536#ifdef BUCKETS_ROOT2
1537 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1538 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1539 PerlIO_printf(PerlIO_stderr(),
1540 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1541 ? " %5d"
1542 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1543 nfree[i]);
8d063cd8 1544 }
e8bc2b5c 1545#endif
5f05dabc 1546 PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
e8bc2b5c
GS
1547 for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
1548 PerlIO_printf(PerlIO_stderr(),
1549 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1550 ? " %5d"
1551 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1552 nmalloc[i] - nfree[i]);
c07a80fd 1553 }
e8bc2b5c
GS
1554#ifdef BUCKETS_ROOT2
1555 PerlIO_printf(PerlIO_stderr(), "\n\t ");
1556 for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
1557 PerlIO_printf(PerlIO_stderr(),
1558 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
1559 ? " %5d"
1560 : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
1561 nmalloc[i] - nfree[i]);
1562 }
1563#endif
1564 while (nextchain) {
1565 total_chain += nextchain->size;
1566 nextchain = nextchain->next;
1567 }
1568 PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
1569 goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
1570 start_slack, total_chain, sbrked_remains);
c07a80fd
PP
1571}
1572#else
void
dump_mstats(char *s)
{
    /* No-op: statistics are only collected under DEBUGGING_MSTATS. */
}
1577#endif
a687059c 1578#endif /* lint */
cf5c4ad8
PP
1579
1580
1581#ifdef USE_PERL_SBRK
1582
2c92fcc0 1583# if defined(__MACHTEN_PPC__) || defined(__NeXT__)
38ac2dc8
DD
1584# define PERL_SBRK_VIA_MALLOC
1585/*
1586 * MachTen's malloc() returns a buffer aligned on a two-byte boundary.
1587 * While this is adequate, it may slow down access to longer data
1588 * types by forcing multiple memory accesses. It also causes
1589 * complaints when RCHECK is in force. So we allocate six bytes
1590 * more than we need to, and return an address rounded up to an
1591 * eight-byte boundary.
1592 *
1593 * 980701 Dominic Dunlop <domo@computer.org>
1594 */
1595# define SYSTEM_ALLOC(a) ((void *)(((unsigned)malloc((a)+6)+6)&~7))
1596# endif
1597
760ac839 1598# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1599# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
38ac2dc8
DD
1600# undef malloc /* Expose names that */
1601# undef calloc /* HIDEMYMALLOC hides */
1602# undef realloc
1603# undef free
760ac839 1604# else
72e5b9db 1605# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1606# endif
cf5c4ad8
PP
1607
1608/* it may seem schizophrenic to use perl's malloc and let it call system */
1609/* malloc, the reason for that is only the 3.2 version of the OS that had */
1610/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1611/* end to the cores */
1612
38ac2dc8
DD
1613# ifndef SYSTEM_ALLOC
1614# define SYSTEM_ALLOC(a) malloc(a)
1615# endif
cf5c4ad8 1616
760ac839 1617# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8
PP
1618
1619static IV Perl_sbrk_oldchunk;
1620static long Perl_sbrk_oldsize;
1621
760ac839
LW
1622# define PERLSBRK_32_K (1<<15)
1623# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1624
b63effbb 1625Malloc_t
df0003d4 1626Perl_sbrk(int size)
cf5c4ad8
PP
1627{
1628 IV got;
1629 int small, reqsize;
1630
1631 if (!size) return 0;
55497cff 1632#ifdef PERL_CORE
cf5c4ad8
PP
1633 reqsize = size; /* just for the DEBUG_m statement */
1634#endif
57569e04
HM
1635#ifdef PACK_MALLOC
1636 size = (size + 0x7ff) & ~0x7ff;
1637#endif
cf5c4ad8
PP
1638 if (size <= Perl_sbrk_oldsize) {
1639 got = Perl_sbrk_oldchunk;
1640 Perl_sbrk_oldchunk += size;
1641 Perl_sbrk_oldsize -= size;
1642 } else {
1643 if (size >= PERLSBRK_32_K) {
1644 small = 0;
1645 } else {
cf5c4ad8
PP
1646 size = PERLSBRK_64_K;
1647 small = 1;
1648 }
1649 got = (IV)SYSTEM_ALLOC(size);
57569e04
HM
1650#ifdef PACK_MALLOC
1651 got = (got + 0x7ff) & ~0x7ff;
1652#endif
cf5c4ad8
PP
1653 if (small) {
1654 /* Chunk is small, register the rest for future allocs. */
1655 Perl_sbrk_oldchunk = got + reqsize;
1656 Perl_sbrk_oldsize = size - reqsize;
1657 }
1658 }
1659
fb73857a 1660 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1661 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8
PP
1662
1663 return (void *)got;
1664}
1665
1666#endif /* ! defined USE_PERL_SBRK */