This is a live mirror of the Perl 5 development currently hosted at https://github.com/perl/perl5
fixup patches for VMS
[perl5.git] / malloc.c
CommitLineData
a0d0e21e 1/* malloc.c
8d063cd8 2 *
8d063cd8
LW
3 */
4
e8bc2b5c
GS
5#ifndef NO_FANCY_MALLOC
6# ifndef SMALL_BUCKET_VIA_TABLE
7# define SMALL_BUCKET_VIA_TABLE
8# endif
9# ifndef BUCKETS_ROOT2
10# define BUCKETS_ROOT2
11# endif
12# ifndef IGNORE_SMALL_BAD_FREE
13# define IGNORE_SMALL_BAD_FREE
14# endif
3562ef9b
IZ
15#endif
16
e8bc2b5c
GS
17#ifndef PLAIN_MALLOC /* Bulk enable features */
18# ifndef PACK_MALLOC
19# define PACK_MALLOC
20# endif
21# ifndef TWO_POT_OPTIMIZE
22# define TWO_POT_OPTIMIZE
23# endif
d720c441
IZ
24# if defined(PERL_CORE) && !defined(PERL_EMERGENCY_SBRK)
25# define PERL_EMERGENCY_SBRK
e8bc2b5c
GS
26# endif
27# if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
28# define DEBUGGING_MSTATS
29# endif
30#endif
31
32#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
33#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
34
35#if !(defined(I286) || defined(atarist))
36 /* take 2k unless the block is bigger than that */
37# define LOG_OF_MIN_ARENA 11
38#else
39 /* take 16k unless the block is bigger than that
40 (80286s like large segments!), probably good on the atari too */
41# define LOG_OF_MIN_ARENA 14
42#endif
43
8d063cd8 44#ifndef lint
1944739a
IZ
45# if defined(DEBUGGING) && !defined(NO_RCHECK)
46# define RCHECK
47# endif
e8bc2b5c
GS
48# if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
49# undef IGNORE_SMALL_BAD_FREE
50# endif
8d063cd8
LW
51/*
52 * malloc.c (Caltech) 2/21/82
53 * Chris Kingsley, kingsley@cit-20.
54 *
55 * This is a very fast storage allocator. It allocates blocks of a small
56 * number of different sizes, and keeps free lists of each size. Blocks that
57 * don't exactly fit are passed up to the next larger size. In this
58 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
cf5c4ad8 59 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
8d063cd8
LW
60 * This is designed for use in a program that uses vast quantities of memory,
61 * but bombs when it runs out.
62 */
63
d720c441
IZ
64#ifdef PERL_CORE
65# include "EXTERN.h"
66# include "perl.h"
67#else
68# ifdef PERL_FOR_X2P
69# include "../EXTERN.h"
70# include "../perl.h"
71# else
72# include <stdlib.h>
73# include <stdio.h>
74# include <memory.h>
75# define _(arg) arg
76# ifndef Malloc_t
77# define Malloc_t void *
78# endif
79# ifndef MEM_SIZE
80# define MEM_SIZE unsigned long
81# endif
82# ifndef LONG_MAX
83# define LONG_MAX 0x7FFFFFFF
84# endif
85# ifndef UV
86# define UV unsigned long
87# endif
88# ifndef caddr_t
89# define caddr_t char *
90# endif
91# ifndef Free_t
92# define Free_t void
93# endif
94# define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
95# define PerlEnv_getenv getenv
96# define PerlIO_printf fprintf
97# define PerlIO_stderr() stderr
98# endif
e8bc2b5c 99# ifndef croak /* make depend */
d720c441
IZ
100# define croak(mess, arg) warn((mess), (arg)); exit(1);
101# endif
102# ifndef warn
103# define warn(mess, arg) fprintf(stderr, (mess), (arg));
e8bc2b5c
GS
104# endif
105# ifdef DEBUG_m
106# undef DEBUG_m
107# endif
108# define DEBUG_m(a)
109# ifdef DEBUGGING
110# undef DEBUGGING
111# endif
112#endif
113
114#ifndef MUTEX_LOCK
115# define MUTEX_LOCK(l)
116#endif
117
118#ifndef MUTEX_UNLOCK
119# define MUTEX_UNLOCK(l)
120#endif
121
760ac839 122#ifdef DEBUGGING
e8bc2b5c
GS
123# undef DEBUG_m
124# define DEBUG_m(a) if (debug & 128) a
760ac839
LW
125#endif
126
135863df
AB
127/* I don't much care whether these are defined in sys/types.h--LAW */
128
129#define u_char unsigned char
130#define u_int unsigned int
e8bc2b5c
GS
131
132#ifdef HAS_QUAD
133# define u_bigint UV /* Needs to eat *void. */
134#else /* needed? */
135# define u_bigint unsigned long /* Needs to eat *void. */
136#endif
137
135863df 138#define u_short unsigned short
8d063cd8 139
cf5c4ad8
PP
140/* 286 and atarist like big chunks, which gives too much overhead. */
141#if (defined(RCHECK) || defined(I286) || defined(atarist)) && defined(PACK_MALLOC)
e8bc2b5c 142# undef PACK_MALLOC
cf5c4ad8
PP
143#endif
144
/*
 * The description below is applicable if PACK_MALLOC is not defined.
 *
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */
/* Header prepended to every non-packed chunk; doubles as the free-list
 * link while the chunk sits on nextf[]. */
union	overhead {
	union	overhead *ov_next;	/* when free */
#if MEM_ALIGNBYTES > 4
	double	strut;			/* alignment problems */
#endif
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_size;	/* actual block size */
		u_int	ovu_rmagic;	/* range magic number */
#endif
	} ovu;
/* Shorthand accessors for the in-use header fields. */
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
174
760ac839 175#ifdef DEBUGGING
d720c441 176static void botch _((char *diag, char *s));
a0d0e21e
LW
177#endif
178static void morecore _((int bucket));
179static int findbucket _((union overhead *freep, int srchlen));
180
8d063cd8
LW
181#define MAGIC 0xff /* magic # on accounting info */
182#define RMAGIC 0x55555555 /* magic # on range info */
e8bc2b5c
GS
183#define RMAGIC_C 0x55 /* magic # on range info */
184
8d063cd8 185#ifdef RCHECK
c2a5c2d2
IZ
186# define RSLOP sizeof (u_int)
187# ifdef TWO_POT_OPTIMIZE
e8bc2b5c 188# define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2)
c2a5c2d2 189# else
e8bc2b5c 190# define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
c2a5c2d2 191# endif
8d063cd8 192#else
c2a5c2d2 193# define RSLOP 0
8d063cd8
LW
194#endif
195
e8bc2b5c
GS
196#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
197# undef BUCKETS_ROOT2
198#endif
199
200#ifdef BUCKETS_ROOT2
201# define BUCKET_TABLE_SHIFT 2
202# define BUCKET_POW2_SHIFT 1
203# define BUCKETS_PER_POW2 2
204#else
205# define BUCKET_TABLE_SHIFT MIN_BUC_POW2
206# define BUCKET_POW2_SHIFT 0
207# define BUCKETS_PER_POW2 1
208#endif
209
210#ifdef BUCKETS_ROOT2
211# define MAX_BUCKET_BY_TABLE 13
212static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
213 {
214 0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
215 };
216# define BUCKET_SIZE(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
217# define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE \
218 ? buck_size[i] \
219 : ((1 << ((i) >> BUCKET_POW2_SHIFT)) \
220 - MEM_OVERHEAD(i) \
221 + POW2_OPTIMIZE_SURPLUS(i)))
222#else
223# define BUCKET_SIZE(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
224# define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i) + POW2_OPTIMIZE_SURPLUS(i))
225#endif
226
227
cf5c4ad8 228#ifdef PACK_MALLOC
e8bc2b5c
GS
229/* In this case it is assumed that if we do sbrk() in 2K units, we
230 * will get 2K aligned arenas (at least after some initial
231 * alignment). The bucket number of the given subblock is on the start
232 * of 2K arena which contains the subblock. Several following bytes
233 * contain the magic numbers for the subblocks in the block.
cf5c4ad8
PP
234 *
235 * Sizes of chunks are powers of 2 for chunks in buckets <=
236 * MAX_PACKED, after this they are (2^n - sizeof(union overhead)) (to
237 * get alignment right).
238 *
e8bc2b5c
GS
239 * Consider an arena for 2^n with n>MAX_PACKED. We suppose that
240 * starts of all the chunks in a 2K arena are in different
241 * 2^n-byte-long chunks. If the top of the last chunk is aligned on a
242 * boundary of 2K block, this means that sizeof(union
243 * overhead)*"number of chunks" < 2^n, or sizeof(union overhead)*2K <
244 * 4^n, or n > 6 + log2(sizeof()/2)/2, since a chunk of size 2^n -
245 * overhead is used. Since this rules out n = 7 for 8 byte alignment,
246 * we specialcase allocation of the first of 16 128-byte-long chunks.
cf5c4ad8
PP
247 *
248 * Note that with the above assumption we automatically have enough
249 * place for MAGIC at the start of 2K block. Note also that we
e8bc2b5c
GS
250 * overlay union overhead over the chunk, thus the start of small chunks
251 * is immediately overwritten after freeing. */
252# define MAX_PACKED_POW2 6
253# define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
254# define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
255# define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
256# define TWOK_MASKED(x) ((u_bigint)(x) & ~TWOK_MASK)
257# define TWOK_SHIFT(x) ((u_bigint)(x) & TWOK_MASK)
cf5c4ad8
PP
258# define OV_INDEXp(block) ((u_char*)(TWOK_MASKED(block)))
259# define OV_INDEX(block) (*OV_INDEXp(block))
260# define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) + \
e8bc2b5c
GS
261 (TWOK_SHIFT(block)>> \
262 (bucket>>BUCKET_POW2_SHIFT)) + \
263 (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
264 /* A bucket can have a shift smaller than it size, we need to
265 shift its magic number so it will not overwrite index: */
266# ifdef BUCKETS_ROOT2
267# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
268# else
269# define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
270# endif
cf5c4ad8
PP
271# define CHUNK_SHIFT 0
272
e8bc2b5c
GS
273/* Number of active buckets of given ordinal. */
274#ifdef IGNORE_SMALL_BAD_FREE
275#define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
276# define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
277 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE(bucket) \
278 : n_blks[bucket] )
279#else
280# define N_BLKS(bucket) n_blks[bucket]
281#endif
282
283static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
284 {
285# if BUCKETS_PER_POW2==1
286 0, 0,
287 (MIN_BUC_POW2==2 ? 384 : 0),
288 224, 120, 62, 31, 16, 8, 4, 2
289# else
290 0, 0, 0, 0,
291 (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0), /* 4, 4 */
292 224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
293# endif
294 };
295
296/* Shift of the first bucket with the given ordinal inside 2K chunk. */
297#ifdef IGNORE_SMALL_BAD_FREE
298# define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK \
299 ? ((1<<LOG_OF_MIN_ARENA) \
300 - BUCKET_SIZE(bucket) * N_BLKS(bucket)) \
301 : blk_shift[bucket])
302#else
303# define BLK_SHIFT(bucket) blk_shift[bucket]
304#endif
305
306static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
307 {
308# if BUCKETS_PER_POW2==1
309 0, 0,
310 (MIN_BUC_POW2==2 ? 512 : 0),
311 256, 128, 64, 64, /* 8 to 64 */
312 16*sizeof(union overhead),
313 8*sizeof(union overhead),
314 4*sizeof(union overhead),
315 2*sizeof(union overhead),
316# else
317 0, 0, 0, 0,
318 (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
319 256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
320 16*sizeof(union overhead), 16*sizeof(union overhead),
321 8*sizeof(union overhead), 8*sizeof(union overhead),
322 4*sizeof(union overhead), 4*sizeof(union overhead),
323 2*sizeof(union overhead), 2*sizeof(union overhead),
324# endif
325 };
cf5c4ad8 326
cf5c4ad8
PP
327#else /* !PACK_MALLOC */
328
329# define OV_MAGIC(block,bucket) (block)->ov_magic
330# define OV_INDEX(block) (block)->ov_index
331# define CHUNK_SHIFT 1
e8bc2b5c 332# define MAX_PACKED -1
cf5c4ad8
PP
333#endif /* !PACK_MALLOC */
334
e8bc2b5c
GS
335#define M_OVERHEAD (sizeof(union overhead) + RSLOP)
336
337#ifdef PACK_MALLOC
338# define MEM_OVERHEAD(bucket) \
339 (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
340# ifdef SMALL_BUCKET_VIA_TABLE
341# define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
342# define START_SHIFT MAX_PACKED_POW2
343# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
344# define SIZE_TABLE_MAX 80
345# else
346# define SIZE_TABLE_MAX 64
347# endif
348static char bucket_of[] =
349 {
350# ifdef BUCKETS_ROOT2 /* Chunks of size 3*2^n. */
351 /* 0 to 15 in 4-byte increments. */
352 (sizeof(void*) > 4 ? 6 : 5), /* 4/8, 5-th bucket for better reports */
353 6, /* 8 */
354 7, 8, /* 12, 16 */
355 9, 9, 10, 10, /* 24, 32 */
356 11, 11, 11, 11, /* 48 */
357 12, 12, 12, 12, /* 64 */
358 13, 13, 13, 13, /* 80 */
359 13, 13, 13, 13 /* 80 */
360# else /* !BUCKETS_ROOT2 */
361 /* 0 to 15 in 4-byte increments. */
362 (sizeof(void*) > 4 ? 3 : 2),
363 3,
364 4, 4,
365 5, 5, 5, 5,
366 6, 6, 6, 6,
367 6, 6, 6, 6
368# endif /* !BUCKETS_ROOT2 */
369 };
370# else /* !SMALL_BUCKET_VIA_TABLE */
371# define START_SHIFTS_BUCKET MIN_BUCKET
372# define START_SHIFT (MIN_BUC_POW2 - 1)
373# endif /* !SMALL_BUCKET_VIA_TABLE */
374#else /* !PACK_MALLOC */
375# define MEM_OVERHEAD(bucket) M_OVERHEAD
376# ifdef SMALL_BUCKET_VIA_TABLE
377# undef SMALL_BUCKET_VIA_TABLE
378# endif
379# define START_SHIFTS_BUCKET MIN_BUCKET
380# define START_SHIFT (MIN_BUC_POW2 - 1)
381#endif /* !PACK_MALLOC */
cf5c4ad8 382
8d063cd8 383/*
55497cff
PP
384 * Big allocations are often of the size 2^n bytes. To make them a
385 * little bit better, make blocks of size 2^n+pagesize for big n.
386 */
387
388#ifdef TWO_POT_OPTIMIZE
389
5f05dabc
PP
390# ifndef PERL_PAGESIZE
391# define PERL_PAGESIZE 4096
392# endif
e8bc2b5c
GS
393# ifndef FIRST_BIG_POW2
394# define FIRST_BIG_POW2 15 /* 32K, 16K is used too often. */
5f05dabc 395# endif
e8bc2b5c 396# define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
55497cff
PP
397/* If this value or more, check against bigger blocks. */
398# define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
399/* If less than this value, goes into 2^n-overhead-block. */
400# define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
401
e8bc2b5c
GS
402# define POW2_OPTIMIZE_ADJUST(nbytes) \
403 ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
404# define POW2_OPTIMIZE_SURPLUS(bucket) \
405 ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
406
407#else /* !TWO_POT_OPTIMIZE */
408# define POW2_OPTIMIZE_ADJUST(nbytes)
409# define POW2_OPTIMIZE_SURPLUS(bucket) 0
410#endif /* !TWO_POT_OPTIMIZE */
411
412#if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
413# define BARK_64K_LIMIT(what,nbytes,size) \
414 if (nbytes > 0xffff) { \
415 PerlIO_printf(PerlIO_stderr(), \
416 "%s too large: %lx\n", what, size); \
417 my_exit(1); \
418 }
419#else /* !HAS_64K_LIMIT || !PERL_CORE */
420# define BARK_64K_LIMIT(what,nbytes,size)
421#endif /* !HAS_64K_LIMIT || !PERL_CORE */
55497cff 422
e8bc2b5c
GS
423#ifndef MIN_SBRK
424# define MIN_SBRK 2048
425#endif
426
427#ifndef FIRST_SBRK
d720c441 428# define FIRST_SBRK (48*1024)
e8bc2b5c
GS
429#endif
430
431/* Minimal sbrk in percents of what is already alloced. */
432#ifndef MIN_SBRK_FRAC
433# define MIN_SBRK_FRAC 3
434#endif
435
436#ifndef SBRK_ALLOW_FAILURES
437# define SBRK_ALLOW_FAILURES 3
438#endif
55497cff 439
e8bc2b5c
GS
440#ifndef SBRK_FAILURE_PRICE
441# define SBRK_FAILURE_PRICE 50
55497cff
PP
442#endif
443
e8bc2b5c
GS
#if defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)

#  ifndef BIG_SIZE
#    define BIG_SIZE (1<<16)		/* 64K */
#  endif

/* Buffer carved out of the $^M scalar's string storage, used to satisfy
 * allocations after sbrk() has failed so that Perl can die gracefully. */
static char *emergency_buffer;
static MEM_SIZE emergency_buffer_size;

/*
 * Last-resort allocator called when sbrk() returns -1.  On the first
 * failure it detaches the PV of the $^M scalar (looked up in defstash)
 * and uses it as an emergency pool; subsequent requests are served from
 * that pool.  Croaks (after unlocking the malloc mutex) to give Perl a
 * chance to recover; returns (char *)-1 when truly out of options.
 */
static Malloc_t
emergency_sbrk(size)
     MEM_SIZE size;
{
    if (size >= BIG_SIZE) {
	/* Give the possibility to recover: */
	MUTEX_UNLOCK(&malloc_mutex);
	croak("Out of memory during request for %i bytes", size);
    }

    if (!emergency_buffer) {
	dTHR;
	/* First offense, give a possibility to recover by dieing. */
	/* No malloc involved here: */
	GV **gvp = (GV**)hv_fetch(defstash, "^M", 2, 0);
	SV *sv;
	char *pv;

	if (!gvp) gvp = (GV**)hv_fetch(defstash, "\015", 1, 0);
	if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
	    || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
	    return (char *)-1;		/* Now die die die... */

	/* Got it, now detach SvPV: */
	pv = SvPV(sv, na);
	/* Check alignment: */
	if (((u_bigint)(pv - M_OVERHEAD)) & ((1<<LOG_OF_MIN_ARENA) - 1)) {
	    PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
	    return (char *)-1;		/* die die die */
	}

	/* Take ownership of the PV, including room for one header. */
	emergency_buffer = pv - M_OVERHEAD;
	emergency_buffer_size = SvLEN(sv) + M_OVERHEAD;
	SvPOK_off(sv);
	SvREADONLY_on(sv);
	MUTEX_UNLOCK(&malloc_mutex);
	croak("Out of memory during request for %i bytes", size);
    }
    else if (emergency_buffer_size >= size) {
	/* Serve from the tail of the pool. */
	emergency_buffer_size -= size;
	return emergency_buffer + emergency_buffer_size;
    }

    return (char *)-1;			/* poor guy... */
}

#else /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
#  define emergency_sbrk(size)	-1
#endif /* !(defined(PERL_EMERGENCY_SBRK) && defined(PERL_CORE)) */
55497cff
PP
502
503/*
e8bc2b5c 504 * nextf[i] is the pointer to the next free block of size 2^i. The
8d063cd8
LW
505 * smallest allocatable block is 8 bytes. The overhead information
506 * precedes the data area returned to the user.
507 */
e8bc2b5c 508#define NBUCKETS (32*BUCKETS_PER_POW2 + 1)
8d063cd8 509static union overhead *nextf[NBUCKETS];
cf5c4ad8
PP
510
511#ifdef USE_PERL_SBRK
512#define sbrk(a) Perl_sbrk(a)
52082926 513Malloc_t Perl_sbrk _((int size));
8ac85365
NIS
514#else
515#ifdef DONT_DECLARE_STD
516#ifdef I_UNISTD
517#include <unistd.h>
518#endif
cf5c4ad8 519#else
52082926 520extern Malloc_t sbrk(int);
8ac85365 521#endif
cf5c4ad8 522#endif
8d063cd8 523
c07a80fd 524#ifdef DEBUGGING_MSTATS
8d063cd8
LW
525/*
526 * nmalloc[i] is the difference between the number of mallocs and frees
527 * for a given block size.
528 */
529static u_int nmalloc[NBUCKETS];
5f05dabc
PP
530static u_int sbrk_slack;
531static u_int start_slack;
8d063cd8
LW
532#endif
533
e8bc2b5c
GS
534static u_int goodsbrk;
535
#ifdef DEBUGGING
/* ASSERT aborts the process with a diagnostic when its predicate fails;
 * compiled away entirely in non-DEBUGGING builds. */
#define	ASSERT(p,diag)   if (!(p)) botch(diag,STRINGIFY(p));  else
/* Report a failed internal consistency check and abort. */
static void
botch(char *diag, char *s)
{
	PerlIO_printf(PerlIO_stderr(), "assertion botched (%s?): %s\n", diag, s);
	PerlProc_abort();
}
#else
#define	ASSERT(p, diag)
#endif
547
/*
 * Allocate nbytes of memory from the bucketed free lists.
 *
 * The request is rounded up to the nearest bucket size (via the lookup
 * table or the shift loop below, depending on configuration); if the
 * bucket's free list is empty, morecore() refills it.  Returns a pointer
 * just past the chunk header, or NULL outside PERL_CORE when memory is
 * exhausted.  Serialized by malloc_mutex.
 */
Malloc_t
malloc(register size_t nbytes)
{
  	register union overhead *p;
  	register int bucket;
  	register MEM_SIZE shiftr;

#if defined(DEBUGGING) || defined(RCHECK)
	MEM_SIZE size = nbytes;		/* remember the original request */
#endif

	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
	if ((long)nbytes < 0)
	    croak("%s", "panic: malloc");
#endif

	MUTEX_LOCK(&malloc_mutex);
	/*
	 * Convert amount of memory requested into
	 * closest block size stored in hash buckets
	 * which satisfies request.  Account for
	 * space used per block for accounting.
	 */
#ifdef PACK_MALLOC
#  ifdef SMALL_BUCKET_VIA_TABLE
	/* Small requests: O(1) bucket lookup via bucket_of[]. */
	if (nbytes == 0)
	    bucket = MIN_BUCKET;
	else if (nbytes <= SIZE_TABLE_MAX) {
	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
	} else
#  else
	if (nbytes == 0)
	    nbytes = 1;
	/* Packed chunks carry no per-chunk header, so skip the
	 * M_OVERHEAD adjustment below.  NOTE: the goto deliberately
	 * jumps into the block, bypassing only the adjustment lines. */
	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
	else
#  endif
#endif
	{
	    POW2_OPTIMIZE_ADJUST(nbytes);
	    nbytes += M_OVERHEAD;
	    nbytes = (nbytes + 3) &~ 3;
	  do_shifts:
	    shiftr = (nbytes - 1) >> START_SHIFT;
	    bucket = START_SHIFTS_BUCKET;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
  		bucket += BUCKETS_PER_POW2;
	}
  	/*
  	 * If nothing in hash bucket right now,
  	 * request more memory from the system.
  	 */
  	if (nextf[bucket] == NULL)
  		morecore(bucket);
  	if ((p = nextf[bucket]) == NULL) {
		/* morecore() failed; unlock before dying or returning. */
		MUTEX_UNLOCK(&malloc_mutex);
#ifdef PERL_CORE
		if (!nomemok) {
		    PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
		    my_exit(1);
		}
#else
  		return (NULL);
#endif
	}

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) malloc %ld bytes\n",
			      (unsigned long)(p+1), (unsigned long)(an++),
			      (long)size));

	/* remove from linked list */
#ifdef RCHECK
	/* A misaligned free-list pointer means the list was corrupted. */
	if (*((int*)p) & (sizeof(union overhead) - 1))
	    PerlIO_printf(PerlIO_stderr(), "Corrupt malloc ptr 0x%lx at 0x%lx\n",
		(unsigned long)*((int*)p),(unsigned long)p);
#endif
  	nextf[bucket] = p->ov_next;
#ifdef IGNORE_SMALL_BAD_FREE
	if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
	    OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
	OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
  	/*
  	 * Record allocated size of block and
  	 * bound space with magic numbers.
  	 */
  	p->ov_rmagic = RMAGIC;
	if (bucket <= MAX_SHORT_BUCKET) {
	    int i;

	    nbytes = size + M_OVERHEAD;
	    p->ov_size = nbytes - 1;
	    /* Pad the tail up to the next word boundary with RMAGIC_C
	     * bytes so free() can detect overruns of odd-sized blocks. */
	    if ((i = nbytes & 3)) {
		i = 4 - i;
		while (i--)
		    *((char *)((caddr_t)p + nbytes - RSLOP + i)) = RMAGIC_C;
	    }
	    nbytes = (nbytes + 3) &~ 3;
	    *((u_int *)((caddr_t)p + nbytes - RSLOP)) = RMAGIC;
	}
#endif
	MUTEX_UNLOCK(&malloc_mutex);
  	return ((Malloc_t)(p + CHUNK_SHIFT));
}
657
e8bc2b5c
GS
/* State for the sbrk()-continuity optimization: when consecutive sbrk()
 * calls return adjacent memory, the tail of the previous request can be
 * extended cheaply. */
static char *last_sbrk_top;
static char *last_op;			/* This arena can be easily extended. */
static int sbrked_remains;		/* unused bytes at the top of the last sbrk */
static int sbrk_good = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;

#ifdef DEBUGGING_MSTATS
static int sbrks;			/* total number of sbrk() calls */
#endif

/* A leftover region of memory, threaded into a singly-linked chain so it
 * can later be carved up by get_from_chain().  The node lives at the
 * start of the region it describes. */
struct chunk_chain_s {
    struct chunk_chain_s *next;
    MEM_SIZE size;
};
static struct chunk_chain_s *chunk_chain;
static int n_chunks;			/* number of entries in chunk_chain */
static char max_bucket;			/* largest bucket index seen so far */
674
675/* Cutoff a piece of one of the chunks in the chain. Prefer smaller chunk. */
676static void *
677get_from_chain(MEM_SIZE size)
678{
679 struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
680 struct chunk_chain_s **oldgoodp = NULL;
681 long min_remain = LONG_MAX;
682
683 while (elt) {
684 if (elt->size >= size) {
685 long remains = elt->size - size;
686 if (remains >= 0 && remains < min_remain) {
687 oldgoodp = oldp;
688 min_remain = remains;
689 }
690 if (remains == 0) {
691 break;
692 }
693 }
694 oldp = &( elt->next );
695 elt = elt->next;
696 }
697 if (!oldgoodp) return NULL;
698 if (min_remain) {
699 void *ret = *oldgoodp;
700 struct chunk_chain_s *next = (*oldgoodp)->next;
701
702 *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
703 (*oldgoodp)->size = min_remain;
704 (*oldgoodp)->next = next;
705 return ret;
706 } else {
707 void *ret = *oldgoodp;
708 *oldgoodp = (*oldgoodp)->next;
709 n_chunks--;
710 return ret;
711 }
712}
713
714static void
715add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
716{
717 struct chunk_chain_s *next = chunk_chain;
718 char *cp = (char*)p;
719
720 cp += chip;
721 chunk_chain = (struct chunk_chain_s *)cp;
722 chunk_chain->size = size - chip;
723 chunk_chain->next = next;
724 n_chunks++;
725}
726
/* Steal a free block from the free list of a bigger bucket (starting at
 * `bucket', up to max_bucket) and park it on the chunk chain so it can be
 * carved down to `size'.  Stealing is rationed via bucketprice[]: a bucket
 * must be asked `price' times before it actually gives up a block, so
 * occasional requests do not cannibalize larger buckets.  Returns the
 * stolen block, or NULL if nothing was taken this time. */
static void *
get_from_bigger_buckets(int bucket, MEM_SIZE size)
{
    int price = 1;
    static int bucketprice[NBUCKETS];
    while (bucket <= max_bucket) {
	/* We postpone stealing from bigger buckets until we want it
	   often enough. */
	if (nextf[bucket] && bucketprice[bucket]++ >= price) {
	    /* Steal it! */
	    void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
	    bucketprice[bucket] = 0;
	    if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
		last_op = NULL;		/* Disable optimization */
	    }
	    nextf[bucket] = nextf[bucket]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[bucket]--;
	    start_slack -= M_OVERHEAD;
#endif
	    add_to_chain(ret, (BUCKET_SIZE(bucket) +
			       POW2_OPTIMIZE_SURPLUS(bucket)),
			 size);
	    return ret;
	}
	bucket++;
    }
    return NULL;
}
756
8d063cd8
LW
/*
 * Allocate more memory to the indicated bucket.
 *
 * Sources are tried in order of increasing cost: steal a whole arena
 * from the matching power-of-two bucket, carve from the chunk chain,
 * steal from a bigger bucket, use the unused tail of the previous
 * sbrk(), and finally call sbrk() itself (with continuity/alignment
 * heuristics and an emergency fallback).  The acquired arena is then
 * split into nblks chunks threaded onto nextf[bucket].
 */
static void
morecore(register int bucket)
{
  	register union overhead *ovp;
  	register int rnu;       /* 2^rnu bytes will be requested */
  	register int nblks;     /* become nblks blocks of the desired size */
	register MEM_SIZE siz, needed;
	int slack = 0;

  	if (nextf[bucket])
  		return;
	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
	    MUTEX_UNLOCK(&malloc_mutex);
	    croak("%s", "Out of memory during ridiculously large request");
	}
	if (bucket > max_bucket)
	    max_bucket = bucket;

	/* Small buckets share a minimum-size arena; big ones get their own. */
  	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
		? LOG_OF_MIN_ARENA
		: (bucket >> BUCKET_POW2_SHIFT) );
	/* This may be overwritten later: */
  	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
	    /* Cheapest source: reuse a whole free block of arena size. */
	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
	    nextf[rnu << BUCKET_POW2_SHIFT]
		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
#ifdef DEBUGGING_MSTATS
	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
	    start_slack -= M_OVERHEAD;
#endif
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from %ld arena\n",
				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
	} else if (chunk_chain
		   && (ovp = (union overhead*) get_from_chain(needed))) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from chain\n",
				  (long) needed));
	} else if ( (ovp = (union overhead*)
		     get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
					     needed)) ) {
	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "stealing %ld bytes from bigger buckets\n",
				  (long) needed));
	} else if (needed <= sbrked_remains) {
	    /* Fits in the unused tail of the previous sbrk(). */
	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
	    sbrked_remains -= needed;
	    last_op = (char*)ovp;
	} else {
	    /* Need to do (possibly expensive) system call. Try to
	       optimize it for rare calling. */
	    MEM_SIZE require = needed - sbrked_remains;
	    char *cp;

	    if (sbrk_good > 0) {
		/* Over-allocate: at least FIRST_SBRK/MIN_SBRK, at least
		 * MIN_SBRK_FRAC percent of what is already allocated,
		 * rounded up to a multiple of MIN_SBRK. */
		if (!last_sbrk_top && require < FIRST_SBRK)
		    require = FIRST_SBRK;
		else if (require < MIN_SBRK) require = MIN_SBRK;

		if (require < goodsbrk * MIN_SBRK_FRAC / 100)
		    require = goodsbrk * MIN_SBRK_FRAC / 100;
		require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
	    } else {
		require = needed;
		last_sbrk_top = 0;
		sbrked_remains = 0;
	    }

	    DEBUG_m(PerlIO_printf(Perl_debug_log,
				  "sbrk(%ld) for %ld-byte-long arena\n",
				  (long)require, (long) needed));
	    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
	    sbrks++;
#endif
	    if (cp == last_sbrk_top) {
		/* Common case, anything is fine. */
		sbrk_good++;
		ovp = (union overhead *) (cp - sbrked_remains);
		sbrked_remains = require - (needed - sbrked_remains);
	    } else if (cp == (char *)-1) { /* no more room! */
		ovp = (union overhead *)emergency_sbrk(needed);
		if (ovp == (union overhead *)-1)
		    return;
		goto gotit;
	    } else { /* Non-continuous or first sbrk(). */
		long add = sbrked_remains;
		char *newcp;

		if (sbrked_remains) {	/* Put rest into chain, we
					   cannot use it right now. */
		    add_to_chain((void*)(last_sbrk_top - sbrked_remains),
				 sbrked_remains, 0);
		}

		/* Second, check alignment. */
		slack = 0;

#ifndef atarist /* on the atari we dont have to worry about this */
#  ifndef I286 	/* The sbrk(0) call on the I286 always returns the next segment */

		/* CHUNK_SHIFT is 1 for PACK_MALLOC, 0 otherwise. */
		if ((UV)cp & (0x7FF >> CHUNK_SHIFT)) { /* Not aligned. */
		    slack = (0x800 >> CHUNK_SHIFT)
			- ((UV)cp & (0x7FF >> CHUNK_SHIFT));
		    add += slack;
		}
#  endif
#endif /* atarist */

		if (add) {
		    /* Round up with a second sbrk() so that the NEXT
		     * sbrk() starts on an arena boundary. */
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
"sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
					  (long)add, (long) slack,
					  (long) sbrked_remains));
		    newcp = (char *)sbrk(add);
#if defined(DEBUGGING_MSTATS)
		    sbrks++;
		    sbrk_slack += add;
#endif
		    if (newcp != cp + require) {
			/* Too bad: even rounding sbrk() is not continuous.*/
			DEBUG_m(PerlIO_printf(Perl_debug_log,
					      "failed to fix bad sbrk()\n"));
#ifdef PACK_MALLOC
			if (slack) {
			    MUTEX_UNLOCK(&malloc_mutex);
			    croak("%s", "panic: Off-page sbrk");
			}
#endif
			if (sbrked_remains) {
			    /* Try again. */
#if defined(DEBUGGING_MSTATS)
			    sbrk_slack += require;
#endif
			    require = needed;
			    DEBUG_m(PerlIO_printf(Perl_debug_log,
						  "straight sbrk(%ld)\n",
						  (long)require));
			    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
			    sbrks++;
#endif
			    if (cp == (char *)-1)
				return;
			}
			sbrk_good = -1;	/* Disable optimization!
					   Continue with not-aligned... */
		    } else {
			cp += slack;
			require += sbrked_remains;
		    }
		}

		if (last_sbrk_top) {
		    sbrk_good -= SBRK_FAILURE_PRICE;
		}

		ovp = (union overhead *) cp;
		/*
		 * Round up to minimum allocation size boundary
		 * and deduct from block count to reflect.
		 */

#ifndef I286	/* Again, this should always be ok on an 80286 */
		if ((UV)ovp & 7) {
		    ovp = (union overhead *)(((UV)ovp + 8) & ~7);
		    DEBUG_m(PerlIO_printf(Perl_debug_log,
					  "fixing sbrk(): %d bytes off machine alignement\n",
					  (int)((UV)ovp & 7)));
		    nblks--;
# if defined(DEBUGGING_MSTATS)
		    /* This is only approx. if TWO_POT_OPTIMIZE: */
		    sbrk_slack += (1 << bucket);
# endif
		}
#endif
		sbrked_remains = require - needed;
	    }
	    last_sbrk_top = cp + require;
	    last_op = (char*) cp;
#ifdef DEBUGGING_MSTATS
	    goodsbrk += require;
#endif
	}

      gotit:
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	siz = BUCKET_SIZE(bucket);
#ifdef PACK_MALLOC
	*(u_char*)ovp = bucket;	/* Fill index. */
	if (bucket <= MAX_PACKED) {
	    /* Packed arena: chunks carry no headers; skip the shift area
	     * that holds the per-arena index/magic bytes. */
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    nblks = N_BLKS(bucket);
#  ifdef DEBUGGING_MSTATS
	    start_slack += BLK_SHIFT(bucket);
#  endif
	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
	    siz -= sizeof(union overhead);
	} else ovp++;		/* One chunk per block. */
#endif /* PACK_MALLOC */
	nextf[bucket] = ovp;
#ifdef DEBUGGING_MSTATS
	nmalloc[bucket] += nblks;
	if (bucket > MAX_PACKED) {
	    start_slack += M_OVERHEAD * nblks;
	}
#endif
	/* Thread the arena's chunks into a free list ending in NULL. */
  	while (--nblks > 0) {
		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
		ovp = (union overhead *)((caddr_t)ovp + siz);
  	}
	/* Not all sbrks return zeroed memory.*/
	ovp->ov_next = (union overhead *)NULL;
#ifdef PACK_MALLOC
	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
	    nextf[7*BUCKETS_PER_POW2] =
		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
				   - sizeof(union overhead));
	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
	}
#endif /* !PACK_MALLOC */
}
990
Free_t
free(void *mp)
{
	register MEM_SIZE size;
	register union overhead *ovp;
	char *cp = (char*)mp;
#ifdef PACK_MALLOC
	u_char bucket;
#endif

	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) free\n",
			      (unsigned long)cp, (unsigned long)(an++)));

	/* free(NULL) is a no-op, as required of a malloc replacement. */
	if (cp == NULL)
		return;
	/* Step back over the bookkeeping header to the chunk start. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
#ifdef PACK_MALLOC
	bucket = OV_INDEX(ovp);
#endif
	/*
	 * Validate the chunk's magic before touching the free lists.
	 * With IGNORE_SMALL_BAD_FREE, chunks in buckets below
	 * FIRST_BUCKET_WITH_CHECK carry no magic and are accepted as-is.
	 */
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
	    && (OV_MAGIC(ovp, bucket) != MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) != MAGIC)
#endif
	    {
		/* Bad or duplicate free(): warn once per call unless the
		 * PERL_BADFREE environment variable disables the warning,
		 * then ignore the pointer entirely. */
		static int bad_free_warn = -1;
		if (bad_free_warn == -1) {
		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
		}
		if (!bad_free_warn)
		    return;
#ifdef RCHECK
		/* ov_rmagic == RMAGIC - 1 marks a chunk this allocator
		 * already freed, i.e. a duplicate free(). */
		warn("%s free() ignored",
		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
#else
		warn("%s", "Bad free() ignored");
#endif
		return;				/* sanity */
	    }
	MUTEX_LOCK(&malloc_mutex);
#ifdef RCHECK
	ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
		/*
		 * Check the RMAGIC_C pad bytes (up to 3, filling the gap
		 * to the next word boundary) and the trailing RMAGIC word
		 * written just past the caller's requested size.
		 */
		int i;
		MEM_SIZE nbytes = ovp->ov_size + 1;

		if ((i = nbytes & 3)) {
		    i = 4 - i;
		    while (i--) {
			ASSERT(*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
			       == RMAGIC_C, "chunk's tail overwrite");
		    }
		}
		/* Round up to the word boundary where RMAGIC lives. */
		nbytes = (nbytes + 3) &~ 3;
		ASSERT(*(u_int *)((caddr_t)ovp + nbytes - RSLOP) == RMAGIC, "chunk's tail overwrite");
	}
	/* Mark the chunk freed so a second free() is caught above. */
	ovp->ov_rmagic = RMAGIC - 1;
#endif
	ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
	/* Push the chunk onto the free list of its size bucket. */
	size = OV_INDEX(ovp);
	ovp->ov_next = nextf[size];
	nextf[size] = ovp;
	MUTEX_UNLOCK(&malloc_mutex);
}
1059
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``reall_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it); see findbucket() below.
 * If that fails we just copy however many bytes was given to
 * realloc() and hope it's not huge.
 */
int reall_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
8d063cd8 1072
/*
 * Resize the block ``mp'' to ``nbytes''.  Three strategies, tried in
 * order: keep the block in place when the new size still fits its
 * bucket; grow a large, most-recently-sbrk()ed block in place by
 * extending the arena; otherwise malloc a new block, copy, and free
 * the old one.  A NULL ``mp'' degenerates to malloc(nbytes).
 */
Malloc_t
realloc(void *mp, size_t nbytes)
{
	register MEM_SIZE onb;
	union overhead *ovp;
	char *res;
	int prev_bucket;
	register int bucket;
	int was_alloced = 0, incr;
	char *cp = (char*)mp;

#if defined(DEBUGGING) || !defined(PERL_CORE)
	MEM_SIZE size = nbytes;

	if ((long)nbytes < 0)
		croak("%s", "panic: realloc");
#endif

	BARK_64K_LIMIT("Reallocation",nbytes,size);
	if (!cp)
		return malloc(nbytes);

	MUTEX_LOCK(&malloc_mutex);
	/* Step back over the bookkeeping header to the chunk start. */
	ovp = (union overhead *)((caddr_t)cp
				- sizeof (union overhead) * CHUNK_SHIFT);
	bucket = OV_INDEX(ovp);
#ifdef IGNORE_SMALL_BAD_FREE
	if ((bucket < FIRST_BUCKET_WITH_CHECK)
	    || (OV_MAGIC(ovp, bucket) == MAGIC))
#else
	if (OV_MAGIC(ovp, bucket) == MAGIC)
#endif
	{
		was_alloced = 1;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``reall_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * smallest possible.
		 */
		if ((bucket = findbucket(ovp, 1)) < 0 &&
		    (bucket = findbucket(ovp, reall_srchlen)) < 0)
			bucket = 0;
	}
	onb = BUCKET_SIZE_REAL(bucket);
	/*
	 * avoid the copy if same size block.
	 * We are not agressive with boundary cases. Note that it might
	 * (for a small number of cases) give false negative if
	 * both new size and old one are in the bucket for
	 * FIRST_BIG_POW2, but the new one is near the lower end.
	 *
	 * We do not try to go to 1.5 times smaller bucket so far.
	 *
	 * incr: 1 => must grow, 0 => fits in place, -1 => shrinks
	 * enough that a smaller bucket would do.
	 */
	if (nbytes > onb) incr = 1;
	else {
#ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
		nbytes > ( (onb >> 1) - M_OVERHEAD )
#  ifdef TWO_POT_OPTIMIZE
		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
#  endif
		)
#else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		prev_bucket = ( (bucket > MAX_PACKED + 1)
				? bucket - BUCKETS_PER_POW2
				: bucket - 1);
	     if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
#endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
		incr = 0;
	     else incr = -1;
	}
	if (!was_alloced
#ifdef STRESS_REALLOC
	    || 1 /* always do it the hard way */
#endif
	    ) goto hard_way;
	else if (incr == 0) {
	  inplace_label:
#ifdef RCHECK
		/*
		 * Record new allocated size of block and
		 * bound space with magic numbers.
		 */
		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
		       int i, nb = ovp->ov_size + 1;

		       /* First verify the old tail guards are intact... */
		       if ((i = nb & 3)) {
			   i = 4 - i;
			   while (i--) {
			       ASSERT(*((char *)((caddr_t)ovp + nb - RSLOP + i)) == RMAGIC_C, "chunk's tail overwrite");
			   }
		       }
		       nb = (nb + 3) &~ 3;
		       ASSERT(*(u_int *)((caddr_t)ovp + nb - RSLOP) == RMAGIC, "chunk's tail overwrite");
			/*
			 * Convert amount of memory requested into
			 * closest block size stored in hash buckets
			 * which satisfies request.  Account for
			 * space used per block for accounting.
			 */
			nbytes += M_OVERHEAD;
			ovp->ov_size = nbytes - 1;
			/* ...then rewrite the guards at the new size. */
			if ((i = nbytes & 3)) {
			    i = 4 - i;
			    while (i--)
				*((char *)((caddr_t)ovp + nbytes - RSLOP + i))
				    = RMAGIC_C;
			}
			nbytes = (nbytes + 3) &~ 3;
			*((u_int *)((caddr_t)ovp + nbytes - RSLOP)) = RMAGIC;
		}
#endif
		res = cp;
		MUTEX_UNLOCK(&malloc_mutex);
	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
	    /*
	     * The block is the most recently allocated one and it is
	     * big: try to grow its arena in place via sbrk() instead
	     * of copying.
	     */
	    MEM_SIZE require, newarena = nbytes, pow;
	    int shiftr;

	    POW2_OPTIMIZE_ADJUST(newarena);
	    newarena = newarena + M_OVERHEAD;
	    /* newarena = (newarena + 3) &~ 3; */
	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
	    pow = LOG_OF_MIN_ARENA + 1;
	    /* apart from this loop, this is O(1) */
	    while (shiftr >>= 1)
		pow++;
	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
	    require = newarena - onb - M_OVERHEAD;

	    if (require <= sbrked_remains) {
		/* The slack left by the last sbrk() already covers it. */
		sbrked_remains -= require;
	    } else {
		char *cp;

		require -= sbrked_remains;
		/* We do not try to optimize sbrks here, we go for place. */
		cp = (char*) sbrk(require);
#ifdef DEBUGGING_MSTATS
		sbrks++;
		goodsbrk += require;
#endif
		if (cp == last_sbrk_top) {
		    /* Contiguous extension succeeded. */
		    sbrked_remains = 0;
		    last_sbrk_top = cp + require;
		} else {
		    /* Report the failure: */
		    if (sbrked_remains)
			add_to_chain((void*)(last_sbrk_top - sbrked_remains),
				     sbrked_remains, 0);
		    add_to_chain((void*)cp, require, 0);
		    sbrk_good -= SBRK_FAILURE_PRICE;
		    sbrked_remains = 0;
		    last_sbrk_top = 0;
		    last_op = 0;
		    goto hard_way;
		}
	    }

#ifdef DEBUGGING_MSTATS
	    nmalloc[bucket]--;
	    nmalloc[pow * BUCKETS_PER_POW2]++;
#endif
	    *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
	    goto inplace_label;
	} else {
	  hard_way:
	    /* Plain move: allocate, copy the smaller of old/new size, free. */
	    MUTEX_UNLOCK(&malloc_mutex);
	    if ((res = (char*)malloc(nbytes)) == NULL)
		return (NULL);
	    if (cp != res)			/* common optimization */
		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
	    if (was_alloced)
		free(cp);
	}

	/* NOTE(review): "0x%lu" below prints the address in decimal despite
	 * the 0x prefix - presumably should be "0x%lx"; verify before use. */
	DEBUG_m(PerlIO_printf(Perl_debug_log, "0x%lu: (%05lu) rfree\n",
			      (unsigned long)res,(unsigned long)(an++)));
	DEBUG_m(PerlIO_printf(Perl_debug_log,
			      "0x%lx: (%05lu) realloc %ld bytes\n",
			      (unsigned long)res,(unsigned long)(an++),
			      (long)size));
	return ((Malloc_t)res);
}
1264
1265/*
1266 * Search ``srchlen'' elements of each free list for a block whose
1267 * header starts at ``freep''. If srchlen is -1 search the whole list.
1268 * Return bucket number, or -1 if not found.
1269 */
ee0007ab 1270static int
8ac85365 1271findbucket(union overhead *freep, int srchlen)
8d063cd8
LW
1272{
1273 register union overhead *p;
1274 register int i, j;
1275
1276 for (i = 0; i < NBUCKETS; i++) {
1277 j = 0;
1278 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
1279 if (p == freep)
1280 return (i);
1281 j++;
1282 }
1283 }
1284 return (-1);
1285}
1286
cf5c4ad8 1287Malloc_t
8ac85365 1288calloc(register size_t elements, register size_t size)
cf5c4ad8
PP
1289{
1290 long sz = elements * size;
1291 Malloc_t p = malloc(sz);
1292
1293 if (p) {
1294 memset((void*)p, 0, sz);
1295 }
1296 return p;
1297}
1298
e8bc2b5c
GS
1299MEM_SIZE
1300malloced_size(void *p)
1301{
1302 int bucket = OV_INDEX((union overhead *)p);
1303
1304 return BUCKET_SIZE_REAL(bucket);
1305}
1306
c07a80fd 1307#ifdef DEBUGGING_MSTATS
e8bc2b5c
GS
1308
1309# ifdef BUCKETS_ROOT2
1310# define MIN_EVEN_REPORT 6
1311# else
1312# define MIN_EVEN_REPORT MIN_BUCKET
1313# endif
8d063cd8
LW
1314/*
1315 * mstats - print out statistics about malloc
1316 *
1317 * Prints two lines of numbers, one showing the length of the free list
1318 * for each size category, the second showing the number of mallocs -
1319 * frees for each size category.
1320 */
void
dump_mstats(char *s)
{
  	register int i, j;
  	register union overhead *p;
	int topbucket=0, topbucket_ev=0, topbucket_odd=0, totfree=0, total=0;
	u_int nfree[NBUCKETS];		/* current free-list length per bucket */
	int total_chain = 0;		/* bytes parked on chunk_chain */
	struct chunk_chain_s* nextchain = chunk_chain;

	/*
	 * Walk every free list to count free chunks, and tally total
	 * free/allocated bytes from the per-bucket nmalloc[] counters.
	 * topbucket / topbucket_ev / topbucket_odd remember the highest
	 * bucket (overall / even-indexed / odd-indexed) ever used, to
	 * bound the report columns below.
	 */
  	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
  		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
  			;
		nfree[i] = j;
		totfree += nfree[i] * BUCKET_SIZE_REAL(i);
		total += nmalloc[i] * BUCKET_SIZE_REAL(i);
		if (nmalloc[i]) {
		    i % 2 ? (topbucket_odd = i) : (topbucket_ev = i);
		    topbucket = i;
		}
  	}
  	if (s)
	    PerlIO_printf(PerlIO_stderr(),
			  "Memory allocation statistics %s (buckets %ld(%ld)..%ld(%ld)\n",
			  s,
			  (long)BUCKET_SIZE_REAL(MIN_BUCKET),
			  (long)BUCKET_SIZE(MIN_BUCKET),
			  (long)BUCKET_SIZE_REAL(topbucket), (long)BUCKET_SIZE(topbucket));
	/* One column per power-of-two bucket: free chunk counts first. */
  	PerlIO_printf(PerlIO_stderr(), "%8d free:", totfree);
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	/* Second row for the odd (sqrt(2)-spaced) buckets. */
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nfree[i]);
  	}
#endif
	/* Same layout for in-use counts (mallocs minus frees). */
  	PerlIO_printf(PerlIO_stderr(), "\n%8d used:", total - totfree);
  	for (i = MIN_EVEN_REPORT; i <= topbucket; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#ifdef BUCKETS_ROOT2
	PerlIO_printf(PerlIO_stderr(), "\n\t   ");
  	for (i = MIN_BUCKET + 1; i <= topbucket_odd; i += BUCKETS_PER_POW2) {
  		PerlIO_printf(PerlIO_stderr(),
			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
			       ? " %5d"
			       : ((i < 12*BUCKETS_PER_POW2) ? " %3d" : " %d")),
			      nmalloc[i] - nfree[i]);
  	}
#endif
	/* Sum the sizes of chunks parked on the reuse chain. */
	while (nextchain) {
	    total_chain += nextchain->size;
	    nextchain = nextchain->next;
	}
	PerlIO_printf(PerlIO_stderr(), "\nTotal sbrk(): %d/%d:%d. Odd ends: pad+heads+chain+tail: %d+%d+%d+%d.\n",
		      goodsbrk + sbrk_slack, sbrks, sbrk_good, sbrk_slack,
		      start_slack, total_chain, sbrked_remains);
}
1393#else
/* Stub kept so callers link cleanly when malloc statistics are not
 * collected (DEBUGGING_MSTATS undefined): deliberately a no-op. */
void
dump_mstats(char *s)
{
}
1398#endif
a687059c 1399#endif /* lint */
cf5c4ad8
PP
1400
1401
1402#ifdef USE_PERL_SBRK
1403
760ac839
LW
1404# ifdef NeXT
1405# define PERL_SBRK_VIA_MALLOC
1406# endif
1407
1408# ifdef PERL_SBRK_VIA_MALLOC
72e5b9db 1409# if defined(HIDEMYMALLOC) || defined(EMBEDMYMALLOC)
760ac839
LW
1410# undef malloc
1411# else
72e5b9db 1412# include "Error: -DPERL_SBRK_VIA_MALLOC needs -D(HIDE|EMBED)MYMALLOC"
760ac839 1413# endif
cf5c4ad8
PP
1414
1415/* it may seem schizophrenic to use perl's malloc and let it call system */
1416/* malloc, the reason for that is only the 3.2 version of the OS that had */
1417/* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
1418/* end to the cores */
1419
760ac839 1420# define SYSTEM_ALLOC(a) malloc(a)
cf5c4ad8 1421
760ac839 1422# endif /* PERL_SBRK_VIA_MALLOC */
cf5c4ad8
PP
1423
1424static IV Perl_sbrk_oldchunk;
1425static long Perl_sbrk_oldsize;
1426
760ac839
LW
1427# define PERLSBRK_32_K (1<<15)
1428# define PERLSBRK_64_K (1<<16)
cf5c4ad8 1429
b63effbb 1430Malloc_t
cf5c4ad8
PP
1431Perl_sbrk(size)
1432int size;
1433{
1434 IV got;
1435 int small, reqsize;
1436
1437 if (!size) return 0;
55497cff 1438#ifdef PERL_CORE
cf5c4ad8
PP
1439 reqsize = size; /* just for the DEBUG_m statement */
1440#endif
57569e04
HM
1441#ifdef PACK_MALLOC
1442 size = (size + 0x7ff) & ~0x7ff;
1443#endif
cf5c4ad8
PP
1444 if (size <= Perl_sbrk_oldsize) {
1445 got = Perl_sbrk_oldchunk;
1446 Perl_sbrk_oldchunk += size;
1447 Perl_sbrk_oldsize -= size;
1448 } else {
1449 if (size >= PERLSBRK_32_K) {
1450 small = 0;
1451 } else {
cf5c4ad8
PP
1452 size = PERLSBRK_64_K;
1453 small = 1;
1454 }
1455 got = (IV)SYSTEM_ALLOC(size);
57569e04
HM
1456#ifdef PACK_MALLOC
1457 got = (got + 0x7ff) & ~0x7ff;
1458#endif
cf5c4ad8
PP
1459 if (small) {
1460 /* Chunk is small, register the rest for future allocs. */
1461 Perl_sbrk_oldchunk = got + reqsize;
1462 Perl_sbrk_oldsize = size - reqsize;
1463 }
1464 }
1465
fb73857a 1466 DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%lx\n",
cf5c4ad8 1467 size, reqsize, Perl_sbrk_oldsize, got));
cf5c4ad8
PP
1468
1469 return (void *)got;
1470}
1471
1472#endif /* ! defined USE_PERL_SBRK */