/* vmem.h
 *
 * (c) 1999 Microsoft Corporation. All rights reserved.
 * Portions (c) 1999 ActiveState Tool Corp, http://www.ActiveState.com/
 *
 * You may distribute under the terms of either the GNU General Public
 * License or the Artistic License, as specified in the README file.
 *
 * Options:
 *
 * Defining _USE_MSVCRT_MEM_ALLOC will cause all memory allocations
 * to be forwarded to MSVCRT.DLL. Defining _USE_LINKED_LIST as well will
 * track all allocations in a doubly linked list, so that the host can
 * free all memory allocated when it goes away.
 * If _USE_MSVCRT_MEM_ALLOC is not defined then Knuth's boundary tag algorithm
 * is used; defining _USE_BUDDY_BLOCKS will use Knuth's algorithm R
 * (Buddy system reservation)
 *
 */

#ifndef ___VMEM_H_INC___
#define ___VMEM_H_INC___

#ifndef UNDER_CE
#define _USE_MSVCRT_MEM_ALLOC
#endif
#define _USE_LINKED_LIST

// #define _USE_BUDDY_BLOCKS

// #define _DEBUG_MEM
#ifdef _DEBUG_MEM
#define ASSERT(f) if(!(f)) DebugBreak();

inline void MEMODS(char *str)
{
    OutputDebugString(str);
    OutputDebugString("\n");
}

inline void MEMODSlx(char *str, long x)
{
    char szBuffer[512];
    sprintf(szBuffer, "%s %lx\n", str, x);
    OutputDebugString(szBuffer);
}

#define WALKHEAP() WalkHeap(0)
#define WALKHEAPTRACE() WalkHeap(1)

#else

#define ASSERT(f)
#define MEMODS(x)
#define MEMODSlx(x, y)
#define WALKHEAP()
#define WALKHEAPTRACE()

#endif

#ifdef _USE_MSVCRT_MEM_ALLOC

#ifndef _USE_LINKED_LIST
// #define _USE_LINKED_LIST
#endif
/*
 * Pass all memory requests through to msvcrt.dll.
 * Optionally track them by using a doubly linked header.
 */

typedef void (*LPFREE)(void *block);
typedef void* (*LPMALLOC)(size_t size);
typedef void* (*LPREALLOC)(void *block, size_t size);
#ifdef _USE_LINKED_LIST
class VMem;
typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER;
typedef struct _MemoryBlockHeader {
    PMEMORY_BLOCK_HEADER pNext;
    PMEMORY_BLOCK_HEADER pPrev;
    VMem *owner;
} MEMORY_BLOCK_HEADER, *PMEMORY_BLOCK_HEADER;
#endif

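/*
 * Layout sketch (assuming _USE_LINKED_LIST): each tracked allocation is
 * prefixed with a MEMORY_BLOCK_HEADER and the caller only ever sees the
 * payload that follows it:
 *
 *   [ pNext | pPrev | owner ][ user data ... ]
 *   ^header                  ^pointer returned by Malloc() == (header + 1)
 *
 * The snippet below is illustrative only and mirrors the pointer arithmetic
 * used in Malloc()/Free() further down; 'heap' is a hypothetical VMem*.
 */
#if 0
void* user = heap->Malloc(32);
PMEMORY_BLOCK_HEADER hdr =
    (PMEMORY_BLOCK_HEADER)(((char*)user) - sizeof(MEMORY_BLOCK_HEADER));
ASSERT(hdr->owner == heap);         /* block remembers which pool owns it */
ASSERT((void*)(hdr + 1) == user);   /* payload starts right after header  */
#endif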
class VMem
{
public:
    VMem();
    ~VMem();
    virtual void* Malloc(size_t size);
    virtual void* Realloc(void* pMem, size_t size);
    virtual void Free(void* pMem);
    virtual void GetLock(void);
    virtual void FreeLock(void);
    virtual int IsLocked(void);
    virtual long Release(void);
    virtual long AddRef(void);

    inline BOOL CreateOk(void)
    {
        return TRUE;
    };

protected:
#ifdef _USE_LINKED_LIST
    void LinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
        PMEMORY_BLOCK_HEADER next = m_Dummy.pNext;
        m_Dummy.pNext = ptr;
        ptr->pPrev = &m_Dummy;
        ptr->pNext = next;
        ptr->owner = this;
        next->pPrev = ptr;
    }
    void UnlinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
        PMEMORY_BLOCK_HEADER next = ptr->pNext;
        PMEMORY_BLOCK_HEADER prev = ptr->pPrev;
        prev->pNext = next;
        next->pPrev = prev;
    }

    MEMORY_BLOCK_HEADER m_Dummy;
#endif

    long m_lRefCount;           // number of current users
    CRITICAL_SECTION m_cs;      // access lock
    HINSTANCE m_hLib;
    LPFREE m_pfree;
    LPMALLOC m_pmalloc;
    LPREALLOC m_prealloc;
};

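/*
 * Illustrative sketch (not part of this header): with _USE_LINKED_LIST
 * defined, a host can lean on the destructor to reclaim anything that was
 * never explicitly freed. host_cleanup_demo() is hypothetical; only the
 * VMem methods declared above are assumed.
 */
#if 0
static void host_cleanup_demo(void)
{
    VMem* heap = new VMem();        /* reference count starts at 1 */
    void* p = heap->Malloc(128);    /* tracked via the linked-list header */
    void* q = heap->Malloc(64);     /* deliberately never freed */
    heap->Free(p);
    heap->Release();                /* ~VMem() walks the list and frees q */
}
#endif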
VMem::VMem()
{
    m_lRefCount = 1;
    InitializeCriticalSection(&m_cs);
#ifdef _USE_LINKED_LIST
    m_Dummy.pNext = m_Dummy.pPrev = &m_Dummy;
    m_Dummy.owner = this;
#endif
    m_hLib = LoadLibrary("msvcrt.dll");
    if (m_hLib) {
        m_pfree = (LPFREE)GetProcAddress(m_hLib, "free");
        m_pmalloc = (LPMALLOC)GetProcAddress(m_hLib, "malloc");
        m_prealloc = (LPREALLOC)GetProcAddress(m_hLib, "realloc");
    }
}

VMem::~VMem(void)
{
#ifdef _USE_LINKED_LIST
    while (m_Dummy.pNext != &m_Dummy) {
        Free(m_Dummy.pNext+1);
    }
#endif
    if (m_hLib)
        FreeLibrary(m_hLib);
    DeleteCriticalSection(&m_cs);
}

void* VMem::Malloc(size_t size)
{
#ifdef _USE_LINKED_LIST
    GetLock();
    PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)m_pmalloc(size+sizeof(MEMORY_BLOCK_HEADER));
    if (!ptr) {
        FreeLock();
        return NULL;
    }
    LinkBlock(ptr);
    FreeLock();
    return (ptr+1);
#else
    return m_pmalloc(size);
#endif
}

void* VMem::Realloc(void* pMem, size_t size)
{
#ifdef _USE_LINKED_LIST
    if (!pMem)
        return Malloc(size);

    if (!size) {
        Free(pMem);
        return NULL;
    }

    GetLock();
    PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
    UnlinkBlock(ptr);
    ptr = (PMEMORY_BLOCK_HEADER)m_prealloc(ptr, size+sizeof(MEMORY_BLOCK_HEADER));
    if (!ptr) {
        FreeLock();
        return NULL;
    }
    LinkBlock(ptr);
    FreeLock();

    return (ptr+1);
#else
    return m_prealloc(pMem, size);
#endif
}

void VMem::Free(void* pMem)
{
#ifdef _USE_LINKED_LIST
    if (pMem) {
        PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
        if (ptr->owner != this) {
            if (ptr->owner) {
#if 1
                dTHX;
                int *nowhere = NULL;
                Perl_warn(aTHX_ "Free to wrong pool %p not %p",this,ptr->owner);
                *nowhere = 0;   /* this segfault is deliberate,
                                   so you can see the stack trace */
#else
                ptr->owner->Free(pMem);
#endif
            }
            return;
        }
        GetLock();
        UnlinkBlock(ptr);
        ptr->owner = NULL;
        m_pfree(ptr);
        FreeLock();
    }
#else
    m_pfree(pMem);
#endif
}

void VMem::GetLock(void)
{
    EnterCriticalSection(&m_cs);
}

void VMem::FreeLock(void)
{
    LeaveCriticalSection(&m_cs);
}

int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95. Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
        LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0); /* alarm bells for when somebody calls this */
    return 0;
#endif
}

long VMem::Release(void)
{
    long lCount = InterlockedDecrement(&m_lRefCount);
    if(!lCount)
        delete this;
    return lCount;
}

long VMem::AddRef(void)
{
    long lCount = InterlockedIncrement(&m_lRefCount);
    return lCount;
}

#else /* _USE_MSVCRT_MEM_ALLOC */

/*
 * Knuth's boundary tag algorithm Vol #1, Page 440.
 *
 * Each block in the heap has tag words before and after it,
 *      TAG
 *      block
 *      TAG
 * The size is stored in these tags as a long word, and includes the 8 bytes
 * of overhead that the boundary tags consume. Blocks are allocated on long
 * word boundaries, so the size is always multiples of long words. When the
 * block is allocated, bit 0, (the tag bit), of the size is set to 1. When
 * a block is freed, it is merged with adjacent free blocks, and the tag bit
 * is set to 0.
 *
 * A linked list is used to manage the free list. The first two long words of
 * the block contain double links. These links are only valid when the block
 * is freed, therefore space needs to be reserved for them. Thus, the minimum
 * block size (not counting the tags) is 8 bytes.
 *
 * Since memory allocation may occur on a single thread, explicit locks are
 * not provided.
 *
 */

const long lAllocStart = 0x00020000;    /* start at 128K */
const long minBlockSize = sizeof(void*)*2;
const long sizeofTag = sizeof(long);
const long blockOverhead = sizeofTag*2;
const long minAllocSize = minBlockSize+blockOverhead;
#ifdef _USE_BUDDY_BLOCKS
const long lSmallBlockSize = 1024;
const size_t nListEntries = ((lSmallBlockSize-minAllocSize)/sizeof(long));

inline size_t CalcEntry(size_t size)
{
    ASSERT((size&(sizeof(long)-1)) == 0);
    return ((size - minAllocSize) / sizeof(long));
}
#endif

typedef BYTE* PBLOCK;   /* pointer to a memory block */

/*
 * Macros for accessing hidden fields in a memory block:
 *
 * SIZE     size of this block (tag bit 0 is 1 if block is allocated)
 * PSIZE    size of previous physical block
 */

#define SIZE(block)     (*(ULONG*)(((PBLOCK)(block))-sizeofTag))
#define PSIZE(block)    (*(ULONG*)(((PBLOCK)(block))-(blockOverhead)))
inline void SetTags(PBLOCK block, long size)
{
    SIZE(block) = size;
    PSIZE(block+(size&~1)) = size;
}

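/*
 * Worked sketch of the boundary tags (assuming 32-bit longs, so sizeofTag
 * is 4 and blockOverhead is 8). A block pointer addresses the payload; its
 * size sits in the long immediately before it and is mirrored into the
 * PSIZE slot of the next physical block:
 *
 *   ... | PSIZE | SIZE | payload (SIZE-8 bytes) | PSIZE | SIZE | ...
 *                      ^block                                  ^next block
 *
 * Bit 0 of SIZE is the allocated flag.
 */
#if 0
/* Illustrative only -- 'p' is a hypothetical block inside a heap area. */
SetTags(p, 24);         /* free block: SIZE(p)==24 and PSIZE(p+24)==24    */
SetTags(p, 24 | 1);     /* same block marked allocated (tag bit set)      */
size_t payload = (SIZE(p) & ~1) - blockOverhead;    /* 24 - 8 == 16 bytes */
#endif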
/*
 * Free list pointers
 * PREV pointer to previous block
 * NEXT pointer to next block
 */

#define PREV(block) (*(PBLOCK*)(block))
#define NEXT(block) (*(PBLOCK*)((block)+sizeof(PBLOCK)))
inline void SetLink(PBLOCK block, PBLOCK prev, PBLOCK next)
{
    PREV(block) = prev;
    NEXT(block) = next;
}
inline void Unlink(PBLOCK p)
{
    PBLOCK next = NEXT(p);
    PBLOCK prev = PREV(p);
    NEXT(prev) = next;
    PREV(next) = prev;
}
#ifndef _USE_BUDDY_BLOCKS
inline void AddToFreeList(PBLOCK block, PBLOCK pInList)
{
    PBLOCK next = NEXT(pInList);
    NEXT(pInList) = block;
    SetLink(block, pInList, next);
    PREV(next) = block;
}
#endif

/* Macro for rounding up to the next sizeof(long) */
#define ROUND_UP(n)     (((ULONG)(n)+sizeof(long)-1)&~(sizeof(long)-1))
#define ROUND_UP64K(n)  (((ULONG)(n)+0x10000-1)&~(0x10000-1))
#define ROUND_DOWN(n)   ((ULONG)(n)&~(sizeof(long)-1))

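/*
 * Worked examples, assuming sizeof(long) == 4:
 *   ROUND_UP(13)       -> 16        (next multiple of sizeof(long))
 *   ROUND_DOWN(13)     -> 12
 *   ROUND_UP64K(70000) -> 0x20000   (next multiple of 64K, i.e. 131072)
 */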
/*
 * HeapRec - a list of all non-contiguous heap areas
 *
 * Each record in this array contains information about a non-contiguous heap area.
 */

const int maxHeaps = 32;        /* 64 was overkill */
const long lAllocMax = 0x80000000;  /* max size of allocation */

#ifdef _USE_BUDDY_BLOCKS
typedef struct _FreeListEntry
{
    BYTE Dummy[minAllocSize];   // dummy free block
} FREE_LIST_ENTRY, *PFREE_LIST_ENTRY;
#endif

#ifndef _USE_BUDDY_BLOCKS
#define USE_BIGBLOCK_ALLOC
#endif
/*
 * performance tuning
 * Use VirtualAlloc() for blocks bigger than nMaxHeapAllocSize since
 * Windows 95/98/Me have heap managers that are designed for memory
 * blocks smaller than four megabytes.
 */

#ifdef USE_BIGBLOCK_ALLOC
const int nMaxHeapAllocSize = (1024*512);   /* don't allocate anything larger than this from the heap */
#endif

typedef struct _HeapRec
{
    PBLOCK base;        /* base of heap area */
    ULONG len;          /* size of heap area */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL bBigBlock;     /* was allocated using VirtualAlloc */
#endif
} HeapRec;

class VMem
{
public:
    VMem();
    ~VMem();
    virtual void* Malloc(size_t size);
    virtual void* Realloc(void* pMem, size_t size);
    virtual void Free(void* pMem);
    virtual void GetLock(void);
    virtual void FreeLock(void);
    virtual int IsLocked(void);
    virtual long Release(void);
    virtual long AddRef(void);

    inline BOOL CreateOk(void)
    {
#ifdef _USE_BUDDY_BLOCKS
        return TRUE;
#else
        return m_hHeap != NULL;
#endif
    };

    void ReInit(void);

protected:
    void Init(void);
    int Getmem(size_t size);

    int HeapAdd(void* ptr, size_t size
#ifdef USE_BIGBLOCK_ALLOC
        , BOOL bBigBlock
#endif
        );

    void* Expand(void* block, size_t size);

#ifdef _USE_BUDDY_BLOCKS
    inline PBLOCK GetFreeListLink(int index)
    {
        if (index >= nListEntries)
            index = nListEntries-1;
        return &m_FreeList[index].Dummy[sizeofTag];
    }
    inline PBLOCK GetOverSizeFreeList(void)
    {
        return &m_FreeList[nListEntries-1].Dummy[sizeofTag];
    }
    inline PBLOCK GetEOLFreeList(void)
    {
        return &m_FreeList[nListEntries].Dummy[sizeofTag];
    }

    void AddToFreeList(PBLOCK block, size_t size)
    {
        PBLOCK pFreeList = GetFreeListLink(CalcEntry(size));
        PBLOCK next = NEXT(pFreeList);
        NEXT(pFreeList) = block;
        SetLink(block, pFreeList, next);
        PREV(next) = block;
    }
#endif
    inline size_t CalcAllocSize(size_t size)
    {
        /*
         * Adjust the real size of the block to be a multiple of sizeof(long), and add
         * the overhead for the boundary tags. Disallow negative or zero sizes.
         */
        return (size < minBlockSize) ? minAllocSize : (size_t)ROUND_UP(size) + blockOverhead;
    }
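    /*
     * Worked examples (assuming a 32-bit build): minBlockSize == 8,
     * blockOverhead == 8, minAllocSize == 16, so:
     *   CalcAllocSize(1)  -> 16   (clamped to the minimum)
     *   CalcAllocSize(13) -> 24   (13 rounded up to 16, plus 8 overhead)
     *   CalcAllocSize(16) -> 24
     */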

#ifdef _USE_BUDDY_BLOCKS
    FREE_LIST_ENTRY m_FreeList[nListEntries+1]; // free list with dummy end of list entry as well
#else
    HANDLE m_hHeap;                 // memory heap for this script
    char m_FreeDummy[minAllocSize]; // dummy free block
    PBLOCK m_pFreeList;             // pointer to first block on free list
#endif
    PBLOCK m_pRover;                // roving pointer into the free list
    HeapRec m_heaps[maxHeaps];      // list of all non-contiguous heap areas
    int m_nHeaps;                   // no. of heaps in m_heaps
    long m_lAllocSize;              // current alloc size
    long m_lRefCount;               // number of current users
    CRITICAL_SECTION m_cs;          // access lock

#ifdef _DEBUG_MEM
    void WalkHeap(int complete);
    void MemoryUsageMessage(char *str, long x, long y, int c);
    FILE* m_pLog;
#endif
};

VMem::VMem()
{
    m_lRefCount = 1;
#ifndef _USE_BUDDY_BLOCKS
    BOOL bRet = (NULL != (m_hHeap = HeapCreate(HEAP_NO_SERIALIZE,
                lAllocStart,    /* initial size of heap */
                0)));           /* no upper limit on size of heap */
    ASSERT(bRet);
#endif

    InitializeCriticalSection(&m_cs);
#ifdef _DEBUG_MEM
    m_pLog = 0;
#endif

    Init();
}

VMem::~VMem(void)
{
#ifndef _USE_BUDDY_BLOCKS
    ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, NULL));
#endif
    WALKHEAPTRACE();

    DeleteCriticalSection(&m_cs);
#ifdef _USE_BUDDY_BLOCKS
    for(int index = 0; index < m_nHeaps; ++index) {
        VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
    }
#else /* !_USE_BUDDY_BLOCKS */
#ifdef USE_BIGBLOCK_ALLOC
    for(int index = 0; index < m_nHeaps; ++index) {
        if (m_heaps[index].bBigBlock) {
            VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
        }
    }
#endif
    BOOL bRet = HeapDestroy(m_hHeap);
    ASSERT(bRet);
#endif /* _USE_BUDDY_BLOCKS */
}

void VMem::ReInit(void)
{
    for(int index = 0; index < m_nHeaps; ++index) {
#ifdef _USE_BUDDY_BLOCKS
        VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
#else
#ifdef USE_BIGBLOCK_ALLOC
        if (m_heaps[index].bBigBlock) {
            VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
        }
        else
#endif
            HeapFree(m_hHeap, HEAP_NO_SERIALIZE, m_heaps[index].base);
#endif /* _USE_BUDDY_BLOCKS */
    }

    Init();
}

void VMem::Init(void)
{
#ifdef _USE_BUDDY_BLOCKS
    PBLOCK pFreeList;
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the end of list marker.
     * Set the number of non-contiguous heaps to zero.
     * Set the next allocation size.
     */
    for (int index = 0; index < nListEntries; ++index) {
        pFreeList = GetFreeListLink(index);
        SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
        PREV(pFreeList) = NEXT(pFreeList) = pFreeList;
    }
    pFreeList = GetEOLFreeList();
    SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
    PREV(pFreeList) = NEXT(pFreeList) = NULL;
    m_pRover = GetOverSizeFreeList();
#else
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the number of non-contiguous heaps to zero.
     */
    m_pFreeList = m_pRover = (PBLOCK)(&m_FreeDummy[sizeofTag]);
    PSIZE(m_pFreeList+minAllocSize) = SIZE(m_pFreeList) = 0;
    PREV(m_pFreeList) = NEXT(m_pFreeList) = m_pFreeList;
#endif

    m_nHeaps = 0;
    m_lAllocSize = lAllocStart;
}

void* VMem::Malloc(size_t size)
{
    WALKHEAP();

    PBLOCK ptr;
    size_t lsize, rem;
    /*
     * Disallow negative or zero sizes.
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
        return NULL;

#ifdef _USE_BUDDY_BLOCKS
    /*
     * Check the free list of small blocks; if one of this size is free, use it.
     * Otherwise check the rover. If that has no suitable block, scan the free
     * list entries and use the first free block, splitting it if needed;
     * stop at the end-of-list marker.
     */
    {
        int index = CalcEntry(realsize);
        if (index < nListEntries-1) {
            ptr = GetFreeListLink(index);
            lsize = SIZE(ptr);
            if (lsize >= realsize) {
                rem = lsize - realsize;
                if(rem < minAllocSize) {
                    /* Unlink the block from the free list. */
                    Unlink(ptr);
                }
                else {
                    /*
                     * split the block
                     * The remainder is big enough to split off into a new block.
                     * Use the end of the block, resize the beginning of the block
                     * no need to change the free list.
                     */
                    SetTags(ptr, rem);
                    ptr += SIZE(ptr);
                    lsize = realsize;
                }
                SetTags(ptr, lsize | 1);
                return ptr;
            }
            ptr = m_pRover;
            lsize = SIZE(ptr);
            if (lsize >= realsize) {
                rem = lsize - realsize;
                if(rem < minAllocSize) {
                    /* Unlink the block from the free list. */
                    Unlink(ptr);
                }
                else {
                    /*
                     * split the block
                     * The remainder is big enough to split off into a new block.
                     * Use the end of the block, resize the beginning of the block
                     * no need to change the free list.
                     */
                    SetTags(ptr, rem);
                    ptr += SIZE(ptr);
                    lsize = realsize;
                }
                SetTags(ptr, lsize | 1);
                return ptr;
            }
            ptr = GetFreeListLink(index+1);
            while (NEXT(ptr)) {
                lsize = SIZE(ptr);
                if (lsize >= realsize) {
                    size_t rem = lsize - realsize;
                    if(rem < minAllocSize) {
                        /* Unlink the block from the free list. */
                        Unlink(ptr);
                    }
                    else {
                        /*
                         * split the block
                         * The remainder is big enough to split off into a new block.
                         * Use the end of the block, resize the beginning of the block
                         * no need to change the free list.
                         */
                        SetTags(ptr, rem);
                        ptr += SIZE(ptr);
                        lsize = realsize;
                    }
                    SetTags(ptr, lsize | 1);
                    return ptr;
                }
                ptr += sizeof(FREE_LIST_ENTRY);
            }
        }
    }
#endif

    /*
     * Start searching the free list at the rover. If we arrive back at rover without
     * finding anything, allocate some memory from the heap and try again.
     */
    ptr = m_pRover;     /* start searching at rover */
    int loops = 2;      /* allow two times through the loop */
    for(;;) {
        lsize = SIZE(ptr);
        ASSERT((lsize&1)==0);
        /* is block big enough? */
        if(lsize >= realsize) {
            /* if the remainder is too small, don't bother splitting the block. */
            rem = lsize - realsize;
            if(rem < minAllocSize) {
                if(m_pRover == ptr)
                    m_pRover = NEXT(ptr);

                /* Unlink the block from the free list. */
                Unlink(ptr);
            }
            else {
                /*
                 * split the block
                 * The remainder is big enough to split off into a new block.
                 * Use the end of the block, resize the beginning of the block
                 * no need to change the free list.
                 */
                SetTags(ptr, rem);
                ptr += SIZE(ptr);
                lsize = realsize;
            }
            /* Set the boundary tags to mark it as allocated. */
            SetTags(ptr, lsize | 1);
            return ((void *)ptr);
        }

        /*
         * This block was unsuitable. If we've gone through this list once already without
         * finding anything, allocate some new memory from the heap and try again.
         */
        ptr = NEXT(ptr);
        if(ptr == m_pRover) {
            if(!(loops-- && Getmem(realsize))) {
                return NULL;
            }
            ptr = m_pRover;
        }
    }
}

void* VMem::Realloc(void* block, size_t size)
{
    WALKHEAP();

    /* if size is zero, free the block. */
    if(size == 0) {
        Free(block);
        return (NULL);
    }

    /* if block pointer is NULL, do a Malloc(). */
    if(block == NULL)
        return Malloc(size);

    /*
     * Grow or shrink the block in place.
     * if the block grows then the next block will be used if free
     */
    if(Expand(block, size) != NULL)
        return block;

    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize)
        return NULL;

    /*
     * See if the previous block is free and big enough to cover the new size
     * when merged with the current block.
     */
    PBLOCK ptr = (PBLOCK)block;
    size_t cursize = SIZE(ptr) & ~1;
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0 && (psize + cursize) >= realsize) {
        PBLOCK prev = ptr - psize;
        if(m_pRover == prev)
            m_pRover = NEXT(prev);

        /* Unlink the previous block from the free list. */
        Unlink(prev);

        /* Copy contents of old block to new location, make it the current block. */
        memmove(prev, ptr, cursize);
        cursize += psize;   /* combine sizes */
        ptr = prev;

        size_t rem = cursize - realsize;
        if(rem >= minAllocSize) {
            /*
             * The remainder is big enough to be a new block. Set boundary
             * tags for the resized block and the new block.
             */
            prev = ptr + realsize;
            /*
             * add the new block to the free list.
             * next block cannot be free
             */
            SetTags(prev, rem);
#ifdef _USE_BUDDY_BLOCKS
            AddToFreeList(prev, rem);
#else
            AddToFreeList(prev, m_pFreeList);
#endif
            cursize = realsize;
        }
        /* Set the boundary tags to mark it as allocated. */
        SetTags(ptr, cursize | 1);
        return ((void *)ptr);
    }

    /* Allocate a new block, copy the old to the new, and free the old. */
    if((ptr = (PBLOCK)Malloc(size)) != NULL) {
        memmove(ptr, block, cursize-blockOverhead);
        Free(block);
    }
    return ((void *)ptr);
}

void VMem::Free(void* p)
{
    WALKHEAP();

    /* Ignore null pointer. */
    if(p == NULL)
        return;

    PBLOCK ptr = (PBLOCK)p;

    /* Check for attempt to free a block that's already free. */
    size_t size = SIZE(ptr);
    if((size&1) == 0) {
        MEMODSlx("Attempt to free previously freed block", (long)p);
        return;
    }
    size &= ~1; /* remove allocated tag */

    /* if previous block is free, add this block to it. */
#ifndef _USE_BUDDY_BLOCKS
    int linked = FALSE;
#endif
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0) {
        ptr -= psize;   /* point to previous block */
        size += psize;  /* merge the sizes of the two blocks */
#ifdef _USE_BUDDY_BLOCKS
        Unlink(ptr);
#else
        linked = TRUE;  /* it's already on the free list */
#endif
    }

    /* if the next physical block is free, merge it with this block. */
    PBLOCK next = ptr + size;   /* point to next physical block */
    size_t nsize = SIZE(next);
    if((nsize&1) == 0) {
        /* block is free move rover if needed */
        if(m_pRover == next)
            m_pRover = NEXT(next);

        /* unlink the next block from the free list. */
        Unlink(next);

        /* merge the sizes of this block and the next block. */
        size += nsize;
    }

    /* Set the boundary tags for the block; */
    SetTags(ptr, size);

    /* Link the block to the head of the free list. */
#ifdef _USE_BUDDY_BLOCKS
    AddToFreeList(ptr, size);
#else
    if(!linked) {
        AddToFreeList(ptr, m_pFreeList);
    }
#endif
}

void VMem::GetLock(void)
{
    EnterCriticalSection(&m_cs);
}

void VMem::FreeLock(void)
{
    LeaveCriticalSection(&m_cs);
}

int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95. Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
        LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0); /* alarm bells for when somebody calls this */
    return 0;
#endif
}


long VMem::Release(void)
{
    long lCount = InterlockedDecrement(&m_lRefCount);
    if(!lCount)
        delete this;
    return lCount;
}

long VMem::AddRef(void)
{
    long lCount = InterlockedIncrement(&m_lRefCount);
    return lCount;
}


int VMem::Getmem(size_t requestSize)
{   /* returns -1 if successful, 0 if not */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL bBigBlock;
#endif
    void *ptr;

    /* Round up size to next multiple of 64K. */
    size_t size = (size_t)ROUND_UP64K(requestSize);

    /*
     * if the size requested is smaller than our current allocation size
     * adjust up
     */
    if(size < (unsigned long)m_lAllocSize)
        size = m_lAllocSize;

    /* Update the size to allocate on the next request */
    if(m_lAllocSize != lAllocMax)
        m_lAllocSize <<= 2;

#ifndef _USE_BUDDY_BLOCKS
    if(m_nHeaps != 0
#ifdef USE_BIGBLOCK_ALLOC
        && !m_heaps[m_nHeaps-1].bBigBlock
#endif
        ) {
        /* Expand the last allocated heap */
        ptr = HeapReAlloc(m_hHeap, HEAP_REALLOC_IN_PLACE_ONLY|HEAP_NO_SERIALIZE,
                m_heaps[m_nHeaps-1].base,
                m_heaps[m_nHeaps-1].len + size);
        if(ptr != 0) {
            HeapAdd(((char*)ptr) + m_heaps[m_nHeaps-1].len, size
#ifdef USE_BIGBLOCK_ALLOC
                , FALSE
#endif
                );
            return -1;
        }
    }
#endif /* _USE_BUDDY_BLOCKS */

    /*
     * if we didn't expand a block to cover the requested size
     * allocate a new Heap
     * the size of this block must include the additional dummy tags at either end
     * the above ROUND_UP64K may not have added any memory to include this.
     */
    if(size == requestSize)
        size = (size_t)ROUND_UP64K(requestSize+(blockOverhead));

Restart:
#ifdef _USE_BUDDY_BLOCKS
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
#else
#ifdef USE_BIGBLOCK_ALLOC
    bBigBlock = FALSE;
    if (size >= nMaxHeapAllocSize) {
        bBigBlock = TRUE;
        ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    }
    else
#endif
        ptr = HeapAlloc(m_hHeap, HEAP_NO_SERIALIZE, size);
#endif /* _USE_BUDDY_BLOCKS */

    if (!ptr) {
        /* try to allocate a smaller chunk */
        size >>= 1;
        if(size > requestSize)
            goto Restart;
    }

    if(ptr == 0) {
        MEMODSlx("HeapAlloc failed on size!!!", size);
        return 0;
    }

#ifdef _USE_BUDDY_BLOCKS
    if (HeapAdd(ptr, size)) {
        VirtualFree(ptr, 0, MEM_RELEASE);
        return 0;
    }
#else
#ifdef USE_BIGBLOCK_ALLOC
    if (HeapAdd(ptr, size, bBigBlock)) {
        if (bBigBlock) {
            VirtualFree(ptr, 0, MEM_RELEASE);
        }
    }
#else
    HeapAdd(ptr, size);
#endif
#endif /* _USE_BUDDY_BLOCKS */
    return -1;
}

int VMem::HeapAdd(void* p, size_t size
#ifdef USE_BIGBLOCK_ALLOC
    , BOOL bBigBlock
#endif
    )
{   /* if the block can be successfully added to the heap, returns 0; otherwise -1. */
    int index;

    /* Check size, then round size down to next long word boundary. */
    if(size < minAllocSize)
        return -1;

    size = (size_t)ROUND_DOWN(size);
    PBLOCK ptr = (PBLOCK)p;

#ifdef USE_BIGBLOCK_ALLOC
    if (!bBigBlock) {
#endif
        /*
         * Search for another heap area that's contiguous with the bottom of this new area.
         * (It should be extremely unusual to find one that's contiguous with the top).
         */
        for(index = 0; index < m_nHeaps; ++index) {
            if(ptr == m_heaps[index].base + (int)m_heaps[index].len) {
                /*
                 * The new block is contiguous with a previously allocated heap area. Add its
                 * length to that of the previous heap. Merge it with the dummy end-of-heap
                 * area marker of the previous heap.
                 */
                m_heaps[index].len += size;
                break;
            }
        }
#ifdef USE_BIGBLOCK_ALLOC
    }
    else {
        index = m_nHeaps;
    }
#endif

    if(index == m_nHeaps) {
        /* The new block is not contiguous, or is BigBlock. Add it to the heap list. */
        if(m_nHeaps == maxHeaps) {
            return -1;  /* too many non-contiguous heaps */
        }
        m_heaps[m_nHeaps].base = ptr;
        m_heaps[m_nHeaps].len = size;
#ifdef USE_BIGBLOCK_ALLOC
        m_heaps[m_nHeaps].bBigBlock = bBigBlock;
#endif
        m_nHeaps++;

        /*
         * Reserve the first LONG in the block for the ending boundary tag of a dummy
         * block at the start of the heap area.
         */
        size -= blockOverhead;
        ptr += blockOverhead;
        PSIZE(ptr) = 1; /* mark the dummy previous block as allocated */
    }

    /*
     * Convert the heap to one large block. Set up its boundary tags, and those of
     * marker block after it. The marker block before the heap will already have
     * been set up if this heap is not contiguous with the end of another heap.
     */
    SetTags(ptr, size | 1);
    PBLOCK next = ptr + size;   /* point to dummy end block */
    SIZE(next) = 1;             /* mark the dummy end block as allocated */

    /*
     * Link the block to the start of the free list by calling free().
     * This will merge the block with any adjacent free blocks.
     */
    Free(ptr);
    return 0;
}


void* VMem::Expand(void* block, size_t size)
{
    /*
     * Disallow negative or zero sizes.
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
        return NULL;

    PBLOCK ptr = (PBLOCK)block;

    /* if the current size is the same as requested, do nothing. */
    size_t cursize = SIZE(ptr) & ~1;
    if(cursize == realsize) {
        return block;
    }

    /* if the block is being shrunk, convert the remainder of the block into a new free block. */
    if(realsize <= cursize) {
        size_t nextsize = cursize - realsize;   /* size of new remainder block */
        if(nextsize >= minAllocSize) {
            /*
             * Split the block
             * Set boundary tags for the resized block and the new block.
             */
            SetTags(ptr, realsize | 1);
            ptr += realsize;

            /*
             * add the new block to the free list.
             * call Free to merge this block with next block if free
             */
            SetTags(ptr, nextsize | 1);
            Free(ptr);
        }

        return block;
    }

    PBLOCK next = ptr + cursize;
    size_t nextsize = SIZE(next);

    /* Check the next block for consistency.*/
    if((nextsize&1) == 0 && (nextsize + cursize) >= realsize) {
        /*
         * The next block is free and big enough. Add the part that's needed
         * to our block, and split the remainder off into a new block.
         */
        if(m_pRover == next)
            m_pRover = NEXT(next);

        /* Unlink the next block from the free list. */
        Unlink(next);
        cursize += nextsize;    /* combine sizes */

        size_t rem = cursize - realsize;    /* size of remainder */
        if(rem >= minAllocSize) {
            /*
             * The remainder is big enough to be a new block.
             * Set boundary tags for the resized block and the new block.
             */
            next = ptr + realsize;
            /*
             * add the new block to the free list.
             * next block cannot be free
             */
            SetTags(next, rem);
#ifdef _USE_BUDDY_BLOCKS
            AddToFreeList(next, rem);
#else
            AddToFreeList(next, m_pFreeList);
#endif
            cursize = realsize;
        }
        /* Set the boundary tags to mark it as allocated. */
        SetTags(ptr, cursize | 1);
        return ((void *)ptr);
    }
    return NULL;
}

#ifdef _DEBUG_MEM
#define LOG_FILENAME ".\\MemLog.txt"

void VMem::MemoryUsageMessage(char *str, long x, long y, int c)
{
    char szBuffer[512];
    if(str) {
        if(!m_pLog)
            m_pLog = fopen(LOG_FILENAME, "w");
        sprintf(szBuffer, str, x, y, c);
        fputs(szBuffer, m_pLog);
    }
    else {
        if(m_pLog) {
            fflush(m_pLog);
            fclose(m_pLog);
            m_pLog = 0;
        }
    }
}

void VMem::WalkHeap(int complete)
{
    if(complete) {
        MemoryUsageMessage(NULL, 0, 0, 0);
        size_t total = 0;
        for(int i = 0; i < m_nHeaps; ++i) {
            total += m_heaps[i].len;
        }
        MemoryUsageMessage("VMem heaps used %d. Total memory %08x\n", m_nHeaps, total, 0);

        /* Walk all the heaps - verify structures */
        for(int index = 0; index < m_nHeaps; ++index) {
            PBLOCK ptr = m_heaps[index].base;
            size_t size = m_heaps[index].len;
#ifndef _USE_BUDDY_BLOCKS
#ifdef USE_BIGBLOCK_ALLOC
            if (!m_heaps[index].bBigBlock)
#endif
                ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, ptr));
#endif

            /* set over reserved header block */
            size -= blockOverhead;
            ptr += blockOverhead;
            PBLOCK pLast = ptr + size;
            ASSERT(PSIZE(ptr) == 1);    /* dummy previous block is allocated */
            ASSERT(SIZE(pLast) == 1);   /* dummy next block is allocated */
            while(ptr < pLast) {
                ASSERT(ptr > m_heaps[index].base);
                size_t cursize = SIZE(ptr) & ~1;
                ASSERT((PSIZE(ptr+cursize) & ~1) == cursize);
                MemoryUsageMessage("Memory Block %08x: Size %08x %c\n", (long)ptr, cursize, (SIZE(ptr)&1) ? 'x' : ' ');
                if(!(SIZE(ptr)&1)) {
                    /* this block is on the free list */
                    PBLOCK tmp = NEXT(ptr);
                    while(tmp != ptr) {
                        ASSERT((SIZE(tmp)&1)==0);
                        if(tmp == m_pFreeList)
                            break;
                        ASSERT(NEXT(tmp));
                        tmp = NEXT(tmp);
                    }
                    if(tmp == ptr) {
                        MemoryUsageMessage("Memory Block %08x: Size %08x free but not in free list\n", (long)ptr, cursize, 0);
                    }
                }
                ptr += cursize;
            }
        }
        MemoryUsageMessage(NULL, 0, 0, 0);
    }
}
#endif /* _DEBUG_MEM */

#endif /* _USE_MSVCRT_MEM_ALLOC */

#endif /* ___VMEM_H_INC___ */