1/* vmem.h
2 *
3 * (c) 1999 Microsoft Corporation. All rights reserved.
4 * Portions (c) 1999 ActiveState Tool Corp, http://www.ActiveState.com/
5 *
6 * You may distribute under the terms of either the GNU General Public
7 * License or the Artistic License, as specified in the README file.
8 *
9 * Options:
10 *
11 * Defining _USE_MSVCRT_MEM_ALLOC will cause all memory allocations
12 * to be forwarded to MSVCRT.DLL. Defining _USE_LINKED_LIST as well will
13 * track all allocations in a doubly linked list, so that the host can
14 * free all memory allocated when it goes away.
15 * If _USE_MSVCRT_MEM_ALLOC is not defined then Knuth's boundary tag algorithm
16 * is used; defining _USE_BUDDY_BLOCKS will use Knuth's algorithm R
17 * (Buddy system reservation)
18 *
19 */
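/*
 * A minimal configuration sketch (illustrative, not part of the original header):
 * a host that wants msvcrt.dll pass-through with host-side cleanup might define
 * the options before including this file, e.g.
 *
 *     #define _USE_MSVCRT_MEM_ALLOC
 *     #define _USE_LINKED_LIST
 *     #include "vmem.h"
 *
 * Leaving _USE_MSVCRT_MEM_ALLOC undefined (as below) selects the boundary-tag
 * allocator instead.
 */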
20
21#ifndef ___VMEM_H_INC___
22#define ___VMEM_H_INC___
23
24// #define _USE_MSVCRT_MEM_ALLOC
25#define _USE_LINKED_LIST
26
27// #define _USE_BUDDY_BLOCKS
28
29// #define _DEBUG_MEM
30#ifdef _DEBUG_MEM
31#define ASSERT(f) if(!(f)) DebugBreak();
32
33inline void MEMODS(char *str)
34{
35 OutputDebugString(str);
36 OutputDebugString("\n");
37}
38
39inline void MEMODSlx(char *str, long x)
40{
41 char szBuffer[512];
42 sprintf(szBuffer, "%s %lx\n", str, x);
43 OutputDebugString(szBuffer);
44}
45
46#define WALKHEAP() WalkHeap(0)
47#define WALKHEAPTRACE() WalkHeap(1)
48
49#else
50
51#define ASSERT(f)
52#define MEMODS(x)
53#define MEMODSlx(x, y)
54#define WALKHEAP()
55#define WALKHEAPTRACE()
56
57#endif
58
59#ifdef _USE_MSVCRT_MEM_ALLOC
60
61#ifndef _USE_LINKED_LIST
62// #define _USE_LINKED_LIST
63#endif
64
65/*
66 * Pass all memory requests through to msvcrt.dll;
67 * optionally track them by using a doubly linked list header.
68 */
69
70typedef void (*LPFREE)(void *block);
71typedef void* (*LPMALLOC)(size_t size);
72typedef void* (*LPREALLOC)(void *block, size_t size);
73#ifdef _USE_LINKED_LIST
74class VMem;
75typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER;
76typedef struct _MemoryBlockHeader {
77 PMEMORY_BLOCK_HEADER pNext;
78 PMEMORY_BLOCK_HEADER pPrev;
79 VMem *owner;
80} MEMORY_BLOCK_HEADER, *PMEMORY_BLOCK_HEADER;
81#endif
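/*
 * Layout sketch for a tracked allocation (illustrative; assumes _USE_LINKED_LIST
 * and a 32-bit build, where the header is three pointers, 12 bytes):
 *
 *     +---------------------------+--------------------------------+
 *     | MEMORY_BLOCK_HEADER       | caller's data (size bytes)     |
 *     | pNext | pPrev | owner     |                                |
 *     +---------------------------+--------------------------------+
 *     ^                           ^
 *     block returned by malloc    block+1, returned to the caller
 *
 * Malloc() below asks msvcrt for size+sizeof(MEMORY_BLOCK_HEADER) bytes, links
 * the header into this VMem's list, and hands back the address just past the
 * header; Free() and Realloc() step back by sizeof(MEMORY_BLOCK_HEADER) to
 * recover it.
 */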
82
83class VMem
84{
85public:
86 VMem();
87 ~VMem();
88 virtual void* Malloc(size_t size);
89 virtual void* Realloc(void* pMem, size_t size);
90 virtual void Free(void* pMem);
91 virtual void GetLock(void);
92 virtual void FreeLock(void);
93 virtual int IsLocked(void);
94 virtual long Release(void);
95 virtual long AddRef(void);
96
97 inline BOOL CreateOk(void)
98 {
99 return TRUE;
100 };
101
102protected:
103#ifdef _USE_LINKED_LIST
104 void LinkBlock(PMEMORY_BLOCK_HEADER ptr)
105 {
106 PMEMORY_BLOCK_HEADER next = m_Dummy.pNext;
107 m_Dummy.pNext = ptr;
108 ptr->pPrev = &m_Dummy;
109 ptr->pNext = next;
110 ptr->owner = this;
111 next->pPrev = ptr;
112 }
113 void UnlinkBlock(PMEMORY_BLOCK_HEADER ptr)
114 {
115 PMEMORY_BLOCK_HEADER next = ptr->pNext;
116 PMEMORY_BLOCK_HEADER prev = ptr->pPrev;
117 prev->pNext = next;
118 next->pPrev = prev;
119 }
120
121 MEMORY_BLOCK_HEADER m_Dummy;
122#endif
123
124 long m_lRefCount; // number of current users
125 CRITICAL_SECTION m_cs; // access lock
126 HINSTANCE m_hLib;
127 LPFREE m_pfree;
128 LPMALLOC m_pmalloc;
129 LPREALLOC m_prealloc;
130};
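/*
 * A usage sketch of the VMem interface (illustrative, not from the original
 * source; the embedding host normally drives this indirectly through Perl's
 * host memory wrappers):
 *
 *     VMem *vmem = new VMem();        // reference count starts at 1
 *     if (vmem->CreateOk()) {
 *         void *p = vmem->Malloc(128);
 *         p = vmem->Realloc(p, 256);
 *         vmem->Free(p);
 *     }
 *     vmem->Release();                // deletes the pool when the count reaches 0
 */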
131
132VMem::VMem()
133{
134 m_lRefCount = 1;
135 InitializeCriticalSection(&m_cs);
136#ifdef _USE_LINKED_LIST
137 m_Dummy.pNext = m_Dummy.pPrev = &m_Dummy;
138 m_Dummy.owner = this;
139#endif
140 m_hLib = LoadLibrary("msvcrt.dll");
141 if (m_hLib) {
142 m_pfree = (LPFREE)GetProcAddress(m_hLib, "free");
143 m_pmalloc = (LPMALLOC)GetProcAddress(m_hLib, "malloc");
144 m_prealloc = (LPREALLOC)GetProcAddress(m_hLib, "realloc");
145 }
146}
147
148VMem::~VMem(void)
149{
150#ifdef _USE_LINKED_LIST
151 while (m_Dummy.pNext != &m_Dummy) {
152 Free(m_Dummy.pNext+1);
153 }
154#endif
155 if (m_hLib)
156 FreeLibrary(m_hLib);
157 DeleteCriticalSection(&m_cs);
158}
159
160void* VMem::Malloc(size_t size)
161{
162#ifdef _USE_LINKED_LIST
163 GetLock();
164 PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)m_pmalloc(size+sizeof(MEMORY_BLOCK_HEADER));
165 LinkBlock(ptr);
166 FreeLock();
167 return (ptr+1);
168#else
169 return m_pmalloc(size);
170#endif
171}
172
173void* VMem::Realloc(void* pMem, size_t size)
174{
175#ifdef _USE_LINKED_LIST
176 if (!pMem)
177 return Malloc(size);
178
179 if (!size) {
180 Free(pMem);
181 return NULL;
182 }
183
184 GetLock();
185 PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
186 UnlinkBlock(ptr);
187 ptr = (PMEMORY_BLOCK_HEADER)m_prealloc(ptr, size+sizeof(MEMORY_BLOCK_HEADER));
188 LinkBlock(ptr);
189 FreeLock();
190
191 return (ptr+1);
192#else
193 return m_prealloc(pMem, size);
194#endif
195}
196
197void VMem::Free(void* pMem)
198{
199#ifdef _USE_LINKED_LIST
200 if (pMem) {
201 PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
202 if (ptr->owner != this) {
203 if (ptr->owner) {
204#if 1
205 dTHX;
206 int *nowhere = NULL;
207 Perl_warn(aTHX_ "Free to wrong pool %p not %p",this,ptr->owner);
208 *nowhere = 0;
209#else
210 ptr->owner->Free(pMem);
211#endif
212 }
213 return;
214 }
215 GetLock();
216 UnlinkBlock(ptr);
217 ptr->owner = NULL;
218 m_pfree(ptr);
219 FreeLock();
220 }
221#else
222 m_pfree(pMem);
223#endif
224}
225
226void VMem::GetLock(void)
227{
228 EnterCriticalSection(&m_cs);
229}
230
231void VMem::FreeLock(void)
232{
233 LeaveCriticalSection(&m_cs);
234}
235
236int VMem::IsLocked(void)
237{
238#if 0
239 /* XXX TryEnterCriticalSection() is not available in some versions
240 * of Windows 95. Since this code is not used anywhere yet, we
241 * skirt the issue for now. */
242 BOOL bAccessed = TryEnterCriticalSection(&m_cs);
243 if(bAccessed) {
244 LeaveCriticalSection(&m_cs);
245 }
246 return !bAccessed;
247#else
248 ASSERT(0); /* alarm bells for when somebody calls this */
249 return 0;
250#endif
251}
252
253long VMem::Release(void)
254{
255 long lCount = InterlockedDecrement(&m_lRefCount);
256 if(!lCount)
257 delete this;
258 return lCount;
259}
260
261long VMem::AddRef(void)
262{
263 long lCount = InterlockedIncrement(&m_lRefCount);
264 return lCount;
265}
266
267#else /* _USE_MSVCRT_MEM_ALLOC */
268
269/*
270 * Knuth's boundary tag algorithm Vol #1, Page 440.
271 *
272 * Each block in the heap has tag words before and after it,
273 * TAG
274 * block
275 * TAG
276 * The size is stored in these tags as a long word, and includes the 8 bytes
277 * of overhead that the boundary tags consume. Blocks are allocated on long
278 * word boundaries, so the size is always multiples of long words. When the
279 * block is allocated, bit 0, (the tag bit), of the size is set to 1. When
280 * a block is freed, it is merged with adjacent free blocks, and the tag bit
281 * is set to 0.
282 *
283 * A linked list is used to manage the free list. The first two long words of
284 * the block contain double links. These links are only valid when the block
285 * is freed, therefore space needs to be reserved for them. Thus, the minimum
286 * block size (not counting the tags) is 8 bytes.
287 *
288 * Since memory allocation may occur on a single thread, explicit locks are not
289 * provided.
290 *
291 */
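/*
 * Worked layout sketch (illustrative, 32-bit long words): a free block with 16
 * usable bytes followed by an allocated block with 8 usable bytes looks like
 *
 *     ...| 24 | PREV NEXT ........ | 24 | 17 | data.... | 17 |...
 *         ^tag  ^free block body    ^tag ^tag ^8 bytes   ^tag
 *
 * 24 = 16 + 8 bytes of tag overhead with the tag bit clear (free);
 * 17 = (8 + 8) | 1, i.e. an allocated block with the tag bit set.
 * PSIZE(block) reads the trailing tag of the previous physical block, which is
 * why every block is bracketed by a tag on both sides.
 */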
292
293const long lAllocStart = 0x00020000; /* start at 128K */
294const long minBlockSize = sizeof(void*)*2;
295const long sizeofTag = sizeof(long);
296const long blockOverhead = sizeofTag*2;
297const long minAllocSize = minBlockSize+blockOverhead;
298#ifdef _USE_BUDDY_BLOCKS
299const long lSmallBlockSize = 1024;
300const size_t nListEntries = ((lSmallBlockSize-minAllocSize)/sizeof(long));
301
302inline size_t CalcEntry(size_t size)
303{
304 ASSERT((size&(sizeof(long)-1)) == 0);
305 return ((size - minAllocSize) / sizeof(long));
306}
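/*
 * Worked example (assuming 32-bit longs, so minAllocSize == 16): CalcEntry maps
 * a rounded allocation size to a small-block free-list slot,
 *     CalcEntry(16) == 0, CalcEntry(20) == 1, CalcEntry(24) == 2, ...
 * GetFreeListLink() below clamps any index at or beyond nListEntries to
 * nListEntries-1, which doubles as the oversize list (GetOverSizeFreeList()).
 */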
307#endif
308
309typedef BYTE* PBLOCK; /* pointer to a memory block */
310
311/*
312 * Macros for accessing hidden fields in a memory block:
313 *
314 * SIZE size of this block (tag bit 0 is 1 if block is allocated)
315 * PSIZE size of previous physical block
316 */
317
318#define SIZE(block) (*(ULONG*)(((PBLOCK)(block))-sizeofTag))
319#define PSIZE(block) (*(ULONG*)(((PBLOCK)(block))-(blockOverhead)))
320inline void SetTags(PBLOCK block, long size)
321{
322 SIZE(block) = size;
323 PSIZE(block+(size&~1)) = size;
324}
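/*
 * Illustrative note (not in the original): SetTags(block, 24) writes 24 both
 * into the word just before the block (read back as SIZE(block)) and into the
 * last word of the block (read back as PSIZE() of the next physical block,
 * i.e. PSIZE(block+24)). For an allocated block the size is passed with bit 0
 * set, e.g. SetTags(block, 24|1); the size&~1 masking keeps the trailing tag
 * at the correct offset while both tags store the full tagged value.
 */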
325
326/*
327 * Free list pointers
328 * PREV pointer to previous block
329 * NEXT pointer to next block
330 */
331
332#define PREV(block) (*(PBLOCK*)(block))
333#define NEXT(block) (*(PBLOCK*)((block)+sizeof(PBLOCK)))
334inline void SetLink(PBLOCK block, PBLOCK prev, PBLOCK next)
335{
336 PREV(block) = prev;
337 NEXT(block) = next;
338}
339inline void Unlink(PBLOCK p)
340{
341 PBLOCK next = NEXT(p);
342 PBLOCK prev = PREV(p);
343 NEXT(prev) = next;
344 PREV(next) = prev;
345}
346#ifndef _USE_BUDDY_BLOCKS
347inline void AddToFreeList(PBLOCK block, PBLOCK pInList)
348{
349 PBLOCK next = NEXT(pInList);
350 NEXT(pInList) = block;
351 SetLink(block, pInList, next);
352 PREV(next) = block;
353}
354#endif
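/*
 * Illustrative trace (not in the original): the free list is circular and
 * doubly linked, anchored on a dummy zero-length block, so an empty list is
 * simply the dummy pointing at itself:
 *
 *     NEXT(dummy) == PREV(dummy) == dummy           // empty list
 *     AddToFreeList(block, dummy);
 *     // now NEXT(dummy) == block, PREV(block) == dummy,
 *     //     NEXT(block) == dummy, PREV(dummy) == block
 *
 * which is why AddToFreeList() and Unlink() need no empty-list special cases.
 */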
355
356/* Macro for rounding up to the next sizeof(long) */
357#define ROUND_UP(n) (((ULONG)(n)+sizeof(long)-1)&~(sizeof(long)-1))
358#define ROUND_UP64K(n) (((ULONG)(n)+0x10000-1)&~(0x10000-1))
359#define ROUND_DOWN(n) ((ULONG)(n)&~(sizeof(long)-1))
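/*
 * Worked examples (assuming 32-bit longs, sizeof(long) == 4):
 *     ROUND_UP(13)   == 16        ROUND_UP(16)         == 16
 *     ROUND_DOWN(13) == 12
 *     ROUND_UP64K(1) == 0x10000   ROUND_UP64K(0x10001) == 0x20000
 */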
360
361/*
362 * HeapRec - a list of all non-contiguous heap areas
363 *
364 * Each record in this array contains information about a non-contiguous heap area.
365 */
366
367const int maxHeaps = 32; /* 64 was overkill */
368const long lAllocMax = 0x80000000; /* max size of allocation */
369
370#ifdef _USE_BUDDY_BLOCKS
371typedef struct _FreeListEntry
372{
373 BYTE Dummy[minAllocSize]; // dummy free block
374} FREE_LIST_ENTRY, *PFREE_LIST_ENTRY;
375#endif
376
377#ifndef _USE_BUDDY_BLOCKS
378#define USE_BIGBLOCK_ALLOC
379#endif
380/*
381 * performance tuning
382 * Use VirtualAlloc() for blocks bigger than nMaxHeapAllocSize since
383 * Windows 95/98/Me have heap managers that are designed for memory
384 * blocks smaller than four megabytes.
385 */
386
387#ifdef USE_BIGBLOCK_ALLOC
388const int nMaxHeapAllocSize = (1024*512); /* don't allocate anything larger than this from the heap */
389#endif
390
391typedef struct _HeapRec
392{
393 PBLOCK base; /* base of heap area */
394 ULONG len; /* size of heap area */
395#ifdef USE_BIGBLOCK_ALLOC
396 BOOL bBigBlock; /* was allocated using VirtualAlloc */
397#endif
398} HeapRec;
399
400class VMem
401{
402public:
403 VMem();
404 ~VMem();
405 virtual void* Malloc(size_t size);
406 virtual void* Realloc(void* pMem, size_t size);
407 virtual void Free(void* pMem);
408 virtual void GetLock(void);
409 virtual void FreeLock(void);
410 virtual int IsLocked(void);
411 virtual long Release(void);
412 virtual long AddRef(void);
413
414 inline BOOL CreateOk(void)
415 {
416#ifdef _USE_BUDDY_BLOCKS
417 return TRUE;
418#else
419 return m_hHeap != NULL;
420#endif
421 };
422
423 void ReInit(void);
424
425protected:
426 void Init(void);
427 int Getmem(size_t size);
428
429 int HeapAdd(void* ptr, size_t size
430#ifdef USE_BIGBLOCK_ALLOC
431 , BOOL bBigBlock
432#endif
433 );
434
435 void* Expand(void* block, size_t size);
436
437#ifdef _USE_BUDDY_BLOCKS
438 inline PBLOCK GetFreeListLink(int index)
439 {
440 if (index >= nListEntries)
441 index = nListEntries-1;
442 return &m_FreeList[index].Dummy[sizeofTag];
443 }
444 inline PBLOCK GetOverSizeFreeList(void)
445 {
446 return &m_FreeList[nListEntries-1].Dummy[sizeofTag];
447 }
448 inline PBLOCK GetEOLFreeList(void)
449 {
450 return &m_FreeList[nListEntries].Dummy[sizeofTag];
451 }
452
453 void AddToFreeList(PBLOCK block, size_t size)
454 {
455 PBLOCK pFreeList = GetFreeListLink(CalcEntry(size));
456 PBLOCK next = NEXT(pFreeList);
457 NEXT(pFreeList) = block;
458 SetLink(block, pFreeList, next);
459 PREV(next) = block;
460 }
461#endif
462 inline size_t CalcAllocSize(size_t size)
463 {
464 /*
465 * Adjust the real size of the block to be a multiple of sizeof(long), and add
466 * the overhead for the boundary tags. Disallow negative or zero sizes.
467 */
468 return (size < minBlockSize) ? minAllocSize : (size_t)ROUND_UP(size) + blockOverhead;
469 }
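    /*
     * Worked examples (illustrative, 32-bit build: minBlockSize 8,
     * blockOverhead 8, minAllocSize 16):
     *     CalcAllocSize(1) == 16, CalcAllocSize(8) == 16,
     *     CalcAllocSize(9) == 20, CalcAllocSize(100) == 108.
     */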
470
471#ifdef _USE_BUDDY_BLOCKS
472 FREE_LIST_ENTRY m_FreeList[nListEntries+1]; // free list with dummy end of list entry as well
473#else
474 HANDLE m_hHeap; // memory heap for this script
475 char m_FreeDummy[minAllocSize]; // dummy free block
476 PBLOCK m_pFreeList; // pointer to first block on free list
477#endif
478 PBLOCK m_pRover; // roving pointer into the free list
479 HeapRec m_heaps[maxHeaps]; // list of all non-contiguous heap areas
480 int m_nHeaps; // no. of heaps in m_heaps
481 long m_lAllocSize; // current alloc size
482 long m_lRefCount; // number of current users
483 CRITICAL_SECTION m_cs; // access lock
484
485#ifdef _DEBUG_MEM
486 void WalkHeap(int complete);
487 void MemoryUsageMessage(char *str, long x, long y, int c);
488 FILE* m_pLog;
489#endif
490};
491
492VMem::VMem()
493{
494 m_lRefCount = 1;
495#ifndef _USE_BUDDY_BLOCKS
496 BOOL bRet = (NULL != (m_hHeap = HeapCreate(HEAP_NO_SERIALIZE,
497 lAllocStart, /* initial size of heap */
498 0))); /* no upper limit on size of heap */
499 ASSERT(bRet);
500#endif
501
502 InitializeCriticalSection(&m_cs);
503#ifdef _DEBUG_MEM
504 m_pLog = 0;
505#endif
506
507 Init();
508}
509
510VMem::~VMem(void)
511{
512#ifndef _USE_BUDDY_BLOCKS
513 ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, NULL));
514#endif
515 WALKHEAPTRACE();
516
517 DeleteCriticalSection(&m_cs);
518#ifdef _USE_BUDDY_BLOCKS
519 for(int index = 0; index < m_nHeaps; ++index) {
520 VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
521 }
522#else /* !_USE_BUDDY_BLOCKS */
523#ifdef USE_BIGBLOCK_ALLOC
524 for(int index = 0; index < m_nHeaps; ++index) {
525 if (m_heaps[index].bBigBlock) {
526 VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
527 }
528 }
529#endif
530 BOOL bRet = HeapDestroy(m_hHeap);
531 ASSERT(bRet);
532#endif /* _USE_BUDDY_BLOCKS */
533}
534
535void VMem::ReInit(void)
536{
537 for(int index = 0; index < m_nHeaps; ++index) {
538#ifdef _USE_BUDDY_BLOCKS
539 VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
540#else
541#ifdef USE_BIGBLOCK_ALLOC
542 if (m_heaps[index].bBigBlock) {
543 VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
544 }
545 else
546#endif
547 HeapFree(m_hHeap, HEAP_NO_SERIALIZE, m_heaps[index].base);
548#endif /* _USE_BUDDY_BLOCKS */
549 }
550
551 Init();
552}
553
554void VMem::Init(void)
555{
556#ifdef _USE_BUDDY_BLOCKS
557 PBLOCK pFreeList;
558 /*
559 * Initialize the free list by placing a dummy zero-length block on it.
560 * Set the end of list marker.
561 * Set the number of non-contiguous heaps to zero.
562 * Set the next allocation size.
563 */
564 for (int index = 0; index < nListEntries; ++index) {
565 pFreeList = GetFreeListLink(index);
566 SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
567 PREV(pFreeList) = NEXT(pFreeList) = pFreeList;
568 }
569 pFreeList = GetEOLFreeList();
570 SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
571 PREV(pFreeList) = NEXT(pFreeList) = NULL;
572 m_pRover = GetOverSizeFreeList();
573#else
574 /*
575 * Initialize the free list by placing a dummy zero-length block on it.
576 * Set the number of non-contiguous heaps to zero.
577 */
578 m_pFreeList = m_pRover = (PBLOCK)(&m_FreeDummy[sizeofTag]);
579 PSIZE(m_pFreeList+minAllocSize) = SIZE(m_pFreeList) = 0;
580 PREV(m_pFreeList) = NEXT(m_pFreeList) = m_pFreeList;
581#endif
582
583 m_nHeaps = 0;
584 m_lAllocSize = lAllocStart;
585}
586
587void* VMem::Malloc(size_t size)
588{
589 WALKHEAP();
590
591 PBLOCK ptr;
592 size_t lsize, rem;
593 /*
594 * Disallow negative or zero sizes.
595 */
596 size_t realsize = CalcAllocSize(size);
597 if((int)realsize < minAllocSize || size == 0)
598 return NULL;
599
600#ifdef _USE_BUDDY_BLOCKS
601 /*
602 * Check the free list of small blocks; if a block of this size is free, use it.
603 * Otherwise check the rover. If the rover has no suitable block,
604 * scan the free list entries and use the first free block that fits,
605 * splitting it if needed; stop at the end-of-list marker.
606 */
607 {
608 int index = CalcEntry(realsize);
609 if (index < nListEntries-1) {
610 ptr = GetFreeListLink(index);
611 lsize = SIZE(ptr);
612 if (lsize >= realsize) {
613 rem = lsize - realsize;
614 if(rem < minAllocSize) {
615 /* Unlink the block from the free list. */
616 Unlink(ptr);
617 }
618 else {
619 /*
620 * split the block
621 * The remainder is big enough to split off into a new block.
622 * Use the end of the block, resize the beginning of the block
623 * no need to change the free list.
624 */
625 SetTags(ptr, rem);
626 ptr += SIZE(ptr);
627 lsize = realsize;
628 }
629 SetTags(ptr, lsize | 1);
630 return ptr;
631 }
632 ptr = m_pRover;
633 lsize = SIZE(ptr);
634 if (lsize >= realsize) {
635 rem = lsize - realsize;
636 if(rem < minAllocSize) {
637 /* Unlink the block from the free list. */
638 Unlink(ptr);
639 }
640 else {
641 /*
642 * split the block
643 * The remainder is big enough to split off into a new block.
644 * Use the end of the block, resize the beginning of the block
645 * no need to change the free list.
646 */
647 SetTags(ptr, rem);
648 ptr += SIZE(ptr);
649 lsize = realsize;
650 }
651 SetTags(ptr, lsize | 1);
652 return ptr;
653 }
654 ptr = GetFreeListLink(index+1);
655 while (NEXT(ptr)) {
656 lsize = SIZE(ptr);
657 if (lsize >= realsize) {
658 size_t rem = lsize - realsize;
659 if(rem < minAllocSize) {
660 /* Unlink the block from the free list. */
661 Unlink(ptr);
662 }
663 else {
664 /*
665 * split the block
666 * The remainder is big enough to split off into a new block.
667 * Use the end of the block, resize the beginning of the block
668 * no need to change the free list.
669 */
670 SetTags(ptr, rem);
671 ptr += SIZE(ptr);
672 lsize = realsize;
673 }
674 SetTags(ptr, lsize | 1);
675 return ptr;
676 }
677 ptr += sizeof(FREE_LIST_ENTRY);
678 }
679 }
680 }
681#endif
682
683 /*
684 * Start searching the free list at the rover. If we arrive back at rover without
685 * finding anything, allocate some memory from the heap and try again.
686 */
687 ptr = m_pRover; /* start searching at rover */
688 int loops = 2; /* allow two times through the loop */
689 for(;;) {
690 lsize = SIZE(ptr);
691 ASSERT((lsize&1)==0);
692 /* is block big enough? */
693 if(lsize >= realsize) {
694 /* if the remainder is too small, don't bother splitting the block. */
695 rem = lsize - realsize;
696 if(rem < minAllocSize) {
697 if(m_pRover == ptr)
698 m_pRover = NEXT(ptr);
699
700 /* Unlink the block from the free list. */
701 Unlink(ptr);
702 }
703 else {
704 /*
705 * split the block
706 * The remainder is big enough to split off into a new block.
707 * Use the end of the block, resize the beginning of the block
708 * no need to change the free list.
709 */
710 SetTags(ptr, rem);
711 ptr += SIZE(ptr);
712 lsize = realsize;
713 }
714 /* Set the boundary tags to mark it as allocated. */
715 SetTags(ptr, lsize | 1);
716 return ((void *)ptr);
717 }
718
719 /*
720 * This block was unsuitable. If we've gone through this list once already without
721 * finding anything, allocate some new memory from the heap and try again.
722 */
723 ptr = NEXT(ptr);
724 if(ptr == m_pRover) {
725 if(!(loops-- && Getmem(realsize))) {
726 return NULL;
727 }
728 ptr = m_pRover;
729 }
730 }
731}
732
733void* VMem::Realloc(void* block, size_t size)
734{
735 WALKHEAP();
736
737 /* if size is zero, free the block. */
738 if(size == 0) {
739 Free(block);
740 return (NULL);
741 }
742
743 /* if block pointer is NULL, do a Malloc(). */
744 if(block == NULL)
745 return Malloc(size);
746
747 /*
748 * Grow or shrink the block in place.
749 * If the block grows, the next physical block will be used if it is free.
750 */
751 if(Expand(block, size) != NULL)
752 return block;
753
754 size_t realsize = CalcAllocSize(size);
755 if((int)realsize < minAllocSize)
756 return NULL;
757
758 /*
759 * See if the previous block is free, and whether it is big enough to cover the new
760 * size when merged with the current block.
761 */
762 PBLOCK ptr = (PBLOCK)block;
763 size_t cursize = SIZE(ptr) & ~1;
764 size_t psize = PSIZE(ptr);
765 if((psize&1) == 0 && (psize + cursize) >= realsize) {
766 PBLOCK prev = ptr - psize;
767 if(m_pRover == prev)
768 m_pRover = NEXT(prev);
769
770 /* Unlink the next block from the free list. */
771 Unlink(prev);
772
773 /* Copy contents of old block to new location, make it the current block. */
774 memmove(prev, ptr, cursize);
775 cursize += psize; /* combine sizes */
776 ptr = prev;
777
778 size_t rem = cursize - realsize;
779 if(rem >= minAllocSize) {
780 /*
781 * The remainder is big enough to be a new block. Set boundary
782 * tags for the resized block and the new block.
783 */
784 prev = ptr + realsize;
785 /*
786 * add the new block to the free list.
787 * next block cannot be free
788 */
789 SetTags(prev, rem);
790#ifdef _USE_BUDDY_BLOCKS
791 AddToFreeList(prev, rem);
792#else
793 AddToFreeList(prev, m_pFreeList);
794#endif
795 cursize = realsize;
796 }
797 /* Set the boundary tags to mark it as allocated. */
798 SetTags(ptr, cursize | 1);
799 return ((void *)ptr);
800 }
801
802 /* Allocate a new block, copy the old to the new, and free the old. */
803 if((ptr = (PBLOCK)Malloc(size)) != NULL) {
804 memmove(ptr, block, cursize-blockOverhead);
805 Free(block);
806 }
807 return ((void *)ptr);
808}
809
810void VMem::Free(void* p)
811{
812 WALKHEAP();
813
814 /* Ignore null pointer. */
815 if(p == NULL)
816 return;
817
818 PBLOCK ptr = (PBLOCK)p;
819
820 /* Check for attempt to free a block that's already free. */
821 size_t size = SIZE(ptr);
822 if((size&1) == 0) {
823 MEMODSlx("Attempt to free previously freed block", (long)p);
824 return;
825 }
826 size &= ~1; /* remove allocated tag */
827
828 /* if previous block is free, add this block to it. */
829#ifndef _USE_BUDDY_BLOCKS
830 int linked = FALSE;
831#endif
832 size_t psize = PSIZE(ptr);
833 if((psize&1) == 0) {
834 ptr -= psize; /* point to previous block */
835 size += psize; /* merge the sizes of the two blocks */
836#ifdef _USE_BUDDY_BLOCKS
837 Unlink(ptr);
838#else
839 linked = TRUE; /* it's already on the free list */
840#endif
841 }
842
843 /* if the next physical block is free, merge it with this block. */
844 PBLOCK next = ptr + size; /* point to next physical block */
845 size_t nsize = SIZE(next);
846 if((nsize&1) == 0) {
847 /* block is free; move rover if needed */
848 if(m_pRover == next)
849 m_pRover = NEXT(next);
850
851 /* unlink the next block from the free list. */
852 Unlink(next);
853
854 /* merge the sizes of this block and the next block. */
855 size += nsize;
856 }
857
858 /* Set the boundary tags for the block; */
859 SetTags(ptr, size);
860
861 /* Link the block to the head of the free list. */
862#ifdef _USE_BUDDY_BLOCKS
863 AddToFreeList(ptr, size);
864#else
865 if(!linked) {
866 AddToFreeList(ptr, m_pFreeList);
867 }
868#endif
869}
870
871void VMem::GetLock(void)
872{
873 EnterCriticalSection(&m_cs);
874}
875
876void VMem::FreeLock(void)
877{
878 LeaveCriticalSection(&m_cs);
879}
880
881int VMem::IsLocked(void)
882{
883#if 0
884 /* XXX TryEnterCriticalSection() is not available in some versions
885 * of Windows 95. Since this code is not used anywhere yet, we
886 * skirt the issue for now. */
887 BOOL bAccessed = TryEnterCriticalSection(&m_cs);
888 if(bAccessed) {
889 LeaveCriticalSection(&m_cs);
890 }
891 return !bAccessed;
892#else
893 ASSERT(0); /* alarm bells for when somebody calls this */
894 return 0;
895#endif
896}
897
898
899long VMem::Release(void)
900{
901 long lCount = InterlockedDecrement(&m_lRefCount);
902 if(!lCount)
903 delete this;
904 return lCount;
905}
906
907long VMem::AddRef(void)
908{
909 long lCount = InterlockedIncrement(&m_lRefCount);
910 return lCount;
911}
912
913
914int VMem::Getmem(size_t requestSize)
915{ /* returns -1 if successful, 0 if not */
916#ifdef USE_BIGBLOCK_ALLOC
917 BOOL bBigBlock;
918#endif
919 void *ptr;
920
921 /* Round up size to next multiple of 64K. */
922 size_t size = (size_t)ROUND_UP64K(requestSize);
923
924 /*
925 * if the size requested is smaller than our current allocation size
926 * adjust up
927 */
928 if(size < (unsigned long)m_lAllocSize)
929 size = m_lAllocSize;
930
931 /* Update the size to allocate on the next request */
932 if(m_lAllocSize != lAllocMax)
933 m_lAllocSize <<= 2;
934
935#ifndef _USE_BUDDY_BLOCKS
936 if(m_nHeaps != 0
937#ifdef USE_BIGBLOCK_ALLOC
938 && !m_heaps[m_nHeaps-1].bBigBlock
939#endif
940 ) {
941 /* Expand the last allocated heap */
942 ptr = HeapReAlloc(m_hHeap, HEAP_REALLOC_IN_PLACE_ONLY|HEAP_NO_SERIALIZE,
943 m_heaps[m_nHeaps-1].base,
944 m_heaps[m_nHeaps-1].len + size);
945 if(ptr != 0) {
946 HeapAdd(((char*)ptr) + m_heaps[m_nHeaps-1].len, size
947#ifdef USE_BIGBLOCK_ALLOC
948 , FALSE
949#endif
950 );
951 return -1;
952 }
953 }
954#endif /* _USE_BUDDY_BLOCKS */
955
956 /*
957 * If we didn't expand an existing block to cover the requested size,
958 * allocate a new heap area.
959 * The size of this block must include the additional dummy tags at either end;
960 * the ROUND_UP64K above may not have added any memory to cover them.
961 */
962 if(size == requestSize)
963 size = (size_t)ROUND_UP64K(requestSize+(blockOverhead));
964
965Restart:
966#ifdef _USE_BUDDY_BLOCKS
967 ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
968#else
969#ifdef USE_BIGBLOCK_ALLOC
970 bBigBlock = FALSE;
971 if (size >= nMaxHeapAllocSize) {
972 bBigBlock = TRUE;
973 ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
974 }
975 else
976#endif
977 ptr = HeapAlloc(m_hHeap, HEAP_NO_SERIALIZE, size);
978#endif /* _USE_BUDDY_BLOCKS */
979
980 if (!ptr) {
981 /* try to allocate a smaller chunk */
982 size >>= 1;
983 if(size > requestSize)
984 goto Restart;
985 }
986
987 if(ptr == 0) {
988 MEMODSlx("HeapAlloc failed on size!!!", size);
989 return 0;
990 }
991
992#ifdef _USE_BUDDY_BLOCKS
993 if (HeapAdd(ptr, size)) {
994 VirtualFree(ptr, 0, MEM_RELEASE);
995 return 0;
996 }
997#else
998#ifdef USE_BIGBLOCK_ALLOC
999 if (HeapAdd(ptr, size, bBigBlock)) {
1000 if (bBigBlock) {
1001 VirtualFree(ptr, 0, MEM_RELEASE);
1002 }
1003 }
1004#else
1005 HeapAdd(ptr, size);
1006#endif
1007#endif /* _USE_BUDDY_BLOCKS */
1008 return -1;
1009}
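/*
 * Illustrative note on the growth schedule above: m_lAllocSize starts at
 * lAllocStart (0x20000, 128K) and is shifted left by 2 on each Getmem() call,
 * so successive new heap areas request at least 128K, 512K, 2M, 8M, ... until
 * the cap lAllocMax (0x80000000) is reached.
 */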
1010
1011int VMem::HeapAdd(void* p, size_t size
1012#ifdef USE_BIGBLOCK_ALLOC
1013 , BOOL bBigBlock
1014#endif
1015 )
1016{ /* if the block can be successfully added to the heap, returns 0; otherwise -1. */
1017 int index;
1018
1019 /* Check size, then round size down to next long word boundary. */
1020 if(size < minAllocSize)
1021 return -1;
1022
1023 size = (size_t)ROUND_DOWN(size);
1024 PBLOCK ptr = (PBLOCK)p;
1025
1026#ifdef USE_BIGBLOCK_ALLOC
1027 if (!bBigBlock) {
1028#endif
1029 /*
1030 * Search for another heap area that's contiguous with the bottom of this new area.
1031 * (It should be extremely unusual to find one that's contiguous with the top).
1032 */
1033 for(index = 0; index < m_nHeaps; ++index) {
1034 if(ptr == m_heaps[index].base + (int)m_heaps[index].len) {
1035 /*
1036 * The new block is contiguous with a previously allocated heap area. Add its
1037 * length to that of the previous heap. Merge it with the dummy end-of-heap
1038 * area marker of the previous heap.
1039 */
1040 m_heaps[index].len += size;
1041 break;
1042 }
1043 }
1044#ifdef USE_BIGBLOCK_ALLOC
1045 }
1046 else {
1047 index = m_nHeaps;
1048 }
1049#endif
1050
1051 if(index == m_nHeaps) {
1052 /* The new block is not contiguous, or is BigBlock. Add it to the heap list. */
1053 if(m_nHeaps == maxHeaps) {
1054 return -1; /* too many non-contiguous heaps */
1055 }
1056 m_heaps[m_nHeaps].base = ptr;
1057 m_heaps[m_nHeaps].len = size;
1058#ifdef USE_BIGBLOCK_ALLOC
1059 m_heaps[m_nHeaps].bBigBlock = bBigBlock;
1060#endif
1061 m_nHeaps++;
1062
1063 /*
1064 * Reserve the first LONG in the block for the ending boundary tag of a dummy
1065 * block at the start of the heap area.
1066 */
1067 size -= blockOverhead;
1068 ptr += blockOverhead;
1069 PSIZE(ptr) = 1; /* mark the dummy previous block as allocated */
1070 }
1071
1072 /*
1073 * Convert the heap to one large block. Set up its boundary tags, and those of the
1074 * marker block after it. The marker block before the heap will already have
1075 * been set up if this heap is not contiguous with the end of another heap.
1076 */
1077 SetTags(ptr, size | 1);
1078 PBLOCK next = ptr + size; /* point to dummy end block */
1079 SIZE(next) = 1; /* mark the dummy end block as allocated */
1080
1081 /*
1082 * Link the block to the start of the free list by calling Free().
1083 * This will merge the block with any adjacent free blocks.
1084 */
1085 Free(ptr);
1086 return 0;
1087}
1088
1089
1090void* VMem::Expand(void* block, size_t size)
1091{
1092 /*
1093 * Disallow negative or zero sizes.
1094 */
1095 size_t realsize = CalcAllocSize(size);
1096 if((int)realsize < minAllocSize || size == 0)
1097 return NULL;
1098
1099 PBLOCK ptr = (PBLOCK)block;
1100
1101 /* if the current size is the same as requested, do nothing. */
1102 size_t cursize = SIZE(ptr) & ~1;
1103 if(cursize == realsize) {
1104 return block;
1105 }
1106
1107 /* if the block is being shrunk, convert the remainder of the block into a new free block. */
1108 if(realsize <= cursize) {
1109 size_t nextsize = cursize - realsize; /* size of new remainder block */
1110 if(nextsize >= minAllocSize) {
1111 /*
1112 * Split the block
1113 * Set boundary tags for the resized block and the new block.
1114 */
1115 SetTags(ptr, realsize | 1);
1116 ptr += realsize;
1117
1118 /*
1119 * add the new block to the free list.
1120 * call Free to merge this block with next block if free
1121 */
1122 SetTags(ptr, nextsize | 1);
1123 Free(ptr);
1124 }
1125
1126 return block;
1127 }
1128
1129 PBLOCK next = ptr + cursize;
1130 size_t nextsize = SIZE(next);
1131
1132 /* See whether the next physical block is free and big enough to cover the new size. */
1133 if((nextsize&1) == 0 && (nextsize + cursize) >= realsize) {
1134 /*
1135 * The next block is free and big enough. Add the part that's needed
1136 * to our block, and split the remainder off into a new block.
1137 */
1138 if(m_pRover == next)
1139 m_pRover = NEXT(next);
1140
1141 /* Unlink the next block from the free list. */
1142 Unlink(next);
1143 cursize += nextsize; /* combine sizes */
1144
1145 size_t rem = cursize - realsize; /* size of remainder */
1146 if(rem >= minAllocSize) {
1147 /*
1148 * The remainder is big enough to be a new block.
1149 * Set boundary tags for the resized block and the new block.
1150 */
1151 next = ptr + realsize;
1152 /*
1153 * add the new block to the free list.
1154 * next block cannot be free
1155 */
1156 SetTags(next, rem);
1157#ifdef _USE_BUDDY_BLOCKS
1158 AddToFreeList(next, rem);
1159#else
1160 AddToFreeList(next, m_pFreeList);
1161#endif
1162 cursize = realsize;
1163 }
1164 /* Set the boundary tags to mark it as allocated. */
1165 SetTags(ptr, cursize | 1);
1166 return ((void *)ptr);
1167 }
1168 return NULL;
1169}
1170
1171#ifdef _DEBUG_MEM
1172#define LOG_FILENAME ".\\MemLog.txt"
1173
1174void VMem::MemoryUsageMessage(char *str, long x, long y, int c)
1175{
1176 char szBuffer[512];
1177 if(str) {
1178 if(!m_pLog)
1179 m_pLog = fopen(LOG_FILENAME, "w");
1180 sprintf(szBuffer, str, x, y, c);
1181 fputs(szBuffer, m_pLog);
1182 }
1183 else {
1184 if(m_pLog) {
1185 fflush(m_pLog);
1186 fclose(m_pLog);
1187 m_pLog = 0;
1188 }
1189 }
1190}
1191
1192void VMem::WalkHeap(int complete)
1193{
1194 if(complete) {
1195 MemoryUsageMessage(NULL, 0, 0, 0);
1196 size_t total = 0;
1197 for(int i = 0; i < m_nHeaps; ++i) {
1198 total += m_heaps[i].len;
1199 }
1200 MemoryUsageMessage("VMem heaps used %d. Total memory %08x\n", m_nHeaps, total, 0);
1201
1202 /* Walk all the heaps - verify structures */
1203 for(int index = 0; index < m_nHeaps; ++index) {
1204 PBLOCK ptr = m_heaps[index].base;
1205 size_t size = m_heaps[index].len;
1206#ifndef _USE_BUDDY_BLOCKS
1207#ifdef USE_BIGBLOCK_ALLOC
1208 if (!m_heaps[index].bBigBlock)
1209#endif
1210 ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, ptr));
1211#endif
1212
1213 /* set over reserved header block */
1214 size -= blockOverhead;
1215 ptr += blockOverhead;
1216 PBLOCK pLast = ptr + size;
1217 ASSERT(PSIZE(ptr) == 1); /* dummy previous block is allocated */
1218 ASSERT(SIZE(pLast) == 1); /* dummy next block is allocated */
1219 while(ptr < pLast) {
1220 ASSERT(ptr > m_heaps[index].base);
1221 size_t cursize = SIZE(ptr) & ~1;
1222 ASSERT((PSIZE(ptr+cursize) & ~1) == cursize);
1223 MemoryUsageMessage("Memory Block %08x: Size %08x %c\n", (long)ptr, cursize, (SIZE(ptr)&1) ? 'x' : ' ');
1224 if(!(SIZE(ptr)&1)) {
1225 /* this block is on the free list */
1226 PBLOCK tmp = NEXT(ptr);
1227 while(tmp != ptr) {
1228 ASSERT((SIZE(tmp)&1)==0);
1229 if(tmp == m_pFreeList)
1230 break;
1231 ASSERT(NEXT(tmp));
1232 tmp = NEXT(tmp);
1233 }
1234 if(tmp == ptr) {
1235 MemoryUsageMessage("Memory Block %08x: Size %08x free but not in free list\n", (long)ptr, cursize, 0);
1236 }
1237 }
1238 ptr += cursize;
1239 }
1240 }
1241 MemoryUsageMessage(NULL, 0, 0, 0);
1242 }
1243}
1244#endif /* _DEBUG_MEM */
1245
1246#endif /* _USE_MSVCRT_MEM_ALLOC */
1247
1248#endif /* ___VMEM_H_INC___ */