Commit | Line | Data |
---|---|---|
7766f137 GS |
1 | /* vmem.h |
2 | * | |
3 | * (c) 1999 Microsoft Corporation. All rights reserved. | |
4 | * Portions (c) 1999 ActiveState Tool Corp, http://www.ActiveState.com/ | |
5 | * | |
6 | * You may distribute under the terms of either the GNU General Public | |
7 | * License or the Artistic License, as specified in the README file. | |
8 | * | |
f57e8d3b | 9 | * Options: |
7766f137 | 10 | * |
f57e8d3b | 11 | * Defining _USE_MSVCRT_MEM_ALLOC will cause all memory allocations |
b75e1177 SH |
12 | * to be forwarded to the compiler's MSVCR*.DLL. Defining _USE_LINKED_LIST as |
13 | * well will track all allocations in a doubly linked list, so that the host can | |
f57e8d3b GS |
14 | * free all memory allocated when it goes away. |
15 | * If _USE_MSVCRT_MEM_ALLOC is not defined then Knuth's boundary tag algorithm | |
16 | * is used; defining _USE_BUDDY_BLOCKS will use Knuth's algorithm R | |
17 | * (Buddy system reservation) | |
18 | * | |
19 | */ | |
20 | ||
21 | #ifndef ___VMEM_H_INC___ | |
22 | #define ___VMEM_H_INC___ | |
23 | ||
222c300a NIS |
24 | #define _USE_MSVCRT_MEM_ALLOC |
25 | #define _USE_LINKED_LIST | |
f57e8d3b GS |
26 | |
27 | // #define _USE_BUDDY_BLOCKS | |
28 | ||
29 | // #define _DEBUG_MEM | |
30 | #ifdef _DEBUG_MEM | |
31 | #define ASSERT(f) if(!(f)) DebugBreak(); | |
32 | ||
33 | inline void MEMODS(char *str) | |
34 | { | |
35 | OutputDebugString(str); | |
36 | OutputDebugString("\n"); | |
37 | } | |
38 | ||
39 | inline void MEMODSlx(char *str, long x) | |
40 | { | |
41 | char szBuffer[512]; | |
42 | sprintf(szBuffer, "%s %lx\n", str, x); | |
43 | OutputDebugString(szBuffer); | |
44 | } | |
45 | ||
46 | #define WALKHEAP() WalkHeap(0) | |
47 | #define WALKHEAPTRACE() WalkHeap(1) | |
48 | ||
49 | #else | |
50 | ||
51 | #define ASSERT(f) | |
52 | #define MEMODS(x) | |
53 | #define MEMODSlx(x, y) | |
54 | #define WALKHEAP() | |
55 | #define WALKHEAPTRACE() | |
56 | ||
57 | #endif | |
58 | ||
59 | #ifdef _USE_MSVCRT_MEM_ALLOC | |
60 | ||
61 | #ifndef _USE_LINKED_LIST | |
62 | // #define _USE_LINKED_LIST | |
63 | #endif | |
64 | ||
65 | /* | |
b75e1177 SH |
66 | * Pass all memory requests through to the compiler's msvcr*.dll. |
67 | * Optionaly track by using a doubly linked header. | |
f57e8d3b GS |
68 | */ |
69 | ||
f57e8d3b | 70 | #ifdef _USE_LINKED_LIST |
222c300a | 71 | class VMem; |
14e63e09 | 72 | |
07e16c74 S |
73 | /* |
74 | * Address an alignment issue with x64 mingw-w64 ports of gcc-12 and | |
75 | * (presumably) later. We do the same thing again 16 lines further down. | |
76 | * See https://github.com/Perl/perl5/issues/19824 | |
77 | */ | |
78 | ||
14e63e09 S |
79 | #if defined(__MINGW64__) && __GNUC__ > 11 |
80 | typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER __attribute__ ((aligned(16))); | |
81 | #else | |
f57e8d3b | 82 | typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER; |
14e63e09 S |
83 | #endif |
84 | ||
f57e8d3b GS |
85 | typedef struct _MemoryBlockHeader { |
86 | PMEMORY_BLOCK_HEADER pNext; | |
87 | PMEMORY_BLOCK_HEADER pPrev; | |
222c300a | 88 | VMem *owner; |
14e63e09 S |
89 | |
90 | #if defined(__MINGW64__) && __GNUC__ > 11 | |
91 | } MEMORY_BLOCK_HEADER __attribute__ ((aligned(16))), *PMEMORY_BLOCK_HEADER; | |
92 | #else | |
f57e8d3b GS |
93 | } MEMORY_BLOCK_HEADER, *PMEMORY_BLOCK_HEADER; |
94 | #endif | |
95 | ||
14e63e09 S |
96 | #endif |
97 | ||
f57e8d3b GS |
98 | class VMem |
99 | { | |
100 | public: | |
101 | VMem(); | |
102 | ~VMem(); | |
624a1c42 | 103 | void* Malloc(size_t size); |
b75e1177 SH |
104 | void* Realloc(void* pMem, size_t size); |
105 | void Free(void* pMem); | |
106 | void GetLock(void); | |
107 | void FreeLock(void); | |
108 | int IsLocked(void); | |
109 | long Release(void); | |
110 | long AddRef(void); | |
f57e8d3b GS |
111 | |
112 | inline BOOL CreateOk(void) | |
113 | { | |
1604cfb0 | 114 | return TRUE; |
f57e8d3b GS |
115 | }; |
116 | ||
117 | protected: | |
118 | #ifdef _USE_LINKED_LIST | |
119 | void LinkBlock(PMEMORY_BLOCK_HEADER ptr) | |
120 | { | |
1604cfb0 MS |
121 | PMEMORY_BLOCK_HEADER next = m_Dummy.pNext; |
122 | m_Dummy.pNext = ptr; | |
123 | ptr->pPrev = &m_Dummy; | |
124 | ptr->pNext = next; | |
222c300a | 125 | ptr->owner = this; |
1604cfb0 | 126 | next->pPrev = ptr; |
f57e8d3b GS |
127 | } |
128 | void UnlinkBlock(PMEMORY_BLOCK_HEADER ptr) | |
129 | { | |
1604cfb0 MS |
130 | PMEMORY_BLOCK_HEADER next = ptr->pNext; |
131 | PMEMORY_BLOCK_HEADER prev = ptr->pPrev; | |
132 | prev->pNext = next; | |
133 | next->pPrev = prev; | |
f57e8d3b GS |
134 | } |
135 | ||
136 | MEMORY_BLOCK_HEADER m_Dummy; | |
624a1c42 | 137 | CRITICAL_SECTION m_cs; // access lock |
f57e8d3b GS |
138 | #endif |
139 | ||
140 | long m_lRefCount; // number of current users | |
f57e8d3b GS |
141 | }; |
142 | ||
143 | VMem::VMem() | |
144 | { | |
145 | m_lRefCount = 1; | |
f57e8d3b | 146 | #ifdef _USE_LINKED_LIST |
624a1c42 | 147 | InitializeCriticalSection(&m_cs); |
f57e8d3b | 148 | m_Dummy.pNext = m_Dummy.pPrev = &m_Dummy; |
222c300a | 149 | m_Dummy.owner = this; |
f57e8d3b | 150 | #endif |
f57e8d3b GS |
151 | } |
152 | ||
153 | VMem::~VMem(void) | |
154 | { | |
155 | #ifdef _USE_LINKED_LIST | |
156 | while (m_Dummy.pNext != &m_Dummy) { | |
1604cfb0 | 157 | Free(m_Dummy.pNext+1); |
f57e8d3b | 158 | } |
f57e8d3b | 159 | DeleteCriticalSection(&m_cs); |
624a1c42 | 160 | #endif |
f57e8d3b GS |
161 | } |
162 | ||
163 | void* VMem::Malloc(size_t size) | |
164 | { | |
165 | #ifdef _USE_LINKED_LIST | |
222c300a | 166 | GetLock(); |
624a1c42 | 167 | PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)malloc(size+sizeof(MEMORY_BLOCK_HEADER)); |
9e6e1211 | 168 | if (!ptr) { |
1604cfb0 MS |
169 | FreeLock(); |
170 | return NULL; | |
9e6e1211 | 171 | } |
f57e8d3b | 172 | LinkBlock(ptr); |
222c300a | 173 | FreeLock(); |
f57e8d3b GS |
174 | return (ptr+1); |
175 | #else | |
624a1c42 | 176 | return malloc(size); |
f57e8d3b GS |
177 | #endif |
178 | } | |
179 | ||
180 | void* VMem::Realloc(void* pMem, size_t size) | |
181 | { | |
182 | #ifdef _USE_LINKED_LIST | |
183 | if (!pMem) | |
1604cfb0 | 184 | return Malloc(size); |
f57e8d3b GS |
185 | |
186 | if (!size) { | |
1604cfb0 MS |
187 | Free(pMem); |
188 | return NULL; | |
f57e8d3b GS |
189 | } |
190 | ||
222c300a | 191 | GetLock(); |
f57e8d3b GS |
192 | PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER)); |
193 | UnlinkBlock(ptr); | |
624a1c42 | 194 | ptr = (PMEMORY_BLOCK_HEADER)realloc(ptr, size+sizeof(MEMORY_BLOCK_HEADER)); |
9e6e1211 | 195 | if (!ptr) { |
1604cfb0 MS |
196 | FreeLock(); |
197 | return NULL; | |
9e6e1211 | 198 | } |
f57e8d3b | 199 | LinkBlock(ptr); |
222c300a | 200 | FreeLock(); |
f57e8d3b GS |
201 | |
202 | return (ptr+1); | |
203 | #else | |
624a1c42 | 204 | return realloc(pMem, size); |
f57e8d3b GS |
205 | #endif |
206 | } | |
207 | ||
208 | void VMem::Free(void* pMem) | |
209 | { | |
210 | #ifdef _USE_LINKED_LIST | |
211 | if (pMem) { | |
1604cfb0 | 212 | PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER)); |
222c300a | 213 | if (ptr->owner != this) { |
1604cfb0 | 214 | if (ptr->owner) { |
05ec9bb3 | 215 | #if 1 |
1604cfb0 MS |
216 | int *nowhere = NULL; |
217 | Perl_warn_nocontext("Free to wrong pool %p not %p",this,ptr->owner); | |
218 | *nowhere = 0; /* this segfault is deliberate, | |
219 | so you can see the stack trace */ | |
05ec9bb3 NIS |
220 | #else |
221 | ptr->owner->Free(pMem); | |
222 | #endif | |
1604cfb0 MS |
223 | } |
224 | return; | |
222c300a | 225 | } |
1604cfb0 MS |
226 | GetLock(); |
227 | UnlinkBlock(ptr); | |
228 | ptr->owner = NULL; | |
229 | free(ptr); | |
230 | FreeLock(); | |
f57e8d3b | 231 | } |
624a1c42 DD |
232 | #else /*_USE_LINKED_LIST*/ |
233 | free(pMem); | |
f57e8d3b GS |
234 | #endif |
235 | } | |
236 | ||
237 | void VMem::GetLock(void) | |
238 | { | |
624a1c42 | 239 | #ifdef _USE_LINKED_LIST |
f57e8d3b | 240 | EnterCriticalSection(&m_cs); |
624a1c42 | 241 | #endif |
f57e8d3b GS |
242 | } |
243 | ||
244 | void VMem::FreeLock(void) | |
245 | { | |
624a1c42 | 246 | #ifdef _USE_LINKED_LIST |
f57e8d3b | 247 | LeaveCriticalSection(&m_cs); |
624a1c42 | 248 | #endif |
f57e8d3b GS |
249 | } |
250 | ||
251 | int VMem::IsLocked(void) | |
252 | { | |
253 | #if 0 | |
254 | /* XXX TryEnterCriticalSection() is not available in some versions | |
255 | * of Windows 95. Since this code is not used anywhere yet, we | |
256 | * skirt the issue for now. */ | |
257 | BOOL bAccessed = TryEnterCriticalSection(&m_cs); | |
258 | if(bAccessed) { | |
1604cfb0 | 259 | LeaveCriticalSection(&m_cs); |
f57e8d3b GS |
260 | } |
261 | return !bAccessed; | |
262 | #else | |
263 | ASSERT(0); /* alarm bells for when somebody calls this */ | |
264 | return 0; | |
265 | #endif | |
266 | } | |
267 | ||
268 | long VMem::Release(void) | |
269 | { | |
270 | long lCount = InterlockedDecrement(&m_lRefCount); | |
271 | if(!lCount) | |
1604cfb0 | 272 | delete this; |
f57e8d3b GS |
273 | return lCount; |
274 | } | |
275 | ||
276 | long VMem::AddRef(void) | |
277 | { | |
278 | long lCount = InterlockedIncrement(&m_lRefCount); | |
279 | return lCount; | |
280 | } | |
281 | ||
282 | #else /* _USE_MSVCRT_MEM_ALLOC */ | |
283 | ||
284 | /* | |
7766f137 GS |
285 | * Knuth's boundary tag algorithm Vol #1, Page 440. |
286 | * | |
287 | * Each block in the heap has tag words before and after it, | |
288 | * TAG | |
289 | * block | |
290 | * TAG | |
291 | * The size is stored in these tags as a long word, and includes the 8 bytes | |
292 | * of overhead that the boundary tags consume. Blocks are allocated on long | |
293 | * word boundaries, so the size is always multiples of long words. When the | |
294 | * block is allocated, bit 0, (the tag bit), of the size is set to 1. When | |
295 | * a block is freed, it is merged with adjacent free blocks, and the tag bit | |
296 | * is set to 0. | |
297 | * | |
298 | * A linked list is used to manage the free list. The first two long words of | |
299 | * the block contain double links. These links are only valid when the block | |
300 | * is freed, therefore space needs to be reserved for them. Thus, the minimum | |
301 | * block size (not counting the tags) is 8 bytes. | |
302 | * | |
bb0f0a6a | 303 | * Since memory allocation may occur on a single threaded, explicit locks are not |
7766f137 GS |
304 | * provided. |
305 | * | |
306 | */ | |
307 | ||
f57e8d3b | 308 | const long lAllocStart = 0x00020000; /* start at 128K */ |
7766f137 GS |
309 | const long minBlockSize = sizeof(void*)*2; |
310 | const long sizeofTag = sizeof(long); | |
311 | const long blockOverhead = sizeofTag*2; | |
312 | const long minAllocSize = minBlockSize+blockOverhead; | |
f57e8d3b GS |
313 | #ifdef _USE_BUDDY_BLOCKS |
314 | const long lSmallBlockSize = 1024; | |
315 | const size_t nListEntries = ((lSmallBlockSize-minAllocSize)/sizeof(long)); | |
316 | ||
317 | inline size_t CalcEntry(size_t size) | |
318 | { | |
319 | ASSERT((size&(sizeof(long)-1)) == 0); | |
320 | return ((size - minAllocSize) / sizeof(long)); | |
321 | } | |
322 | #endif | |
7766f137 GS |
323 | |
324 | typedef BYTE* PBLOCK; /* pointer to a memory block */ | |
325 | ||
326 | /* | |
327 | * Macros for accessing hidden fields in a memory block: | |
328 | * | |
329 | * SIZE size of this block (tag bit 0 is 1 if block is allocated) | |
330 | * PSIZE size of previous physical block | |
331 | */ | |
332 | ||
333 | #define SIZE(block) (*(ULONG*)(((PBLOCK)(block))-sizeofTag)) | |
f57e8d3b | 334 | #define PSIZE(block) (*(ULONG*)(((PBLOCK)(block))-(blockOverhead))) |
7766f137 GS |
335 | inline void SetTags(PBLOCK block, long size) |
336 | { | |
337 | SIZE(block) = size; | |
338 | PSIZE(block+(size&~1)) = size; | |
339 | } | |
340 | ||
341 | /* | |
342 | * Free list pointers | |
343 | * PREV pointer to previous block | |
344 | * NEXT pointer to next block | |
345 | */ | |
346 | ||
347 | #define PREV(block) (*(PBLOCK*)(block)) | |
348 | #define NEXT(block) (*(PBLOCK*)((block)+sizeof(PBLOCK))) | |
349 | inline void SetLink(PBLOCK block, PBLOCK prev, PBLOCK next) | |
350 | { | |
351 | PREV(block) = prev; | |
352 | NEXT(block) = next; | |
353 | } | |
354 | inline void Unlink(PBLOCK p) | |
355 | { | |
356 | PBLOCK next = NEXT(p); | |
357 | PBLOCK prev = PREV(p); | |
358 | NEXT(prev) = next; | |
359 | PREV(next) = prev; | |
360 | } | |
f57e8d3b | 361 | #ifndef _USE_BUDDY_BLOCKS |
7766f137 GS |
362 | inline void AddToFreeList(PBLOCK block, PBLOCK pInList) |
363 | { | |
364 | PBLOCK next = NEXT(pInList); | |
365 | NEXT(pInList) = block; | |
366 | SetLink(block, pInList, next); | |
367 | PREV(next) = block; | |
368 | } | |
f57e8d3b | 369 | #endif |
7766f137 GS |
370 | |
371 | /* Macro for rounding up to the next sizeof(long) */ | |
372 | #define ROUND_UP(n) (((ULONG)(n)+sizeof(long)-1)&~(sizeof(long)-1)) | |
373 | #define ROUND_UP64K(n) (((ULONG)(n)+0x10000-1)&~(0x10000-1)) | |
374 | #define ROUND_DOWN(n) ((ULONG)(n)&~(sizeof(long)-1)) | |
375 | ||
376 | /* | |
377 | * HeapRec - a list of all non-contiguous heap areas | |
378 | * | |
379 | * Each record in this array contains information about a non-contiguous heap area. | |
380 | */ | |
381 | ||
f57e8d3b | 382 | const int maxHeaps = 32; /* 64 was overkill */ |
7766f137 GS |
383 | const long lAllocMax = 0x80000000; /* max size of allocation */ |
384 | ||
f57e8d3b GS |
385 | #ifdef _USE_BUDDY_BLOCKS |
386 | typedef struct _FreeListEntry | |
387 | { | |
388 | BYTE Dummy[minAllocSize]; // dummy free block | |
389 | } FREE_LIST_ENTRY, *PFREE_LIST_ENTRY; | |
390 | #endif | |
391 | ||
392 | #ifndef _USE_BUDDY_BLOCKS | |
393 | #define USE_BIGBLOCK_ALLOC | |
394 | #endif | |
395 | /* | |
396 | * performance tuning | |
397 | * Use VirtualAlloc() for blocks bigger than nMaxHeapAllocSize since | |
398 | * Windows 95/98/Me have heap managers that are designed for memory | |
399 | * blocks smaller than four megabytes. | |
400 | */ | |
401 | ||
402 | #ifdef USE_BIGBLOCK_ALLOC | |
403 | const int nMaxHeapAllocSize = (1024*512); /* don't allocate anything larger than this from the heap */ | |
404 | #endif | |
405 | ||
7766f137 GS |
406 | typedef struct _HeapRec |
407 | { | |
408 | PBLOCK base; /* base of heap area */ | |
409 | ULONG len; /* size of heap area */ | |
f57e8d3b GS |
410 | #ifdef USE_BIGBLOCK_ALLOC |
411 | BOOL bBigBlock; /* was allocate using VirtualAlloc */ | |
412 | #endif | |
7766f137 GS |
413 | } HeapRec; |
414 | ||
7766f137 GS |
415 | class VMem |
416 | { | |
417 | public: | |
418 | VMem(); | |
419 | ~VMem(); | |
b75e1177 SH |
420 | void* Malloc(size_t size); |
421 | void* Realloc(void* pMem, size_t size); | |
422 | void Free(void* pMem); | |
423 | void GetLock(void); | |
424 | void FreeLock(void); | |
425 | int IsLocked(void); | |
426 | long Release(void); | |
427 | long AddRef(void); | |
7766f137 GS |
428 | |
429 | inline BOOL CreateOk(void) | |
430 | { | |
f57e8d3b | 431 | #ifdef _USE_BUDDY_BLOCKS |
1604cfb0 | 432 | return TRUE; |
f57e8d3b | 433 | #else |
1604cfb0 | 434 | return m_hHeap != NULL; |
f57e8d3b | 435 | #endif |
7766f137 GS |
436 | }; |
437 | ||
438 | void ReInit(void); | |
439 | ||
440 | protected: | |
441 | void Init(void); | |
442 | int Getmem(size_t size); | |
f57e8d3b GS |
443 | |
444 | int HeapAdd(void* ptr, size_t size | |
445 | #ifdef USE_BIGBLOCK_ALLOC | |
1604cfb0 | 446 | , BOOL bBigBlock |
f57e8d3b GS |
447 | #endif |
448 | ); | |
449 | ||
7766f137 | 450 | void* Expand(void* block, size_t size); |
7766f137 | 451 | |
f57e8d3b GS |
452 | #ifdef _USE_BUDDY_BLOCKS |
453 | inline PBLOCK GetFreeListLink(int index) | |
454 | { | |
1604cfb0 MS |
455 | if (index >= nListEntries) |
456 | index = nListEntries-1; | |
457 | return &m_FreeList[index].Dummy[sizeofTag]; | |
f57e8d3b GS |
458 | } |
459 | inline PBLOCK GetOverSizeFreeList(void) | |
460 | { | |
1604cfb0 | 461 | return &m_FreeList[nListEntries-1].Dummy[sizeofTag]; |
f57e8d3b GS |
462 | } |
463 | inline PBLOCK GetEOLFreeList(void) | |
464 | { | |
1604cfb0 | 465 | return &m_FreeList[nListEntries].Dummy[sizeofTag]; |
f57e8d3b GS |
466 | } |
467 | ||
468 | void AddToFreeList(PBLOCK block, size_t size) | |
469 | { | |
1604cfb0 MS |
470 | PBLOCK pFreeList = GetFreeListLink(CalcEntry(size)); |
471 | PBLOCK next = NEXT(pFreeList); | |
472 | NEXT(pFreeList) = block; | |
473 | SetLink(block, pFreeList, next); | |
474 | PREV(next) = block; | |
f57e8d3b GS |
475 | } |
476 | #endif | |
477 | inline size_t CalcAllocSize(size_t size) | |
478 | { | |
1604cfb0 MS |
479 | /* |
480 | * Adjust the real size of the block to be a multiple of sizeof(long), and add | |
481 | * the overhead for the boundary tags. Disallow negative or zero sizes. | |
482 | */ | |
483 | return (size < minBlockSize) ? minAllocSize : (size_t)ROUND_UP(size) + blockOverhead; | |
f57e8d3b GS |
484 | } |
485 | ||
486 | #ifdef _USE_BUDDY_BLOCKS | |
487 | FREE_LIST_ENTRY m_FreeList[nListEntries+1]; // free list with dummy end of list entry as well | |
488 | #else | |
52cbf511 JH |
489 | HANDLE m_hHeap; // memory heap for this script |
490 | char m_FreeDummy[minAllocSize]; // dummy free block | |
491 | PBLOCK m_pFreeList; // pointer to first block on free list | |
f57e8d3b | 492 | #endif |
52cbf511 JH |
493 | PBLOCK m_pRover; // roving pointer into the free list |
494 | HeapRec m_heaps[maxHeaps]; // list of all non-contiguous heap areas | |
495 | int m_nHeaps; // no. of heaps in m_heaps | |
496 | long m_lAllocSize; // current alloc size | |
497 | long m_lRefCount; // number of current users | |
498 | CRITICAL_SECTION m_cs; // access lock | |
f57e8d3b | 499 | |
df3728a2 | 500 | #ifdef _DEBUG_MEM |
f57e8d3b GS |
501 | void WalkHeap(int complete); |
502 | void MemoryUsageMessage(char *str, long x, long y, int c); | |
df3728a2 JH |
503 | FILE* m_pLog; |
504 | #endif | |
7766f137 GS |
505 | }; |
506 | ||
7766f137 GS |
507 | VMem::VMem() |
508 | { | |
509 | m_lRefCount = 1; | |
f57e8d3b | 510 | #ifndef _USE_BUDDY_BLOCKS |
7766f137 | 511 | BOOL bRet = (NULL != (m_hHeap = HeapCreate(HEAP_NO_SERIALIZE, |
1604cfb0 MS |
512 | lAllocStart, /* initial size of heap */ |
513 | 0))); /* no upper limit on size of heap */ | |
7766f137 | 514 | ASSERT(bRet); |
f57e8d3b | 515 | #endif |
7766f137 GS |
516 | |
517 | InitializeCriticalSection(&m_cs); | |
df3728a2 JH |
518 | #ifdef _DEBUG_MEM |
519 | m_pLog = 0; | |
520 | #endif | |
7766f137 GS |
521 | |
522 | Init(); | |
523 | } | |
524 | ||
525 | VMem::~VMem(void) | |
526 | { | |
f57e8d3b | 527 | #ifndef _USE_BUDDY_BLOCKS |
7766f137 | 528 | ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, NULL)); |
df3728a2 | 529 | #endif |
f57e8d3b GS |
530 | WALKHEAPTRACE(); |
531 | ||
7766f137 | 532 | DeleteCriticalSection(&m_cs); |
f57e8d3b GS |
533 | #ifdef _USE_BUDDY_BLOCKS |
534 | for(int index = 0; index < m_nHeaps; ++index) { | |
1604cfb0 | 535 | VirtualFree(m_heaps[index].base, 0, MEM_RELEASE); |
f57e8d3b GS |
536 | } |
537 | #else /* !_USE_BUDDY_BLOCKS */ | |
538 | #ifdef USE_BIGBLOCK_ALLOC | |
539 | for(int index = 0; index < m_nHeaps; ++index) { | |
1604cfb0 MS |
540 | if (m_heaps[index].bBigBlock) { |
541 | VirtualFree(m_heaps[index].base, 0, MEM_RELEASE); | |
542 | } | |
f57e8d3b GS |
543 | } |
544 | #endif | |
7766f137 GS |
545 | BOOL bRet = HeapDestroy(m_hHeap); |
546 | ASSERT(bRet); | |
f57e8d3b | 547 | #endif /* _USE_BUDDY_BLOCKS */ |
7766f137 GS |
548 | } |
549 | ||
550 | void VMem::ReInit(void) | |
551 | { | |
f57e8d3b GS |
552 | for(int index = 0; index < m_nHeaps; ++index) { |
553 | #ifdef _USE_BUDDY_BLOCKS | |
1604cfb0 | 554 | VirtualFree(m_heaps[index].base, 0, MEM_RELEASE); |
f57e8d3b GS |
555 | #else |
556 | #ifdef USE_BIGBLOCK_ALLOC | |
1604cfb0 MS |
557 | if (m_heaps[index].bBigBlock) { |
558 | VirtualFree(m_heaps[index].base, 0, MEM_RELEASE); | |
559 | } | |
560 | else | |
f57e8d3b | 561 | #endif |
1604cfb0 | 562 | HeapFree(m_hHeap, HEAP_NO_SERIALIZE, m_heaps[index].base); |
f57e8d3b GS |
563 | #endif /* _USE_BUDDY_BLOCKS */ |
564 | } | |
7766f137 GS |
565 | |
566 | Init(); | |
567 | } | |
568 | ||
569 | void VMem::Init(void) | |
f57e8d3b GS |
570 | { |
571 | #ifdef _USE_BUDDY_BLOCKS | |
572 | PBLOCK pFreeList; | |
573 | /* | |
574 | * Initialize the free list by placing a dummy zero-length block on it. | |
575 | * Set the end of list marker. | |
576 | * Set the number of non-contiguous heaps to zero. | |
577 | * Set the next allocation size. | |
578 | */ | |
579 | for (int index = 0; index < nListEntries; ++index) { | |
1604cfb0 MS |
580 | pFreeList = GetFreeListLink(index); |
581 | SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0; | |
582 | PREV(pFreeList) = NEXT(pFreeList) = pFreeList; | |
f57e8d3b GS |
583 | } |
584 | pFreeList = GetEOLFreeList(); | |
585 | SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0; | |
586 | PREV(pFreeList) = NEXT(pFreeList) = NULL; | |
587 | m_pRover = GetOverSizeFreeList(); | |
588 | #else | |
589 | /* | |
7766f137 GS |
590 | * Initialize the free list by placing a dummy zero-length block on it. |
591 | * Set the number of non-contiguous heaps to zero. | |
592 | */ | |
f57e8d3b GS |
593 | m_pFreeList = m_pRover = (PBLOCK)(&m_FreeDummy[sizeofTag]); |
594 | PSIZE(m_pFreeList+minAllocSize) = SIZE(m_pFreeList) = 0; | |
7766f137 | 595 | PREV(m_pFreeList) = NEXT(m_pFreeList) = m_pFreeList; |
f57e8d3b | 596 | #endif |
7766f137 GS |
597 | |
598 | m_nHeaps = 0; | |
599 | m_lAllocSize = lAllocStart; | |
600 | } | |
601 | ||
602 | void* VMem::Malloc(size_t size) | |
603 | { | |
604 | WALKHEAP(); | |
605 | ||
f57e8d3b GS |
606 | PBLOCK ptr; |
607 | size_t lsize, rem; | |
7766f137 | 608 | /* |
f57e8d3b | 609 | * Disallow negative or zero sizes. |
7766f137 | 610 | */ |
f57e8d3b | 611 | size_t realsize = CalcAllocSize(size); |
7766f137 | 612 | if((int)realsize < minAllocSize || size == 0) |
1604cfb0 | 613 | return NULL; |
7766f137 | 614 | |
f57e8d3b GS |
615 | #ifdef _USE_BUDDY_BLOCKS |
616 | /* | |
617 | * Check the free list of small blocks if this is free use it | |
618 | * Otherwise check the rover if it has no blocks then | |
619 | * Scan the free list entries use the first free block | |
620 | * split the block if needed, stop at end of list marker | |
621 | */ | |
622 | { | |
1604cfb0 MS |
623 | int index = CalcEntry(realsize); |
624 | if (index < nListEntries-1) { | |
625 | ptr = GetFreeListLink(index); | |
626 | lsize = SIZE(ptr); | |
627 | if (lsize >= realsize) { | |
628 | rem = lsize - realsize; | |
629 | if(rem < minAllocSize) { | |
630 | /* Unlink the block from the free list. */ | |
631 | Unlink(ptr); | |
632 | } | |
633 | else { | |
634 | /* | |
635 | * split the block | |
636 | * The remainder is big enough to split off into a new block. | |
637 | * Use the end of the block, resize the beginning of the block | |
638 | * no need to change the free list. | |
639 | */ | |
640 | SetTags(ptr, rem); | |
641 | ptr += SIZE(ptr); | |
642 | lsize = realsize; | |
643 | } | |
644 | SetTags(ptr, lsize | 1); | |
645 | return ptr; | |
646 | } | |
647 | ptr = m_pRover; | |
648 | lsize = SIZE(ptr); | |
649 | if (lsize >= realsize) { | |
650 | rem = lsize - realsize; | |
651 | if(rem < minAllocSize) { | |
652 | /* Unlink the block from the free list. */ | |
653 | Unlink(ptr); | |
654 | } | |
655 | else { | |
656 | /* | |
657 | * split the block | |
658 | * The remainder is big enough to split off into a new block. | |
659 | * Use the end of the block, resize the beginning of the block | |
660 | * no need to change the free list. | |
661 | */ | |
662 | SetTags(ptr, rem); | |
663 | ptr += SIZE(ptr); | |
664 | lsize = realsize; | |
665 | } | |
666 | SetTags(ptr, lsize | 1); | |
667 | return ptr; | |
668 | } | |
669 | ptr = GetFreeListLink(index+1); | |
670 | while (NEXT(ptr)) { | |
671 | lsize = SIZE(ptr); | |
672 | if (lsize >= realsize) { | |
673 | size_t rem = lsize - realsize; | |
674 | if(rem < minAllocSize) { | |
675 | /* Unlink the block from the free list. */ | |
676 | Unlink(ptr); | |
677 | } | |
678 | else { | |
679 | /* | |
680 | * split the block | |
681 | * The remainder is big enough to split off into a new block. | |
682 | * Use the end of the block, resize the beginning of the block | |
683 | * no need to change the free list. | |
684 | */ | |
685 | SetTags(ptr, rem); | |
686 | ptr += SIZE(ptr); | |
687 | lsize = realsize; | |
688 | } | |
689 | SetTags(ptr, lsize | 1); | |
690 | return ptr; | |
691 | } | |
692 | ptr += sizeof(FREE_LIST_ENTRY); | |
693 | } | |
694 | } | |
f57e8d3b GS |
695 | } |
696 | #endif | |
697 | ||
7766f137 GS |
698 | /* |
699 | * Start searching the free list at the rover. If we arrive back at rover without | |
700 | * finding anything, allocate some memory from the heap and try again. | |
701 | */ | |
f57e8d3b GS |
702 | ptr = m_pRover; /* start searching at rover */ |
703 | int loops = 2; /* allow two times through the loop */ | |
7766f137 | 704 | for(;;) { |
1604cfb0 MS |
705 | lsize = SIZE(ptr); |
706 | ASSERT((lsize&1)==0); | |
707 | /* is block big enough? */ | |
708 | if(lsize >= realsize) { | |
709 | /* if the remainder is too small, don't bother splitting the block. */ | |
710 | rem = lsize - realsize; | |
711 | if(rem < minAllocSize) { | |
712 | if(m_pRover == ptr) | |
713 | m_pRover = NEXT(ptr); | |
714 | ||
715 | /* Unlink the block from the free list. */ | |
716 | Unlink(ptr); | |
717 | } | |
718 | else { | |
719 | /* | |
720 | * split the block | |
721 | * The remainder is big enough to split off into a new block. | |
722 | * Use the end of the block, resize the beginning of the block | |
723 | * no need to change the free list. | |
724 | */ | |
725 | SetTags(ptr, rem); | |
726 | ptr += SIZE(ptr); | |
727 | lsize = realsize; | |
728 | } | |
729 | /* Set the boundary tags to mark it as allocated. */ | |
730 | SetTags(ptr, lsize | 1); | |
731 | return ((void *)ptr); | |
732 | } | |
733 | ||
734 | /* | |
735 | * This block was unsuitable. If we've gone through this list once already without | |
736 | * finding anything, allocate some new memory from the heap and try again. | |
737 | */ | |
738 | ptr = NEXT(ptr); | |
739 | if(ptr == m_pRover) { | |
740 | if(!(loops-- && Getmem(realsize))) { | |
741 | return NULL; | |
742 | } | |
743 | ptr = m_pRover; | |
744 | } | |
7766f137 GS |
745 | } |
746 | } | |
747 | ||
748 | void* VMem::Realloc(void* block, size_t size) | |
749 | { | |
750 | WALKHEAP(); | |
751 | ||
752 | /* if size is zero, free the block. */ | |
753 | if(size == 0) { | |
1604cfb0 MS |
754 | Free(block); |
755 | return (NULL); | |
7766f137 GS |
756 | } |
757 | ||
758 | /* if block pointer is NULL, do a Malloc(). */ | |
759 | if(block == NULL) | |
1604cfb0 | 760 | return Malloc(size); |
7766f137 GS |
761 | |
762 | /* | |
763 | * Grow or shrink the block in place. | |
764 | * if the block grows then the next block will be used if free | |
765 | */ | |
766 | if(Expand(block, size) != NULL) | |
1604cfb0 | 767 | return block; |
7766f137 | 768 | |
f57e8d3b | 769 | size_t realsize = CalcAllocSize(size); |
7766f137 | 770 | if((int)realsize < minAllocSize) |
1604cfb0 | 771 | return NULL; |
7766f137 GS |
772 | |
773 | /* | |
774 | * see if the previous block is free, and is it big enough to cover the new size | |
775 | * if merged with the current block. | |
776 | */ | |
777 | PBLOCK ptr = (PBLOCK)block; | |
778 | size_t cursize = SIZE(ptr) & ~1; | |
779 | size_t psize = PSIZE(ptr); | |
780 | if((psize&1) == 0 && (psize + cursize) >= realsize) { | |
1604cfb0 MS |
781 | PBLOCK prev = ptr - psize; |
782 | if(m_pRover == prev) | |
783 | m_pRover = NEXT(prev); | |
784 | ||
785 | /* Unlink the next block from the free list. */ | |
786 | Unlink(prev); | |
787 | ||
788 | /* Copy contents of old block to new location, make it the current block. */ | |
789 | memmove(prev, ptr, cursize); | |
790 | cursize += psize; /* combine sizes */ | |
791 | ptr = prev; | |
792 | ||
793 | size_t rem = cursize - realsize; | |
794 | if(rem >= minAllocSize) { | |
795 | /* | |
796 | * The remainder is big enough to be a new block. Set boundary | |
797 | * tags for the resized block and the new block. | |
798 | */ | |
799 | prev = ptr + realsize; | |
800 | /* | |
801 | * add the new block to the free list. | |
802 | * next block cannot be free | |
803 | */ | |
804 | SetTags(prev, rem); | |
f57e8d3b | 805 | #ifdef _USE_BUDDY_BLOCKS |
1604cfb0 | 806 | AddToFreeList(prev, rem); |
f57e8d3b | 807 | #else |
1604cfb0 | 808 | AddToFreeList(prev, m_pFreeList); |
f57e8d3b | 809 | #endif |
1604cfb0 | 810 | cursize = realsize; |
7766f137 | 811 | } |
1604cfb0 MS |
812 | /* Set the boundary tags to mark it as allocated. */ |
813 | SetTags(ptr, cursize | 1); | |
7766f137 GS |
814 | return ((void *)ptr); |
815 | } | |
816 | ||
817 | /* Allocate a new block, copy the old to the new, and free the old. */ | |
818 | if((ptr = (PBLOCK)Malloc(size)) != NULL) { | |
1604cfb0 MS |
819 | memmove(ptr, block, cursize-blockOverhead); |
820 | Free(block); | |
7766f137 GS |
821 | } |
822 | return ((void *)ptr); | |
823 | } | |
824 | ||
825 | void VMem::Free(void* p) | |
826 | { | |
827 | WALKHEAP(); | |
828 | ||
829 | /* Ignore null pointer. */ | |
830 | if(p == NULL) | |
1604cfb0 | 831 | return; |
7766f137 GS |
832 | |
833 | PBLOCK ptr = (PBLOCK)p; | |
834 | ||
835 | /* Check for attempt to free a block that's already free. */ | |
836 | size_t size = SIZE(ptr); | |
837 | if((size&1) == 0) { | |
1604cfb0 MS |
838 | MEMODSlx("Attempt to free previously freed block", (long)p); |
839 | return; | |
7766f137 GS |
840 | } |
841 | size &= ~1; /* remove allocated tag */ | |
842 | ||
843 | /* if previous block is free, add this block to it. */ | |
f57e8d3b | 844 | #ifndef _USE_BUDDY_BLOCKS |
7766f137 | 845 | int linked = FALSE; |
f57e8d3b | 846 | #endif |
7766f137 GS |
847 | size_t psize = PSIZE(ptr); |
848 | if((psize&1) == 0) { | |
1604cfb0 MS |
849 | ptr -= psize; /* point to previous block */ |
850 | size += psize; /* merge the sizes of the two blocks */ | |
f57e8d3b | 851 | #ifdef _USE_BUDDY_BLOCKS |
1604cfb0 | 852 | Unlink(ptr); |
f57e8d3b | 853 | #else |
1604cfb0 | 854 | linked = TRUE; /* it's already on the free list */ |
f57e8d3b | 855 | #endif |
7766f137 GS |
856 | } |
857 | ||
858 | /* if the next physical block is free, merge it with this block. */ | |
859 | PBLOCK next = ptr + size; /* point to next physical block */ | |
860 | size_t nsize = SIZE(next); | |
861 | if((nsize&1) == 0) { | |
1604cfb0 MS |
862 | /* block is free move rover if needed */ |
863 | if(m_pRover == next) | |
864 | m_pRover = NEXT(next); | |
7766f137 | 865 | |
1604cfb0 MS |
866 | /* unlink the next block from the free list. */ |
867 | Unlink(next); | |
7766f137 | 868 | |
1604cfb0 MS |
869 | /* merge the sizes of this block and the next block. */ |
870 | size += nsize; | |
7766f137 GS |
871 | } |
872 | ||
873 | /* Set the boundary tags for the block; */ | |
874 | SetTags(ptr, size); | |
875 | ||
876 | /* Link the block to the head of the free list. */ | |
f57e8d3b | 877 | #ifdef _USE_BUDDY_BLOCKS |
1604cfb0 | 878 | AddToFreeList(ptr, size); |
f57e8d3b | 879 | #else |
7766f137 | 880 | if(!linked) { |
1604cfb0 | 881 | AddToFreeList(ptr, m_pFreeList); |
7766f137 | 882 | } |
f57e8d3b | 883 | #endif |
7766f137 GS |
884 | } |
885 | ||
886 | void VMem::GetLock(void) | |
887 | { | |
888 | EnterCriticalSection(&m_cs); | |
889 | } | |
890 | ||
891 | void VMem::FreeLock(void) | |
892 | { | |
893 | LeaveCriticalSection(&m_cs); | |
894 | } | |
895 | ||
896 | int VMem::IsLocked(void) | |
897 | { | |
90430aa1 GS |
898 | #if 0 |
899 | /* XXX TryEnterCriticalSection() is not available in some versions | |
900 | * of Windows 95. Since this code is not used anywhere yet, we | |
901 | * skirt the issue for now. */ | |
7766f137 GS |
902 | BOOL bAccessed = TryEnterCriticalSection(&m_cs); |
903 | if(bAccessed) { | |
1604cfb0 | 904 | LeaveCriticalSection(&m_cs); |
7766f137 GS |
905 | } |
906 | return !bAccessed; | |
90430aa1 GS |
907 | #else |
908 | ASSERT(0); /* alarm bells for when somebody calls this */ | |
909 | return 0; | |
910 | #endif | |
7766f137 GS |
911 | } |
912 | ||
913 | ||
914 | long VMem::Release(void) | |
915 | { | |
916 | long lCount = InterlockedDecrement(&m_lRefCount); | |
917 | if(!lCount) | |
1604cfb0 | 918 | delete this; |
7766f137 GS |
919 | return lCount; |
920 | } | |
921 | ||
922 | long VMem::AddRef(void) | |
923 | { | |
924 | long lCount = InterlockedIncrement(&m_lRefCount); | |
925 | return lCount; | |
926 | } | |
927 | ||
928 | ||
929 | int VMem::Getmem(size_t requestSize) | |
930 | { /* returns -1 is successful 0 if not */ | |
f57e8d3b GS |
931 | #ifdef USE_BIGBLOCK_ALLOC |
932 | BOOL bBigBlock; | |
933 | #endif | |
7766f137 GS |
934 | void *ptr; |
935 | ||
936 | /* Round up size to next multiple of 64K. */ | |
937 | size_t size = (size_t)ROUND_UP64K(requestSize); | |
f57e8d3b | 938 | |
7766f137 GS |
939 | /* |
940 | * if the size requested is smaller than our current allocation size | |
941 | * adjust up | |
942 | */ | |
943 | if(size < (unsigned long)m_lAllocSize) | |
1604cfb0 | 944 | size = m_lAllocSize; |
7766f137 GS |
945 | |
946 | /* Update the size to allocate on the next request */ | |
947 | if(m_lAllocSize != lAllocMax) | |
1604cfb0 | 948 | m_lAllocSize <<= 2; |
7766f137 | 949 | |
f57e8d3b GS |
950 | #ifndef _USE_BUDDY_BLOCKS |
951 | if(m_nHeaps != 0 | |
952 | #ifdef USE_BIGBLOCK_ALLOC | |
1604cfb0 | 953 | && !m_heaps[m_nHeaps-1].bBigBlock |
f57e8d3b | 954 | #endif |
1604cfb0 MS |
955 | ) { |
956 | /* Expand the last allocated heap */ | |
957 | ptr = HeapReAlloc(m_hHeap, HEAP_REALLOC_IN_PLACE_ONLY|HEAP_NO_SERIALIZE, | |
958 | m_heaps[m_nHeaps-1].base, | |
959 | m_heaps[m_nHeaps-1].len + size); | |
960 | if(ptr != 0) { | |
961 | HeapAdd(((char*)ptr) + m_heaps[m_nHeaps-1].len, size | |
f57e8d3b | 962 | #ifdef USE_BIGBLOCK_ALLOC |
1604cfb0 | 963 | , FALSE |
f57e8d3b | 964 | #endif |
1604cfb0 MS |
965 | ); |
966 | return -1; | |
967 | } | |
7766f137 | 968 | } |
f57e8d3b | 969 | #endif /* _USE_BUDDY_BLOCKS */ |
7766f137 GS |
970 | |
971 | /* | |
972 | * if we didn't expand a block to cover the requested size | |
973 | * allocate a new Heap | |
974 | * the size of this block must include the additional dummy tags at either end | |
975 | * the above ROUND_UP64K may not have added any memory to include this. | |
976 | */ | |
977 | if(size == requestSize) | |
1604cfb0 | 978 | size = (size_t)ROUND_UP64K(requestSize+(blockOverhead)); |
f57e8d3b GS |
979 | |
980 | Restart: | |
981 | #ifdef _USE_BUDDY_BLOCKS | |
982 | ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); | |
983 | #else | |
984 | #ifdef USE_BIGBLOCK_ALLOC | |
985 | bBigBlock = FALSE; | |
986 | if (size >= nMaxHeapAllocSize) { | |
1604cfb0 MS |
987 | bBigBlock = TRUE; |
988 | ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); | |
f57e8d3b GS |
989 | } |
990 | else | |
991 | #endif | |
992 | ptr = HeapAlloc(m_hHeap, HEAP_NO_SERIALIZE, size); | |
993 | #endif /* _USE_BUDDY_BLOCKS */ | |
994 | ||
995 | if (!ptr) { | |
1604cfb0 MS |
996 | /* try to allocate a smaller chunk */ |
997 | size >>= 1; | |
998 | if(size > requestSize) | |
999 | goto Restart; | |
f57e8d3b | 1000 | } |
7766f137 | 1001 | |
7766f137 | 1002 | if(ptr == 0) { |
1604cfb0 MS |
1003 | MEMODSlx("HeapAlloc failed on size!!!", size); |
1004 | return 0; | |
7766f137 GS |
1005 | } |
1006 | ||
f57e8d3b GS |
1007 | #ifdef _USE_BUDDY_BLOCKS |
1008 | if (HeapAdd(ptr, size)) { | |
1604cfb0 MS |
1009 | VirtualFree(ptr, 0, MEM_RELEASE); |
1010 | return 0; | |
f57e8d3b GS |
1011 | } |
1012 | #else | |
1013 | #ifdef USE_BIGBLOCK_ALLOC | |
1014 | if (HeapAdd(ptr, size, bBigBlock)) { | |
1604cfb0 MS |
1015 | if (bBigBlock) { |
1016 | VirtualFree(ptr, 0, MEM_RELEASE); | |
1017 | } | |
f57e8d3b GS |
1018 | } |
1019 | #else | |
7766f137 | 1020 | HeapAdd(ptr, size); |
f57e8d3b GS |
1021 | #endif |
1022 | #endif /* _USE_BUDDY_BLOCKS */ | |
7766f137 GS |
1023 | return -1; |
1024 | } | |
1025 | ||
f57e8d3b GS |
/*
 * Register a newly obtained chunk of memory with the allocator, merging
 * it with an existing heap when contiguous, and hand its usable span to
 * the free list via Free().
 */
int VMem::HeapAdd(void* p, size_t size
#ifdef USE_BIGBLOCK_ALLOC
    , BOOL bBigBlock
#endif
    )
{   /* if the block can be successfully added to the heap, returns 0; otherwise -1. */
    int index;

    /* Check size, then round size down to next long word boundary. */
    if(size < minAllocSize)
	return -1;

    size = (size_t)ROUND_DOWN(size);
    PBLOCK ptr = (PBLOCK)p;

#ifdef USE_BIGBLOCK_ALLOC
    if (!bBigBlock) {
#endif
	/*
	 * Search for another heap area that's contiguous with the bottom of this new area.
	 * (It should be extremely unusual to find one that's contiguous with the top).
	 */
	for(index = 0; index < m_nHeaps; ++index) {
	    if(ptr == m_heaps[index].base + (int)m_heaps[index].len) {
		/*
		 * The new block is contiguous with a previously allocated heap area.  Add its
		 * length to that of the previous heap.  Merge it with the dummy end-of-heap
		 * area marker of the previous heap.
		 */
		m_heaps[index].len += size;
		break;
	    }
	}
#ifdef USE_BIGBLOCK_ALLOC
    }
    else {
	/* big blocks always get their own heap entry; skip the merge scan */
	index = m_nHeaps;
    }
#endif

    if(index == m_nHeaps) {
	/* The new block is not contiguous, or is BigBlock.  Add it to the heap list. */
	if(m_nHeaps == maxHeaps) {
	    return -1;	/* too many non-contiguous heaps */
	}
	m_heaps[m_nHeaps].base = ptr;
	m_heaps[m_nHeaps].len = size;
#ifdef USE_BIGBLOCK_ALLOC
	m_heaps[m_nHeaps].bBigBlock = bBigBlock;
#endif
	m_nHeaps++;

	/*
	 * Reserve the first LONG in the block for the ending boundary tag of a dummy
	 * block at the start of the heap area.
	 */
	size -= blockOverhead;
	ptr += blockOverhead;
	PSIZE(ptr) = 1;	/* mark the dummy previous block as allocated */
    }

    /*
     * Convert the heap to one large block.  Set up its boundary tags, and those of
     * marker block after it.  The marker block before the heap will already have
     * been set up if this heap is not contiguous with the end of another heap.
     */
    SetTags(ptr, size | 1);		/* low bit set = "allocated" for Free() below */
    PBLOCK next = ptr + size;	/* point to dummy end block */
    SIZE(next) = 1;	/* mark the dummy end block as allocated */

    /*
     * Link the block to the start of the free list by calling free().
     * This will merge the block with any adjacent free blocks.
     */
    Free(ptr);
    return 0;
}
1103 | ||
1104 | ||
/*
 * Try to resize an allocated block in place to `size` bytes.  Shrinking
 * always succeeds (splitting off a remainder when it is big enough to
 * be a block); growing succeeds only when the physically-next block is
 * free and large enough.  Returns the (unmoved) block pointer on
 * success, NULL when the block cannot be resized without moving.
 */
void* VMem::Expand(void* block, size_t size)
{
    /*
     * Disallow negative or zero sizes.
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
	return NULL;

    PBLOCK ptr = (PBLOCK)block;

    /* if the current size is the same as requested, do nothing. */
    size_t cursize = SIZE(ptr) & ~1;	/* mask off the "allocated" bit */
    if(cursize == realsize) {
	return block;
    }

    /* if the block is being shrunk, convert the remainder of the block into a new free block. */
    if(realsize <= cursize) {
	size_t nextsize = cursize - realsize;	/* size of new remainder block */
	if(nextsize >= minAllocSize) {
	    /*
	     * Split the block
	     * Set boundary tags for the resized block and the new block.
	     */
	    SetTags(ptr, realsize | 1);
	    ptr += realsize;

	    /*
	     * add the new block to the free list.
	     * call Free to merge this block with next block if free
	     */
	    SetTags(ptr, nextsize | 1);
	    Free(ptr);
	}
	/* remainder too small to split off: caller keeps the slack */
	return block;
    }

    PBLOCK next = ptr + cursize;	/* physically adjacent block */
    size_t nextsize = SIZE(next);

    /* Check the next block for consistency.*/
    if((nextsize&1) == 0 && (nextsize + cursize) >= realsize) {
	/*
	 * The next block is free and big enough.  Add the part that's needed
	 * to our block, and split the remainder off into a new block.
	 */
	if(m_pRover == next)	/* don't leave the rover on an unlinked block */
	    m_pRover = NEXT(next);

	/* Unlink the next block from the free list. */
	Unlink(next);
	cursize += nextsize;	/* combine sizes */

	size_t rem = cursize - realsize;	/* size of remainder */
	if(rem >= minAllocSize) {
	    /*
	     * The remainder is big enough to be a new block.
	     * Set boundary tags for the resized block and the new block.
	     */
	    next = ptr + realsize;
	    /*
	     * add the new block to the free list.
	     * next block cannot be free
	     */
	    SetTags(next, rem);
#ifdef _USE_BUDDY_BLOCKS
	    AddToFreeList(next, rem);
#else
	    AddToFreeList(next, m_pFreeList);
#endif
	    cursize = realsize;
	}
	/* Set the boundary tags to mark it as allocated. */
	SetTags(ptr, cursize | 1);
	return ((void *)ptr);
    }
    return NULL;
}
1185 | ||
1186 | #ifdef _DEBUG_MEM | |
df3728a2 | 1187 | #define LOG_FILENAME ".\\MemLog.txt" |
7766f137 | 1188 | |
f57e8d3b | 1189 | void VMem::MemoryUsageMessage(char *str, long x, long y, int c) |
7766f137 | 1190 | { |
7766f137 GS |
1191 | char szBuffer[512]; |
1192 | if(str) { | |
1604cfb0 MS |
1193 | if(!m_pLog) |
1194 | m_pLog = fopen(LOG_FILENAME, "w"); | |
1195 | sprintf(szBuffer, str, x, y, c); | |
1196 | fputs(szBuffer, m_pLog); | |
7766f137 GS |
1197 | } |
1198 | else { | |
1604cfb0 MS |
1199 | if(m_pLog) { |
1200 | fflush(m_pLog); | |
1201 | fclose(m_pLog); | |
1202 | m_pLog = 0; | |
1203 | } | |
7766f137 GS |
1204 | } |
1205 | } | |
1206 | ||
f57e8d3b | 1207 | void VMem::WalkHeap(int complete) |
7766f137 | 1208 | { |
f57e8d3b | 1209 | if(complete) { |
1604cfb0 MS |
1210 | MemoryUsageMessage(NULL, 0, 0, 0); |
1211 | size_t total = 0; | |
1212 | for(int i = 0; i < m_nHeaps; ++i) { | |
1213 | total += m_heaps[i].len; | |
1214 | } | |
1215 | MemoryUsageMessage("VMem heaps used %d. Total memory %08x\n", m_nHeaps, total, 0); | |
1216 | ||
1217 | /* Walk all the heaps - verify structures */ | |
1218 | for(int index = 0; index < m_nHeaps; ++index) { | |
1219 | PBLOCK ptr = m_heaps[index].base; | |
1220 | size_t size = m_heaps[index].len; | |
f57e8d3b GS |
1221 | #ifndef _USE_BUDDY_BLOCKS |
1222 | #ifdef USE_BIGBLOCK_ALLOC | |
1604cfb0 | 1223 | if (!m_heaps[m_nHeaps].bBigBlock) |
f57e8d3b | 1224 | #endif |
1604cfb0 | 1225 | ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, ptr)); |
f57e8d3b | 1226 | #endif |
7766f137 | 1227 | |
1604cfb0 MS |
1228 | /* set over reserved header block */ |
1229 | size -= blockOverhead; | |
1230 | ptr += blockOverhead; | |
1231 | PBLOCK pLast = ptr + size; | |
1232 | ASSERT(PSIZE(ptr) == 1); /* dummy previous block is allocated */ | |
1233 | ASSERT(SIZE(pLast) == 1); /* dummy next block is allocated */ | |
1234 | while(ptr < pLast) { | |
1235 | ASSERT(ptr > m_heaps[index].base); | |
1236 | size_t cursize = SIZE(ptr) & ~1; | |
1237 | ASSERT((PSIZE(ptr+cursize) & ~1) == cursize); | |
1238 | MemoryUsageMessage("Memory Block %08x: Size %08x %c\n", (long)ptr, cursize, (SIZE(ptr)&1) ? 'x' : ' '); | |
1239 | if(!(SIZE(ptr)&1)) { | |
1240 | /* this block is on the free list */ | |
1241 | PBLOCK tmp = NEXT(ptr); | |
1242 | while(tmp != ptr) { | |
1243 | ASSERT((SIZE(tmp)&1)==0); | |
1244 | if(tmp == m_pFreeList) | |
1245 | break; | |
1246 | ASSERT(NEXT(tmp)); | |
1247 | tmp = NEXT(tmp); | |
1248 | } | |
1249 | if(tmp == ptr) { | |
1250 | MemoryUsageMessage("Memory Block %08x: Size %08x free but not in free list\n", (long)ptr, cursize, 0); | |
1251 | } | |
1252 | } | |
1253 | ptr += cursize; | |
1254 | } | |
1255 | } | |
1256 | MemoryUsageMessage(NULL, 0, 0, 0); | |
7766f137 GS |
1257 | } |
1258 | } | |
f57e8d3b GS |
1259 | #endif /* _DEBUG_MEM */ |
1260 | ||
1261 | #endif /* _USE_MSVCRT_MEM_ALLOC */ | |
7766f137 GS |
1262 | |
1263 | #endif /* ___VMEM_H_INC___ */ |