/*
 * PROGRAM: Client/Server Common Code
 * MODULE: alloc.h
 * DESCRIPTION: Memory Pool Manager (based on B+ tree)
 *
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 * Software distributed under the License is distributed AS IS,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied.
 * See the License for the specific language governing rights
 * and limitations under the License.
 *
 * The Original Code was created by Nickolay Samofatov
 * for the Firebird Open Source RDBMS project.
 *
 * STL allocator is based on one by Mike Nordell and John Bellardo
 *
 * Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
 * and all contributors signed below.
 *
 * All Rights Reserved.
 *
 * The Original Code was created by James A. Starkey for IBPhoenix.
 *
 * Copyright (c) 2004 James A. Starkey
 * All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alex Peshkoff <peshkoff@mail.ru>
 * added PermanentStorage and AutoStorage classes.
 * merged parts of Nickolay and Jim code to be used together
 *
 */

#ifndef CLASSES_ALLOC_H
#define CLASSES_ALLOC_H

#include "firebird.h"
#include "fb_types.h"
#include "../common/classes/locks.h"
#include "../common/classes/auto.h"
#include "../common/classes/fb_atomic.h"

#include <stdio.h>

#if defined(MVS) || defined(__VMS) || defined(DARWIN)
#include <stdlib.h>
#else
#include <malloc.h>
#endif

#include <memory.h>

#undef MEM_DEBUG
#ifdef DEBUG_GDS_ALLOC
#define MEM_DEBUG
#endif

#ifdef USE_VALGRIND
// Size of the Valgrind red zone applied before and after each memory block allocated for the user
#define VALGRIND_REDZONE 0 //8
// When a memory block is deallocated by the user, it must pass through a queue of this
// length before it is actually freed and its access protection removed.
#define DELAYED_FREE_COUNT 1024
// When a memory extent is deallocated during pool destruction, it must pass through
// a queue of this length before it is actually returned to the system.
#define DELAYED_EXTENT_COUNT 32
#undef MEM_DEBUG // valgrind works instead
#else
#define VALGRIND_REDZONE 8
#endif
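
// A minimal sketch (hypothetical, not the actual code in alloc.cpp) of the
// delayed-free idea: freed blocks are parked in a fixed-size ring buffer and
// stay read/write protected while there; only when the ring is full is the
// oldest entry actually released, so stale pointers keep tripping Valgrind
// for as long as possible. actuallyFree() is an assumed placeholder.
//
//   void* delayed[DELAYED_FREE_COUNT];
//   size_t delayedCount = 0, delayedPos = 0;
//
//   void delayedFree(void* block)
//   {
//       if (delayedCount < DELAYED_FREE_COUNT)
//       {
//           delayed[delayedCount++] = block;   // ring not yet full - just park the block
//           return;
//       }
//       void* oldest = delayed[delayedPos];    // evict the oldest parked block
//       delayed[delayedPos] = block;
//       delayedPos = (delayedPos + 1) % DELAYED_FREE_COUNT;
//       actuallyFree(oldest);                  // hypothetical: return memory to the system
//   }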

#ifdef USE_SYSTEM_NEW
#define OOM_EXCEPTION std::bad_alloc
#else
#define OOM_EXCEPTION Firebird::BadAlloc
#endif


namespace Firebird {

// Alignment for all memory blocks. Sizes of memory blocks in headers are measured in these units
const size_t ALLOC_ALIGNMENT = FB_ALIGNMENT;

static inline size_t MEM_ALIGN(size_t value)
{
	return FB_ALIGN(value, ALLOC_ALIGNMENT);
}
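
// For illustration (assuming ALLOC_ALIGNMENT == 8; the real FB_ALIGNMENT is
// platform-dependent), MEM_ALIGN rounds a size up to the next alignment boundary:
//
//   MEM_ALIGN(1)  == 8
//   MEM_ALIGN(8)  == 8
//   MEM_ALIGN(13) == 16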

static const unsigned int DEFAULT_ROUNDING = 8;
static const unsigned int DEFAULT_CUTOFF = 4096;
static const size_t DEFAULT_ALLOCATION = 65536;

class MemoryPool;
class MemoryStats
{
public:
	explicit MemoryStats(MemoryStats* parent = NULL)
		: mst_parent(parent), mst_usage(0), mst_mapped(0), mst_max_usage(0), mst_max_mapped(0)
	{}

	~MemoryStats()
	{}

	size_t getCurrentUsage() const throw () { return mst_usage.value(); }
	size_t getMaximumUsage() const throw () { return mst_max_usage; }
	size_t getCurrentMapping() const throw () { return mst_mapped.value(); }
	size_t getMaximumMapping() const throw () { return mst_max_mapped; }

private:
	// Forbid copying/assignment
	MemoryStats(const MemoryStats&);
	MemoryStats& operator=(const MemoryStats&);

	MemoryStats* mst_parent;

	// Currently allocated memory (without allocator overhead)
	// Useful for monitoring engine memory leaks
	AtomicCounter mst_usage;
	// Amount of memory mapped (including all overheads)
	// Useful for monitoring OS memory consumption
	AtomicCounter mst_mapped;

	// We don't particularly care about extreme precision of these max values,
	// which is why we don't synchronize them
	size_t mst_max_usage;
	size_t mst_max_mapped;

	// These methods are thread-safe because they use only atomic counters
	void increment_usage(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			const size_t temp = statistics->mst_usage.exchangeAdd(size) + size;
			if (temp > statistics->mst_max_usage)
				statistics->mst_max_usage = temp;
		}
	}

	void decrement_usage(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			statistics->mst_usage -= size;
		}
	}

	void increment_mapping(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			const size_t temp = statistics->mst_mapped.exchangeAdd(size) + size;
			if (temp > statistics->mst_max_mapped)
				statistics->mst_max_mapped = temp;
		}
	}

	void decrement_mapping(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			statistics->mst_mapped -= size;
		}
	}

	friend class MemoryPool;
};
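
// A usage sketch (hypothetical names): statistics groups chain through
// mst_parent, so a change reported to a child group propagates to every
// ancestor. Only MemoryPool may call the increment/decrement methods
// (it is declared a friend above).
//
//   Firebird::MemoryStats processStats;                  // root group
//   Firebird::MemoryStats attachmentStats(&processStats);
//
//   // A pool bound to attachmentStats updates both groups on every
//   // allocation, so at any moment:
//   size_t attUse  = attachmentStats.getCurrentUsage();
//   size_t procUse = processStats.getCurrentUsage();     // includes attUse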

typedef SLONG INT32;

class MemBlock;

class MemHeader
{
public:
	union
	{
		MemoryPool* pool;
		MemBlock* next;
	};
	size_t length;
#ifdef DEBUG_GDS_ALLOC
	INT32 lineNumber;
	const char* fileName;
#endif
#if defined(USE_VALGRIND) && (VALGRIND_REDZONE != 0)
	const char mbk_valgrind_redzone[VALGRIND_REDZONE];
#endif
};

class MemBlock : public MemHeader
{
public:
	UCHAR body;
};
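
// Layout note (an illustrative sketch, not normative code): the pointer handed
// to the user is the address of MemBlock::body, so the allocator can step back
// from a user pointer to its header along these lines:
//
//   void* userPtr = ...;
//   MemBlock* block = (MemBlock*) ((UCHAR*) userPtr - offsetof(MemBlock, body));
//   MemoryPool* owner = block->pool;   // the union holds pool while the block is allocated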

class MemBigObject;

class MemBigHeader
{
public:
	MemBigObject* next;
	MemBigObject* prior;
};

class MemBigObject : public MemBigHeader
{
public:
	MemHeader memHeader;
};


class MemFreeBlock : public MemBigObject
{
public:
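	// The member names suggest a two-dimensional doubly-linked structure:
	// nextLarger/priorSmaller order distinct free-block sizes ascendingly,
	// while nextTwin/priorTwin chain free blocks of equal size together.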
	MemFreeBlock* nextLarger;
	MemFreeBlock* priorSmaller;
	MemFreeBlock* nextTwin;
	MemFreeBlock* priorTwin;
};


class MemSmallHunk
{
public:
	MemSmallHunk* nextHunk;
	size_t length;
	UCHAR* memory;
	size_t spaceRemaining;
};

class MemBigHunk
{
public:
	MemBigHunk* nextHunk;
	size_t length;
	MemBigHeader blocks;
};

class MemoryPool
{
private:
	MemoryPool(MemoryPool& parent, MemoryStats& stats,
		bool shared = true, int rounding = DEFAULT_ROUNDING,
		int cutoff = DEFAULT_CUTOFF, int minAllocation = DEFAULT_ALLOCATION);
	explicit MemoryPool(bool shared = true, int rounding = DEFAULT_ROUNDING,
		int cutoff = DEFAULT_CUTOFF, int minAllocation = DEFAULT_ALLOCATION);
	void init(void* memory, size_t length);
	virtual ~MemoryPool(void);

public:
	static MemoryPool* defaultMemoryManager;

private:
	size_t roundingSize, threshold, minAllocation;
	//int headerSize;
	typedef AtomicPointer<MemBlock> FreeChainPtr;
	FreeChainPtr* freeObjects;
	MemBigHunk* bigHunks;
	MemSmallHunk* smallHunks;
	MemFreeBlock freeBlocks;
	MemFreeBlock junk;
	Mutex mutex;
	int blocksAllocated;
	int blocksActive;
	bool threadShared;		// Shared across threads, requires locking
	bool pool_destroying;

	// Default statistics group for process
	static MemoryStats* default_stats_group;
	// Statistics group for the pool
	MemoryStats* stats;
	// Parent pool if present
	MemoryPool* parent;
	// Memory used
	AtomicCounter used_memory, mapped_memory;

protected:
	MemBlock* alloc(const size_t length) throw (OOM_EXCEPTION);
	void releaseBlock(MemBlock* block) throw ();

public:
	void* allocate(size_t size
#ifdef DEBUG_GDS_ALLOC
		, const char* fileName = NULL, int line = 0
#endif
	) throw (OOM_EXCEPTION);

protected:
	static void corrupt(const char* text) throw ();

private:
	virtual void memoryIsExhausted(void) throw (OOM_EXCEPTION);
	void remove(MemFreeBlock* block) throw ();
	void insert(MemFreeBlock* block) throw ();
	void* allocRaw(size_t length) throw (OOM_EXCEPTION);
	void validateFreeList(void) throw ();
	void validateBigBlock(MemBigObject* block) throw ();
	static void release(void* block) throw ();
	static void releaseRaw(bool destroying, void* block, size_t size, bool use_cache = true) throw ();

#ifdef USE_VALGRIND
	// Circular FIFO buffer of read/write protected blocks pending free operation
	MemBlock* delayedFree[DELAYED_FREE_COUNT];
	size_t delayedFreeCount;
	size_t delayedFreePos;
#endif

public:
	static void deletePool(MemoryPool* pool);
	static void globalFree(void* block) throw ();
	void* calloc(size_t size
#ifdef DEBUG_GDS_ALLOC
		, const char* fileName, int line
#endif
	) throw (OOM_EXCEPTION);
	static void deallocate(void* block) throw ();
	void validate(void) throw ();

#ifdef LIBC_CALLS_NEW
	static void* globalAlloc(size_t s) throw (OOM_EXCEPTION);
#else
	static void* globalAlloc(size_t s) throw (OOM_EXCEPTION)
	{
		return defaultMemoryManager->allocate(s
#ifdef DEBUG_GDS_ALLOC
			, __FILE__, __LINE__
#endif
		);
	}
#endif // LIBC_CALLS_NEW

	// Create a memory pool instance
	static MemoryPool* createPool(MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group);

	// Set the context pool for the current thread of execution
	static MemoryPool* setContextPool(MemoryPool* newPool);

	// Get the context pool for the current thread of execution
	static MemoryPool* getContextPool();

	// Set the statistics group for the pool. Usage counters will be subtracted
	// from the previously set group and added to the new one
	void setStatsGroup(MemoryStats& stats) throw ();

	// Just a helper for AutoPtr.
	static void clear(MemoryPool* pool)
	{
		deletePool(pool);
	}

	// Initialize and finalize global memory pool
	static void init();
	static void cleanup();

	// Statistics
	void increment_usage(size_t size) throw ()
	{
		stats->increment_usage(size);
		used_memory += size;
	}

	void decrement_usage(size_t size) throw ()
	{
		stats->decrement_usage(size);
		used_memory -= size;
	}

	void increment_mapping(size_t size) throw ()
	{
		stats->increment_mapping(size);
		mapped_memory += size;
	}

	void decrement_mapping(size_t size) throw ()
	{
		stats->decrement_mapping(size);
		mapped_memory -= size;
	}

	// Print out pool contents. This is a debugging routine
	void print_contents(FILE*, bool = false, const char* filter_path = 0) throw ();
	// The same routine, but more easily callable from the debugger
	void print_contents(const char* filename, bool = false, const char* filter_path = 0) throw ();
};

} // namespace Firebird

static inline Firebird::MemoryPool* getDefaultMemoryPool() throw ()
{
	fb_assert(Firebird::MemoryPool::defaultMemoryManager);
	return Firebird::MemoryPool::defaultMemoryManager;
}
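
// A usage sketch (hypothetical, simplified): pools are created and destroyed
// through the static factory functions, and a block may be returned through
// the static deallocate(), which finds the owning pool via the block header.
//
//   Firebird::MemoryPool* pool = Firebird::MemoryPool::createPool();
//   void* buf = pool->allocate(256);
//   // ... use buf ...
//   Firebird::MemoryPool::deallocate(buf);
//   Firebird::MemoryPool::deletePool(pool);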

namespace Firebird {

// Class intended to manage the execution-context pool stack.
// Declare an instance of this class when you need to set a new context pool;
// the previous one is restored automatically as soon as the holder variable goes out of scope
class ContextPoolHolder
{
public:
	explicit ContextPoolHolder(MemoryPool* newPool)
	{
		savedPool = MemoryPool::setContextPool(newPool);
	}
	~ContextPoolHolder()
	{
		MemoryPool::setContextPool(savedPool);
	}
private:
	MemoryPool* savedPool;
};
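
// A usage sketch (hypothetical function): the holder is a classic RAII guard.
//
//   void doWork(MemoryPool* workPool)
//   {
//       ContextPoolHolder context(workPool);
//       // MemoryPool::getContextPool() now returns workPool ...
//   }   // ... and the previous context pool is restored here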

// Template enabling common use of the old- and new-style pool control code;
// to be dropped when the old-style code goes away
template <typename SubsystemThreadData, typename SubsystemPool>
class SubsystemContextPoolHolder : public ContextPoolHolder
{
public:
	SubsystemContextPoolHolder <SubsystemThreadData, SubsystemPool>
	(
		SubsystemThreadData* subThreadData,
		SubsystemPool* newPool
	)
		: ContextPoolHolder(newPool),
		savedThreadData(subThreadData),
		savedPool(savedThreadData->getDefaultPool())
	{
		savedThreadData->setDefaultPool(newPool);
	}
	~SubsystemContextPoolHolder()
	{
		savedThreadData->setDefaultPool(savedPool);
	}
private:
	SubsystemThreadData* savedThreadData;
	SubsystemPool* savedPool;
};

} // namespace Firebird

using Firebird::MemoryPool;

// Global versions of operators new and delete
inline void* operator new(size_t s) throw (OOM_EXCEPTION)
{
	return MemoryPool::globalAlloc(s);
}
inline void* operator new[](size_t s) throw (OOM_EXCEPTION)
{
	return MemoryPool::globalAlloc(s);
}

inline void operator delete(void* mem) throw ()
{
	MemoryPool::globalFree(mem);
}
inline void operator delete[](void* mem) throw ()
{
	MemoryPool::globalFree(mem);
}

#ifdef DEBUG_GDS_ALLOC
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line) throw (OOM_EXCEPTION)
{
	return pool.allocate(s, file, line);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line) throw (OOM_EXCEPTION)
{
	return pool.allocate(s, file, line);
}
#define FB_NEW(pool) new(pool, __FILE__, __LINE__)
#define FB_NEW_RPT(pool, count) new(pool, count, __FILE__, __LINE__)
#else
inline void* operator new(size_t s, Firebird::MemoryPool& pool) throw (OOM_EXCEPTION)
{
	return pool.allocate(s);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool) throw (OOM_EXCEPTION)
{
	return pool.allocate(s);
}
#define FB_NEW(pool) new(pool)
#define FB_NEW_RPT(pool, count) new(pool, count)
#endif
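
// A usage sketch (hypothetical class): FB_NEW records __FILE__/__LINE__ in
// debug builds and degrades to a plain pool placement new otherwise. Plain
// delete works because the global operator delete above routes the block
// back through MemoryPool::globalFree().
//
//   class Request
//   {
//   public:
//       explicit Request(int id) : requestId(id) {}
//       int requestId;
//   };
//
//   Request* req = FB_NEW(*getDefaultMemoryPool()) Request(1);
//   delete req;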

#ifndef USE_SYSTEM_NEW
// We must define placement operators NEW & DELETE ourselves
inline void* operator new(size_t s, void* place) throw ()
{
	return place;
}
inline void* operator new[](size_t s, void* place) throw ()
{
	return place;
}
inline void operator delete(void*, void*) throw ()
{ }
inline void operator delete[](void*, void*) throw ()
{ }
#endif
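
// A usage sketch: placement new constructs an object in caller-provided
// storage, and the destructor must then be invoked explicitly (the buffer
// below is a hypothetical example and is assumed suitably aligned).
//
//   char buffer[sizeof(Firebird::MemoryStats)];
//   Firebird::MemoryStats* stats = new(buffer) Firebird::MemoryStats();
//   stats->~MemoryStats();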

namespace Firebird
{
	// Global storage makes it possible for new and delete on classes derived
	// from it to behave traditionally, i.e. to take memory from the permanent pool.
	class GlobalStorage
	{
	public:
		void* operator new(size_t size)
		{
			return getDefaultMemoryPool()->allocate(size);
		}

		void operator delete(void* mem)
		{
			getDefaultMemoryPool()->deallocate(mem);
		}

		MemoryPool& getPool() const
		{
			return *getDefaultMemoryPool();
		}
	};
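
	// A usage sketch (hypothetical class): deriving from GlobalStorage gives a
	// class new/delete operators backed by the default memory pool.
	//
	//   class Registry : public GlobalStorage
	//   {
	//       // ...
	//   };
	//
	//   Registry* r = new Registry();   // allocated from the default pool
	//   delete r;                       // returned to the default pool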


	// Permanent storage is used as a base class for all objects that perform
	// memory allocation in methods other than their constructors.
	// Permanent means that the pool which will later be used for such
	// allocations must be passed explicitly to every constructor of such an object.
	class PermanentStorage
	{
	protected:
		explicit PermanentStorage(MemoryPool& p) : pool(p) { }

	public:
		MemoryPool& getPool() const { return pool; }

	private:
		MemoryPool& pool;
	};
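
	// A usage sketch (hypothetical class): members allocated outside the
	// constructor draw on the pool captured at construction time.
	//
	//   class Cache : public PermanentStorage
	//   {
	//   public:
	//       explicit Cache(MemoryPool& p) : PermanentStorage(p) { }
	//       void* grow(size_t n) { return getPool().allocate(n); }
	//   };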

	// Automatic storage is used as a base class for objects that may have
	// constructors without an explicit MemoryPool parameter. In that case
	// AutoStorage passes the automatic memory pool to PermanentStorage.
	// To keep this operation safe, the trick is possible only for local
	// (on-stack) variables.
	class AutoStorage : public PermanentStorage
	{
	private:
#if defined(DEV_BUILD)
		void ProbeStack() const;
#endif
	public:
		static MemoryPool& getAutoMemoryPool();
	protected:
		AutoStorage()
			: PermanentStorage(getAutoMemoryPool())
		{
#if defined(DEV_BUILD)
			ProbeStack();
#endif
		}
		explicit AutoStorage(MemoryPool& p) : PermanentStorage(p) { }
	};
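
	// A usage sketch (hypothetical class): a default-constructed AutoStorage
	// object picks up its pool from getAutoMemoryPool(), so it must live on
	// the stack (ProbeStack() asserts this in DEV_BUILD).
	//
	//   class Scratch : public AutoStorage
	//   {
	//       // default constructor allowed - pool comes from getAutoMemoryPool()
	//   };
	//
	//   void f()
	//   {
	//       Scratch s;                       // OK: local variable
	//       MemoryPool& p = s.getPool();
	//   }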

	typedef AutoPtr<MemoryPool, MemoryPool> AutoMemoryPool;

} // namespace Firebird


#endif // CLASSES_ALLOC_H