//
// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifndef NDEBUG
#  define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//     typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
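// For example, a class can route its allocations through the pool by
// supplying its own operator new and operator delete (a sketch; TMyNode is
// an illustrative name, not part of this header):
//
//     class TMyNode {
//     public:
//         void* operator new(size_t size) { return GetThreadPoolAllocator().allocate(size); }
//         void operator delete(void*) { }   // memory is reclaimed wholesale by pop()
//     };
//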

#include <cstddef>
#include <cstring>
#include <new>      // for placement new
#include <vector>

namespace glslang {

// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so they have no impact.
//

class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = nullptr) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#       ifdef GUARD_BLOCKS
            memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
            memset(data(),      userDataFill,       size);
            memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#       endif
    }

    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;                  // size of the user data area
    unsigned char* mem;           // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;       // prior allocation in the chain

    static inline constexpr unsigned char guardBlockBeginVal = 0xfb;
    static inline constexpr unsigned char guardBlockEndVal = 0xfe;
    static inline constexpr unsigned char userDataFill = 0xcd;

#   ifdef GUARD_BLOCKS
    static inline constexpr size_t guardBlockSize = 16;
#   else
    static inline constexpr size_t guardBlockSize = 0;
#   endif

#   ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#   else
    inline static size_t headerSize() { return 0; }
#   endif
};

//
// There are several stacks.  One tracks the user's pushes and pops, and is
// not yet implemented.  The others are simply repositories of free pages
// or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used need not match the underlying OS page size, but
// making it about that size, or a multiple of it, is likely the most
// efficient choice.
//
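// Roughly, each page obtained from the OS looks like this (an illustrative
// sketch; the padding depends on the configured alignment):
//
//     [tHeader][padding to headerSkip][allocation][allocation]...[free space]
//
// currentPageOffset is the offset, within the page at the head of inUseList,
// at which the next allocation will be made.
//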
class TPoolAllocator {
public:
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory; call pop().
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, all memory allocated since the first
    // allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns nullptr if no
    // memory is available, otherwise a properly aligned pointer to
    // 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate().  The point of this class is that the user
    // can skip individual deallocation: everything is freed at once by
    // calling pop(), which also removes a large class of memory-leak
    // problems.
    //

protected:
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
        lastAllocation(nullptr),
#endif
        nextPage(nextPage), pageCount(pageCount) { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
        tHeader* nextPage;
        size_t pageCount;
    };

    struct tAllocState {
        size_t offset;      // saved currentPageOffset at the time of the push()
        tHeader* page;      // saved head of inUseList at the time of the push()
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
    void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif

        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;        // granularity of allocation from the OS
    size_t alignment;       // all returned allocations will be aligned at
                            //      this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;      // amount of memory to skip to make room for the
                            //      header (basically, size of header, rounded
                            //      up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;      // list of popped memory
    tHeader* inUseList;     // list of all memory currently being used
    tAllocStack stack;      // stack of where to allocate from, to partition pool

    int numCalls;           // just an interesting statistic
    size_t totalBytes;      // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);  // don't allow default copy constructor
};

//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetThreadPoolAllocator();
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
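
// A typical usage pattern with the global (per-thread) allocator looks like
// this (a sketch; SomeNode is an illustrative type, not part of this header):
//
//     TPoolAllocator& pool = GetThreadPoolAllocator();
//     pool.push();                                    // open a new allocation scope
//     void* mem = pool.allocate(sizeof(SomeNode));    // many small allocations...
//     // ... use the memory; no individual deallocation is needed ...
//     pool.pop();                                     // frees everything since push()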

//
// This STL-compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It uses the pools for allocation and does no deallocation, but it still
// supports destruction.
//
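// For example, a container whose element storage should come from the pool
// can be declared like this (a sketch):
//
//     std::vector<int, pool_allocator<int>> v;
//     v.push_back(42);    // element storage is reclaimed later by TPoolAllocator::pop()
//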
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T *pointer;
    typedef const T *const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;
    template<class Other>
        struct rebind {
            typedef pool_allocator<Other> other;
        };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(GetThreadPoolAllocator()) { }
    pool_allocator(TPoolAllocator& a) : allocator(a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template<class Other>
        pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }

    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }

    void deallocate(void*, size_type) { }
    void deallocate(pointer, size_type) { }

    pointer _Charalloc(size_t n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n)); }

    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    TPoolAllocator& getAllocator() const { return allocator; }

    pool_allocator select_on_container_copy_construction() const { return pool_allocator{}; }

protected:
    pool_allocator& operator=(const pool_allocator&) { return *this; }
    TPoolAllocator& allocator;
};

} // end namespace glslang

#endif // _POOLALLOC_INCLUDED_