//
// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#include "../Include/Common.h"
#include "../Include/PoolAlloc.h"

namespace glslang {

namespace {
thread_local TPoolAllocator* threadPoolAllocator = nullptr;

TPoolAllocator* GetDefaultThreadPoolAllocator()
{
    thread_local TPoolAllocator defaultAllocator;
    return &defaultAllocator;
}
} // anonymous namespace

// Return the thread-specific current pool.
TPoolAllocator& GetThreadPoolAllocator()
{
    return *(threadPoolAllocator ? threadPoolAllocator : GetDefaultThreadPoolAllocator());
}

// Set the thread-specific current pool.
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
{
    threadPoolAllocator = poolAllocator;
}
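
// Illustrative usage (a hypothetical caller, not part of this file): install
// a per-thread pool for a unit of work, then restore whatever was current.
//
//     TPoolAllocator* previous = &GetThreadPoolAllocator();
//     TPoolAllocator workPool;
//     SetThreadPoolAllocator(&workPool);
//     // ... allocations via GetThreadPoolAllocator() now come from workPool ...
//     SetThreadPoolAllocator(previous);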

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(nullptr),
    inUseList(nullptr),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // a power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;
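
    // For example (illustrative): on a 64-bit target (minAlign == 8), a
    // requested allocationAlignment of 20 first drops to 20 & ~7 == 16,
    // the power-of-2 loop leaves it at 16, and alignmentMask becomes 15.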

    //
    // Align header skip.
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }

    push();
}
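
// For example (illustrative): TPoolAllocator pool(64*1024, 16) requests
// 64KB pages with 16-byte-aligned allocations; a growthIncrement below
// 4KB would be clamped up to 4KB by the constructor above.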

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // Always delete the free list memory; it can't be (correctly)
    // referenced at this point, whether the pool allocator was
    // global or not.  We don't check the guard blocks here,
    // because we already did when each block was placed onto
    // the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

//
// Check a single guard block for damage.
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            const int maxSize = 80;
            char assertMsg[maxSize];

            // We don't print the assert message.  It's here just to be helpful.
            snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                      locText, size, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#else
    assert(guardBlockSize == 0);
#endif
}
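
// Note: guard-block checking is compiled in only when GUARD_BLOCKS is
// defined at build time; in other builds guardBlockSize is expected to
// be zero and the body above reduces to the assert.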

//
// Mark a new allocation scope; a later pop() mass-frees everything
// allocated after this point.
//
void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        tHeader* nextInUse = inUseList->nextPage;
        size_t pageCount = inUseList->pageCount;

        // This technically ends the lifetime of the header as a C++ object,
        // but we still control the memory and will reuse it.
        inUseList->~tHeader(); // currently, just a debug allocation checker

        if (pageCount > 1) {
            delete [] reinterpret_cast<char*>(inUseList);
        } else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}
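
// Illustrative usage sketch (a hypothetical caller): scoped mass deallocation.
//
//     TPoolAllocator& pool = GetThreadPoolAllocator();
//     pool.push();                    // open a scope
//     void* a = pool.allocate(100);   // both allocations live until pop()
//     void* b = pool.allocate(200);
//     pool.pop();                     // recycles every page used since push()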

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}

void* TPoolAllocator::allocate(size_t numBytes)
{
    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In a release build,
    // guardBlockSize == 0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);

    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be inlined at some point.
    //
    if (currentPageOffset + allocationSize <= pageSize) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize + headerSkip > pageSize) {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
            return nullptr;

        // Use placement new to initialize the header.
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make the next allocation come from a new page

        // No guard blocks for multi-page allocations (yet).
        return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
    }

    //
    // Need a fresh single page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == nullptr)
            return nullptr;
    }

    // Use placement new to initialize the header.
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
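
// Worked example (illustrative, assuming a 64-bit release build where
// headerSkip == 16, alignment == 16, and TAllocation::allocationSize(100)
// comes to 124 bytes): the first allocate(100) on a fresh page returns
// page + 16, and currentPageOffset becomes (16 + 124 + 15) & ~15 == 144,
// the next 16-byte boundary.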

//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != nullptr; alloc = alloc->prevAlloc)
        alloc->check();
}

} // end namespace glslang