//
// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#include "../Include/Common.h"
#include "../Include/PoolAlloc.h"

#include "../Include/InitializeGlobals.h"
#include "../OSDependent/osinclude.h"

namespace glslang {

// Process-wide TLS index
OS_TLSIndex PoolIndex;

// Return the thread-specific current pool.
TPoolAllocator& GetThreadPoolAllocator()
{
    return *static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
}

// Set the thread-specific current pool.
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
{
    OS_SetTLSValue(PoolIndex, poolAllocator);
}

// Process-wide set up of the TLS pool storage.
bool InitializePoolIndex()
{
    // Allocate a TLS index.
    if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
        return false;

    return true;
}
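
//
// Usage sketch (hypothetical caller, not part of this file): the process
// calls InitializePoolIndex() once before any threads use pools, then each
// thread installs its own allocator before making pool allocations:
//
//     if (! InitializePoolIndex())
//         return false;                       // no TLS slot available
//
//     TPoolAllocator threadPool(64*1024, 16); // growth increment, alignment
//     SetThreadPoolAllocator(&threadPool);
//     ...
//     TPoolAllocator& pool = GetThreadPoolAllocator();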

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(nullptr),
    inUseList(nullptr),
    numCalls(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }

    push();
}
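
//
// Worked example of the rounding above, assuming a 64-bit build where
// sizeof(void*) == 8: TPoolAllocator(1000, 12) clamps pageSize up to
// 4096, masks alignment 12 down to 8, confirms 8 is already a power of
// two, and ends with alignment == 8 and alignmentMask == 7.
//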

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // Always delete the free list memory; nothing can still be
    // (correctly) referencing it, whether the pool allocator was
    // global or not. We do not check the guard blocks here,
    // because that was already done when each block was placed
    // onto the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
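
//
// The differing fill values let a debug build tell the two guard blocks
// apart: a stomped 0xfb pattern means the damage landed before the user
// data, a stomped 0xfe pattern means it landed after, and 0xcd in the
// user data marks bytes that were handed out but never written.
//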

//
// Check a single guard block for damage
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            const int maxSize = 80;
            char assertMsg[maxSize];

            // We don't print the assert message. It's here just to be helpful.
            snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#else
    assert(guardBlockSize == 0);
#endif
}
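
//
// The check above is byte-by-byte, so a single flipped byte in either
// guard block trips the assert. When GUARD_BLOCKS is not defined,
// guardBlockSize is 0 and the whole check compiles down to nothing.
//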

void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        tHeader* nextInUse = inUseList->nextPage;
        size_t pageCount = inUseList->pageCount;

        // This technically ends the lifetime of the header as a C++ object,
        // but we still control the memory and will reuse it.
        inUseList->~tHeader(); // currently, just a debug allocation checker

        if (pageCount > 1) {
            delete [] reinterpret_cast<char*>(inUseList);
        } else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}
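
//
// Typical scoped use (hypothetical caller): everything allocated between
// a push() and its matching pop() is reclaimed in one step, with the
// single-page blocks recycled through the free list:
//
//     TPoolAllocator& pool = GetThreadPoolAllocator();
//     pool.push();
//     void* scratch = pool.allocate(256); // no matching free() call needed
//     ...
//     pool.pop();                         // reclaims scratch and everything else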

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}

void* TPoolAllocator::allocate(size_t numBytes)
{
    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);

    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (currentPageOffset + allocationSize <= pageSize) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize + headerSkip > pageSize) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
            return nullptr;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize; // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == nullptr)
            return nullptr;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
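
//
// To illustrate the three paths above with the minimum 4096-byte page:
// a 100-byte request normally comes straight out of the current page;
// a 16K request takes the multi-page path and gets a dedicated block
// that pop() later returns to the OS; and a 100-byte request that no
// longer fits in the current page pulls a fresh page from the free list
// (or the OS) and allocates from just past its header.
//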

//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != nullptr; alloc = alloc->prevAlloc)
        alloc->check();
}

} // end namespace glslang