//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/translator/PoolAlloc.h"

#include "compiler/translator/InitializeGlobals.h"

#include "common/platform.h"
#include "common/angleutils.h"
#include "common/tls.h"

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

TLSIndex PoolIndex = TLS_INVALID_INDEX;

bool InitializePoolIndex()
{
    assert(PoolIndex == TLS_INVALID_INDEX);

    PoolIndex = CreateTLSIndex();
    return PoolIndex != TLS_INVALID_INDEX;
}

void FreePoolIndex()
{
    assert(PoolIndex != TLS_INVALID_INDEX);

    DestroyTLSIndex(PoolIndex);
    PoolIndex = TLS_INVALID_INDEX;
}

TPoolAllocator* GetGlobalPoolAllocator()
{
    assert(PoolIndex != TLS_INVALID_INDEX);
    return static_cast<TPoolAllocator*>(GetTLSValue(PoolIndex));
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    assert(PoolIndex != TLS_INVALID_INDEX);
    SetTLSValue(PoolIndex, poolAllocator);
}

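//
// Usage sketch (illustrative only, not part of this file): the per-thread
// global allocator is typically installed once per compile thread, e.g.
//
//     InitializePoolIndex();               // once at library initialization
//     TPoolAllocator allocator(8 * 1024, 16);
//     SetGlobalPoolAllocator(&allocator);  // current allocator for this thread
//     // ... allocate through GetGlobalPoolAllocator() ...
//     SetGlobalPoolAllocator(NULL);
//     FreePoolIndex();                     // once at library shutdown
//
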
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // a power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;
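    // Worked example (illustrative): with sizeof(void*) == 8, a requested
    // allocationAlignment of 6 is masked down to 0, clamped up to minAlign (8),
    // and the power-of-2 loop leaves alignment == 8 and alignmentMask == 7.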

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}

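//
// Construction sketch (illustrative only): a pool that grows in 64 KB pages
// with 16-byte alignment could be created as
//
//     TPoolAllocator pool(64 * 1024, 16);
//
// Growth increments below 4 KB are raised to 4 KB by the constructor above.
//
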
TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // We should not check the guard blocks here, because we did it
    // already when the block was placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

// Out-of-line definitions of the guard block constants (originally kept this
// way to support MSVC++ 6.0).
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif

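//
// Descriptive note: when GUARD_BLOCKS is defined, each allocation is bracketed
// as described in allocate() below, [guardblock][allocation][guardblock]. The
// begin/end values above are written into the guard regions and userDataFill
// marks fresh user data, so checkGuardBlock() can later detect overruns and
// underruns. The exact layout is defined in PoolAlloc.h.
//
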
//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
#if defined(_MSC_VER)
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                     locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1) {
            delete [] reinterpret_cast<char*>(inUseList);
        } else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}

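//
// Usage sketch (illustrative only): allocations are grouped between push()
// and pop() so that everything allocated for one phase can be released at
// once, e.g.
//
//     TPoolAllocator& pool = *GetGlobalPoolAllocator();
//     pool.push();
//     void* scratch = pool.allocate(128);  // valid until the matching pop()
//     // ... more allocations ...
//     pool.pop();                          // releases everything since push()
//
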
void* TPoolAllocator::allocate(size_t numBytes)
{
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In a release build,
    // guardBlockSize = 0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}

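//
// Allocation sketch (illustrative only; "pool" here is any TPoolAllocator,
// e.g. the current global one): callers request raw bytes and use the memory
// in place. Note that pop() never runs destructors for user objects, so pooled
// storage is best suited to data that does not need destruction.
//
//     void* mem = pool.allocate(sizeof(int) * 16);
//     int* values = static_cast<int*>(mem);  // usable until the next pop()
//
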
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}