//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
//    Implements the class methods for PoolAllocator and Allocation classes.
//
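//
// Illustrative usage (a sketch, not code from this file; assumes the
// constructor defaults declared in PoolAlloc.h). Allocations are grouped into
// push()/pop() scopes and released en masse, so there is no per-allocation
// free:
//
//     angle::PoolAllocator pool;
//     pool.push();
//     void *buffer = pool.allocate(128);  // released by the matching pop()
//     // ... use buffer ...
//     pool.pop();
//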

#include "common/PoolAlloc.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>  // for the memset() calls below

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/platform.h"
#include "common/tls.h"

namespace angle
{
// If we are using guard blocks, we must track each individual allocation. If we aren't using guard
// blocks, these never get instantiated, so won't have any impact.

class Allocation
{
  public:
    Allocation(size_t size, unsigned char *mem, Allocation *prev = nullptr)
        : mSize(size), mMem(mem), mPrevAlloc(prev)
    {
        // Allocations are bracketed:
        //
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        //
        // This would be cleaner with if (kGuardBlockSize)..., but that makes the compiler print
        // warnings about 0 length memsets, even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, mSize);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    void checkAllocList() const;

    static size_t AlignedHeaderSize(uint8_t *allocationBasePtr, size_t alignment)
    {
        // Make sure that the data offset after the header is aligned to the given alignment.
        size_t base = reinterpret_cast<size_t>(allocationBasePtr);
        return rx::roundUpPow2(base + kGuardBlockSize + HeaderSize(), alignment) - base;
    }

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data and any necessary alignments.
    static size_t AllocationSize(uint8_t *allocationBasePtr,
                                 size_t size,
                                 size_t alignment,
                                 size_t *preAllocationPaddingOut)
    {
        // The allocation will be laid out as such:
        //
        //                       Aligned to |alignment|
        //                                 ^
        //  preAllocationPaddingOut        |
        //          ___^___                |
        //         /       \               |
        //         <padding>[header][guard][data][guard]
        //         \___________ __________/
        //                     V
        //                 dataOffset
        //
        // Note that alignment is at least as much as a pointer alignment, so the pointers in the
        // header are also necessarily aligned appropriately.
        //
        size_t dataOffset = AlignedHeaderSize(allocationBasePtr, alignment);
        *preAllocationPaddingOut = dataOffset - HeaderSize() - kGuardBlockSize;

        return dataOffset + size + kGuardBlockSize;
    }
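
    // Worked example for AllocationSize() (illustrative; assumes a 64-bit build
    // with guard blocks enabled, so HeaderSize() == sizeof(Allocation) == 24 and
    // kGuardBlockSize == 16). For allocationBasePtr == 0x1000, size == 100 and
    // alignment == 16:
    //   dataOffset               = roundUpPow2(0x1000 + 16 + 24, 16) - 0x1000 = 48
    //   *preAllocationPaddingOut = 48 - 24 - 16 = 8
    //   return value             = 48 + 100 + 16 = 164
    // The user data then starts at 0x1030, which is 16-byte aligned.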

    // Given memory pointing to |header|, returns |data|.
    static uint8_t *GetDataPointer(uint8_t *memory, size_t alignment)
    {
        uint8_t *alignedPtr = memory + kGuardBlockSize + HeaderSize();

        // |memory| must be aligned already such that user data is aligned to |alignment|.
        ASSERT((reinterpret_cast<uintptr_t>(alignedPtr) & (alignment - 1)) == 0);

        return alignedPtr;
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    void checkAlloc() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mMem + HeaderSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + mSize; }

    size_t mSize;            // size of the user data area
    unsigned char *mMem;     // beginning of our allocation (points to header)
    Allocation *mPrevAlloc;  // prior allocation in the chain

    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal = 0xfe;
    static constexpr unsigned char kUserDataFill = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    static constexpr size_t kGuardBlockSize = 16;
    static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
    static constexpr size_t kGuardBlockSize = 0;
    static constexpr size_t HeaderSize() { return 0; }
#endif
};

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
class PageHeader
{
  public:
    PageHeader(PageHeader *nextPage, size_t pageCount)
        : nextPage(nextPage),
          pageCount(pageCount)
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
          ,
          lastAllocation(nullptr)
#    endif
    {}

    ~PageHeader()
    {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        if (lastAllocation)
        {
            lastAllocation->checkAllocList();
        }
#    endif
    }

    PageHeader *nextPage;
    size_t pageCount;
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    Allocation *lastAllocation;
#    endif
};
#endif

//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
    : mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
      mPageSize(growthIncrement),
      mFreeList(nullptr),
      mInUseList(nullptr),
      mNumCalls(0),
      mTotalBytes(0),
#endif
      mLocked(false)
{
    initialize(growthIncrement, allocationAlignment);
}

void PoolAllocator::initialize(int pageSize, int alignment)
{
    mAlignment = alignment;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    mPageSize = pageSize;
    mPageHeaderSkip = sizeof(PageHeader);

    // Alignment == 1 is a special fast-path where fastAllocate() is enabled
    if (mAlignment != 1)
    {
#endif
        // Adjust mAlignment to be at least pointer aligned and
        // power of 2.
        //
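        // For example (illustrative): a requested alignment of 4 is raised to
        // sizeof(void *) (8 on a typical 64-bit build), and an alignment of 24
        // is rounded up to the next power of two, 32.
        //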
        size_t minAlign = sizeof(void *);
        if (mAlignment < minAlign)
        {
            mAlignment = minAlign;
        }
        mAlignment = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    }
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (mPageSize < 4 * 1024)
    {
        mPageSize = 4 * 1024;
    }

    //
    // A large mCurrentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    mCurrentPageOffset = mPageSize;

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    while (mInUseList)
    {
        PageHeader *next = mInUseList->nextPage;
        mInUseList->~PageHeader();
        delete[] reinterpret_cast<char *>(mInUseList);
        mInUseList = next;
    }
    // We should not check the guard blocks here, because we did it already
    // when the block was placed into the free list.
    //
    while (mFreeList)
    {
        PageHeader *next = mFreeList->nextPage;
        delete[] reinterpret_cast<char *>(mFreeList);
        mFreeList = next;
    }
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}

//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
                                 unsigned char val,
                                 const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    for (size_t x = 0; x < kGuardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // We don't print the assert message. It's here just to be helpful.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    AllocState state = {mCurrentPageOffset, mInUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

// Do a mass-deallocation of all the individual allocations that have occurred since the last
// push(), or since the last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
void PoolAllocator::pop()
{
    if (mStack.size() < 1)
    {
        return;
    }

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    PageHeader *page = mStack.back().page;
    mCurrentPageOffset = mStack.back().offset;

    while (mInUseList != page)
    {
        // Invoke the destructor, which checks the allocation list when guard blocks are enabled.
        mInUseList->~PageHeader();

        PageHeader *nextInUse = mInUseList->nextPage;
        if (mInUseList->pageCount > 1)
        {
            // Multi-page allocations are returned to the OS ...
            delete[] reinterpret_cast<char *>(mInUseList);
        }
        else
        {
            // ... while single pages are recycled via the free list.
            mInUseList->nextPage = mFreeList;
            mFreeList = mInUseList;
        }
        mInUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
    while (mStack.size() > 0)
    {
        pop();
    }
}

void *PoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++mNumCalls;
    mTotalBytes += numBytes;

    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    // Integer overflow is unexpected.
    ASSERT(allocationSize >= numBytes);

    // Do the allocation, most likely case first, for efficiency.
    if (allocationSize <= mPageSize - mCurrentPageOffset)
    {
        // There is enough room to allocate from the current page at mCurrentPageOffset.
        uint8_t *memory = currentPagePtr + preAllocationPadding;
        mCurrentPageOffset += allocationSize;

        return initializeAllocation(memory, numBytes);
    }

    if (allocationSize > mPageSize - mPageHeaderSkip)
    {
        // If the allocation is larger than a whole page, do a multi-page allocation. These are
        // not mixed with the others. The OS is efficient in allocating and freeing multiple
        // pages.

        // We don't know what the alignment of the newly allocated memory will be, so
        // conservatively allocate enough memory for up to mAlignment extra bytes being needed.
        allocationSize = Allocation::AllocationSize(reinterpret_cast<uint8_t *>(mPageHeaderSkip),
                                                    numBytes, mAlignment, &preAllocationPadding);

        size_t numBytesToAlloc = allocationSize + mPageHeaderSkip + mAlignment;

        // Integer overflow is unexpected.
        ASSERT(numBytesToAlloc >= allocationSize);

        PageHeader *memory = reinterpret_cast<PageHeader *>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
        {
            return nullptr;
        }

        // Use placement-new to initialize header
        new (memory) PageHeader(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
        mInUseList = memory;

        // Make next allocation come from a new page
        mCurrentPageOffset = mPageSize;

        // Now that we actually have the pointer, make sure the data pointer will be aligned.
        currentPagePtr = reinterpret_cast<uint8_t *>(memory) + mPageHeaderSkip;
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

        return initializeAllocation(currentPagePtr + preAllocationPadding, numBytes);
    }

    uint8_t *newPageAddr = allocateNewPage(numBytes);
    return initializeAllocation(newPageAddr, numBytes);

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)

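    // With the pool disabled, each allocation is an individual malloc() freed by
    // pop() or the destructor. Over-allocating by mAlignment - 1 bytes guarantees
    // that an address aligned to mAlignment with numBytes of usable space exists
    // inside the block; e.g. (illustrative) with mAlignment == 16, a malloc()
    // result of 0x1008 rounds up to 0x1010, wasting at most 15 bytes.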
    void *alloc = malloc(numBytes + mAlignment - 1);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc = rx::roundUpPow2<intptr_t>(intAlloc, mAlignment);
    return reinterpret_cast<void *>(intAlloc);
#endif
}

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
uint8_t *PoolAllocator::allocateNewPage(size_t numBytes)
{
    // Need a simple page to allocate from. Pick a page from the free list, if any. Otherwise need
    // to make the allocation.
    PageHeader *memory;
    if (mFreeList)
    {
        memory = mFreeList;
        mFreeList = mFreeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<PageHeader *>(::new char[mPageSize]);
        if (memory == nullptr)
        {
            return nullptr;
        }
    }

    // Use placement-new to initialize header
    new (memory) PageHeader(mInUseList, 1);
    mInUseList = memory;

    // Leave room for the page header.
    mCurrentPageOffset = mPageHeaderSkip;
    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    mCurrentPageOffset += allocationSize;

    // The new allocation is made after the page header and any alignment required before it.
    return reinterpret_cast<uint8_t *>(mInUseList) + mPageHeaderSkip + preAllocationPadding;
}

void *PoolAllocator::initializeAllocation(uint8_t *memory, size_t numBytes)
{
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    new (memory) Allocation(numBytes, memory, mInUseList->lastAllocation);
    mInUseList->lastAllocation = reinterpret_cast<Allocation *>(memory);
#    endif

    return Allocation::GetDataPointer(memory, mAlignment);
}
#endif

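// Note: as implemented, lock()/unlock() provide no thread safety; they toggle
// a debug flag so the ASSERT(!mLocked) in allocate() can catch allocations
// made during a phase in which the pool is expected to stay unchanged.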
void PoolAllocator::lock()
{
    ASSERT(!mLocked);
    mLocked = true;
}

void PoolAllocator::unlock()
{
    ASSERT(mLocked);
    mLocked = false;
}

//
// Check all allocations in a list for damage by calling check on each.
//
void Allocation::checkAllocList() const
{
    for (const Allocation *alloc = this; alloc != nullptr; alloc = alloc->mPrevAlloc)
    {
        alloc->checkAlloc();
    }
}

}  // namespace angle