//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
//    Implements the class methods for PoolAllocator and Allocation classes.
//

#include "common/PoolAlloc.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>  // memset

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/platform.h"
#include "common/tls.h"

#if defined(ANGLE_WITH_ASAN)
#    include <sanitizer/asan_interface.h>
#endif

namespace angle
{
// If we are using guard blocks, we must track each individual allocation. If we aren't using
// guard blocks, these never get instantiated, so they have no impact.

class Allocation
{
  public:
    Allocation(size_t size, unsigned char *mem, Allocation *prev = nullptr)
        : mSize(size), mMem(mem), mPrevAlloc(prev)
    {
        // Allocations are bracketed:
        //
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        //
        // This would be cleaner with if (kGuardBlockSize)..., but that makes the compiler print
        // warnings about 0 length memsets, even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, mSize);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    void checkAllocList() const;

    static size_t AlignedHeaderSize(uint8_t *allocationBasePtr, size_t alignment)
    {
        // Make sure that the data offset after the header is aligned to the given alignment.
        size_t base = reinterpret_cast<size_t>(allocationBasePtr);
        return rx::roundUpPow2(base + kGuardBlockSize + HeaderSize(), alignment) - base;
    }

    // Return the total size needed to accommodate a user buffer of |size|,
    // plus our tracking data and any necessary alignment padding.
    static size_t AllocationSize(uint8_t *allocationBasePtr,
                                 size_t size,
                                 size_t alignment,
                                 size_t *preAllocationPaddingOut)
    {
        // The allocation will be laid out as such:
        //
        //                               Aligned to |alignment|
        //                                   ^
        //    preAllocationPaddingOut        |
        //            ___^___                |
        //           /       \               |
        //           <padding>[header][guard][data][guard]
        //           \___________ __________/
        //                       V
        //                   dataOffset
        //
        // Note that alignment is at least as much as a pointer alignment, so the pointers in the
        // header are also necessarily aligned appropriately.
        //
        size_t dataOffset        = AlignedHeaderSize(allocationBasePtr, alignment);
        *preAllocationPaddingOut = dataOffset - HeaderSize() - kGuardBlockSize;

        return dataOffset + size + kGuardBlockSize;
    }
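
    // Illustrative example (not part of the interface; it assumes a 64-bit build with guard
    // blocks enabled, so HeaderSize() == 24 and kGuardBlockSize == 16): for
    // allocationBasePtr == 0x1000, alignment == 16, and size == 100, AlignedHeaderSize()
    // rounds 0x1000 + 16 + 24 up to 0x1030, so dataOffset == 48,
    // *preAllocationPaddingOut == 48 - 24 - 16 == 8, and the returned total is
    // 48 + 100 + 16 == 164. The user data then starts at 0x1030, which is 16-byte aligned.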

    // Given memory pointing to |header|, returns |data|.
    static uint8_t *GetDataPointer(uint8_t *memory, size_t alignment)
    {
        uint8_t *alignedPtr = memory + kGuardBlockSize + HeaderSize();

        // |memory| must be aligned already such that user data is aligned to |alignment|.
        ASSERT((reinterpret_cast<uintptr_t>(alignedPtr) & (alignment - 1)) == 0);

        return alignedPtr;
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    void checkAlloc() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    // Locate the pre and post guard blocks, and the user data buffer.
    unsigned char *preGuard() const { return mMem + HeaderSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + mSize; }

    size_t mSize;            // size of the user data area
    unsigned char *mMem;     // beginning of our allocation (points to header)
    Allocation *mPrevAlloc;  // prior allocation in the chain

    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal   = 0xfe;
    static constexpr unsigned char kUserDataFill       = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    static constexpr size_t kGuardBlockSize = 16;
    static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
    static constexpr size_t kGuardBlockSize = 0;
    static constexpr size_t HeaderSize() { return 0; }
#endif
};

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
class PageHeader
{
  public:
    PageHeader(PageHeader *nextPage, size_t pageCount)
        : nextPage(nextPage),
          pageCount(pageCount)
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
          ,
          lastAllocation(nullptr)
#    endif
    {}

    ~PageHeader()
    {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        if (lastAllocation)
        {
            lastAllocation->checkAllocList();
        }
#    endif
    }

    PageHeader *nextPage;
    size_t pageCount;
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    Allocation *lastAllocation;
#    endif
};
#endif

//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
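// A minimal usage sketch (illustrative only; the full interface, including any default
// constructor arguments, is declared in PoolAlloc.h):
//
//     angle::PoolAllocator pool(16 * 1024, 16);  // 16 KB growth increment, 16-byte alignment
//     void *a = pool.allocate(128);              // suballocated from the current page
//     pool.push();                               // open a new allocation scope
//     void *b = pool.allocate(64);
//     pool.pop();                                // bulk-free everything allocated since push()
//     pool.popAll();                             // release all remaining scopes
//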
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
    : mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
      mPageSize(growthIncrement),
      mFreeList(nullptr),
      mInUseList(nullptr),
      mNumCalls(0),
      mTotalBytes(0),
#endif
      mLocked(false)
{
    initialize(growthIncrement, allocationAlignment);
}

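// Illustrative example (assuming a typical 64-bit build with the pool enabled):
// initialize(1000, 3) ends up with mAlignment == 8 (raised to at least pointer size, then
// rounded up to a power of two) and mPageSize == 4096 (clamped to the 4 KB minimum below).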
void PoolAllocator::initialize(int pageSize, int alignment)
{
    mAlignment = alignment;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    mPageSize       = pageSize;
    mPageHeaderSkip = sizeof(PageHeader);

    // Alignment == 1 is a special fast path where fastAllocate() is enabled.
    if (mAlignment != 1)
    {
#endif
        // Adjust mAlignment to be at least pointer-aligned and a power of 2.
        //
        size_t minAlign = sizeof(void *);
        if (mAlignment < minAlign)
        {
            mAlignment = minAlign;
        }
        mAlignment = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    }
    //
    // Don't allow page sizes smaller than the smallest common OS page size (4 KB).
    //
    if (mPageSize < 4 * 1024)
    {
        mPageSize = 4 * 1024;
    }

    //
    // A large mCurrentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    mCurrentPageOffset = mPageSize;

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    while (mInUseList)
    {
        PageHeader *next = mInUseList->nextPage;
        mInUseList->~PageHeader();
        delete[] reinterpret_cast<char *>(mInUseList);
        mInUseList = next;
    }
    // We should not check the guard blocks here, because we did it already
    // when the block was placed into the free list.
    //
    while (mFreeList)
    {
        PageHeader *next = mFreeList->nextPage;
        delete[] reinterpret_cast<char *>(mFreeList);
        mFreeList = next;
    }
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}

//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
                                 unsigned char val,
                                 const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    for (size_t x = 0; x < kGuardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // The message is not printed; it is built so it can be inspected in a debugger
            // when the assert below fires.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    AllocState state = {mCurrentPageOffset, mInUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

// Do a mass-deallocation of all the individual allocations that have occurred since the last
// push(), or since the last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
void PoolAllocator::pop()
{
    if (mStack.size() < 1)
    {
        return;
    }

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    PageHeader *page   = mStack.back().page;
    mCurrentPageOffset = mStack.back().offset;

    while (mInUseList != page)
    {
        // Invoke the destructor to free the allocation list.
        mInUseList->~PageHeader();

        PageHeader *nextInUse = mInUseList->nextPage;
        if (mInUseList->pageCount > 1)
        {
            delete[] reinterpret_cast<char *>(mInUseList);
        }
        else
        {
#    if defined(ANGLE_WITH_ASAN)
            // Clear any container annotations left over from when the memory
            // was last used. (crbug.com/1419798)
            __asan_unpoison_memory_region(mInUseList, mPageSize);
#    endif
            mInUseList->nextPage = mFreeList;
            mFreeList            = mInUseList;
        }
        mInUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
    while (mStack.size() > 0)
    {
        pop();
    }
}

void *PoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++mNumCalls;
    mTotalBytes += numBytes;

    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    // Integer overflow is unexpected.
    ASSERT(allocationSize >= numBytes);

    // Do the allocation, most likely case first, for efficiency.
    if (allocationSize <= mPageSize - mCurrentPageOffset)
    {
        // There is enough room to allocate from the current page at mCurrentPageOffset.
        uint8_t *memory = currentPagePtr + preAllocationPadding;
        mCurrentPageOffset += allocationSize;

        return initializeAllocation(memory, numBytes);
    }

    if (allocationSize > mPageSize - mPageHeaderSkip)
    {
        // If the allocation is larger than a whole page, do a multi-page allocation. These are
        // not mixed with the others. The OS is efficient in allocating and freeing multiple
        // pages.

        // We don't know what the alignment of the newly allocated memory will be, so
        // conservatively allocate enough memory for up to |mAlignment| extra bytes being needed.
        allocationSize = Allocation::AllocationSize(reinterpret_cast<uint8_t *>(mPageHeaderSkip),
                                                    numBytes, mAlignment, &preAllocationPadding);

        size_t numBytesToAlloc = allocationSize + mPageHeaderSkip + mAlignment;

        // Integer overflow is unexpected.
        ASSERT(numBytesToAlloc >= allocationSize);

        PageHeader *memory = reinterpret_cast<PageHeader *>(::new char[numBytesToAlloc]);
        if (memory == nullptr)
        {
            return nullptr;
        }

        // Use placement-new to initialize the header.
        new (memory) PageHeader(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
        mInUseList = memory;

        // Make the next allocation come from a new page.
        mCurrentPageOffset = mPageSize;

        // Now that we actually have the pointer, make sure the data pointer will be aligned.
        currentPagePtr = reinterpret_cast<uint8_t *>(memory) + mPageHeaderSkip;
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

        return initializeAllocation(currentPagePtr + preAllocationPadding, numBytes);
    }

    uint8_t *newPageAddr = allocateNewPage(numBytes);
    return initializeAllocation(newPageAddr, numBytes);

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)

    void *alloc = malloc(numBytes + mAlignment - 1);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = rx::roundUpPow2<intptr_t>(intAlloc, mAlignment);
    return reinterpret_cast<void *>(intAlloc);
#endif
}

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
uint8_t *PoolAllocator::allocateNewPage(size_t numBytes)
{
    // We need a new page to allocate from. Take one from the free list, if any; otherwise
    // allocate a fresh one.
    PageHeader *memory;
    if (mFreeList)
    {
        memory    = mFreeList;
        mFreeList = mFreeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<PageHeader *>(::new char[mPageSize]);
        if (memory == nullptr)
        {
            return nullptr;
        }
    }
    // Use placement-new to initialize the header.
    new (memory) PageHeader(mInUseList, 1);
    mInUseList = memory;

    // Leave room for the page header.
    mCurrentPageOffset      = mPageHeaderSkip;
    uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;

    size_t preAllocationPadding = 0;
    size_t allocationSize =
        Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);

    mCurrentPageOffset += allocationSize;

    // The new allocation is made after the page header and any alignment required before it.
    return reinterpret_cast<uint8_t *>(mInUseList) + mPageHeaderSkip + preAllocationPadding;
}

void *PoolAllocator::initializeAllocation(uint8_t *memory, size_t numBytes)
{
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    new (memory) Allocation(numBytes, memory, mInUseList->lastAllocation);
    mInUseList->lastAllocation = reinterpret_cast<Allocation *>(memory);
#    endif

    return Allocation::GetDataPointer(memory, mAlignment);
}
#endif

void PoolAllocator::lock()
{
    ASSERT(!mLocked);
    mLocked = true;
}

void PoolAllocator::unlock()
{
    ASSERT(mLocked);
    mLocked = false;
}

//
// Check all allocations in a list for damage by calling check on each.
//
void Allocation::checkAllocList() const
{
    for (const Allocation *alloc = this; alloc != nullptr; alloc = alloc->mPrevAlloc)
    {
        alloc->checkAlloc();
    }
}

}  // namespace angle