//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.cpp:
//    Implements the class methods for PoolAllocator and Allocation classes.
//

#include "common/PoolAlloc.h"

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include <memory>  // std::align

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/mathutil.h"
#include "common/platform.h"
#include "common/tls.h"

namespace angle
{

//
// Implement the functionality of the PoolAllocator class, which
// is documented in PoolAlloc.h.
//
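// The constructor simply forwards its arguments to initialize(), which
// performs all of the setup.
//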
PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
    : mAlignment(allocationAlignment),
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
      mPageSize(growthIncrement),
      mFreeList(0),
      mInUseList(0),
      mNumCalls(0),
      mTotalBytes(0),
#endif
      mLocked(false)
{
    initialize(growthIncrement, allocationAlignment);
}

void PoolAllocator::initialize(int pageSize, int alignment)
{
    mAlignment = alignment;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    mPageSize = pageSize;
    if (mAlignment == 1)
    {
        // This is a special fast-path where fastAllocation() is enabled
        mAlignmentMask = 0;
        mHeaderSkip    = sizeof(Header);
    }
    else
    {
#endif
        //
        // Adjust mAlignment to be at least pointer aligned and
        // power of 2.
        //
        size_t minAlign = sizeof(void *);
        mAlignment &= ~(minAlign - 1);
        if (mAlignment < minAlign)
            mAlignment = minAlign;
        mAlignment     = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
        mAlignmentMask = mAlignment - 1;
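        // Example: on a 64-bit build (minAlign == 8), a requested alignment
        // of 20 is rounded down to 16 by the mask above, is already a power
        // of two, and yields mAlignmentMask == 0xF.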

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
        //
        // Align header skip
        //
        mHeaderSkip = minAlign;
        if (mHeaderSkip < sizeof(Header))
        {
            mHeaderSkip = rx::roundUpPow2(sizeof(Header), mAlignment);
        }
    }
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (mPageSize < 4 * 1024)
        mPageSize = 4 * 1024;
    //
    // A large mCurrentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

PoolAllocator::~PoolAllocator()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    while (mInUseList)
    {
        Header *next = mInUseList->nextPage;
        mInUseList->~Header();
        delete[] reinterpret_cast<char *>(mInUseList);
        mInUseList = next;
    }
    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
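    // Pages on the free list already had their Header destructor run when
    // pop() recycled them, so they are only delete[]'d here.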
    while (mFreeList)
    {
        Header *next = mFreeList->nextPage;
        delete[] reinterpret_cast<char *>(mFreeList);
        mFreeList = next;
    }
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}

//
// Check a single guard block for damage
//
void Allocation::checkGuardBlock(unsigned char *blockMem,
                                 unsigned char val,
                                 const char *locText) const
{
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
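    // val is the fill byte that the guard region was written with; any
    // other value indicates a write outside the bounds of an allocation.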
    for (size_t x = 0; x < kGuardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // We don't print the assert message.  It's here just to be helpful.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}

void PoolAllocator::push()
{
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    AllocState state = {mCurrentPageOffset, mInUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    mCurrentPageOffset = mPageSize;
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    mStack.push_back({});
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void PoolAllocator::pop()
{
    if (mStack.size() < 1)
        return;

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    Header *page       = mStack.back().page;
    mCurrentPageOffset = mStack.back().offset;

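    // Unwind every page placed on mInUseList since the matching push():
    // single pages are recycled onto the free list, while multi-page
    // blocks are returned to the OS immediately.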
    while (mInUseList != page)
    {
        // invoke destructor to free allocation list
        mInUseList->~Header();

        Header *nextInUse = mInUseList->nextPage;
        if (mInUseList->pageCount > 1)
            delete[] reinterpret_cast<char *>(mInUseList);
        else
        {
            mInUseList->nextPage = mFreeList;
            mFreeList            = mInUseList;
        }
        mInUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void PoolAllocator::popAll()
{
    while (mStack.size() > 0)
        pop();
}
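
//
// Illustrative usage sketch (assuming the default constructor arguments
// declared in PoolAlloc.h): everything allocated between push() and pop()
// is reclaimed in bulk, with no per-allocation frees.
//
//   PoolAllocator pool;
//   pool.push();
//   void *a = pool.allocate(64);
//   void *b = pool.allocate(128);
//   pool.pop();  // a and b are now invalid; their pages are recycled
//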

void *PoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++mNumCalls;
    mTotalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In release builds,
    // kGuardBlockSize = 0 and this all gets optimized away.
    size_t allocationSize = Allocation::AllocationSize(numBytes) + mAlignment;
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= mPageSize - mCurrentPageOffset)
    {
        //
        // Safe to allocate from mCurrentPageOffset.
        //
        unsigned char *memory = reinterpret_cast<unsigned char *>(mInUseList) + mCurrentPageOffset;
        mCurrentPageOffset += allocationSize;
        mCurrentPageOffset = (mCurrentPageOffset + mAlignmentMask) & ~mAlignmentMask;
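        // e.g. with mAlignment == 16, an offset of 70 rounds up to 80 here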

        return initializeAllocation(mInUseList, memory, numBytes);
    }

    if (allocationSize > mPageSize - mHeaderSkip)
    {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient in allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + mHeaderSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        Header *memory = reinterpret_cast<Header *>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new (memory) Header(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
        mInUseList = memory;

        mCurrentPageOffset = mPageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
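        // std::align bumps unalignedPtr up to the next mAlignment boundary,
        // consuming part of the alignment slack reserved in allocationSize.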
        void *unalignedPtr =
            reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + mHeaderSkip);
        return std::align(mAlignment, numBytes, unalignedPtr, allocationSize);
    }
    unsigned char *newPageAddr =
        static_cast<unsigned char *>(allocateNewPage(numBytes, allocationSize));
    return initializeAllocation(mInUseList, newPageAddr, numBytes);
#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    void *alloc = malloc(numBytes + mAlignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = (intAlloc + mAlignmentMask) & ~mAlignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}

#if !defined(ANGLE_DISABLE_POOL_ALLOC)
void *PoolAllocator::allocateNewPage(size_t numBytes, size_t allocationSize)
{
    //
    // Need a simple page to allocate from.
    //
    Header *memory;
    if (mFreeList)
    {
        memory    = mFreeList;
        mFreeList = mFreeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<Header *>(::new char[mPageSize]);
        if (memory == 0)
            return 0;
    }
    // Use placement-new to initialize header
    new (memory) Header(mInUseList, 1);
    mInUseList = memory;

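    // Each page is laid out as [Header | header skip | allocations...];
    // mHeaderSkip places the first allocation just past the header.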
    unsigned char *ret = reinterpret_cast<unsigned char *>(mInUseList) + mHeaderSkip;
    mCurrentPageOffset = (mHeaderSkip + allocationSize + mAlignmentMask) & ~mAlignmentMask;
    return ret;
}
#endif

void PoolAllocator::lock()
{
    ASSERT(!mLocked);
    mLocked = true;
}

void PoolAllocator::unlock()
{
    ASSERT(mLocked);
    mLocked = false;
}

//
// Check all allocations in a list for damage by calling check on each.
//
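// The chain is walked from this allocation back through each mPrevAlloc link.
//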
void Allocation::checkAllocList() const
{
    for (const Allocation *alloc = this; alloc != 0; alloc = alloc->mPrevAlloc)
        alloc->checkAlloc();
}

}  // namespace angle