//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.h:
//    Defines the class interface for PoolAllocator and the Allocation
//    class that it uses internally.
//

#ifndef COMMON_POOLALLOC_H_
#define COMMON_POOLALLOC_H_

#if !defined(NDEBUG)
#    define ANGLE_POOL_ALLOC_GUARD_BLOCKS  // define to enable guard block checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//     typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods, as in the sketch below.
//
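// Example (a hedged sketch, not part of this header): a class that routes its
// allocations through a pool.  GetGlobalPoolAllocator() is a hypothetical
// accessor returning a live PoolAllocator*; it is not declared here.
//
//     class MyNode
//     {
//       public:
//         void *operator new(size_t size)
//         {
//             return GetGlobalPoolAllocator()->allocate(size);
//         }
//         void operator delete(void *) {}  // memory is reclaimed in bulk by pop()
//     };
//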

#include <stddef.h>
#include <string.h>
#include <memory>
#include <vector>

#include "angleutils.h"
#include "common/debug.h"

namespace angle
{
// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so they won't have any impact.
//

class Allocation
{
  public:
    Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
        : mSize(size), mMem(mem), mPrevAlloc(prev)
    {
// Allocations are bracketed:
//    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (kGuardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, mSize);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    void checkAlloc() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    static size_t AllocationSize(size_t size) { return size + 2 * kGuardBlockSize + HeaderSize(); }

    // Offset from surrounding buffer to get to user data buffer.
    static unsigned char *OffsetAllocation(unsigned char *m)
    {
        return m + kGuardBlockSize + HeaderSize();
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mMem + HeaderSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + mSize; }
    size_t mSize;            // size of the user data area
    unsigned char *mMem;     // beginning of our allocation (pts to header)
    Allocation *mPrevAlloc;  // prior allocation in the chain

    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal   = 0xfe;
    static constexpr unsigned char kUserDataFill       = 0xcd;
#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
    static constexpr size_t kGuardBlockSize = 16;
    static constexpr size_t HeaderSize() { return sizeof(Allocation); }
#else
    static constexpr size_t kGuardBlockSize = 0;
    static constexpr size_t HeaderSize() { return 0; }
#endif
};
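
// Sizing sketch (illustrative; assumes guard blocks are enabled, so
// kGuardBlockSize == 16 and HeaderSize() == sizeof(Allocation)):
//
//     size_t total        = Allocation::AllocationSize(64);  // 64 + 2*16 + header
//     unsigned char *raw  = /* 'total' bytes carved from a page */ nullptr;
//     unsigned char *user = Allocation::OffsetAllocation(raw);  // skips header + preGuard
//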

//
// There are several stacks.  One is to track the user's pushing and
// popping, and is not yet implemented.  The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used need not match the underlying OS page size, but
// making it roughly that size, or a multiple of it, is likely most efficient.
//
class PoolAllocator : angle::NonCopyable
{
  public:
    static const int kDefaultAlignment = 16;
    //
    // Create PoolAllocator.  If alignment is set to 1 byte, then the
    // fastAllocate() function can be used to make allocations with less overhead.
    //
    PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);

    //
    // Don't call the destructor just to free up the memory; call pop() instead.
    //
    ~PoolAllocator();

    //
    // Initialize page size and alignment after construction.
    //
    void initialize(int pageSize, int alignment);

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, to free all memory allocated since the
    // first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory is
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void *allocate(size_t numBytes);
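
    //
    // Usage sketch (illustrative only): bracket a batch of allocations with
    // push()/pop() so they are all reclaimed together.
    //
    //     PoolAllocator pool;
    //     pool.push();                   // mark a new allocation scope
    //     void *a = pool.allocate(32);   // aligned to the configured alignment
    //     void *b = pool.allocate(128);
    //     pool.pop();                    // frees a and b together
    //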

    //
    // Call fastAllocate() for a faster allocate function that does minimal bookkeeping.
    // Precondition: the allocator must have been created with an alignment of 1.
    //
    ANGLE_INLINE uint8_t *fastAllocate(size_t numBytes)
    {
#if defined(ANGLE_DISABLE_POOL_ALLOC)
        return reinterpret_cast<uint8_t *>(allocate(numBytes));
#else
        ASSERT(mAlignment == 1);
        // No multi-page allocations
        ASSERT(numBytes <= (mPageSize - mHeaderSkip));
        //
        // Do the allocation, most likely case inline first, for efficiency.
        //
        if (numBytes <= mPageSize - mCurrentPageOffset)
        {
            //
            // Safe to allocate from mCurrentPageOffset.
            //
            uint8_t *memory = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
            mCurrentPageOffset += numBytes;
            return memory;
        }
        return reinterpret_cast<uint8_t *>(allocateNewPage(numBytes, numBytes));
#endif
    }
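
    // fastAllocate() sketch (illustrative; requires constructing the allocator
    // with an alignment of 1):
    //
    //     PoolAllocator bytePool(8 * 1024, 1);
    //     uint8_t *p = bytePool.fastAllocate(24);  // minimal bookkeeping
    //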

    //
    // There is no deallocate.  The point of this class is that deallocation
    // can be skipped by its user, as the model of use is to deallocate
    // everything at once by calling pop(), and to not have to solve memory
    // leak problems.
    //

    // Catch unwanted allocations.
    // TODO(jmadill): Remove this when we remove the global allocator.
    void lock();
    void unlock();

  private:
    size_t mAlignment;  // all returned allocations will be aligned at
                        // this granularity, which will be a power of 2
    size_t mAlignmentMask;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
    friend struct Header;

    struct Header
    {
        Header(Header *nextPage, size_t pageCount)
            : nextPage(nextPage),
              pageCount(pageCount)
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
              ,
              lastAllocation(0)
#    endif
        {}

        ~Header()
        {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
            if (lastAllocation)
                lastAllocation->checkAllocList();
#    endif
        }

        Header *nextPage;
        size_t pageCount;
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        Allocation *lastAllocation;
#    endif
    };

    struct AllocState
    {
        size_t offset;
        Header *page;
    };
    using AllocStack = std::vector<AllocState>;

    // Slow path of allocation when we have to get a new page.
    void *allocateNewPage(size_t numBytes, size_t allocationSize);
    // Track allocations if and only if we're using guard blocks.
    void *initializeAllocation(Header *block, unsigned char *memory, size_t numBytes)
    {
#    if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        new (memory) Allocation(numBytes + mAlignment, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<Allocation *>(memory);
#    endif
        // The OffsetAllocation() call is optimized away if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
        void *unalignedPtr  = Allocation::OffsetAllocation(memory);
        size_t alignedBytes = numBytes + mAlignment;
        return std::align(mAlignment, numBytes, unalignedPtr, alignedBytes);
    }
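
    // Alignment sketch (illustrative): with mAlignment == 16, numBytes == 32,
    // and an unalignedPtr of 0x1004, std::align() advances the pointer to the
    // next 16-byte boundary (0x1010), shrinks alignedBytes by the 12 bytes
    // consumed (48 -> 36), and returns the aligned pointer.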

    size_t mPageSize;           // granularity of allocation from the OS
    size_t mHeaderSkip;         // amount of memory to skip to make room for the
                                //      header (basically, size of header, rounded
                                //      up to make it aligned)
    size_t mCurrentPageOffset;  // next offset in top of inUseList to allocate from
    Header *mFreeList;          // list of popped memory
    Header *mInUseList;         // list of all memory currently being used
    AllocStack mStack;          // stack of where to allocate from, to partition pool

    int mNumCalls;       // just an interesting statistic
    size_t mTotalBytes;  // just an interesting statistic

#else  // !defined(ANGLE_DISABLE_POOL_ALLOC)
    std::vector<std::vector<void *>> mStack;
#endif

    bool mLocked;
};

}  // namespace angle

#endif  // COMMON_POOLALLOC_H_