/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkArenaAlloc_DEFINED
#define SkArenaAlloc_DEFINED

#include "include/private/base/SkAssert.h"
#include "include/private/base/SkTFitsIn.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkASAN.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>

// We found allocating strictly doubling amounts of memory from the heap left too
// much unused slop, particularly on Android.  Instead we'll follow a Fibonacci-like
// progression.

// SkFibonacci47 is the first 47 Fibonacci numbers. Fib(47) is the largest value less than 2^32.
extern std::array<const uint32_t, 47> SkFibonacci47;
template<uint32_t kMaxSize>
class SkFibBlockSizes {
public:
    // staticBlockSize and firstAllocationSize are parameters describing the initial memory
    // layout. staticBlockSize describes the size of the inlined memory, and firstAllocationSize
    // describes the size of the first block to be allocated if the static block is exhausted. By
    // convention, firstAllocationSize is the first choice for the block unit size, followed by
    // staticBlockSize, followed by the default of 1024 bytes.
    SkFibBlockSizes(uint32_t staticBlockSize, uint32_t firstAllocationSize) : fIndex{0} {
        fBlockUnitSize = firstAllocationSize > 0 ? firstAllocationSize :
                         staticBlockSize     > 0 ? staticBlockSize     : 1024;

        SkASSERT_RELEASE(0 < fBlockUnitSize);
        SkASSERT_RELEASE(fBlockUnitSize < std::min(kMaxSize, (1u << 26) - 1));
    }

    uint32_t nextBlockSize() {
        uint32_t result = SkFibonacci47[fIndex] * fBlockUnitSize;

        if (SkTo<size_t>(fIndex + 1) < SkFibonacci47.size() &&
            SkFibonacci47[fIndex + 1] < kMaxSize / fBlockUnitSize)
        {
            fIndex += 1;
        }

        return result;
    }

private:
    uint32_t fIndex : 6;
    uint32_t fBlockUnitSize : 26;
};
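
// For illustration, assuming SkFibonacci47 begins 1, 1, 2, 3, 5, 8, ..., a block unit size of
// 1024 yields successive nextBlockSize() values of 1024, 1024, 2048, 3072, 5120, 8192, ...
//
//   SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> sizes{/*staticBlockSize=*/0,
//                                                               /*firstAllocationSize=*/1024};
//   uint32_t first  = sizes.nextBlockSize();  // 1024
//   uint32_t second = sizes.nextBlockSize();  // 1024
//   uint32_t third  = sizes.nextBlockSize();  // 2048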

// SkArenaAlloc allocates objects and destroys the allocated objects when destroyed. It's designed
// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
// starting with an allocation of firstHeapAllocation bytes.  If your data (plus a small overhead)
// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
// firstHeapAllocation, then blockSize is used unless that too is 0, then 1024 is used.
//
// Examples:
//
//   char block[mostCasesSize];
//   SkArenaAlloc arena(block, mostCasesSize, 0);
//
// If mostCasesSize is too large for the stack, you can use the following pattern.
//
//   std::unique_ptr<char[]> block{new char[mostCasesSize]};
//   SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
//
// If the program only sometimes allocates memory, use the following pattern.
//
//   SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
//
// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
// works.
//
//   class Foo {
//       char storage[mostCasesSize];
//       SkArenaAlloc arena{storage, mostCasesSize, 0};
//   };
//
// In addition, the system is optimized to handle POD data including arrays of PODs (where
// POD is really data with no destructors). For POD data it has zero overhead per item, and a
// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
// is an additional overhead when switching from POD data to non-POD data of typically 8 bytes.
//
// If additional blocks are needed, their sizes increase exponentially. This strategy bounds the
// recursion of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using
// the Fibonacci sequence, which means that for 2^32 memory there are 48 allocations, and for 2^48
// there are 71 allocations.
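//
// A brief sketch of allocating from an arena (MyType, arg1, arg2, and n are placeholders,
// not part of this header):
//
//   SkArenaAlloc arena(nullptr, 0, 1024);
//   MyType* obj   = arena.make<MyType>(arg1, arg2);  // destroyed when the arena is destroyed
//   int*    zeros = arena.makeArray<int>(n);         // value-initialized, so all zero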
class SkArenaAlloc {
public:
    SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAlloc(size_t firstHeapAllocation)
        : SkArenaAlloc(nullptr, 0, firstHeapAllocation) {}

    SkArenaAlloc(const SkArenaAlloc&) = delete;
    SkArenaAlloc& operator=(const SkArenaAlloc&) = delete;
    SkArenaAlloc(SkArenaAlloc&&) = delete;
    SkArenaAlloc& operator=(SkArenaAlloc&&) = delete;

    ~SkArenaAlloc();

    template <typename Ctor>
    auto make(Ctor&& ctor) -> decltype(ctor(nullptr)) {
        using T = std::remove_pointer_t<decltype(ctor(nullptr))>;

        uint32_t size      = SkToU32(sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));
        char* objStart;
        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(size, alignment);
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
        } else {
            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of object to install footer.
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
            FooterAction* releaser = [](char* objEnd) {
                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
                ((T*)objStart)->~T();
                return objStart;
            };
            this->installFooter(releaser, padding);
        }

        // This must be last to make objects with nested use of this allocator work.
        return ctor(objStart);
    }
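
    // For illustration (arena is an SkArenaAlloc instance and MyType a placeholder type): the
    // ctor-lambda overload can be called directly; the typed make<T>(...) just below wraps up
    // exactly this pattern.
    //
    //   MyType* obj = arena.make([&](void* addr) { return new (addr) MyType(1, 2); });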

    template <typename T, typename... Args>
    T* make(Args&&... args) {
        return this->make([&](void* objStart) {
            return new(objStart) T(std::forward<Args>(args)...);
        });
    }

    template <typename T>
    T* makeArrayDefault(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Default initialization: if T is primitive then the value is left uninitialized.
            new (&array[i]) T;
        }
        return array;
    }

    template <typename T>
    T* makeArray(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Value initialization: if T is primitive then the value is zero-initialized.
            new (&array[i]) T();
        }
        return array;
    }
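
    // For illustration (arena is an SkArenaAlloc instance), the difference for a primitive
    // element type:
    //
    //   int* zeroed = arena.makeArray<int>(16);         // every element is 0
    //   int* junk   = arena.makeArrayDefault<int>(16);  // elements are left uninitialized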

    template <typename T, typename Initializer>
    T* makeInitializedArray(size_t count, Initializer initializer) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            new (&array[i]) T(initializer(i));
        }
        return array;
    }
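
    // For illustration (arena is an SkArenaAlloc instance), initializer is called once per index:
    //
    //   int* squares = arena.makeInitializedArray<int>(8, [](size_t i) { return int(i * i); });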

    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
    void* makeBytesAlignedTo(size_t size, size_t align) {
        AssertRelease(SkTFitsIn<uint32_t>(size));
        auto objStart = this->allocObject(SkToU32(size), SkToU32(align));
        fCursor = objStart + size;
        sk_asan_unpoison_memory_region(objStart, size);
        return objStart;
    }
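
    // For illustration (arena and byteCount are placeholders), raw storage with 16-byte
    // alignment; no destructor is recorded for raw bytes:
    //
    //   void* scratch = arena.makeBytesAlignedTo(byteCount, 16);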

private:
    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }

    using FooterAction = char* (char*);
    struct Footer {
        uint8_t unaligned_action[sizeof(FooterAction*)];
        uint8_t padding;
    };

    static char* SkipPod(char* footerEnd);
    static void RunDtorsOnBlock(char* footerEnd);
    static char* NextBlock(char* footerEnd);

    template <typename T>
    void installRaw(const T& val) {
        sk_asan_unpoison_memory_region(fCursor, sizeof(val));
        memcpy(fCursor, &val, sizeof(val));
        fCursor += sizeof(val);
    }
    void installFooter(FooterAction* releaser, uint32_t padding);

    void ensureSpace(uint32_t size, uint32_t alignment);

    char* allocObject(uint32_t size, uint32_t alignment) {
        uintptr_t mask = alignment - 1;
        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        uintptr_t totalSize = size + alignedOffset;
        AssertRelease(totalSize >= size);
        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
            this->ensureSpace(size, alignment);
            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        }

        char* object = fCursor + alignedOffset;

        SkASSERT((reinterpret_cast<uintptr_t>(object) & (alignment - 1)) == 0);
        SkASSERT(object + size <= fEnd);

        return object;
    }

    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);

    template <typename T>
    T* allocUninitializedArray(size_t countZ) {
        AssertRelease(SkTFitsIn<uint32_t>(countZ));
        uint32_t count = SkToU32(countZ);

        char* objStart;
        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
        uint32_t arraySize = SkToU32(count * sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));

        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(arraySize, alignment);
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
        } else {
            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
            uint32_t totalSize = arraySize + overhead;
            objStart = this->allocObjectWithFooter(totalSize, alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of array to install footer.
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
            this->installRaw(SkToU32(count));
            this->installFooter(
                [](char* footerEnd) {
                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
                    uint32_t count;
                    memmove(&count, objEnd, sizeof(uint32_t));
                    char* objStart = objEnd - count * sizeof(T);
                    T* array = (T*) objStart;
                    for (uint32_t i = 0; i < count; i++) {
                        array[i].~T();
                    }
                    return objStart;
                },
                padding);
        }

        return (T*)objStart;
    }

    char*          fDtorCursor;
    char*          fCursor;
    char*          fEnd;

    SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> fFibonacciProgression;
};

class SkArenaAllocWithReset : public SkArenaAlloc {
public:
    SkArenaAllocWithReset(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAllocWithReset(size_t firstHeapAllocation)
            : SkArenaAllocWithReset(nullptr, 0, firstHeapAllocation) {}

    // Destroy all allocated objects, free any heap allocations.
    void reset();

private:
    char* const    fFirstBlock;
    const uint32_t fFirstSize;
    const uint32_t fFirstHeapAllocationSize;
};
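
// For illustration (items and processItem are placeholders), an arena reused across iterations:
//
//   SkArenaAllocWithReset arena(nullptr, 0, 1024);
//   for (const auto& item : items) {
//       processItem(item, &arena);  // allocations live until the next reset()
//       arena.reset();              // destroys allocated objects and frees heap blocks
//   }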

// Helper for defining allocators with inline/reserved storage.
// For argument declarations, stick to the base type (SkArenaAlloc).
// Note: Inheriting from the storage first means the storage will outlive the
// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
// (This is mostly only relevant for strict tools like MSAN.)
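//
// For illustration (doWork is a placeholder), 4 KiB of inline storage passed via the base type:
//
//   SkSTArenaAlloc<4096> arena;
//   doWork(&arena);  // where doWork is declared as: void doWork(SkArenaAlloc*);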
template <size_t InlineStorageSize>
class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc {
public:
    explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
        : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {}
};

template <size_t InlineStorageSize>
class SkSTArenaAllocWithReset
        : private std::array<char, InlineStorageSize>, public SkArenaAllocWithReset {
public:
    explicit SkSTArenaAllocWithReset(size_t firstHeapAllocation = InlineStorageSize)
            : SkArenaAllocWithReset{this->data(), this->size(), firstHeapAllocation} {}
};

#endif  // SkArenaAlloc_DEFINED