1 /* 2 * Copyright 2016 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #ifndef SkArenaAlloc_DEFINED 9 #define SkArenaAlloc_DEFINED 10 11 #include "include/private/base/SkASAN.h" 12 #include "include/private/base/SkAssert.h" 13 #include "include/private/base/SkTFitsIn.h" 14 #include "include/private/base/SkTo.h" 15 16 #include <algorithm> 17 #include <array> 18 #include <cstdint> 19 #include <cstdlib> 20 #include <cstring> 21 #include <limits> 22 #include <new> 23 #include <type_traits> 24 #include <utility> 25 26 // We found allocating strictly doubling amounts of memory from the heap left too 27 // much unused slop, particularly on Android. Instead we'll follow a Fibonacci-like 28 // progression. 29 30 // SkFibonacci47 is the first 47 Fibonacci numbers. Fib(47) is the largest value less than 2 ^ 32. 31 extern std::array<const uint32_t, 47> SkFibonacci47; 32 template<uint32_t kMaxSize> 33 class SkFibBlockSizes { 34 public: 35 // staticBlockSize, and firstAllocationSize are parameters describing the initial memory 36 // layout. staticBlockSize describes the size of the inlined memory, and firstAllocationSize 37 // describes the size of the first block to be allocated if the static block is exhausted. By 38 // convention, firstAllocationSize is the first choice for the block unit size followed by 39 // staticBlockSize followed by the default of 1024 bytes. SkFibBlockSizes(uint32_t staticBlockSize,uint32_t firstAllocationSize)40 SkFibBlockSizes(uint32_t staticBlockSize, uint32_t firstAllocationSize) : fIndex{0} { 41 fBlockUnitSize = firstAllocationSize > 0 ? firstAllocationSize : 42 staticBlockSize > 0 ? 
staticBlockSize : 1024; 43 44 SkASSERT_RELEASE(0 < fBlockUnitSize); 45 SkASSERT_RELEASE(fBlockUnitSize < std::min(kMaxSize, (1u << 26) - 1)); 46 } 47 nextBlockSize()48 uint32_t nextBlockSize() { 49 uint32_t result = SkFibonacci47[fIndex] * fBlockUnitSize; 50 51 if (SkTo<size_t>(fIndex + 1) < SkFibonacci47.size() && 52 SkFibonacci47[fIndex + 1] < kMaxSize / fBlockUnitSize) 53 { 54 fIndex += 1; 55 } 56 57 return result; 58 } 59 60 private: 61 uint32_t fIndex : 6; 62 uint32_t fBlockUnitSize : 26; 63 }; 64 65 // SkArenaAlloc allocates object and destroys the allocated objects when destroyed. It's designed 66 // to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an 67 // (optional) user-provided block of memory, and when that's exhausted it allocates on the heap, 68 // starting with an allocation of firstHeapAllocation bytes. If your data (plus a small overhead) 69 // fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in 70 // firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for 71 // firstHeapAllocation, then blockSize is used unless that too is 0, then 1024 is used. 72 // 73 // Examples: 74 // 75 // char block[mostCasesSize]; 76 // SkArenaAlloc arena(block, mostCasesSize); 77 // 78 // If mostCasesSize is too large for the stack, you can use the following pattern. 79 // 80 // std::unique_ptr<char[]> block{new char[mostCasesSize]}; 81 // SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize); 82 // 83 // If the program only sometimes allocates memory, use the following pattern. 84 // 85 // SkArenaAlloc arena(nullptr, 0, almostAllCasesSize); 86 // 87 // The storage does not necessarily need to be on the stack. Embedding the storage in a class also 88 // works. 
//
// class Foo {
//     char storage[mostCasesSize];
//     SkArenaAlloc arena (storage, mostCasesSize);
// };
//
// In addition, the system is optimized to handle POD data including arrays of PODs (where
// POD is really data with no destructors). For POD data it has zero overhead per item, and a
// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
// is an addition overhead when switching from POD data to non-POD data of typically 8 bytes.
//
// If additional blocks are needed they are increased exponentially. This strategy bounds the
// recursion of the RunDtorsOnBlock to be limited to O(log size-of-memory). Block size grow using
// the Fibonacci sequence which means that for 2^32 memory there are 48 allocations, and for 2^48
// there are 71 allocations.
class SkArenaAlloc {
public:
    SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAlloc(size_t firstHeapAllocation)
        : SkArenaAlloc(nullptr, 0, firstHeapAllocation) {}

    // Not copyable or movable: footers recorded inside the blocks refer back to
    // allocated objects, so the arena's identity must stay fixed.
    SkArenaAlloc(const SkArenaAlloc&) = delete;
    SkArenaAlloc& operator=(const SkArenaAlloc&) = delete;
    SkArenaAlloc(SkArenaAlloc&&) = delete;
    SkArenaAlloc& operator=(SkArenaAlloc&&) = delete;

    ~SkArenaAlloc();

    // Allocate storage for one T and let ctor construct it; returns whatever ctor returns
    // (a T*). ctor must be callable with a void*/char* and is typically a placement-new
    // lambda. For non-trivially-destructible T, a footer is installed after the object so
    // ~SkArenaAlloc can run ~T() later.
    template <typename Ctor>
    auto make(Ctor&& ctor) -> decltype(ctor(nullptr)) {
        using T = std::remove_pointer_t<decltype(ctor(nullptr))>;

        uint32_t size = SkToU32(sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));
        char* objStart;
        if (std::is_trivially_destructible<T>::value) {
            // POD path: no footer needed, just claim aligned bytes.
            objStart = this->allocObject(size, alignment);
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
        } else {
            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of object to install footer.
            fCursor = objStart + size;
            sk_asan_unpoison_memory_region(objStart, size);
            // The releaser finds the object relative to the footer's end, destroys it,
            // and returns the object's start so the footer chain can keep walking back.
            FooterAction* releaser = [](char* objEnd) {
                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
                ((T*)objStart)->~T();
                return objStart;
            };
            this->installFooter(releaser, padding);
        }

        // This must be last to make objects with nested use of this allocator work.
        return ctor(objStart);
    }

    // Allocate and construct a T from args; the arena owns the result's lifetime.
    template <typename T, typename... Args>
    T* make(Args&&... args) {
        return this->make([&](void* objStart) {
            return new(objStart) T(std::forward<Args>(args)...);
        });
    }

    // Allocate and value-construct a T with no constructor arguments.
    template <typename T>
    T* make() {
        if constexpr (std::is_standard_layout<T>::value && std::is_trivial<T>::value) {
            // Just allocate some aligned bytes. This generates smaller code.
            return (T*)this->makeBytesAlignedTo(sizeof(T), alignof(T));
        } else {
            // This isn't a POD type, so construct the object properly.
            return this->make([&](void* objStart) {
                return new(objStart) T();
            });
        }
    }

    // Allocate an array of count Ts, default-initialized (primitives left uninitialized).
    template <typename T>
    T* makeArrayDefault(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Default initialization: if T is primitive then the value is left uninitialized.
            new (&array[i]) T;
        }
        return array;
    }

    // Allocate an array of count Ts, value-initialized (primitives zeroed).
    template <typename T>
    T* makeArray(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Value initialization: if T is primitive then the value is zero-initialized.
            new (&array[i]) T();
        }
        return array;
    }

    // Allocate an array of count Ts, constructing element i from initializer(i).
    template <typename T, typename Initializer>
    T* makeInitializedArray(size_t count, Initializer initializer) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            new (&array[i]) T(initializer(i));
        }
        return array;
    }

    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
    // Returns size bytes aligned to align; no destructor bookkeeping is recorded.
    void* makeBytesAlignedTo(size_t size, size_t align) {
        AssertRelease(SkTFitsIn<uint32_t>(size));
        auto objStart = this->allocObject(SkToU32(size), SkToU32(align));
        fCursor = objStart + size;
        sk_asan_unpoison_memory_region(objStart, size);
        return objStart;
    }

protected:
    // A footer action destroys the object that precedes its footer and returns a pointer
    // to that object's start (see the releaser lambdas above).
    using FooterAction = char* (char*);
    struct Footer {
        // Raw bytes of a FooterAction*; a byte array so the footer itself imposes no
        // alignment requirement.
        uint8_t unaligned_action[sizeof(FooterAction*)];
        uint8_t padding;
    };

    char* cursor() { return fCursor; }
    char* end() { return fEnd; }

private:
    // Aborts even in release builds; guards size/overflow invariants that must never
    // be skipped.
    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }

    // Defined in the .cpp; presumably steps of the footer-chain walk used during
    // destruction — verify against the implementation file.
    static char* SkipPod(char* footerEnd);
    static void RunDtorsOnBlock(char* footerEnd);
    static char* NextBlock(char* footerEnd);

    // memcpy val's bytes at fCursor and advance past them (used for unaligned metadata
    // such as the array count).
    template <typename T>
    void installRaw(const T& val) {
        sk_asan_unpoison_memory_region(fCursor, sizeof(val));
        memcpy(fCursor, &val, sizeof(val));
        fCursor += sizeof(val);
    }
    void installFooter(FooterAction* releaser, uint32_t padding);

    void ensureSpace(uint32_t size, uint32_t alignment);

    // Return a pointer to size bytes aligned to alignment within the current block,
    // growing into a new block if needed. Does NOT advance fCursor — callers do that.
    char* allocObject(uint32_t size, uint32_t alignment) {
        uintptr_t mask = alignment - 1;
        // (-cursor) & mask == forward distance from fCursor to the next aligned address.
        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        uintptr_t totalSize = size + alignedOffset;
        AssertRelease(totalSize >= size);
        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
            this->ensureSpace(size, alignment);
            // fCursor now points into a fresh block; recompute the alignment padding.
            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        }

        char* object = fCursor + alignedOffset;

        SkASSERT((reinterpret_cast<uintptr_t>(object) & (alignment - 1)) == 0);
        SkASSERT(object + size <= fEnd);

        return object;
    }

    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);

    // Allocate raw storage for count Ts. For non-trivially-destructible T the layout is
    // [count Ts][uint32_t count][Footer], and the installed footer destroys each element.
    template <typename T>
    T* allocUninitializedArray(size_t countZ) {
        AssertRelease(SkTFitsIn<uint32_t>(countZ));
        uint32_t count = SkToU32(countZ);

        char* objStart;
        // Guard count * sizeof(T) against 32-bit overflow before computing it.
        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
        uint32_t arraySize = SkToU32(count * sizeof(T));
        uint32_t alignment = SkToU32(alignof(T));

        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(arraySize, alignment);
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
        } else {
            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
            uint32_t totalSize = arraySize + overhead;
            objStart = this->allocObjectWithFooter(totalSize, alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = SkToU32(objStart - fCursor);

            // Advance to end of array to install footer.
            fCursor = objStart + arraySize;
            sk_asan_unpoison_memory_region(objStart, arraySize);
            // Record the element count just before the footer so the releaser below can
            // recover it (memmove, since the count is stored unaligned).
            this->installRaw(SkToU32(count));
            this->installFooter(
                [](char* footerEnd) {
                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
                    uint32_t count;
                    memmove(&count, objEnd, sizeof(uint32_t));
                    char* objStart = objEnd - count * sizeof(T);
                    T* array = (T*) objStart;
                    for (uint32_t i = 0; i < count; i++) {
                        array[i].~T();
                    }
                    return objStart;
                },
                padding);
        }

        return (T*)objStart;
    }

    char* fDtorCursor;  // NOTE(review): presumably the head of the footer/dtor chain —
                        // confirm against the .cpp (installFooter/RunDtorsOnBlock).
    char* fCursor;      // Next free byte in the current block.
    char* fEnd;         // One past the last usable byte of the current block.

    SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> fFibonacciProgression;
};

// An SkArenaAlloc that can additionally be emptied and reused without destroying it.
class SkArenaAllocWithReset : public SkArenaAlloc {
public:
    SkArenaAllocWithReset(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAllocWithReset(size_t firstHeapAllocation)
        : SkArenaAllocWithReset(nullptr, 0, firstHeapAllocation) {}

    // Destroy all allocated objects, free any heap allocations.
    void reset();

    // Returns true if the alloc has never made any objects.
    bool isEmpty();

private:
    // The original constructor arguments — presumably retained so reset() can rebuild
    // the initial state; confirm against the .cpp.
    char* const fFirstBlock;
    const uint32_t fFirstSize;
    const uint32_t fFirstHeapAllocationSize;
};

// Helper for defining allocators with inline/reserved storage.
// For argument declarations, stick to the base type (SkArenaAlloc).
// Note: Inheriting from the storage first means the storage will outlive the
// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
// (This is mostly only relevant for strict tools like MSAN.)
331 template <size_t InlineStorageSize> 332 class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc { 333 public: 334 explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize) 335 : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {} 336 ~SkSTArenaAlloc()337 ~SkSTArenaAlloc() { 338 // Be sure to unpoison the memory that is probably on the stack. 339 sk_asan_unpoison_memory_region(this->data(), this->size()); 340 } 341 }; 342 343 template <size_t InlineStorageSize> 344 class SkSTArenaAllocWithReset 345 : private std::array<char, InlineStorageSize>, public SkArenaAllocWithReset { 346 public: 347 explicit SkSTArenaAllocWithReset(size_t firstHeapAllocation = InlineStorageSize) 348 : SkArenaAllocWithReset{this->data(), this->size(), firstHeapAllocation} {} 349 ~SkSTArenaAllocWithReset()350 ~SkSTArenaAllocWithReset() { 351 // Be sure to unpoison the memory that is probably on the stack. 352 sk_asan_unpoison_memory_region(this->data(), this->size()); 353 } 354 }; 355 356 #endif // SkArenaAlloc_DEFINED 357