/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkBlockAllocator_DEFINED
#define SkBlockAllocator_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/private/SkMacros.h"
#include "include/private/SkNoncopyable.h"
#include "include/private/SkTo.h"
#include "src/core/SkASAN.h"

#include <memory>    // std::unique_ptr
#include <cstddef>   // max_align_t
#include <limits>    // numeric_limits
#include <algorithm> // max

/**
 * SkBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
 * tracks space reservations within each block. Its APIs provide the ability to reserve space,
 * resize reservations, and release reservations. It will automatically create new blocks if needed
 * and destroy all remaining blocks when it is destructed. It assumes that anything allocated within
 * its blocks has its destructors called externally. It is recommended that SkBlockAllocator is
 * wrapped by a higher-level allocator that uses the low-level APIs to implement a simpler,
 * purpose-focused API w/o having to worry as much about byte-level concerns.
 *
 * SkBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
 * should be sufficient for Skia's use cases). This upper allocation limit allows all internal
 * operations to be performed using 'int' and avoids many overflow checks. Static asserts are used
 * to ensure that those operations would not overflow when using the largest possible values.
 *
 * Possible use modes:
 * 1. No upfront allocation, either on the stack or as a field
 *    SkBlockAllocator allocator(policy, heapAllocSize);
 *
 * 2. In-place new'd
 *    void* mem = operator new(totalSize);
 *    SkBlockAllocator* allocator = new (mem) SkBlockAllocator(policy, heapAllocSize,
 *                                                             totalSize - sizeof(SkBlockAllocator));
 *    delete allocator;
 *
 * 3. Use SkSBlockAllocator to increase the preallocation size
 *    SkSBlockAllocator<1024> allocator(policy, heapAllocSize);
 *    sizeof(allocator) == 1024;
 */
// TODO(michaelludwig) - While API is different, this shares similarities to SkArenaAlloc and
// SkFibBlockSizes, so we should work to integrate them.
class SkBlockAllocator final : SkNoncopyable {
public:
    // Largest size that can be requested from allocate(), chosen because it's the largest pow-2
    // that is less than int32_t::max()/2.
    inline static constexpr int kMaxAllocationSize = 1 << 29;

    enum class GrowthPolicy : int {
        kFixed,       // Next block size = N
        kLinear,      //   = #blocks * N
        kFibonacci,   //   = fibonacci(#blocks) * N
        kExponential, //   = 2^#blocks * N
        kLast = kExponential
    };
    inline static constexpr int kGrowthPolicyCount = static_cast<int>(GrowthPolicy::kLast) + 1;

    class Block;

    // Tuple representing a range of bytes, marking the unaligned start, the first aligned point
    // after any padding, and the upper limit depending on requested size.
    struct ByteRange {
        Block* fBlock;         // Owning block
        int    fStart;         // Inclusive byte lower limit of byte range
        int    fAlignedOffset; // >= start, matching alignment requirement (i.e. first real byte)
        int    fEnd;           // Exclusive upper limit of byte range
    };

    class Block final {
    public:
        ~Block();
        void operator delete(void* p) { ::operator delete(p); }

        // Return the maximum allocation size with the given alignment that can fit in this block.
        template <size_t Align = 1, size_t Padding = 0>
        int avail() const { return std::max(0, fSize - this->cursor<Align, Padding>()); }

        // Return the aligned offset of the first allocation, assuming it was made with the
        // specified Align and Padding. The returned offset does not mean a valid allocation
        // starts at that offset; this is a utility function for classes built on top to manage
        // indexing into a block effectively.
        template <size_t Align = 1, size_t Padding = 0>
        int firstAlignedOffset() const { return this->alignedOffset<Align, Padding>(kDataStart); }

        // Convert an offset into this block's storage into a usable pointer.
        void* ptr(int offset) {
            SkASSERT(offset >= kDataStart && offset < fSize);
            return reinterpret_cast<char*>(this) + offset;
        }
        const void* ptr(int offset) const { return const_cast<Block*>(this)->ptr(offset); }

        // Every block has an extra 'int' for clients to use however they want. It will start
        // at 0 when a new block is made, or when the head block is reset.
        int metadata() const { return fMetadata; }
        void setMetadata(int value) { fMetadata = value; }

        /**
         * Release the byte range between offset 'start' (inclusive) and 'end' (exclusive). This
         * will return true if those bytes were successfully reclaimed, i.e. a subsequent allocation
         * request could occupy the space. Regardless of return value, the provided byte range that
         * [start, end) represents should not be used until it's re-allocated with allocate<...>().
         */
        inline bool release(int start, int end);

        /**
         * Resize a previously reserved byte range of offset 'start' (inclusive) to 'end'
         * (exclusive). 'deltaBytes' is the SIGNED change to the length of the reservation.
         *
         * When negative this means the reservation is shrunk and the new length is (end - start -
         * |deltaBytes|). If this new length would be 0, the byte range can no longer be used (as if
         * it were released instead). Asserts that it would not shrink the reservation below 0.
         *
         * If 'deltaBytes' is positive, the allocator attempts to increase the length of the
         * reservation. If 'deltaBytes' is less than or equal to avail() and it was the last
         * allocation in the block, it can be resized. If there are not enough available bytes to
         * accommodate the increase in size, or another allocation is blocking the increase in size,
         * then false will be returned and the reserved byte range is unmodified.
         */
        inline bool resize(int start, int end, int deltaBytes);
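
        // A minimal usage sketch of resize()/release() (illustrative only, not part of the API;
        // 'allocator' is assumed to be an SkBlockAllocator and the range its most recent
        // allocation):
        //
        //    SkBlockAllocator::ByteRange r = allocator.allocate<alignof(int)>(4 * sizeof(int));
        //    // Shrink the reservation to a single int (only succeeds while it is still the last
        //    // allocation in its block):
        //    if (r.fBlock->resize(r.fStart, r.fEnd, -3 * (int) sizeof(int))) {
        //        r.fEnd -= 3 * (int) sizeof(int);
        //    }
        //    // Or give the bytes back entirely:
        //    r.fBlock->release(r.fStart, r.fEnd);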

    private:
        friend class SkBlockAllocator;

        Block(Block* prev, int allocationSize);

        // We poison the unallocated space in a Block to allow ASAN to catch invalid writes.
        void poisonRange(int start, int end) {
            sk_asan_poison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }
        void unpoisonRange(int start, int end) {
            sk_asan_unpoison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }

        // Get fCursor, but aligned such that ptr(rval) satisfies Align.
        template <size_t Align, size_t Padding>
        int cursor() const { return this->alignedOffset<Align, Padding>(fCursor); }

        template <size_t Align, size_t Padding>
        int alignedOffset(int offset) const;

        bool isScratch() const { return fCursor < 0; }
        void markAsScratch() {
            fCursor = -1;
            this->poisonRange(kDataStart, fSize);
        }

        SkDEBUGCODE(int fSentinel;) // known value to check for bad back pointers to blocks

        Block*          fNext;      // doubly-linked list of blocks
        Block*          fPrev;

        // Each block tracks its own cursor because as later blocks are released, an older block
        // may become the active tail again.
        int             fSize;      // includes the size of the BlockHeader and requested metadata
        int             fCursor;    // (this + fCursor) points to next available allocation
        int             fMetadata;

        // On release builds, a Block's other 2 pointers and 3 int fields leave 4 bytes of padding
        // on systems with 8- or 16-byte alignment. Currently this is only manipulated in the head
        // block for allocator-level metadata, and it is explicitly not reset when the head block is
        // "released". Down the road we could instead choose to offer multiple metadata slots per
        // block.
        int             fAllocatorMetadata;
    };

    // The size of the head block is determined by 'additionalPreallocBytes'. Subsequent heap blocks
    // are determined by 'policy' and 'blockIncrementBytes', although 'blockIncrementBytes' will be
    // aligned to std::max_align_t.
    //
    // When 'additionalPreallocBytes' > 0, the allocator assumes that many extra bytes immediately
    // after the allocator can be used by its inline head block. This is useful when the allocator
    // is in-place new'ed into a larger block of memory, but it should remain set to 0 if stack
    // allocated or if the class layout does not guarantee that space is present.
    SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                     size_t additionalPreallocBytes = 0);

    ~SkBlockAllocator() { this->reset(); }
    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper to calculate the minimum number of bytes needed for a heap block size, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To store N instances of T in a heap block, 'blockIncrementBytes' should be set to
     *   BlockOverhead<alignof(T)>() + N * sizeof(T) when making the SkBlockAllocator.
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t BlockOverhead();

    /**
     * Helper to calculate the minimum number of bytes needed for a preallocation, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To preallocate an SkSBlockAllocator to hold N instances of T, its size argument should
     *   be Overhead<alignof(T)>() + N * sizeof(T).
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t Overhead();
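
    // A hedged sizing sketch for the two helpers above (the struct, names, and counts are
    // illustrative assumptions, not part of the API):
    //
    //    struct Node { Node* fNext; int fValue; };
    //    // Heap blocks sized so each can hold 16 Nodes:
    //    SkBlockAllocator pool(SkBlockAllocator::GrowthPolicy::kFixed,
    //                          SkBlockAllocator::BlockOverhead<alignof(Node)>() + 16 * sizeof(Node));
    //    // Or, preallocate space for the first 16 Nodes inline with the allocator:
    //    SkSBlockAllocator<SkBlockAllocator::Overhead<alignof(Node)>() + 16 * sizeof(Node)> sPool;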

    /**
     * Return the total number of bytes of the allocator, including its instance overhead, per-block
     * overhead and space used for allocations.
     */
    size_t totalSize() const;
    /**
     * Return the total number of bytes usable for allocations. This includes bytes that have
     * been reserved already by a call to allocate() and bytes that are still available. It is
     * totalSize() minus all allocator and block-level overhead.
     */
    size_t totalUsableSpace() const;
    /**
     * Return the total number of usable bytes that have been reserved by allocations. This will
     * be less than or equal to totalUsableSpace().
     */
    size_t totalSpaceInUse() const;

    /**
     * Return the total number of bytes that were pre-allocated for the SkBlockAllocator. This will
     * include 'additionalPreallocBytes' passed to the constructor, and represents what the total
     * size would become after a call to reset().
     */
    size_t preallocSize() const {
        // Don't double count fHead's Block overhead in both sizeof(SkBlockAllocator) and fSize.
        return sizeof(SkBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
    }
    /**
     * Return the usable size of the inline head block; this will be equal to
     * 'additionalPreallocBytes' plus any alignment padding that the system had to add to Block.
     * The returned value represents what could be allocated before a heap block is created.
     */
    size_t preallocUsableSpace() const {
        return fHead.fSize - kDataStart;
    }


    /**
     * Get the current value of the allocator-level metadata (a user-oriented slot). This is
     * separate from any block-level metadata, but can serve a similar purpose to compactly support
     * data collections on top of SkBlockAllocator.
     */
    int metadata() const { return fHead.fAllocatorMetadata; }

    /**
     * Set the current value of the allocator-level metadata.
     */
    void setMetadata(int value) { fHead.fAllocatorMetadata = value; }

    /**
     * Reserve space that will hold 'size' bytes. This will automatically allocate a new block if
     * there is not enough available space in the current block to provide 'size' bytes. The
     * returned ByteRange tuple specifies the Block owning the reserved memory, the full byte range,
     * and the aligned offset within that range to use for the user-facing pointer. The following
     * invariants hold:
     *
     *  1. block->ptr(alignedOffset) is aligned to Align
     *  2. end - alignedOffset == size
     *  3. Padding <= alignedOffset - start <= Padding + Align - 1
     *
     * Invariant #3, when Padding > 0, allows intermediate allocators to embed metadata along with
     * the allocations. If the Padding bytes are used for some 'struct Meta', then
     * ptr(alignedOffset - sizeof(Meta)) can be safely used as a Meta* if Meta's alignment
     * requirements are less than or equal to the alignment specified in allocate<>. This can be
     * easily guaranteed by using the pattern:
     *
     *    allocate<max(UserAlign, alignof(Meta)), sizeof(Meta)>(userSize);
     *
     * This ensures that ptr(alignedOffset) will always satisfy UserAlign and
     * ptr(alignedOffset - sizeof(Meta)) will always satisfy alignof(Meta). Alternatively, memcpy
     * can be used to read and write values between start and alignedOffset without worrying about
     * alignment requirements of the metadata.
     *
     * For over-aligned allocations, the alignedOffset (as an int) may not be a multiple of Align,
     * but the result of ptr(alignedOffset) will be a multiple of Align.
     */
    template <size_t Align, size_t Padding = 0>
    ByteRange allocate(size_t size);
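
    // A minimal sketch of the metadata pattern described above (the Meta struct, 'alloc', and 'n'
    // are illustrative assumptions, not part of the API):
    //
    //    struct Meta { int fCount; };
    //    auto [block, start, alignedOffset, end] =
    //            alloc.allocate<std::max(alignof(int), alignof(Meta)), sizeof(Meta)>(n * sizeof(int));
    //    new (block->ptr(alignedOffset - (int) sizeof(Meta))) Meta{n};  // metadata in the padding
    //    int* values = static_cast<int*>(block->ptr(alignedOffset));    // aligned user data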

    enum ReserveFlags : unsigned {
        // If provided to reserve(), the input 'size' will be rounded up to the next size determined
        // by the growth policy of the SkBlockAllocator. If not, 'size' will be aligned to max_align.
        kIgnoreGrowthPolicy_Flag  = 0b01,
        // If provided to reserve(), the number of available bytes of the current block will not
        // be used to satisfy the reservation (assuming the contiguous range was long enough to
        // begin with).
        kIgnoreExistingBytes_Flag = 0b10,

        kNo_ReserveFlags          = 0b00
    };

    /**
     * Ensure the block allocator has 'size' contiguous available bytes. After calling this
     * function, currentBlock()->avail<Align, Padding>() may still report less than 'size' if the
     * reserved space was added as a scratch block. This is done so that anything remaining in
     * the current block can still be used if a smaller-than-size allocation is requested. If 'size'
     * is requested by a subsequent allocation, the scratch block will automatically be activated
     * and the request will not itself trigger any malloc.
     *
     * The optional 'flags' controls how the input size is allocated; by default it will attempt
     * to use available contiguous bytes in the current block and will respect the growth policy
     * of the allocator.
     */
    template <size_t Align = 1, size_t Padding = 0>
    void reserve(size_t size, ReserveFlags flags = kNo_ReserveFlags);
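
    // For example (a sketch, not part of the API): before appending up to 100 more ints to a
    // growing collection, a caller could pre-reserve the worst-case space so that the later
    // allocate<>() calls cannot trigger a malloc mid-update:
    //
    //    alloc.reserve<alignof(int)>(100 * sizeof(int));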

    /**
     * Return a pointer to the start of the current block. This will never be null.
     */
    const Block* currentBlock() const { return fTail; }
    Block* currentBlock() { return fTail; }

    const Block* headBlock() const { return &fHead; }
    Block* headBlock() { return &fHead; }

    /**
     * Return the block that owns the allocated 'ptr'. Assuming that an earlier allocation was
     * returned as {b, start, alignedOffset, end} and 'p = b->ptr(alignedOffset)', then
     * 'owningBlock<Align, Padding>(p, start)' will return b.
     *
     * If calling code has already made a pointer to their metadata, i.e. 'm = p - Padding', then
     * 'owningBlock<Align, 0>(m, start)' will also return b, allowing you to recover the block from
     * the metadata pointer.
     *
     * If calling code has access to the original alignedOffset, this function should not be used
     * since the owning block is just 'p - alignedOffset', regardless of the original Align or
     * Padding.
     */
    template <size_t Align, size_t Padding = 0>
    Block* owningBlock(const void* ptr, int start);

    template <size_t Align, size_t Padding = 0>
    const Block* owningBlock(const void* ptr, int start) const {
        return const_cast<SkBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
    }
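
    // A round-trip sketch (illustrative only; kAlign, kPadding, and size are assumed values):
    //
    //    auto [block, start, alignedOffset, end] = alloc.allocate<kAlign, kPadding>(size);
    //    void* p = block->ptr(alignedOffset);
    //    SkASSERT(alloc.owningBlock<kAlign, kPadding>(p, start) == block);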

    /**
     * Find the owning block of the allocated pointer, 'p'. Without any additional information this
     * is O(N) on the number of allocated blocks.
     */
    Block* findOwningBlock(const void* ptr);
    const Block* findOwningBlock(const void* ptr) const {
        return const_cast<SkBlockAllocator*>(this)->findOwningBlock(ptr);
    }

    /**
     * Explicitly free an entire block, invalidating any remaining allocations from the block.
     * SkBlockAllocator will release all alive blocks automatically when it is destroyed, but this
     * function can be used to reclaim memory over the lifetime of the allocator. The provided
     * 'block' pointer must have previously come from a call to currentBlock() or allocate().
     *
     * If 'block' represents the inline-allocated head block, its cursor and metadata are instead
     * reset to their defaults.
     *
     * If the block is not the head block, it may be kept as a scratch block to be reused for
     * subsequent allocation requests, instead of making an entirely new block. A scratch block is
     * not visible when iterating over blocks but is reported in the total size of the allocator.
     */
    void releaseBlock(Block* block);

    /**
     * Detach every heap-allocated block owned by 'other' and concatenate them to this allocator's
     * list of blocks. This memory is now managed by this allocator. Since this only transfers
     * ownership of a Block, and a Block itself does not move, any previous allocations remain
     * valid and associated with their original Block instances. SkBlockAllocator-level functions
     * that accept allocated pointers (e.g. findOwningBlock) must now be called on this allocator,
     * not 'other', for those allocations.
     *
     * The head block of 'other' cannot be stolen, so higher-level allocators and memory structures
     * must handle that data differently.
     */
    void stealHeapBlocks(SkBlockAllocator* other);

    /**
     * Explicitly free all blocks (invalidating all allocations) and reset the head block to its
     * default state. The allocator-level metadata is reset to 0 as well.
     */
    void reset();

    /**
     * Remove any reserved scratch space, either from calling reserve() or releaseBlock().
     */
    void resetScratchSpace();

    template <bool Forward, bool Const> class BlockIter;

    /**
     * Clients can iterate over all active Blocks in the SkBlockAllocator using for loops:
     *
     * Forward iteration from head to tail block (or non-const variant):
     *   for (const Block* b : this->blocks()) { }
     * Reverse iteration from tail to head block:
     *   for (const Block* b : this->rblocks()) { }
     *
     * It is safe to call releaseBlock() on the active block while looping.
     */
    inline BlockIter<true, false> blocks();
    inline BlockIter<true, true> blocks() const;
    inline BlockIter<false, false> rblocks();
    inline BlockIter<false, true> rblocks() const;

#ifdef SK_DEBUG
    inline static constexpr int kAssignedMarker = 0xBEEFFACE;
    inline static constexpr int kFreedMarker    = 0xCAFEBABE;

    void validate() const;
#endif

private:
    friend class BlockAllocatorTestAccess;
    friend class TBlockListTestAccess;

    inline static constexpr int kDataStart = sizeof(Block);
    #ifdef SK_FORCE_8_BYTE_ALIGNMENT
        // This is an issue for WASM builds using emscripten, which had std::max_align_t = 16, but
        // was returning pointers only aligned to 8 bytes.
        // https://github.com/emscripten-core/emscripten/issues/10072
        //
        // Setting this to 8 will let SkBlockAllocator properly correct for the pointer address if
        // a 16-byte aligned allocation is requested in wasm (unlikely since we don't use long
        // doubles).
        inline static constexpr size_t kAddressAlign = 8;
    #else
        // The alignment Block addresses will have when created using operator new (a
        // spec-compliant operator new returns pointers aligned to max_align_t).
        inline static constexpr size_t kAddressAlign = alignof(std::max_align_t);
    #endif

    // Calculates the size of a new Block required to store a kMaxAllocationSize request for the
    // given alignment and padding bytes. Also represents the maximum valid fCursor value in a
    // Block.
    template<size_t Align, size_t Padding>
    static constexpr size_t MaxBlockSize();

    static constexpr int BaseHeadBlockSize() {
        return sizeof(SkBlockAllocator) - offsetof(SkBlockAllocator, fHead);
    }

    // Append a new block to the end of the block linked list, updating fTail. 'minSize' must
    // have enough room for sizeof(Block). 'maxSize' is the upper limit of fSize for the new block
    // that will preserve the static guarantees SkBlockAllocator makes.
    void addBlock(int minSize, int maxSize);

    int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }

    Block* fTail; // All non-head blocks are heap allocated; tail will never be null.

    // All remaining state is packed into 64 bits to keep SkBlockAllocator at 16 bytes + head block
    // (on a 64-bit system).

    // Growth of the block size is controlled by four factors: BlockIncrement, N0 and N1, and a
    // policy defining how N0 is updated. When a new block is needed, we calculate N1' = N0 + N1.
    // Depending on the policy, N0' = N0 (no growth or linear growth), or N0' = N1 (Fibonacci), or
    // N0' = N1' (exponential). The size of the new block is N1' * BlockIncrement * MaxAlign,
    // after which fN0 and fN1 store N0' and N1' clamped into 23 bits. With current bit allocations,
    // N1' is limited to 2^24, and assuming MaxAlign=16, then BlockIncrement must be '2' in order to
    // eventually reach the hard 2^29 size limit of SkBlockAllocator.

    // Next heap block size = (fBlockIncrement * alignof(std::max_align_t) * (fN0 + fN1))
    uint64_t fBlockIncrement : 16;
    uint64_t fGrowthPolicy   : 2;  // GrowthPolicy
    uint64_t fN0             : 23; // = 1 for linear/exp.; = 0 for fixed/fibonacci, initially
    uint64_t fN1             : 23; // = 1 initially

    // Inline head block, must be at the end so that it can utilize any additional reserved space
    // from the initial allocation.
    // The head block's prev pointer may be non-null, which signifies a scratch block that may be
    // reused instead of allocating an entirely new block (this helps when allocate+release calls
    // bounce back and forth across the capacity of a block).
    alignas(kAddressAlign) Block fHead;

    static_assert(kGrowthPolicyCount <= 4);
};

// A wrapper around SkBlockAllocator that includes preallocated storage for the head block.
// N will be the preallocSize() reported by the allocator.
template<size_t N>
class SkSBlockAllocator : SkNoncopyable {
public:
    using GrowthPolicy = SkBlockAllocator::GrowthPolicy;

    SkSBlockAllocator() {
        new (fStorage) SkBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(SkBlockAllocator));
    }
    explicit SkSBlockAllocator(GrowthPolicy policy) {
        new (fStorage) SkBlockAllocator(policy, N, N - sizeof(SkBlockAllocator));
    }

    SkSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
        new (fStorage) SkBlockAllocator(policy, blockIncrementBytes, N - sizeof(SkBlockAllocator));
    }

    ~SkSBlockAllocator() {
        this->allocator()->~SkBlockAllocator();
    }

    SkBlockAllocator* operator->() { return this->allocator(); }
    const SkBlockAllocator* operator->() const { return this->allocator(); }

    SkBlockAllocator* allocator() { return reinterpret_cast<SkBlockAllocator*>(fStorage); }
    const SkBlockAllocator* allocator() const {
        return reinterpret_cast<const SkBlockAllocator*>(fStorage);
    }

private:
    static_assert(N >= sizeof(SkBlockAllocator));

    // Will be used to placement new the allocator
    alignas(SkBlockAllocator) char fStorage[N];
};
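
// A minimal end-to-end sketch (illustrative only; the type, sizes, and variable names below are
// assumptions, not part of the API):
//
//    SkSBlockAllocator<256> pool;  // kFixed growth, 256 bytes preallocated (including overhead)
//    auto [block, start, alignedOffset, end] = pool->allocate<alignof(float)>(8 * sizeof(float));
//    float* values = static_cast<float*>(block->ptr(alignedOffset));
//    // ... use values[0..7]; any destructors must be run externally before:
//    pool->reset();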

///////////////////////////////////////////////////////////////////////////////////////////////////
// Template and inline implementations

SK_MAKE_BITFIELD_OPS(SkBlockAllocator::ReserveFlags)

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::BlockOverhead() {
    static_assert(SkAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
    return SkAlignTo(kDataStart + Padding, Align);
}

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::Overhead() {
    // NOTE: On most platforms, SkBlockAllocator is packed; this is not the case on debug builds
    // due to extra fields, or on WASM due to 4-byte pointers but 16-byte max alignment.
    return std::max(sizeof(SkBlockAllocator),
                    offsetof(SkBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
}

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::MaxBlockSize() {
    // Without loss of generality, assumes 'align' will be the largest encountered alignment for the
    // allocator (if it's not, the largest align will be encountered by the compiler and pass/fail
    // the same set of static asserts).
    return BlockOverhead<Align, Padding>() + kMaxAllocationSize;
}

template<size_t Align, size_t Padding>
void SkBlockAllocator::reserve(size_t size, ReserveFlags flags) {
    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }
    int iSize = (int) size;
    if ((flags & kIgnoreExistingBytes_Flag) ||
        this->currentBlock()->avail<Align, Padding>() < iSize) {

        int blockSize = BlockOverhead<Align, Padding>() + iSize;
        int maxSize = (flags & kIgnoreGrowthPolicy_Flag) ? blockSize
                                                         : MaxBlockSize<Align, Padding>();
        SkASSERT((size_t) maxSize <= (MaxBlockSize<Align, Padding>()));

        SkDEBUGCODE(auto oldTail = fTail;)
        this->addBlock(blockSize, maxSize);
        SkASSERT(fTail != oldTail);
        // Releasing the just added block will move it into scratch space, allowing the original
        // tail's bytes to be used first before the scratch block is activated.
        this->releaseBlock(fTail);
    }
}

template <size_t Align, size_t Padding>
SkBlockAllocator::ByteRange SkBlockAllocator::allocate(size_t size) {
    // Amount of extra space for a new block to make sure the allocation can succeed.
    static constexpr int kBlockOverhead = (int) BlockOverhead<Align, Padding>();

    // Ensures 'offset' and 'end' calculations will be valid
    static_assert((kMaxAllocationSize + SkAlignTo(MaxBlockSize<Align, Padding>(), Align))
                        <= (size_t) std::numeric_limits<int32_t>::max());
    // Ensures size + blockOverhead + addBlock's alignment operations will be valid
    static_assert(kMaxAllocationSize + kBlockOverhead + ((1 << 12) - 1) // 4K align for large blocks
                        <= std::numeric_limits<int32_t>::max());

    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }

    int iSize = (int) size;
    int offset = fTail->cursor<Align, Padding>();
    int end = offset + iSize;
    if (end > fTail->fSize) {
        this->addBlock(iSize + kBlockOverhead, MaxBlockSize<Align, Padding>());
        offset = fTail->cursor<Align, Padding>();
        end = offset + iSize;
    }

    // Check invariants
    SkASSERT(end <= fTail->fSize);
    SkASSERT(end - offset == iSize);
    SkASSERT(offset - fTail->fCursor >= (int) Padding &&
             offset - fTail->fCursor <= (int) (Padding + Align - 1));
    SkASSERT(reinterpret_cast<uintptr_t>(fTail->ptr(offset)) % Align == 0);

    int start = fTail->fCursor;
    fTail->fCursor = end;

    fTail->unpoisonRange(offset - Padding, end);

    return {fTail, start, offset, end};
}

template <size_t Align, size_t Padding>
SkBlockAllocator::Block* SkBlockAllocator::owningBlock(const void* p, int start) {
    // 'p' was originally formed by aligning 'block + start + Padding', producing the inequality:
    //     block + start + Padding <= p <= block + start + Padding + Align-1
    // Rearranging this yields:
    //     block <= p - start - Padding <= block + Align-1
    // Masking these terms by ~(Align-1) reconstructs 'block' if the alignment of the block is
    // greater than or equal to Align (since block & ~(Align-1) == (block + Align-1) & ~(Align-1)
    // in that case). Unfortunately, over-aligned allocations (Align greater than the block's
    // address alignment) do not reduce to this equality.
    if /* constexpr */ (Align <= kAddressAlign) {
        Block* block = reinterpret_cast<Block*>(
                (reinterpret_cast<uintptr_t>(p) - start - Padding) & ~(Align - 1));
        SkASSERT(block->fSentinel == kAssignedMarker);
        return block;
    } else {
        // There's not a constant-time expression available to reconstruct the block from 'p',
        // but this is unlikely to happen frequently.
        return this->findOwningBlock(p);
    }
}

template <size_t Align, size_t Padding>
int SkBlockAllocator::Block::alignedOffset(int offset) const {
    static_assert(SkIsPow2(Align));
    // Aligning adds (Padding + Align - 1) as an intermediate step, so ensure that can't overflow
    static_assert(MaxBlockSize<Align, Padding>() + Padding + Align - 1
                        <= (size_t) std::numeric_limits<int32_t>::max());

    if /* constexpr */ (Align <= kAddressAlign) {
        // Same as SkAlignTo, but operates on ints instead of size_t
        return (offset + Padding + Align - 1) & ~(Align - 1);
    } else {
        // Must take into account that 'this' may be starting at a pointer that doesn't satisfy the
        // larger alignment request, so must align the entire pointer, not just offset
        uintptr_t blockPtr = reinterpret_cast<uintptr_t>(this);
        uintptr_t alignedPtr = (blockPtr + offset + Padding + Align - 1) & ~(Align - 1);
        SkASSERT(alignedPtr - blockPtr <= (uintptr_t) std::numeric_limits<int32_t>::max());
        return (int) (alignedPtr - blockPtr);
    }
}

bool SkBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    if (deltaBytes > kMaxAllocationSize || deltaBytes < -kMaxAllocationSize) {
        // Cannot possibly satisfy the resize and could overflow subsequent math
        return false;
    }
    if (fCursor == end) {
        int nextCursor = end + deltaBytes;
        SkASSERT(nextCursor >= start);
        // We still check nextCursor >= start for release builds that wouldn't assert.
        if (nextCursor <= fSize && nextCursor >= start) {
            if (nextCursor < fCursor) {
                // The allocation got smaller; poison the space that can no longer be used.
                this->poisonRange(nextCursor + 1, end);
            } else {
                // The allocation got larger; unpoison the space that can now be used.
                this->unpoisonRange(end, nextCursor);
            }

            fCursor = nextCursor;
            return true;
        }
    }
    return false;
}

// NOTE: release is equivalent to resize(start, end, start - end), and the compiler can optimize
// most of the operations away, but it wasn't able to remove the unnecessary branch comparing the
// new cursor to the block size or old start, so release() gets a specialization.
bool SkBlockAllocator::Block::release(int start, int end) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    this->poisonRange(start, end);

    if (fCursor == end) {
        fCursor = start;
        return true;
    } else {
        return false;
    }
}

///////// Block iteration
template <bool Forward, bool Const>
class SkBlockAllocator::BlockIter {
private:
    using BlockT = typename std::conditional<Const, const Block, Block>::type;
    using AllocatorT =
            typename std::conditional<Const, const SkBlockAllocator, SkBlockAllocator>::type;

public:
    BlockIter(AllocatorT* allocator) : fAllocator(allocator) {}

    class Item {
    public:
        bool operator!=(const Item& other) const { return fBlock != other.fBlock; }

        BlockT* operator*() const { return fBlock; }

        Item& operator++() {
            this->advance(fNext);
            return *this;
        }

    private:
        friend BlockIter;

        Item(BlockT* block) { this->advance(block); }

        void advance(BlockT* block) {
            fBlock = block;
            fNext = block ? (Forward ? block->fNext : block->fPrev) : nullptr;
            if (!Forward && fNext && fNext->isScratch()) {
                // For reverse-iteration only, we need to stop at the head, not the scratch block
                // possibly stashed in head->prev.
                fNext = nullptr;
            }
            SkASSERT(!fNext || !fNext->isScratch());
        }

        BlockT* fBlock;
        // Cache this before operator++ so that fBlock can be released during iteration
        BlockT* fNext;
    };

    Item begin() const { return Item(Forward ? &fAllocator->fHead : fAllocator->fTail); }
    Item end() const { return Item(nullptr); }

private:
    AllocatorT* fAllocator;
};

SkBlockAllocator::BlockIter<true, false> SkBlockAllocator::blocks() {
    return BlockIter<true, false>(this);
}
SkBlockAllocator::BlockIter<true, true> SkBlockAllocator::blocks() const {
    return BlockIter<true, true>(this);
}
SkBlockAllocator::BlockIter<false, false> SkBlockAllocator::rblocks() {
    return BlockIter<false, false>(this);
}
SkBlockAllocator::BlockIter<false, true> SkBlockAllocator::rblocks() const {
    return BlockIter<false, true>(this);
}

#endif // SkBlockAllocator_DEFINED