/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBlockAllocator_DEFINED
#define GrBlockAllocator_DEFINED

#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#include "src/core/SkASAN.h"

#include <memory>  // std::unique_ptr
#include <cstddef> // max_align_t

/**
 * GrBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
 * tracks space reservations within each block. Its APIs provide the ability to reserve space,
 * resize reservations, and release reservations. It will automatically create new blocks if needed
 * and destroy all remaining blocks when it is destructed. It assumes that anything allocated within
 * its blocks has its destructor called externally. It is recommended that GrBlockAllocator is
 * wrapped by a higher-level allocator that uses the low-level APIs to implement a simpler,
 * purpose-focused API without having to worry as much about byte-level concerns.
 *
 * GrBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
 * should be sufficient for Ganesh's use cases). This upper allocation limit allows all internal
 * operations to be performed using 'int' and avoids many overflow checks. Static asserts are used
 * to ensure that those operations would not overflow when using the largest possible values.
 *
 * Possible use modes:
 * 1. No upfront allocation, either on the stack or as a field
 *    GrBlockAllocator allocator(policy, heapAllocSize);
 *
 * 2. In-place new'd
 *    void* mem = operator new(totalSize);
 *    GrBlockAllocator* allocator = new (mem) GrBlockAllocator(policy, heapAllocSize,
 *                                                             totalSize - sizeof(GrBlockAllocator));
 *    delete allocator;
 *
 * 3. Use GrSBlockAllocator to increase the preallocation size
 *    GrSBlockAllocator<1024> allocator(policy, heapAllocSize);
 *    sizeof(allocator) == 1024;
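 *
 * As a sketch of the recommended wrapping pattern (the TPool name and its API are hypothetical,
 * not part of this header), a higher-level pool for objects of type T could be built on the
 * low-level APIs like so:
 *
 *    template <typename T>
 *    class TPool {
 *    public:
 *        T* push(const T& t) {
 *            auto br = fAlloc.allocate<alignof(T)>(sizeof(T));
 *            return new (br.fBlock->ptr(br.fAlignedOffset)) T(t);
 *        }
 *        // The pool, not GrBlockAllocator, is responsible for eventually calling ~T().
 *    private:
 *        GrBlockAllocator fAlloc{GrBlockAllocator::GrowthPolicy::kFixed, 1024};
 *    };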
 */
class GrBlockAllocator final : SkNoncopyable {
public:
    // Largest size that can be requested from allocate(), chosen because it's the largest pow-2
    // that is less than int32_t::max()/2.
    static constexpr int kMaxAllocationSize = 1 << 29;

    enum class GrowthPolicy : int {
        kFixed,       // Next block size = N
        kLinear,      //            = #blocks * N
        kFibonacci,   //            = fibonacci(#blocks) * N
        kExponential, //            = 2^#blocks * N
        kLast = kExponential
    };
    static constexpr int kGrowthPolicyCount = static_cast<int>(GrowthPolicy::kLast) + 1;
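
    // For intuition, a rough sketch of successive heap block sizes under each policy, derived
    // from the fN0/fN1 update rules documented with the bitfields below. With N =
    // 'blockIncrementBytes' (aligned to max_align_t), and before any internal clamping:
    //   kFixed:       N, N, N, ...
    //   kLinear:      2N, 3N, 4N, ...
    //   kFibonacci:   N, 2N, 3N, 5N, 8N, ...
    //   kExponential: 2N, 4N, 8N, 16N, ...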

    class Block;

    // Tuple representing a range of bytes, marking the unaligned start, the first aligned point
    // after any padding, and the upper limit determined by the requested size.
    struct ByteRange {
        Block* fBlock;      // Owning block
        int fStart;         // Inclusive byte lower limit of byte range
        int fAlignedOffset; // >= start, matching alignment requirement (i.e. first real byte)
        int fEnd;           // Exclusive upper limit of byte range
    };

    class Block final {
    public:
        ~Block();
        void operator delete(void* p) { ::operator delete(p); }

        // Return the maximum allocation size with the given alignment that can fit in this block.
        template <size_t Align = 1, size_t Padding = 0>
        int avail() const { return std::max(0, fSize - this->cursor<Align, Padding>()); }

        // Return the aligned offset of the first allocation, assuming it was made with the
        // specified Align and Padding. The returned offset does not mean a valid allocation
        // starts at that offset; this is a utility function for classes built on top to manage
        // indexing into a block effectively.
        template <size_t Align = 1, size_t Padding = 0>
        int firstAlignedOffset() const { return this->alignedOffset<Align, Padding>(kDataStart); }

        // Convert an offset into this block's storage into a usable pointer.
        void* ptr(int offset) {
            SkASSERT(offset >= kDataStart && offset < fSize);
            return reinterpret_cast<char*>(this) + offset;
        }
        const void* ptr(int offset) const { return const_cast<Block*>(this)->ptr(offset); }

        // Every block has an extra 'int' for clients to use however they want. It will start
        // at 0 when a new block is made, or when the head block is reset.
        int metadata() const { return fMetadata; }
        void setMetadata(int value) { fMetadata = value; }

        /**
         * Release the byte range between offset 'start' (inclusive) and 'end' (exclusive). This
         * will return true if those bytes were successfully reclaimed, i.e. a subsequent
         * allocation request could occupy the space. Regardless of return value, the byte range
         * [start, end) should not be used until it's re-allocated with allocate<...>().
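         *
         * Ex. (a sketch) Releasing the most recent allocation, where 'br' is the ByteRange that
         * GrBlockAllocator::allocate<>() previously returned:
         *     br.fBlock->release(br.fStart, br.fEnd);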
         */
        inline bool release(int start, int end);

        /**
         * Resize a previously reserved byte range of offset 'start' (inclusive) to 'end'
         * (exclusive). 'deltaBytes' is the SIGNED change to the length of the reservation.
         *
         * When negative this means the reservation is shrunk and the new length is (end - start -
         * |deltaBytes|). If this new length would be 0, the byte range can no longer be used (as
         * if it were released instead). Asserts that it would not shrink the reservation below 0.
         *
         * If 'deltaBytes' is positive, the allocator attempts to increase the length of the
         * reservation. The reservation can be resized if 'deltaBytes' is less than or equal to
         * avail() and the byte range was the last allocation in the block. If there are not
         * enough available bytes to accommodate the increase in size, or another allocation is
         * blocking the increase in size, then false will be returned and the reserved byte range
         * is unmodified.
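         *
         * Ex. (a sketch) Growing the most recent allocation by 8 bytes, where 'br' is the
         * ByteRange that GrBlockAllocator::allocate<>() previously returned:
         *     if (br.fBlock->resize(br.fStart, br.fEnd, 8)) {
         *         br.fEnd += 8; // track the enlarged reservation
         *     }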
         */
        inline bool resize(int start, int end, int deltaBytes);

    private:
        friend class GrBlockAllocator;

        Block(Block* prev, int allocationSize);

        // We poison the unallocated space in a Block to allow ASAN to catch invalid writes.
        void poisonRange(int start, int end) {
            sk_asan_poison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }
        void unpoisonRange(int start, int end) {
            sk_asan_unpoison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }

        // Get fCursor, but aligned such that ptr(rval) satisfies Align.
        template <size_t Align, size_t Padding>
        int cursor() const { return this->alignedOffset<Align, Padding>(fCursor); }

        template <size_t Align, size_t Padding>
        int alignedOffset(int offset) const;

        bool isScratch() const { return fCursor < 0; }
        void markAsScratch() {
            fCursor = -1;
            this->poisonRange(kDataStart, fSize);
        }

        SkDEBUGCODE(int fSentinel;) // known value to check for bad back pointers to blocks

        Block* fNext; // doubly-linked list of blocks
        Block* fPrev;

        // Each block tracks its own cursor because as later blocks are released, an older block
        // may become the active tail again.
        int fSize;     // includes the size of the BlockHeader and requested metadata
        int fCursor;   // (this + fCursor) points to next available allocation
        int fMetadata;

        // On release builds, a Block's other 2 pointers and 3 int fields leave 4 bytes of padding
        // on 8- and 16-aligned systems. Currently this is only manipulated in the head block for
        // allocator-level metadata, and it is explicitly not reset when the head block is
        // "released". Down the road we could instead choose to offer multiple metadata slots per
        // block.
        int fAllocatorMetadata;
    };

    // The size of the head block is determined by 'additionalPreallocBytes'. Subsequent heap
    // blocks are determined by 'policy' and 'blockIncrementBytes', although 'blockIncrementBytes'
    // will be aligned to std::max_align_t.
    //
    // When 'additionalPreallocBytes' > 0, the allocator assumes that many extra bytes immediately
    // after the allocator can be used by its inline head block. This is useful when the allocator
    // is in-place new'ed into a larger block of memory, but it should remain set to 0 if stack
    // allocated or if the class layout does not guarantee that space is present.
    GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                     size_t additionalPreallocBytes = 0);

    ~GrBlockAllocator() { this->reset(); }
    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper to calculate the minimum number of bytes needed for a heap block size, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To store N instances of T in a heap block, 'blockIncrementBytes' should be set to
     * BlockOverhead<alignof(T)>() + N * sizeof(T) when making the GrBlockAllocator.
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t BlockOverhead();

    /**
     * Helper to calculate the minimum number of bytes needed for a preallocation, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To preallocate a GrSBlockAllocator to hold N instances of T, its size argument should
     * be Overhead<alignof(T)>() + N * sizeof(T)
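     *
     * Ex. (a sketch) Preallocating storage for 10 instances of a hypothetical type T:
     *     GrSBlockAllocator<GrBlockAllocator::Overhead<alignof(T)>() + 10 * sizeof(T)> pool;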
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t Overhead();

    /**
     * Return the total number of bytes of the allocator, including its instance overhead,
     * per-block overhead and space used for allocations.
     */
    size_t totalSize() const;
    /**
     * Return the total number of bytes usable for allocations. This includes bytes that have
     * been reserved already by a call to allocate() and bytes that are still available. It is
     * totalSize() minus all allocator and block-level overhead.
     */
    size_t totalUsableSpace() const;
    /**
     * Return the total number of usable bytes that have been reserved by allocations. This will
     * be less than or equal to totalUsableSpace().
     */
    size_t totalSpaceInUse() const;

    /**
     * Return the total number of bytes that were pre-allocated for the GrBlockAllocator. This
     * will include 'additionalPreallocBytes' passed to the constructor, and represents what the
     * total size would become after a call to reset().
     */
    size_t preallocSize() const {
        // Don't double count fHead's Block overhead in both sizeof(GrBlockAllocator) and fSize.
        return sizeof(GrBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
    }
    /**
     * Return the usable size of the inline head block; this will be equal to
     * 'additionalPreallocBytes' plus any alignment padding that the system had to add to Block.
     * The returned value represents what could be allocated before a heap block must be created.
     */
    size_t preallocUsableSpace() const {
        return fHead.fSize - kDataStart;
    }

    /**
     * Get the current value of the allocator-level metadata (a user-oriented slot). This is
     * separate from any block-level metadata, but can serve a similar purpose to compactly
     * support data collections on top of GrBlockAllocator.
     */
    int metadata() const { return fHead.fAllocatorMetadata; }

    /**
     * Set the current value of the allocator-level metadata.
     */
    void setMetadata(int value) { fHead.fAllocatorMetadata = value; }

    /**
     * Reserve space that will hold 'size' bytes. This will automatically allocate a new block if
     * there is not enough available space in the current block to provide 'size' bytes. The
     * returned ByteRange tuple specifies the Block owning the reserved memory, the full byte
     * range, and the aligned offset within that range to use for the user-facing pointer. The
     * following invariants hold:
     *
     *    1. block->ptr(alignedOffset) is aligned to Align
     *    2. end - alignedOffset == size
     *    3. Padding <= alignedOffset - start <= Padding + Align - 1
     *
     * Invariant #3, when Padding > 0, allows intermediate allocators to embed metadata along
     * with the allocations. If the Padding bytes are used for some 'struct Meta', then
     * ptr(alignedOffset - sizeof(Meta)) can be safely used as a Meta* if Meta's alignment
     * requirements are less than or equal to the alignment specified in allocate<>. This can be
     * easily guaranteed by using the pattern:
     *
     *     allocate<max(UserAlign, alignof(Meta)), sizeof(Meta)>(userSize);
     *
     * This ensures that ptr(alignedOffset) will always satisfy UserAlign and
     * ptr(alignedOffset - sizeof(Meta)) will always satisfy alignof(Meta). Alternatively, memcpy
     * can be used to read and write values between start and alignedOffset without worrying
     * about alignment requirements of the metadata.
     *
     * For over-aligned allocations, the alignedOffset (as an int) may not be a multiple of
     * Align, but the result of ptr(alignedOffset) will be a multiple of Align.
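     *
     * Ex. (a sketch) Reserving space for a hypothetical type T and constructing it in place,
     * where 'allocator' is any GrBlockAllocator*; per the class overview, calling ~T() later
     * remains the caller's responsibility:
     *     auto br = allocator->allocate<alignof(T)>(sizeof(T));
     *     T* t = new (br.fBlock->ptr(br.fAlignedOffset)) T();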
     */
    template <size_t Align, size_t Padding = 0>
    ByteRange allocate(size_t size);

    enum ReserveFlags : unsigned {
        // If provided to reserve(), the input 'size' will not be rounded up to the next size
        // determined by the growth policy of the GrBlockAllocator; it will only be aligned to
        // max_align. (This matches reserve()'s implementation below, which caps the new block's
        // size at exactly the request when this flag is set.)
        kIgnoreGrowthPolicy_Flag  = 0b01,
        // If provided to reserve(), the number of available bytes of the current block will not
        // be used to satisfy the reservation (assuming the contiguous range was long enough to
        // begin with).
        kIgnoreExistingBytes_Flag = 0b10,

        kNo_ReserveFlags          = 0b00
    };

    /**
     * Ensure the block allocator has 'size' contiguous available bytes. After calling this
     * function, currentBlock()->avail<Align, Padding>() may still report less than 'size' if the
     * reserved space was added as a scratch block. This is done so that anything remaining in
     * the current block can still be used if a smaller-than-size allocation is requested. If
     * 'size' is requested by a subsequent allocation, the scratch block will automatically be
     * activated and the request will not itself trigger any malloc.
     *
     * The optional 'flags' controls how the input size is allocated; by default it will attempt
     * to use available contiguous bytes in the current block and will respect the growth policy
     * of the allocator.
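     *
     * Ex. (a sketch) Guaranteeing contiguous room for 10 more ints before a loop of small
     * allocate<alignof(int)>(sizeof(int)) calls:
     *     allocator->reserve<alignof(int)>(10 * sizeof(int));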
     */
    template <size_t Align = 1, size_t Padding = 0>
    void reserve(size_t size, ReserveFlags flags = kNo_ReserveFlags);

    /**
     * Return a pointer to the start of the current block. This will never be null.
     */
    const Block* currentBlock() const { return fTail; }
    Block* currentBlock() { return fTail; }

    const Block* headBlock() const { return &fHead; }
    Block* headBlock() { return &fHead; }

    /**
     * Return the block that owns the allocated 'ptr'. Assuming that an allocation was earlier
     * returned as {b, start, alignedOffset, end} and 'p = b->ptr(alignedOffset)', then
     * 'owningBlock<Align, Padding>(p, start)' will return b.
     *
     * If calling code has already made a pointer to their metadata, i.e. 'm = p - Padding', then
     * 'owningBlock<Align, 0>(m, start)' will also return b, allowing you to recover the block
     * from the metadata pointer.
     *
     * If calling code has access to the original alignedOffset, this function should not be used
     * since the owning block is just 'p - alignedOffset', regardless of the original Align or
     * Padding.
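     *
     * Ex. (a sketch) Recovering the owning block to read its block-level metadata, using the
     * 'p' and 'start' values described above:
     *     Block* b = allocator->owningBlock<Align, Padding>(p, start);
     *     int tag = b->metadata();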
     */
    template <size_t Align, size_t Padding = 0>
    Block* owningBlock(const void* ptr, int start);

    template <size_t Align, size_t Padding = 0>
    const Block* owningBlock(const void* ptr, int start) const {
        return const_cast<GrBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
    }

    /**
     * Find the owning block of the allocated pointer, 'p'. Without any additional information
     * this is O(N) on the number of allocated blocks.
     */
    Block* findOwningBlock(const void* ptr);
    const Block* findOwningBlock(const void* ptr) const {
        return const_cast<GrBlockAllocator*>(this)->findOwningBlock(ptr);
    }

    /**
     * Explicitly free an entire block, invalidating any remaining allocations from the block.
     * GrBlockAllocator will release all alive blocks automatically when it is destroyed, but
     * this function can be used to reclaim memory over the lifetime of the allocator. The
     * provided 'block' pointer must have previously come from a call to currentBlock() or
     * allocate().
     *
     * If 'block' represents the inline-allocated head block, its cursor and metadata are instead
     * reset to their defaults.
     *
     * If the block is not the head block, it may be kept as a scratch block to be reused for
     * subsequent allocation requests, instead of making an entirely new block. A scratch block
     * is not visible when iterating over blocks but is reported in the total size of the
     * allocator.
     */
    void releaseBlock(Block* block);

    /**
     * Detach every heap-allocated block owned by 'other' and concatenate them to this
     * allocator's list of blocks. This memory is now managed by this allocator. Since this only
     * transfers ownership of a Block, and a Block itself does not move, any previous allocations
     * remain valid and associated with their original Block instances. GrBlockAllocator-level
     * functions that accept allocated pointers (e.g. findOwningBlock) must now use this
     * allocator and not 'other' for these allocations.
     *
     * The head block of 'other' cannot be stolen, so higher-level allocators and memory
     * structures must handle that data differently.
     */
    void stealHeapBlocks(GrBlockAllocator* other);

    /**
     * Explicitly free all blocks (invalidating all allocations), and reset the head block to its
     * default state. The allocator-level metadata is reset to 0 as well.
     */
    void reset();

    /**
     * Remove any reserved scratch space, either from calling reserve() or releaseBlock().
     */
    void resetScratchSpace();

    template <bool Forward, bool Const> class BlockIter;

    /**
     * Clients can iterate over all active Blocks in the GrBlockAllocator using for loops:
     *
     * Forward iteration from head to tail block (or non-const variant):
     *   for (const Block* b : this->blocks()) { }
     * Reverse iteration from tail to head block:
     *   for (const Block* b : this->rblocks()) { }
     *
     * It is safe to call releaseBlock() on the active block while looping.
     */
    inline BlockIter<true, false> blocks();
    inline BlockIter<true, true> blocks() const;
    inline BlockIter<false, false> rblocks();
    inline BlockIter<false, true> rblocks() const;

#ifdef SK_DEBUG
    static constexpr int kAssignedMarker = 0xBEEFFACE;
    static constexpr int kFreedMarker    = 0xCAFEBABE;

    void validate() const;
#endif

#if GR_TEST_UTILS
    int testingOnly_scratchBlockSize() const { return this->scratchBlockSize(); }
#endif

private:
    static constexpr int kDataStart = sizeof(Block);
#ifdef SK_FORCE_8_BYTE_ALIGNMENT
    // This is an issue for WASM builds using emscripten, which had std::max_align_t = 16, but
    // was returning pointers only aligned to 8 bytes.
    // https://github.com/emscripten-core/emscripten/issues/10072
    //
    // Setting this to 8 will let GrBlockAllocator properly correct for the pointer address if
    // a 16-byte aligned allocation is requested in wasm (unlikely since we don't use long
    // doubles).
    static constexpr size_t kAddressAlign = 8;
#else
    // The alignment Block addresses will be at when created using operator new (spec-compliant
    // 'new' returns pointers aligned to std::max_align_t).
    static constexpr size_t kAddressAlign = alignof(std::max_align_t);
#endif

    // Calculates the size of a new Block required to store a kMaxAllocationSize request for the
    // given alignment and padding bytes. Also represents the maximum valid fCursor value in a
    // Block.
    template<size_t Align, size_t Padding>
    static constexpr size_t MaxBlockSize();

    static constexpr int BaseHeadBlockSize() {
        return sizeof(GrBlockAllocator) - offsetof(GrBlockAllocator, fHead);
    }

    // Append a new block to the end of the block linked list, updating fTail. 'minSize' must
    // have enough room for sizeof(Block). 'maxSize' is the upper limit of fSize for the new
    // block that will preserve the static guarantees GrBlockAllocator makes.
    void addBlock(int minSize, int maxSize);

    int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }

    Block* fTail; // All non-head blocks are heap allocated; tail will never be null.

    // All remaining state is packed into 64 bits to keep GrBlockAllocator at 16 bytes + head
    // block (on a 64-bit system).

    // Growth of the block size is controlled by four factors: BlockIncrement, N0 and N1, and a
    // policy defining how N0 is updated. When a new block is needed, we calculate N1' = N0 + N1.
    // Depending on the policy, N0' = N0 (no growth or linear growth), or N0' = N1 (Fibonacci),
    // or N0' = N1' (exponential). The size of the new block is N1' * BlockIncrement * MaxAlign,
    // after which fN0 and fN1 store N0' and N1' clamped into 23 bits. With current bit
    // allocations, N1' is limited to just under 2^23, and assuming MaxAlign=16, BlockIncrement
    // must then be at least '4' in order to eventually reach the hard 2^29 size limit of
    // GrBlockAllocator.
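    //
    // As a worked example (a sketch; actual block sizes are also aligned and clamped in
    // addBlock()): under kExponential with BlockIncrement=4 and MaxAlign=16, N0=N1=1 initially,
    // so successive N1' values are 2, 4, 8, ... and successive heap blocks are 2*4*16=128,
    // 4*4*16=256, 512, ... bytes.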

    // Next heap block size = (fBlockIncrement * alignof(std::max_align_t) * (fN0 + fN1))
    uint64_t fBlockIncrement : 16;
    uint64_t fGrowthPolicy   : 2;  // GrowthPolicy
    uint64_t fN0             : 23; // = 1 for linear/exp.; = 0 for fixed/fibonacci, initially
    uint64_t fN1             : 23; // = 1 initially

    // Inline head block, must be at the end so that it can utilize any additional reserved
    // space from the initial allocation.
    // The head block's prev pointer may be non-null, which signifies a scratch block that may
    // be reused instead of allocating an entirely new block (this helps when allocate+release
    // calls bounce back and forth across the capacity of a block).
    alignas(kAddressAlign) Block fHead;

    static_assert(kGrowthPolicyCount <= 4);
};

// A wrapper around GrBlockAllocator that includes preallocated storage for the head block.
// N will be the preallocSize() reported by the allocator.
template<size_t N>
class GrSBlockAllocator : SkNoncopyable {
public:
    using GrowthPolicy = GrBlockAllocator::GrowthPolicy;

    GrSBlockAllocator() {
        new (fStorage) GrBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(GrBlockAllocator));
    }
    explicit GrSBlockAllocator(GrowthPolicy policy) {
        new (fStorage) GrBlockAllocator(policy, N, N - sizeof(GrBlockAllocator));
    }

    GrSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
        new (fStorage) GrBlockAllocator(policy, blockIncrementBytes,
                                        N - sizeof(GrBlockAllocator));
    }

    ~GrSBlockAllocator() {
        this->allocator()->~GrBlockAllocator();
    }

    GrBlockAllocator* operator->() { return this->allocator(); }
    const GrBlockAllocator* operator->() const { return this->allocator(); }

    GrBlockAllocator* allocator() { return reinterpret_cast<GrBlockAllocator*>(fStorage); }
    const GrBlockAllocator* allocator() const {
        return reinterpret_cast<const GrBlockAllocator*>(fStorage);
    }

private:
    static_assert(N >= sizeof(GrBlockAllocator));

    // Will be used to placement new the allocator
    alignas(GrBlockAllocator) char fStorage[N];
};
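
// Ex. (a sketch) A stack-based allocator with 256 bytes of preallocated storage, from which
// 16 bytes are then requested at 4-byte alignment:
//     GrSBlockAllocator<256> allocator;
//     auto br = allocator->allocate<4>(16);
//     void* p = br.fBlock->ptr(br.fAlignedOffset);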

///////////////////////////////////////////////////////////////////////////////////////////////////
// Template and inline implementations

GR_MAKE_BITFIELD_OPS(GrBlockAllocator::ReserveFlags)

template<size_t Align, size_t Padding>
constexpr size_t GrBlockAllocator::BlockOverhead() {
    static_assert(GrAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
    return GrAlignTo(kDataStart + Padding, Align);
}

template<size_t Align, size_t Padding>
constexpr size_t GrBlockAllocator::Overhead() {
    // NOTE: On most platforms, GrBlockAllocator is packed; this is not the case on debug builds
    // due to extra fields, or on WASM due to 4-byte pointers but 16-byte max alignment.
    return std::max(sizeof(GrBlockAllocator),
                    offsetof(GrBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
}

template<size_t Align, size_t Padding>
constexpr size_t GrBlockAllocator::MaxBlockSize() {
    // Without loss of generality, this assumes 'Align' will be the largest encountered alignment
    // for the allocator (if it's not, the largest alignment will be encountered by the compiler
    // and pass/fail the same set of static asserts).
    return BlockOverhead<Align, Padding>() + kMaxAllocationSize;
}

template<size_t Align, size_t Padding>
void GrBlockAllocator::reserve(size_t size, ReserveFlags flags) {
    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }
    int iSize = (int) size;
    if ((flags & kIgnoreExistingBytes_Flag) ||
        this->currentBlock()->avail<Align, Padding>() < iSize) {

        int blockSize = BlockOverhead<Align, Padding>() + iSize;
        int maxSize = (flags & kIgnoreGrowthPolicy_Flag) ? blockSize
                                                         : MaxBlockSize<Align, Padding>();
        SkASSERT((size_t) maxSize <= (MaxBlockSize<Align, Padding>()));

        SkDEBUGCODE(auto oldTail = fTail;)
        this->addBlock(blockSize, maxSize);
        SkASSERT(fTail != oldTail);
        // Releasing the just-added block will move it into scratch space, allowing the original
        // tail's bytes to be used first before the scratch block is activated.
        this->releaseBlock(fTail);
    }
}

template <size_t Align, size_t Padding>
GrBlockAllocator::ByteRange GrBlockAllocator::allocate(size_t size) {
    // Amount of extra space for a new block to make sure the allocation can succeed.
    static constexpr int kBlockOverhead = (int) BlockOverhead<Align, Padding>();

    // Ensures 'offset' and 'end' calculations will be valid
    static_assert((kMaxAllocationSize + GrAlignTo(MaxBlockSize<Align, Padding>(), Align))
                        <= (size_t) std::numeric_limits<int32_t>::max());
    // Ensures size + blockOverhead + addBlock's alignment operations will be valid
    static_assert(kMaxAllocationSize + kBlockOverhead + ((1 << 12) - 1) // 4K align for large blocks
                        <= std::numeric_limits<int32_t>::max());

    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }

    int iSize = (int) size;
    int offset = fTail->cursor<Align, Padding>();
    int end = offset + iSize;
    if (end > fTail->fSize) {
        this->addBlock(iSize + kBlockOverhead, MaxBlockSize<Align, Padding>());
        offset = fTail->cursor<Align, Padding>();
        end = offset + iSize;
    }

    // Check invariants
    SkASSERT(end <= fTail->fSize);
    SkASSERT(end - offset == iSize);
    SkASSERT(offset - fTail->fCursor >= (int) Padding &&
             offset - fTail->fCursor <= (int) (Padding + Align - 1));
    SkASSERT(reinterpret_cast<uintptr_t>(fTail->ptr(offset)) % Align == 0);

    int start = fTail->fCursor;
    fTail->fCursor = end;

    fTail->unpoisonRange(offset - Padding, end);

    return {fTail, start, offset, end};
}

template <size_t Align, size_t Padding>
GrBlockAllocator::Block* GrBlockAllocator::owningBlock(const void* p, int start) {
    // 'p' was originally formed by aligning 'block + start + Padding', producing the inequality:
    //     block + start + Padding <= p <= block + start + Padding + Align-1
    // Rearranging this yields:
    //     block <= p - start - Padding <= block + Align-1
    // Masking these terms by ~(Align-1) reconstructs 'block' if the alignment of the block is
    // greater than or equal to Align (since block & ~(Align-1) == (block + Align-1) & ~(Align-1)
    // in that case). Over-alignment does not reduce to an equality, unfortunately.
    if /* constexpr */ (Align <= kAddressAlign) {
        Block* block = reinterpret_cast<Block*>(
                (reinterpret_cast<uintptr_t>(p) - start - Padding) & ~(Align - 1));
        SkASSERT(block->fSentinel == kAssignedMarker);
        return block;
    } else {
        // There's not a constant-time expression available to reconstruct the block from 'p',
        // but this is unlikely to happen frequently.
        return this->findOwningBlock(p);
    }
}

template <size_t Align, size_t Padding>
int GrBlockAllocator::Block::alignedOffset(int offset) const {
    static_assert(SkIsPow2(Align));
    // Aligning adds (Padding + Align - 1) as an intermediate step, so ensure that can't overflow
    static_assert(MaxBlockSize<Align, Padding>() + Padding + Align - 1
                        <= (size_t) std::numeric_limits<int32_t>::max());

    if /* constexpr */ (Align <= kAddressAlign) {
        // Same as GrAlignTo, but operates on ints instead of size_t
        return (offset + Padding + Align - 1) & ~(Align - 1);
    } else {
        // Must take into account that 'this' may be starting at a pointer that doesn't satisfy
        // the larger alignment request, so must align the entire pointer, not just the offset
        uintptr_t blockPtr = reinterpret_cast<uintptr_t>(this);
        uintptr_t alignedPtr = (blockPtr + offset + Padding + Align - 1) & ~(Align - 1);
        SkASSERT(alignedPtr - blockPtr <= (uintptr_t) std::numeric_limits<int32_t>::max());
        return (int) (alignedPtr - blockPtr);
    }
}

bool GrBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    if (deltaBytes > kMaxAllocationSize || deltaBytes < -kMaxAllocationSize) {
        // Cannot possibly satisfy the resize and could overflow subsequent math
        return false;
    }
    if (fCursor == end) {
        int nextCursor = end + deltaBytes;
        SkASSERT(nextCursor >= start);
        // We still check nextCursor >= start for release builds that wouldn't assert.
        if (nextCursor <= fSize && nextCursor >= start) {
            if (nextCursor < fCursor) {
                // The allocation got smaller; poison the space that can no longer be used.
                this->poisonRange(nextCursor + 1, end);
            } else {
                // The allocation got larger; unpoison the space that can now be used.
                this->unpoisonRange(end, nextCursor);
            }

            fCursor = nextCursor;
            return true;
        }
    }
    return false;
}

// NOTE: release is equivalent to resize(start, end, start - end), and the compiler can optimize
// most of the operations away, but it wasn't able to remove the unnecessary branch comparing the
// new cursor to the block size or old start, so release() gets a specialization.
bool GrBlockAllocator::Block::release(int start, int end) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    this->poisonRange(start, end);

    if (fCursor == end) {
        fCursor = start;
        return true;
    } else {
        return false;
    }
}

///////// Block iteration
template <bool Forward, bool Const>
class GrBlockAllocator::BlockIter {
private:
    using BlockT = typename std::conditional<Const, const Block, Block>::type;
    using AllocatorT =
            typename std::conditional<Const, const GrBlockAllocator, GrBlockAllocator>::type;

public:
    BlockIter(AllocatorT* allocator) : fAllocator(allocator) {}

    class Item {
    public:
        bool operator!=(const Item& other) const { return fBlock != other.fBlock; }

        BlockT* operator*() const { return fBlock; }

        Item& operator++() {
            this->advance(fNext);
            return *this;
        }

    private:
        friend BlockIter;

        Item(BlockT* block) { this->advance(block); }

        void advance(BlockT* block) {
            fBlock = block;
            fNext = block ? (Forward ? block->fNext : block->fPrev) : nullptr;
            if (!Forward && fNext && fNext->isScratch()) {
                // For reverse iteration only, we need to stop at the head, not the scratch block
                // possibly stashed in head->prev.
                fNext = nullptr;
            }
            SkASSERT(!fNext || !fNext->isScratch());
        }

        BlockT* fBlock;
        // Cache this before operator++ so that fBlock can be released during iteration
        BlockT* fNext;
    };

    Item begin() const { return Item(Forward ? &fAllocator->fHead : fAllocator->fTail); }
    Item end() const { return Item(nullptr); }

private:
    AllocatorT* fAllocator;
};

GrBlockAllocator::BlockIter<true, false> GrBlockAllocator::blocks() {
    return BlockIter<true, false>(this);
}
GrBlockAllocator::BlockIter<true, true> GrBlockAllocator::blocks() const {
    return BlockIter<true, true>(this);
}
GrBlockAllocator::BlockIter<false, false> GrBlockAllocator::rblocks() {
    return BlockIter<false, false>(this);
}
GrBlockAllocator::BlockIter<false, true> GrBlockAllocator::rblocks() const {
    return BlockIter<false, true>(this);
}

#endif // GrBlockAllocator_DEFINED