/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"
#include "tests/Test.h"

#include <cstring>

using Block = GrBlockAllocator::Block;
using GrowthPolicy = GrBlockAllocator::GrowthPolicy;

// Helper functions for modifying the allocator in a controlled manner
template<size_t N>
static int block_count(const GrSBlockAllocator<N>& pool) {
    int ct = 0;
    for (const Block* b : pool->blocks()) {
        (void) b;
        ct++;
    }
    return ct;
}

template<size_t N>
static Block* get_block(GrSBlockAllocator<N>& pool, int blockIndex) {
    Block* found = nullptr;
    int i = 0;
    for (Block* b: pool->blocks()) {
        if (i == blockIndex) {
            found = b;
            break;
        }
        i++;
    }

    SkASSERT(found != nullptr);
    return found;
}

// GrBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about
// totalSize() minus the scratch block's size in many of these tests.
template<size_t N>
static size_t total_size(GrSBlockAllocator<N>& pool) {
    return pool->totalSize() - pool->testingOnly_scratchBlockSize();
}

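// Forces the allocator to add a new block by allocating until currentBlock() changes, and
// returns how much the pool's size (excluding scratch) grew as a result.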
template<size_t N>
static size_t add_block(GrSBlockAllocator<N>& pool) {
    size_t currentSize = total_size(pool);
    GrBlockAllocator::Block* current = pool->currentBlock();
    while(pool->currentBlock() == current) {
        pool->template allocate<4>(pool->preallocSize() / 2);
    }
    return total_size(pool) - currentSize;
}

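// Allocates a single byte from the pool and returns its address.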
template<size_t N>
static void* alloc_byte(GrSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}

DEF_TEST(GrBlockAllocatorPreallocSize, r) {
    // Tests stack/member initialization, option #1 described in doc
    GrBlockAllocator stack{GrowthPolicy::kFixed, 2048};
    SkDEBUGCODE(stack.validate();)

    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(GrBlockAllocator));
    REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());

    // Tests placement new initialization to increase head block size, option #2
    void* mem = operator new(1024);
    GrBlockAllocator* placement = new (mem) GrBlockAllocator(GrowthPolicy::kLinear, 1024,
                                                             1024 - sizeof(GrBlockAllocator));
    REPORTER_ASSERT(r, placement->preallocSize() == 1024);
    REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
                       placement->preallocUsableSpace() >= (1024 - sizeof(GrBlockAllocator)));
    delete placement;

    // Tests inline increased preallocation, option #3
    GrSBlockAllocator<2048> inlined{};
    SkDEBUGCODE(inlined->validate();)
    REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
    REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
                       inlined->preallocUsableSpace() >= (2048 - sizeof(GrBlockAllocator)));
}

DEF_TEST(GrBlockAllocatorAlloc, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Assumes the previous pointer was in the same block
    auto validate_ptr = [&](int align, int size,
                            GrBlockAllocator::ByteRange br,
                            GrBlockAllocator::ByteRange* prevBR) {
        uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
        // Matches the requested align
        REPORTER_ASSERT(r, pt % align == 0);
        // And large enough
        REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
        // And has enough padding for alignment
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
        // And block of the returned struct is the current block of the allocator
        REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);

        // And make sure that we're past the required end of the previous allocation
        if (prevBR) {
            uintptr_t prevEnd =
                    reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
            REPORTER_ASSERT(r, pt > prevEnd);
        }

        // And make sure that the entire byte range is safe to write into (excluding the dead space
        // between "start" and "aligned offset," which is just padding and is left poisoned)
        std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
    };

    auto p1 = pool->allocate<1>(14);
    validate_ptr(1, 14, p1, nullptr);

    auto p2 = pool->allocate<2>(24);
    validate_ptr(2, 24, p2, &p1);

    auto p4 = pool->allocate<4>(28);
    validate_ptr(4, 28, p4, &p2);

    auto p8 = pool->allocate<8>(40);
    validate_ptr(8, 40, p8, &p4);

    auto p16 = pool->allocate<16>(64);
    validate_ptr(16, 64, p16, &p8);

    auto p32 = pool->allocate<32>(96);
    validate_ptr(32, 96, p32, &p16);

    // All of these allocations should be in the head block
    REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
    SkDEBUGCODE(pool->validate();)

    // Requesting an allocation of avail() should not make a new block
    size_t avail = pool->currentBlock()->avail<4>();
    auto pAvail = pool->allocate<4>(avail);
    validate_ptr(4, avail, pAvail, &p32);

    // Remaining should be less than the alignment that was requested, and then
    // the next allocation will make a new block
    REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
    auto pNextBlock = pool->allocate<4>(4);
    validate_ptr(4, 4, pNextBlock, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());

    // Allocating more than avail() makes another block
    size_t currentSize = total_size(pool);
    size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
    auto pTooBig = pool->allocate<4>(bigRequest);
    validate_ptr(4, bigRequest, pTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > currentSize);

    // Allocating more than the default growth policy (1024 in this case) will fulfill the request
    REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
    currentSize = total_size(pool);
    auto pReallyTooBig = pool->allocate<4>(4096);
    validate_ptr(4, 4096, pReallyTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorResize, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Fixed resize from 16 to 32
    GrBlockAllocator::ByteRange p = pool->allocate<4>(16);
    REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
    p.fEnd += 16;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);

    // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
    auto pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));

    // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to avail
    REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
    int fillBlock = p.fBlock->avail<4>();
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
    p.fEnd += fillBlock;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);

    // Confirm that resizing when there's not enough room fails
    REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));

    // Confirm that we can shrink 'p' back to 32 bytes and then further allocate again
    int shrinkTo32 = p.fStart - p.fEnd + 32;
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
    p.fEnd += shrinkTo32;
    REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);

    pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    SkDEBUGCODE(pool->validate();)

    // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
    int shrinkTo0 = pNext.fStart - pNext.fEnd;
#ifndef SK_DEBUG
    // Only test for false on release builds; a negative size should assert on debug builds
    REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
#endif
    REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
}

DEF_TEST(GrBlockAllocatorRelease, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Successful allocate and release
    auto p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
    // Ensure the above release actually means the next allocation reuses the same space
    auto p2 = pool->allocate<8>(32);
    REPORTER_ASSERT(r, p.fStart == p2.fStart);

    // Confirm that 'p2' cannot be released if another allocation came after it
    auto p3 = pool->allocate<8>(64);
    (void) p3;
    REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));

    // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
    // can be released simultaneously (equivalent to 'p3' then 'p2').
    auto p4 = pool->allocate<8>(16);
    auto p5 = pool->allocate<8>(96);
    REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
    REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
    REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));

    // And confirm that passing in the wrong size for the allocation fails
    p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
    REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorRewind, r) {
    // Confirm that a bunch of allocations and then releases in stack order fully goes back to the
    // start of the block (i.e. unwinds the entire stack, and not just the last cursor position)
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    std::vector<GrBlockAllocator::ByteRange> ptrs;
    for (int i = 0; i < 32; ++i) {
        ptrs.push_back(pool->allocate<4>(16));
    }

    // Release everything in reverse order
    SkDEBUGCODE(pool->validate();)
    for (int i = 31; i >= 0; --i) {
        auto br = ptrs[i];
        REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
    }

    // If correct, we've rewound all the way back to the start of the block, so a new allocation
    // will have the same location as ptrs[0]
    SkDEBUGCODE(pool->validate();)
    REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
}

DEF_TEST(GrBlockAllocatorGrowthPolicy, r) {
    static constexpr int kInitSize = 128;
    static constexpr int kBlockCount = 5;
    static constexpr size_t kExpectedSizes[GrBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
        // kFixed -> kInitSize per block
        { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
        // kLinear -> (block ct + 1) * kInitSize for next block
        { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
        // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
        { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
        // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
        { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
    };

    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
        GrSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
        for (int i = 1; i < kBlockCount; ++i) {
            REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
        }

        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(GrBlockAllocatorReset, r) {
    static constexpr int kBlockIncrement = 1024;

    GrSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
    SkDEBUGCODE(pool->validate();)

    void* firstAlloc = alloc_byte(pool);

    // Add several blocks
    add_block(pool);
    add_block(pool);
    add_block(pool);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head

    get_block(pool, 0)->setMetadata(2);

    // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
    // and new blocks are sized based on a reset growth policy.
    pool->reset();
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 1);
    REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
    REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);

    REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
    REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
    REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(GrBlockAllocatorReleaseBlock, r) {
    // This loops over all growth policies to make sure that the incremental releases update the
    // sequence correctly for each policy.
    for (int gp = 0; gp < GrBlockAllocator::kGrowthPolicyCount; ++gp) {
        GrSBlockAllocator<1024> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        void* firstAlloc = alloc_byte(pool);

        size_t b1Size = total_size(pool);
        size_t b2Size = add_block(pool);
        size_t b3Size = add_block(pool);
        size_t b4Size = add_block(pool);
        SkDEBUGCODE(pool->validate();)

        get_block(pool, 0)->setMetadata(1);
        get_block(pool, 1)->setMetadata(2);
        get_block(pool, 2)->setMetadata(3);
        get_block(pool, 3)->setMetadata(4);

        // Remove the 3 added blocks, but always remove block i = 1 to test intermediate removal
        // (and on the last iteration, tail removal).
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 2);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, total_size(pool) == b1Size);

        // Since we're back to just the head block, if we add a new block, the growth policy should
        // match the original sequence instead of continuing with 'b5Size'
        pool->resetScratchSpace();
        size_t size = add_block(pool);
        REPORTER_ASSERT(r, size == b2Size);
        pool->releaseBlock(get_block(pool, 1));

        // Explicitly release the head block and confirm it's reset
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
        REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too

        // Confirm that if we have > 1 block, but release the head block we can still access the
        // others
        add_block(pool);
        add_block(pool);
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(GrBlockAllocatorIterateAndRelease, r) {
    GrSBlockAllocator<256> pool;

    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop forward and release the blocks
    int releaseCount = 0;
    for (auto* b : pool->blocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // pool should have just the head block, but was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);

    // Add more blocks
    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop in reverse and release the blocks
    releaseCount = 0;
    for (auto* b : pool->rblocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // pool should have just the head block, but was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);
}

DEF_TEST(GrBlockAllocatorScratchBlockReserve, r) {
    GrSBlockAllocator<256> pool;

    size_t added = add_block(pool);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
    size_t total = pool->totalSize();
    pool->releaseBlock(pool->currentBlock());

    // Total size shouldn't have changed, the released block should become scratch
    REPORTER_ASSERT(r, pool->totalSize() == total);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == added);

    // But a reset definitely deletes any scratch block
    pool->reset();
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);

    // Reserving more than what's available adds a scratch block, and current block remains avail.
    size_t avail = pool->currentBlock()->avail();
    size_t reserve = avail + 1;
    pool->reserve(reserve);
    REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
    // And rounds up to the fixed size of this pool's growth policy
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= reserve &&
                       pool->testingOnly_scratchBlockSize() % 256 == 0);

    // Allocating more than avail activates the scratch block (so totalSize doesn't change)
    size_t preAllocTotalSize = pool->totalSize();
    pool->allocate<1>(avail + 1);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == 0);
    REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);

    // When reserving less than what's still available in the current block, no scratch block is
    // added.
    pool->reserve(pool->currentBlock()->avail());
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);

    // Unless checking available bytes is disabled
    pool->reserve(pool->currentBlock()->avail(), GrBlockAllocator::kIgnoreExistingBytes_Flag);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0);

    // If kIgnoreGrowthPolicy_Flag is specified, the new scratch block should not have been rounded
    // up to the growth policy's size (which in this case is a fixed 256 bytes per block).
    pool->resetScratchSpace();
    pool->reserve(32, GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
    REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0 &&
                       pool->testingOnly_scratchBlockSize() < 256);

    // When requesting an allocation larger than the current block and the scratch block, a new
    // block is added, and the scratch block remains scratch.
    GrBlockAllocator::Block* oldTail = pool->currentBlock();
    avail = oldTail->avail();
    size_t scratchAvail = 2 * avail;
    pool->reserve(scratchAvail);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= scratchAvail);

    // This allocation request is larger than both oldTail's available space and the scratch
    // block, so a new block should be added and the scratch size should stay the same.
    scratchAvail = pool->testingOnly_scratchBlockSize();
    pool->allocate<1>(scratchAvail + 1);
    REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == scratchAvail);
}

DEF_TEST(GrBlockAllocatorStealBlocks, r) {
    GrSBlockAllocator<256> poolA;
    GrSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';

    const GrBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;
    int fX2;
};
struct alignas(32) TestMetaBig {
    int fX1;
    int fX2;
};

DEF_TEST(GrBlockAllocatorMetadata, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Allocation where alignment of user data > alignment of metadata
    SkASSERT(alignof(TestMeta) < 16);
    auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
    TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
    // Access fields to make sure 'meta' matches the compiler's expectations...
    meta->fX1 = 2;
    meta->fX2 = 5;

    // Repeat, but for metadata that has a larger alignment than the allocation
    SkASSERT(alignof(TestMetaBig) == 32);
    auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
    TestMetaBig* metaBig = static_cast<TestMetaBig*>(
            p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
    // Access fields
    metaBig->fX1 = 3;
    metaBig->fX2 = 6;

    // Ensure metadata values persist after allocations
    REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
    REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
}

DEF_TEST(GrBlockAllocatorAllocatorMetadata, r) {
    GrSBlockAllocator<256> pool{};
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, pool->metadata() == 0); // initial value

    pool->setMetadata(4);
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // Releasing the head block doesn't change the allocator's metadata (even though that's where
    // it is stored).
    pool->releaseBlock(pool->headBlock());
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // But resetting the whole allocator brings things back to as if it were newly constructed
    pool->reset();
    REPORTER_ASSERT(r, pool->metadata() == 0);
}

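// Allocates one byte with the given alignment and metadata padding, then checks that
// owningBlock() maps both the user pointer and the metadata pointer back to the block
// that fulfilled the allocation.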
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, GrBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);

    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);

    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = pool->owningBlock<Align>(metaPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
    REPORTER_ASSERT(r, block == br.fBlock);
}

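// Repeats the owning-block checks across a range of power-of-two alignments for the given padding.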
template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, GrBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
}

DEF_TEST(GrBlockAllocatorOwningBlock, r) {
    GrSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    run_owning_block_tests<1>(r, pool.allocator());
    run_owning_block_tests<2>(r, pool.allocator());
    run_owning_block_tests<4>(r, pool.allocator());
    run_owning_block_tests<8>(r, pool.allocator());
    run_owning_block_tests<16>(r, pool.allocator());
    run_owning_block_tests<32>(r, pool.allocator());

    // And some weird numbers
    run_owning_block_tests<3>(r, pool.allocator());
    run_owning_block_tests<9>(r, pool.allocator());
    run_owning_block_tests<17>(r, pool.allocator());
}