/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkBlockAllocator.h"
#include "tests/Test.h"

#include <cstring>
#include <vector>

using Block = SkBlockAllocator::Block;
using GrowthPolicy = SkBlockAllocator::GrowthPolicy;

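// Wraps SkBlockAllocator::scratchBlockSize() so the tests can inspect the scratch block, which
// the public query API does not otherwise expose.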
class BlockAllocatorTestAccess {
public:
    template<size_t N>
    static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {
        return (size_t) pool->scratchBlockSize();
    }
};

// Helper functions for modifying the allocator in a controlled manner
template<size_t N>
static int block_count(const SkSBlockAllocator<N>& pool) {
    int ct = 0;
    for (const Block* b : pool->blocks()) {
        (void) b;
        ct++;
    }
    return ct;
}

template<size_t N>
static Block* get_block(SkSBlockAllocator<N>& pool, int blockIndex) {
    Block* found = nullptr;
    int i = 0;
    for (Block* b : pool->blocks()) {
        if (i == blockIndex) {
            found = b;
            break;
        }
        i++;
    }

    SkASSERT(found != nullptr);
    return found;
}

// SkBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about size -
// scratch in many of these tests.
template<size_t N>
static size_t total_size(SkSBlockAllocator<N>& pool) {
    return pool->totalSize() - BlockAllocatorTestAccess::ScratchBlockSize(pool);
}
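// For example (hypothetical sizes): with a 1024 byte head block and a released 256 byte block
// held as scratch, totalSize() reports 1280 while total_size() reports just the in-use 1024.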

// Keeps allocating from 'pool' until the allocator appends a new block, returning how many bytes
// that block added to total_size().
template<size_t N>
static size_t add_block(SkSBlockAllocator<N>& pool) {
    size_t currentSize = total_size(pool);
    SkBlockAllocator::Block* current = pool->currentBlock();
    while (pool->currentBlock() == current) {
        pool->template allocate<4>(pool->preallocSize() / 2);
    }
    return total_size(pool) - currentSize;
}

template<size_t N>
static void* alloc_byte(SkSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}
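
// A minimal usage sketch (an assumed pattern, not exercised by the tests below): reserve
// sizeof(T) bytes at alignof(T) and return the raw storage, which a caller would then
// placement-new a T into.
template<typename T, size_t N>
static void* alloc_storage_for(SkSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<alignof(T)>(sizeof(T));
    return br.fBlock->ptr(br.fAlignedOffset);
}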

DEF_TEST(SkBlockAllocatorPreallocSize, r) {
    // Tests stack/member initialization, option #1 described in doc
    SkBlockAllocator stack{GrowthPolicy::kFixed, 2048};
    SkDEBUGCODE(stack.validate();)

    REPORTER_ASSERT(r, stack.preallocSize() == sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());

    // Tests placement new initialization to increase head block size, option #2
    void* mem = operator new(1024);
    SkBlockAllocator* placement = new (mem) SkBlockAllocator(GrowthPolicy::kLinear, 1024,
                                                             1024 - sizeof(SkBlockAllocator));
    REPORTER_ASSERT(r, placement->preallocSize() == 1024);
    REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
                       placement->preallocUsableSpace() >= (1024 - sizeof(SkBlockAllocator)));
    placement->~SkBlockAllocator();
    operator delete(mem);

    // Tests inline increased preallocation, option #3
    SkSBlockAllocator<2048> inlined{};
    SkDEBUGCODE(inlined->validate();)
    REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
    REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
                       inlined->preallocUsableSpace() >= (2048 - sizeof(SkBlockAllocator)));
}

DEF_TEST(SkBlockAllocatorAlloc, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Assumes the previous pointer was in the same block
    auto validate_ptr = [&](int align, int size,
                            SkBlockAllocator::ByteRange br,
                            SkBlockAllocator::ByteRange* prevBR) {
        uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
        // Matches the requested alignment
        REPORTER_ASSERT(r, pt % align == 0);
        // And is large enough
        REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
        // And has enough padding for alignment
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
        REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
        // And the block of the returned struct is the current block of the allocator
        REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);

        // And make sure that we're past the required end of the previous allocation
        if (prevBR) {
            uintptr_t prevEnd =
                    reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
            REPORTER_ASSERT(r, pt > prevEnd);
        }

        // And make sure that the entire byte range is safe to write into (excluding the dead space
        // between "start" and "aligned offset," which is just padding and is left poisoned)
        std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
    };

    auto p1 = pool->allocate<1>(14);
    validate_ptr(1, 14, p1, nullptr);

    auto p2 = pool->allocate<2>(24);
    validate_ptr(2, 24, p2, &p1);

    auto p4 = pool->allocate<4>(28);
    validate_ptr(4, 28, p4, &p2);

    auto p8 = pool->allocate<8>(40);
    validate_ptr(8, 40, p8, &p4);

    auto p16 = pool->allocate<16>(64);
    validate_ptr(16, 64, p16, &p8);

    auto p32 = pool->allocate<32>(96);
    validate_ptr(32, 96, p32, &p16);

    // All of these allocations should be in the head block
    REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
    SkDEBUGCODE(pool->validate();)

    // Requesting an allocation of avail() should not make a new block
    size_t avail = pool->currentBlock()->avail<4>();
    auto pAvail = pool->allocate<4>(avail);
    validate_ptr(4, avail, pAvail, &p32);

    // The remaining space should be less than the alignment that was requested, and then
    // the next allocation will make a new block
    REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
    auto pNextBlock = pool->allocate<4>(4);
    validate_ptr(4, 4, pNextBlock, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());

    // Allocating more than avail() makes another block
    size_t currentSize = total_size(pool);
    size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
    auto pTooBig = pool->allocate<4>(bigRequest);
    validate_ptr(4, bigRequest, pTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) > currentSize);

    // Allocating more than the default growth policy (1024 in this case) will still fulfill
    // the request
    REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
    currentSize = total_size(pool);
    auto pReallyTooBig = pool->allocate<4>(4096);
    validate_ptr(4, 4096, pReallyTooBig, nullptr);
    REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorResize, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Fixed resize from 16 to 32
    SkBlockAllocator::ByteRange p = pool->allocate<4>(16);
    REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
    p.fEnd += 16;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);

    // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
    auto pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));

    // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to
    // avail()
    REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
    int fillBlock = p.fBlock->avail<4>();
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
    p.fEnd += fillBlock;

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);

    // Confirm that resizing fails when there's not enough room in the block
    REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
    REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));

    // Confirm that we can shrink 'p' back to 32 bytes and then allocate again
    int shrinkTo32 = p.fStart - p.fEnd + 32;
    REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
    p.fEnd += shrinkTo32;
    REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);
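    // (resize() takes a signed delta relative to the current end; 'shrinkTo32' is negative here,
    // which is how the allocation shrinks in place.)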

    std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);

    pNext = pool->allocate<4>(16);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                       reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
    SkDEBUGCODE(pool->validate();)

    // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
    int shrinkTo0 = pNext.fStart - pNext.fEnd;
#ifndef SK_DEBUG
    // Only test for false on release builds; a negative size should assert on debug builds
    REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
#endif
    REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
}

DEF_TEST(SkBlockAllocatorRelease, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Successful allocate and release
    auto p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
    // Ensure the above release actually means the next allocation reuses the same space
    auto p2 = pool->allocate<8>(32);
    REPORTER_ASSERT(r, p.fStart == p2.fStart);

    // Confirm that 'p2' cannot be released if another allocation came after it
    auto p3 = pool->allocate<8>(64);
    (void) p3;
    REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));

    // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
    // can be released simultaneously (equivalent to 'p3' then 'p2').
    auto p4 = pool->allocate<8>(16);
    auto p5 = pool->allocate<8>(96);
    REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
    REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
    REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));

    // And confirm that passing in the wrong size for the allocation fails
    p = pool->allocate<8>(32);
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
    REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
    REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorRewind, r) {
    // Confirm that a series of allocations followed by releases in stack order rewinds fully to
    // the start of the block (i.e. unwinds the entire stack, not just the last cursor position)
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    std::vector<SkBlockAllocator::ByteRange> ptrs;
    ptrs.reserve(32); // silence clang-tidy performance warning
    for (int i = 0; i < 32; ++i) {
        ptrs.push_back(pool->allocate<4>(16));
    }

    // Release everything in reverse order
    SkDEBUGCODE(pool->validate();)
    for (int i = 31; i >= 0; --i) {
        auto br = ptrs[i];
        REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
    }

    // If correct, we've rewound all the way back to the start of the block, so a new allocation
    // will have the same location as ptrs[0]
    SkDEBUGCODE(pool->validate();)
    REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
}

DEF_TEST(SkBlockAllocatorGrowthPolicy, r) {
    static constexpr int kInitSize = 128;
    static constexpr int kBlockCount = 5;
    static constexpr size_t kExpectedSizes[SkBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
            // kFixed -> kInitSize per block
            { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
            // kLinear -> (block ct + 1) * kInitSize for the next block
            { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
            // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
            { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
            // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
            { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
    };
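    // Note: entry [0] is compared against the pool's initial total_size(), while entries [1..4]
    // are compared against the per-block growth reported by add_block().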

    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
        for (int i = 1; i < kBlockCount; ++i) {
            REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
        }

        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorReset, r) {
    static constexpr int kBlockIncrement = 1024;

    SkSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
    SkDEBUGCODE(pool->validate();)

    void* firstAlloc = alloc_byte(pool);

    // Add several blocks
    add_block(pool);
    add_block(pool);
    add_block(pool);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head

    get_block(pool, 0)->setMetadata(2);

    // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
    // and new blocks are sized based on a reset growth policy.
    pool->reset();
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, block_count(pool) == 1);
    REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
    REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);

    REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
    REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
    REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
    SkDEBUGCODE(pool->validate();)
}

DEF_TEST(SkBlockAllocatorReleaseBlock, r) {
    // This loops over all growth policies to make sure that the incremental releases update the
    // sequence correctly for each policy.
    for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
        SkSBlockAllocator<1024> pool{(GrowthPolicy) gp};
        SkDEBUGCODE(pool->validate();)

        void* firstAlloc = alloc_byte(pool);

        size_t b1Size = total_size(pool);
        size_t b2Size = add_block(pool);
        size_t b3Size = add_block(pool);
        size_t b4Size = add_block(pool);
        SkDEBUGCODE(pool->validate();)

        get_block(pool, 0)->setMetadata(1);
        get_block(pool, 1)->setMetadata(2);
        get_block(pool, 2)->setMetadata(3);
        get_block(pool, 3)->setMetadata(4);

        // Remove the 3 added blocks, always releasing the block at index 1 to test intermediate
        // removal (the final iteration tests tail removal).
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 2);
        REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
        REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);

        pool->releaseBlock(get_block(pool, 1));
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, total_size(pool) == b1Size);

        // Since we're back to just the head block, if we add a new block the growth policy should
        // restart the original sequence instead of continuing with a hypothetical 'b5Size'
        pool->resetScratchSpace();
        size_t size = add_block(pool);
        REPORTER_ASSERT(r, size == b2Size);
        pool->releaseBlock(get_block(pool, 1));

        // Explicitly release the head block and confirm it's reset
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
        REPORTER_ASSERT(r, block_count(pool) == 1);
        REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
        REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata is reset too

        // Confirm that if we have > 1 block but release the head block, we can still access the
        // others
        add_block(pool);
        add_block(pool);
        pool->releaseBlock(get_block(pool, 0));
        REPORTER_ASSERT(r, block_count(pool) == 3);
        SkDEBUGCODE(pool->validate();)
    }
}

DEF_TEST(SkBlockAllocatorIterateAndRelease, r) {
    SkSBlockAllocator<256> pool;

    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop forward and release the blocks
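    // (releasing the iterator's current block mid-loop is part of what this exercises)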
    int releaseCount = 0;
    for (auto* b : pool->blocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should have just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);

    // Add more blocks
    pool->headBlock()->setMetadata(1);
    add_block(pool);
    add_block(pool);
    add_block(pool);

    // Loop in reverse and release the blocks
    releaseCount = 0;
    for (auto* b : pool->rblocks()) {
        pool->releaseBlock(b);
        releaseCount++;
    }
    REPORTER_ASSERT(r, releaseCount == 4);
    // The pool should have just the head block, which was reset
    REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
    REPORTER_ASSERT(r, block_count(pool) == 1);
}

DEF_TEST(SkBlockAllocatorScratchBlockReserve, r) {
    SkSBlockAllocator<256> pool;

    size_t added = add_block(pool);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    size_t total = pool->totalSize();
    pool->releaseBlock(pool->currentBlock());

    // Total size shouldn't have changed, the released block should become scratch
    REPORTER_ASSERT(r, pool->totalSize() == total);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == added);

    // But a reset definitely deletes any scratch block
    pool->reset();
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Reserving more than what's available adds a scratch block, and the current block's
    // available space remains unchanged.
    size_t avail = pool->currentBlock()->avail();
    size_t reserve = avail + 1;
    pool->reserve(reserve);
    REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
    // And the scratch size rounds up to the fixed block size of this pool's growth policy
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= reserve &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) % 256 == 0);

    // Allocating more than avail activates the scratch block (so totalSize doesn't change)
    size_t preAllocTotalSize = pool->totalSize();
    pool->allocate<1>(avail + 1);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);
    REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);

    // When reserving less than what's still available in the current block, no scratch block is
    // added.
    pool->reserve(pool->currentBlock()->avail());
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == 0);

    // Unless checking available bytes is disabled
    pool->reserve(pool->currentBlock()->avail(), SkBlockAllocator::kIgnoreExistingBytes_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0);

    // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been updated to
    // follow the growth policy's size (which in this case is a fixed 256 bytes per block).
    pool->resetScratchSpace();
    pool->reserve(32, SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) > 0 &&
                       BlockAllocatorTestAccess::ScratchBlockSize(pool) < 256);

    // When requesting an allocation larger than the current block and the scratch block, a new
    // block is added, and the scratch block remains scratch.
    SkBlockAllocator::Block* oldTail = pool->currentBlock();
    avail = oldTail->avail();
    size_t scratchAvail = 2 * avail;
    pool->reserve(scratchAvail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) >= scratchAvail);

    // This allocation request is larger than both oldTail's available space and the scratch size,
    // so a new block should be added while the scratch size stays the same.
    scratchAvail = BlockAllocatorTestAccess::ScratchBlockSize(pool);
    pool->allocate<1>(scratchAvail + 1);
    REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
    REPORTER_ASSERT(r, BlockAllocatorTestAccess::ScratchBlockSize(pool) == scratchAvail);
}

DEF_TEST(SkBlockAllocatorStealBlocks, r) {
    SkSBlockAllocator<256> poolA;
    SkSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';
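    // 't' is a sentinel; after the steal we verify the byte is intact and still owned by the
    // same block.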

    const SkBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;
    int fX2;
};
struct alignas(32) TestMetaBig {
    int fX1;
    int fX2;
};
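// With allocate<Align, Padding>(), at least Padding bytes are reserved between fStart and
// fAlignedOffset, so a metadata struct can live at ptr(fAlignedOffset - sizeof(Meta)).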

DEF_TEST(SkBlockAllocatorMetadata, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    // Allocation where alignment of user data > alignment of metadata
    SkASSERT(alignof(TestMeta) < 16);
    auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
    TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
    // Access fields to make sure 'meta' matches the compiler's expectations...
    meta->fX1 = 2;
    meta->fX2 = 5;

    // Repeat, but for metadata that has a larger alignment than the allocation
    SkASSERT(alignof(TestMetaBig) == 32);
    auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
    TestMetaBig* metaBig = static_cast<TestMetaBig*>(
            p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
    // Confirm alignment for both pointers
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
    REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
    // Access fields
    metaBig->fX1 = 3;
    metaBig->fX2 = 6;

    // Ensure metadata values persist after allocations
    REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
    REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
}

DEF_TEST(SkBlockAllocatorAllocatorMetadata, r) {
    SkSBlockAllocator<256> pool{};
    SkDEBUGCODE(pool->validate();)

    REPORTER_ASSERT(r, pool->metadata() == 0); // initial value

    pool->setMetadata(4);
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // Releasing the head block doesn't change the allocator's metadata (even though that's where
    // it is stored).
    pool->releaseBlock(pool->headBlock());
    REPORTER_ASSERT(r, pool->metadata() == 4);

    // But resetting the whole allocator brings things back to as if it were newly constructed
    pool->reset();
    REPORTER_ASSERT(r, pool->metadata() == 0);
}

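// Checks that owningBlock<Align, Padding>() recovers the Block from the user pointer (with
// padding) and from the metadata pointer (without), and that both agree with the raw arithmetic
// of subtracting fAlignedOffset from the user pointer.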
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, SkBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);

    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);

    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = pool->owningBlock<Align>(metaPtr, br.fStart);
    REPORTER_ASSERT(r, block == br.fBlock);

    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
    REPORTER_ASSERT(r, block == br.fBlock);
}

template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, SkBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
}

DEF_TEST(SkBlockAllocatorOwningBlock, r) {
    SkSBlockAllocator<1024> pool{};
    SkDEBUGCODE(pool->validate();)

    run_owning_block_tests<1>(r, pool.allocator());
    run_owning_block_tests<2>(r, pool.allocator());
    run_owning_block_tests<4>(r, pool.allocator());
    run_owning_block_tests<8>(r, pool.allocator());
    run_owning_block_tests<16>(r, pool.allocator());
    run_owning_block_tests<32>(r, pool.allocator());

    // And some weird numbers
    run_owning_block_tests<3>(r, pool.allocator());
    run_owning_block_tests<9>(r, pool.allocator());
    run_owning_block_tests<17>(r, pool.allocator());
}