/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#ifdef SK_DEBUG
#include <vector>
#endif

GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kAddressAlign)
                                                / kAddressAlign))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from GrBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}
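
// Example of the fBlockIncrement encoding above, assuming a platform where
// kAddressAlign == 16: blockIncrementBytes = 4096 is stored as 4096 / 16 = 256, and
// addBlock() recovers the byte count as fBlockIncrement * kAddressAlign. Dividing by
// kAddressAlign is what lets 16 bits of storage express increments larger than 64K.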

GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)

    this->poisonRange(kDataStart, fSize);
}

GrBlockAllocator::Block::~Block() {
    this->unpoisonRange(kDataStart, fSize);

    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t GrBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t GrBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t GrBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

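// Note: ownership below is tested against each block's usable range [kDataStart,
// fSize), so a pointer at a block's header fields will not match; the search is
// linear over all blocks, visiting the most recently added blocks first.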
GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

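// A note on the lifecycle managed below: a released heap block is not always freed
// immediately. The largest released block is cached as a "scratch" block in
// fHead.fPrev (otherwise unused, since the inline head has no true predecessor) so
// that a later addBlock() can reactivate it without a fresh heap allocation.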
void GrBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        block->poisonRange(kDataStart, block->fSize);
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block if it's bigger; otherwise delete it
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing; fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
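            // e.g. undoing an addBlock() that advanced (fN0, fN1) from (2, 3) to (3, 5):
            // temp = 5 - 3 = 2 restores the prior fN0, and fN1 = 5 - 2 = 3 the prior fN1.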
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

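// Note that only the heap blocks chained after other->fHead are transferred below; a
// scratch block cached in other->fHead.fPrev, if any, stays with the other allocator.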
void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other's first heap block is connected to this allocator's current tail, and
        // this allocator's new tail becomes the end of the other's block linked list.
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void GrBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head's allocatorMetadata and the scratch
            // block are reset/destroyed.
            b->fAllocatorMetadata = 0;
            b->poisonRange(kDataStart, b->fSize);
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void GrBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

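// A sketch of the scale factors (the successive nextN1 values computed below) that
// each growth policy yields for heap blocks, given the seeds set in the constructor
// and reset():
//   kFixed:       1, 1, 1, 1, ...
//   kLinear:      2, 3, 4, 5, ...
//   kFibonacci:   1, 2, 3, 5, 8, ...
//   kExponential: 2, 4, 8, 16, ...
// Each factor is multiplied by fBlockIncrement * kAddressAlign to produce the target
// block size, before alignment and clamping to [minimumSize, maxSize].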
void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out: if allocSize > 32K, align
        // on a 4K boundary; otherwise align on max_align_t, to play nicely with jemalloc
        // (heuristic borrowed from SkArenaAlloc).
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };
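    // For instance, alignAllocSize(40000) rounds up to 40960 (a 4K boundary), while
    // alignAllocSize(1000) rounds up to 1008 on a platform where kAddressAlign == 16.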

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minimumSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minimumSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);

        // However, we must guard against overflow here, since the size-based asserts only
        // prevented alignment/addition overflows, while multiplication requires 2x bits
        // instead of x+1.
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minimumSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minimumSize == maxSize);
        // Still align on a nice boundary; no max clamping, since that would just undo the
        // alignment
        allocSize = alignAllocSize(minimumSize);
    }

    // Create a new block and append it to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void GrBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(blocks.size() > 0);
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif