/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/base/SkMacros.h"
#include "src/base/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrBufferAllocPool.h"

#include <memory>
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrCpuBuffer.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrResourceProvider.h"

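// CpuBufferCache recycles up to 'maxBuffersToCache' CPU-side buffers of
// kDefaultBufferSize across pools and flushes. A cached buffer becomes
// available for reuse once the cache holds the only remaining reference to it.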
sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers = std::make_unique<Buffer[]>(fMaxBuffersToCache);
    }
}

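// Returns a CPU buffer of at least 'size' bytes. Default-size requests are
// served from the cache when possible: the scan below reuses the last cached
// buffer that is no longer externally referenced, or fills the next empty
// slot. All other requests get a fresh, uncached allocation. A cached buffer
// is zeroed at most once, and only when the caller requires initialized
// memory.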
sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

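// Unmaps the block's GPU buffer, emitting a trace event that records the
// fraction of the buffer that was left unwritten at unmap time.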
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.size()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

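// Debug-only consistency checks: only the back block may be mapped (or staged
// through fCpuStagingBuffer), the per-block used byte counts must sum to
// fBytesInUse, and a completely unused block is tolerated only when the caller
// says so. The byte accounting is skipped if a GPU buffer was destroyed out
// from under the pool.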
#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.size() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.size(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.size() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

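// Alignment helpers. For example, with alignment == 8: align_up_pad(13, 8)
// returns 3 (13 + 3 == 16 is the next multiple of 8, and an exact multiple
// pads by 0), while align_down(13, 8) returns 8, the largest multiple of 8
// that is <= 13.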
static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

static inline size_t align_down(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}

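// Sub-allocates 'size' bytes at the requested alignment. If the current block
// has room after padding, the allocation is appended to it; otherwise a new
// block is created (a fresh block starts at offset 0, so no padding is
// needed). On success, returns a pointer the caller may write through and
// reports the backing buffer and the allocation's offset within it. Padding
// bytes are zeroed.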
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, when this was written the GL
    // buffer implementation was cheating on the actual buffer size by shrinking
    // the buffer in updateData() if the amount of data passed was less than
    // the full buffer size. This is old code and both concerns may be obsolete.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

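// Like makeSpace(), but greedy: guarantees at least 'minSize' bytes and then
// hands the caller all remaining correctly-aligned space in the block,
// reporting the granted amount through 'actualSize'. Falls back to a new block
// of 'fallbackSize' when the current block cannot satisfy 'minSize'.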
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    size_t usedBytes = (fBlocks.empty()) ? 0 : fBlocks.back().fBuffer->size() -
                                               fBlocks.back().fBytesFree;
    size_t pad = align_up_pad(usedBytes, alignment);
    if (fBlocks.empty() || (minSize + pad) > fBlocks.back().fBytesFree) {
        // We either don't have a block yet or the current block doesn't have enough free space.
        // Create a new one.
        if (!this->createBlock(fallbackSize)) {
            return nullptr;
        }
        usedBytes = 0;
        pad = 0;
    }
    SkASSERT(fBufferPtr);

    // Consume padding first, to make subsequent alignment math easier
    memset(static_cast<char*>(fBufferPtr) + usedBytes, 0, pad);
    usedBytes += pad;
    fBlocks.back().fBytesFree -= pad;
    fBytesInUse += pad;

    // Give caller all remaining space in this block (but aligned correctly)
    size_t size = align_down(fBlocks.back().fBytesFree, alignment);
    *offset = usedBytes;
    *buffer = fBlocks.back().fBuffer;
    *actualSize = size;
    fBlocks.back().fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return static_cast<char*>(fBufferPtr) + usedBytes;
}

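// Returns the trailing 'bytes' of previously allocated space to the pool.
// Blocks that become entirely unused are unmapped and destroyed; a partially
// returned block simply has its free space grown back.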
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unmap it.
            GrBuffer* buffer = block.fBuffer.get();
            if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

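// Appends a new block of at least kDefaultBufferSize bytes, first flushing
// and unmapping the previous block. The new block is mapped directly when it
// is CPU-backed, or when GPU buffer mapping is supported and the block clears
// the map threshold; otherwise writes are staged in fCpuStagingBuffer and
// uploaded later by flushCpuData().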
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = std::max(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.size() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

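// Uploads staged CPU data for 'block' to its GPU buffer, preferring a
// map/memcpy/unmap when mapping is supported and the flush is large enough,
// and falling back to updateData() otherwise.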
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, /*offset=*/0, flushSize, /*preserve=*/false);
    VALIDATE(true);
}

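// Creates the backing buffer for a new block: a CPU buffer when the caps
// prefer client-side dynamic buffers (or, for indirect draws, when the caps
// require client-side indirect buffers), and a dynamic GPU buffer otherwise.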
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    const GrCaps& caps = *fGpu->caps();
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
    if (caps.preferClientSideDynamicBuffers() ||
        (fBufferType == GrGpuBufferType::kDrawIndirect && caps.useClientSideIndirectBuffers())) {
        // Create a CPU buffer.
        bool mustInitialize = caps.mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size,
                                          fBufferType,
                                          kDynamic_GrAccessPattern,
                                          GrResourceProvider::ZeroInit::kNo);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

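// A minimal usage sketch (hypothetical caller code, not part of this file):
// the pool hands back a write-only pointer plus the buffer and starting
// vertex needed to record the draw.
//
//     sk_sp<const GrBuffer> buffer;
//     int startVertex;
//     auto* positions = static_cast<SkPoint*>(
//             pool.makeSpace(sizeof(SkPoint), /*vertexCount=*/4, &buffer, &startVertex));
//     if (positions) {
//         // ... write four vertex positions ...
//     }
//     pool.unmap();  // flush/unmap before the GPU consumes the buffer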
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

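// The index pool always deals in 16-bit indices, so index counts are
// converted to byte sizes using sizeof(uint16_t).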
void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}