/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkMacros.h"
#include "src/core/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCpuBuffer.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrResourceProvider.h"

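// CpuBufferCache hands out CPU-backed GrCpuBuffers and keeps up to fMaxBuffersToCache
// default-sized buffers around for reuse. Make() simply wraps the constructor in an sk_sp.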
sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers.reset(new Buffer[fMaxBuffersToCache]);
    }
}

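// Returns a CPU buffer of 'size' bytes. Requests for exactly kDefaultBufferSize are served
// from the cache when an unreferenced slot is available (or a new slot can be filled); any
// other size gets a freshly allocated, uncached buffer. When mustBeInitialized is true the
// buffer's contents are zeroed the first time it is handed out.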
sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

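// Drops every cached buffer and resets its cleared flag so a future buffer in that slot will
// be re-zeroed if required.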
void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

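// Unmaps the GPU buffer backing 'block', emitting a trace event that records what fraction of
// the buffer was left unwritten.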
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

constexpr size_t GrBufferAllocPool::kDefaultBufferSize;

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

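// Unmaps the most recent block if it is still mapped, then destroys all blocks.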
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

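// Finishes writing to the current block: unmaps the GPU buffer if it was mapped, otherwise
// flushes the staged CPU data into it. CPU-backed blocks need neither.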
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

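// Returns a pointer to 'size' bytes aligned to 'alignment'. The request is satisfied from the
// current block when it fits (zero-filling any alignment padding); otherwise a new block is
// created. The backing buffer and the offset of the returned space within it are written to
// the out-parameters.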
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

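// Like makeSpace(), but hands out anywhere between 'minSize' and 'fallbackSize' bytes: the
// rest of the current block (capped at 'fallbackSize') when at least 'minSize' fits there,
// otherwise a fresh block of 'fallbackSize'. The amount actually granted is reported via
// 'actualSize'.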
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((minSize + pad) <= back.fBytesFree) {
            // Consume padding first, to make subsequent alignment math easier
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            back.fBytesFree -= pad;
            fBytesInUse += pad;

            // Give caller all remaining space in this block up to fallbackSize (but aligned
            // correctly)
            size_t size;
            if (back.fBytesFree >= fallbackSize) {
                SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
                size = fallbackSize;
            } else {
                size = GrSizeAlignDown(back.fBytesFree, alignment);
            }
            *offset = usedBytes;
            *buffer = back.fBuffer;
            *actualSize = size;
            back.fBytesFree -= size;
            fBytesInUse += size;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(fallbackSize)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    *actualSize = fallbackSize;
    back.fBytesFree -= fallbackSize;
    fBytesInUse += fallbackSize;
    VALIDATE();
    return fBufferPtr;
}

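// Returns the most recently allocated 'bytes' bytes to the pool, unmapping and destroying any
// block that becomes completely unused along the way.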
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unmap it.
            GrBuffer* buffer = block.fBuffer.get();
            if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

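// Appends a new block backed by a buffer of at least max(requestSize, kDefaultBufferSize)
// bytes and points fBufferPtr at writable memory for it: the CPU buffer's own storage, a GPU
// mapping when the caps allow it and the buffer is large enough, or the CPU staging buffer as
// a fallback. Any previously current block is unmapped or flushed first.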
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = SkTMax(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

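// Ensures the CPU staging buffer can hold 'newSize' bytes, releasing it entirely when newSize
// is zero. The staging buffer comes from the CpuBufferCache when one was supplied.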
void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

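// Uploads 'flushSize' bytes of staged CPU data into the block's GPU buffer, preferring
// map()/memcpy()/unmap() for transfers above the caps' map threshold and falling back to
// updateData() otherwise.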
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

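// Allocates the buffer that backs a new block: a CPU buffer (cached when possible) if the
// caps prefer client-side dynamic buffers, otherwise a dynamic GPU buffer from the resource
// provider.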
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();

    if (fGpu->caps()->preferClientSideDynamicBuffers()) {
        bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

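// Vertex-count flavor of makeSpace(): reserves vertexCount * vertexSize bytes aligned to the
// vertex size and converts the returned byte offset into a starting vertex index. The
// makeSpaceAtLeast() overload below follows the same pattern for a min/fallback vertex count.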
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

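// Index-count flavor of makeSpace(): reserves room for indexCount 16-bit indices and reports
// where they start within the returned buffer. makeSpaceAtLeast() below does the same for a
// minimum/fallback index count.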
void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}