• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2021 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/graphite/BufferManager.h"
9 
10 #include "include/gpu/graphite/Recording.h"
11 #include "src/gpu/graphite/Caps.h"
12 #include "src/gpu/graphite/ContextPriv.h"
13 #include "src/gpu/graphite/Log.h"
14 #include "src/gpu/graphite/QueueManager.h"
15 #include "src/gpu/graphite/RecordingPriv.h"
16 #include "src/gpu/graphite/ResourceProvider.h"
17 #include "src/gpu/graphite/SharedContext.h"
18 #include "src/gpu/graphite/UploadBufferManager.h"
19 #include "src/gpu/graphite/task/ClearBuffersTask.h"
20 #include "src/gpu/graphite/task/CopyTask.h"
21 
22 namespace skgpu::graphite {
23 
24 namespace {
25 
26 // TODO: Tune these values on real world data
27 static constexpr size_t kVertexBufferSize = 16 << 10; // 16 KB
28 static constexpr size_t kIndexBufferSize   = 2 << 10; //  2 KB
29 static constexpr size_t kUniformBufferSize = 2 << 10; //  2 KB
30 static constexpr size_t kStorageBufferSize = 2 << 10; //  2 KB
31 
32 // The limit for all data created by the StaticBufferManager. This data remains alive for
33 // the entire SharedContext so we want to keep it small and give a concrete upper bound to
34 // clients for our steady-state memory usage.
35 // FIXME The current usage is 4732 bytes across static vertex and index buffers, but that includes
36 // multiple copies of tessellation data, and an unoptimized AnalyticRRect mesh. Once those issues
37 // are addressed, we can tighten this and decide on the transfer buffer sizing as well.
38 [[maybe_unused]] static constexpr size_t kMaxStaticDataSize = 6 << 10;
39 
sufficient_block_size(size_t requiredBytes,size_t blockSize)40 size_t sufficient_block_size(size_t requiredBytes, size_t blockSize) {
41     // Always request a buffer at least 'requiredBytes', but keep them in multiples of
42     // 'blockSize' for improved reuse.
43     static constexpr size_t kMaxSize   = std::numeric_limits<size_t>::max();
44     size_t maxBlocks = kMaxSize / blockSize;
45     size_t blocks = (requiredBytes / blockSize) + 1;
46     size_t bufferSize = blocks > maxBlocks ? kMaxSize : (blocks * blockSize);
47     SkASSERT(requiredBytes < bufferSize);
48     return bufferSize;
49 }
50 
can_fit(size_t requestedSize,size_t allocatedSize,size_t currentOffset,size_t alignment)51 bool can_fit(size_t requestedSize,
52              size_t allocatedSize,
53              size_t currentOffset,
54              size_t alignment) {
55     size_t startOffset = SkAlignTo(currentOffset, alignment);
56     return requestedSize <= (allocatedSize - startOffset);
57 }
58 
starting_alignment(BufferType type,bool useTransferBuffers,const Caps * caps)59 size_t starting_alignment(BufferType type, bool useTransferBuffers, const Caps* caps) {
60     // Both vertex and index data is aligned to 4 bytes by default
61     size_t alignment = 4;
62     if (type == BufferType::kUniform) {
63         alignment = caps->requiredUniformBufferAlignment();
64     } else if (type == BufferType::kStorage || type == BufferType::kVertexStorage ||
65                type == BufferType::kIndexStorage || type == BufferType::kIndirect) {
66         alignment = caps->requiredStorageBufferAlignment();
67     }
68     if (useTransferBuffers) {
69         alignment = std::max(alignment, caps->requiredTransferBufferAlignment());
70     }
71     return alignment;
72 }
73 
74 } // anonymous namespace
75 
76 // ------------------------------------------------------------------------------------------------
77 // ScratchBuffer
78 
// Constructed by DrawBufferManager::getScratchStorage(). 'size' is the caller's logical
// request (the underlying 'buffer' may be larger since it is block-sized), 'alignment' is the
// starting alignment for suballocations, and 'owner' receives the buffer back into its reuse
// pool when this object is destroyed.
ScratchBuffer::ScratchBuffer(size_t size, size_t alignment,
                             sk_sp<Buffer> buffer, DrawBufferManager* owner)
        : fSize(size)
        , fAlignment(alignment)
        , fBuffer(std::move(buffer))
        , fOwner(owner) {
    SkASSERT(fSize > 0);
    SkASSERT(fBuffer);
    SkASSERT(fOwner);
}
89 
~ScratchBuffer()90 ScratchBuffer::~ScratchBuffer() { this->returnToPool(); }
91 
suballocate(size_t requiredBytes)92 BindBufferInfo ScratchBuffer::suballocate(size_t requiredBytes) {
93     if (!this->isValid()) {
94         return {};
95     }
96     if (!can_fit(requiredBytes, fBuffer->size(), fOffset, fAlignment)) {
97         return {};
98     }
99     const size_t offset = SkAlignTo(fOffset, fAlignment);
100     fOffset = offset + requiredBytes;
101     return {fBuffer.get(), offset};
102 }
103 
returnToPool()104 void ScratchBuffer::returnToPool() {
105     if (fOwner && fBuffer) {
106         // TODO: Generalize the pool to other buffer types.
107         fOwner->fReusableScratchStorageBuffers.push_back(std::move(fBuffer));
108         SkASSERT(!fBuffer);
109     }
110 }
111 
112 // ------------------------------------------------------------------------------------------------
113 // DrawBufferManager
114 
DrawBufferManager::DrawBufferManager(ResourceProvider* resourceProvider,
                                     const Caps* caps,
                                     UploadBufferManager* uploadManager)
        : fResourceProvider(resourceProvider)
        , fCaps(caps)
        , fUploadManager(uploadManager)
        // One BufferInfo per usage. The order of these entries must line up with the
        // k*BufferIndex constants used by the getters below (e.g. kVertexBufferIndex).
        , fCurrentBuffers{{
                { BufferType::kVertex,        kVertexBufferSize,  caps },
                { BufferType::kIndex,         kIndexBufferSize,   caps },
                { BufferType::kUniform,       kUniformBufferSize, caps },
                { BufferType::kStorage,       kStorageBufferSize, caps },  // mapped storage
                { BufferType::kStorage,       kStorageBufferSize, caps },  // GPU-only storage
                { BufferType::kVertexStorage, kVertexBufferSize,  caps },
                { BufferType::kIndexStorage,  kIndexBufferSize,   caps },
                { BufferType::kIndirect,      kStorageBufferSize, caps } }} {}
130 
~DrawBufferManager()131 DrawBufferManager::~DrawBufferManager() {}
132 
// For simplicity, if transfer buffers are being used, we align the data to the max alignment of
// either the final buffer type or cpu->gpu transfer alignment so that the buffers are laid out
// the same in memory.
DrawBufferManager::BufferInfo::BufferInfo(BufferType type, size_t blockSize, const Caps* caps)
        : fType(type)
        // When the draw buffer cannot be mapped directly, uploads are staged through a
        // transfer buffer, so fold the transfer alignment into the starting alignment.
        , fStartAlignment(starting_alignment(type, !caps->drawBufferCanBeMapped(), caps))
        // Round the block size up so whole blocks stay aligned.
        , fBlockSize(SkAlignTo(blockSize, fStartAlignment)) {}
140 
getVertexWriter(size_t requiredBytes)141 std::pair<VertexWriter, BindBufferInfo> DrawBufferManager::getVertexWriter(size_t requiredBytes) {
142     if (!requiredBytes) {
143         return {};
144     }
145 
146     auto& info = fCurrentBuffers[kVertexBufferIndex];
147     auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "VertexBuffer");
148     return {VertexWriter(ptr, requiredBytes), bindInfo};
149 }
150 
returnVertexBytes(size_t unusedBytes)151 void DrawBufferManager::returnVertexBytes(size_t unusedBytes) {
152     if (fMappingFailed) {
153         // The caller can be unaware that the written data went to no-where and will still call
154         // this function.
155         return;
156     }
157     SkASSERT(fCurrentBuffers[kVertexBufferIndex].fOffset >= unusedBytes);
158     fCurrentBuffers[kVertexBufferIndex].fOffset -= unusedBytes;
159 }
160 
getIndexWriter(size_t requiredBytes)161 std::pair<IndexWriter, BindBufferInfo> DrawBufferManager::getIndexWriter(size_t requiredBytes) {
162     if (!requiredBytes) {
163         return {};
164     }
165 
166     auto& info = fCurrentBuffers[kIndexBufferIndex];
167     auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "IndexBuffer");
168     return {IndexWriter(ptr, requiredBytes), bindInfo};
169 }
170 
getUniformWriter(size_t requiredBytes)171 std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getUniformWriter(size_t requiredBytes) {
172     if (!requiredBytes) {
173         return {};
174     }
175 
176     auto& info = fCurrentBuffers[kUniformBufferIndex];
177     auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
178     return {UniformWriter(ptr, requiredBytes), bindInfo};
179 }
180 
getSsboWriter(size_t requiredBytes)181 std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getSsboWriter(size_t requiredBytes) {
182     if (!requiredBytes) {
183         return {};
184     }
185 
186     auto& info = fCurrentBuffers[kStorageBufferIndex];
187     auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
188     return {UniformWriter(ptr, requiredBytes), bindInfo};
189 }
190 
getUniformPointer(size_t requiredBytes)191 std::pair<void* /*mappedPtr*/, BindBufferInfo> DrawBufferManager::getUniformPointer(
192             size_t requiredBytes) {
193     if (!requiredBytes) {
194         return {};
195     }
196 
197     auto& info = fCurrentBuffers[kUniformBufferIndex];
198     return this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
199 }
200 
getStoragePointer(size_t requiredBytes)201 std::pair<void* /*mappedPtr*/, BindBufferInfo> DrawBufferManager::getStoragePointer(
202         size_t requiredBytes) {
203     if (!requiredBytes) {
204         return {};
205     }
206 
207     auto& info = fCurrentBuffers[kStorageBufferIndex];
208     return this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
209 }
210 
getStorage(size_t requiredBytes,ClearBuffer cleared)211 BindBufferInfo DrawBufferManager::getStorage(size_t requiredBytes, ClearBuffer cleared) {
212     if (!requiredBytes) {
213         return {};
214     }
215 
216     auto& info = fCurrentBuffers[kGpuOnlyStorageBufferIndex];
217     return this->prepareBindBuffer(&info,
218                                    requiredBytes,
219                                    "StorageBuffer",
220                                    /*supportCpuUpload=*/false,
221                                    cleared);
222 }
223 
getVertexStorage(size_t requiredBytes)224 BindBufferInfo DrawBufferManager::getVertexStorage(size_t requiredBytes) {
225     if (!requiredBytes) {
226         return {};
227     }
228 
229     auto& info = fCurrentBuffers[kVertexStorageBufferIndex];
230     return this->prepareBindBuffer(&info, requiredBytes, "VertexStorageBuffer");
231 }
232 
getIndexStorage(size_t requiredBytes)233 BindBufferInfo DrawBufferManager::getIndexStorage(size_t requiredBytes) {
234     if (!requiredBytes) {
235         return {};
236     }
237 
238     auto& info = fCurrentBuffers[kIndexStorageBufferIndex];
239     return this->prepareBindBuffer(&info, requiredBytes, "IndexStorageBuffer");
240 }
241 
getIndirectStorage(size_t requiredBytes,ClearBuffer cleared)242 BindBufferInfo DrawBufferManager::getIndirectStorage(size_t requiredBytes, ClearBuffer cleared) {
243     if (!requiredBytes) {
244         return {};
245     }
246 
247     auto& info = fCurrentBuffers[kIndirectStorageBufferIndex];
248     return this->prepareBindBuffer(&info,
249                                    requiredBytes,
250                                    "IndirectStorageBuffer",
251                                    /*supportCpuUpload=*/false,
252                                    cleared);
253 }
254 
getScratchStorage(size_t requiredBytes)255 ScratchBuffer DrawBufferManager::getScratchStorage(size_t requiredBytes) {
256     if (!requiredBytes || fMappingFailed) {
257         return {};
258     }
259 
260     // TODO: Generalize the pool to other buffer types.
261     auto& info = fCurrentBuffers[kStorageBufferIndex];
262     size_t bufferSize = sufficient_block_size(requiredBytes, info.fBlockSize);
263     sk_sp<Buffer> buffer = this->findReusableSbo(bufferSize);
264     if (!buffer) {
265         buffer = fResourceProvider->findOrCreateBuffer(
266                 bufferSize, BufferType::kStorage, AccessPattern::kGpuOnly, "ScratchStorageBuffer");
267 
268         if (!buffer) {
269             this->onFailedBuffer();
270             return {};
271         }
272     }
273     return {requiredBytes, info.fStartAlignment, std::move(buffer), this};
274 }
275 
// Puts the manager into a permanent failure state after a buffer allocation or mapping
// failure: all held buffers are unmapped and released so no partially-written data can reach
// the GPU. Subsequent allocation calls observe fMappingFailed and return empty results.
void DrawBufferManager::onFailedBuffer() {
    fMappingFailed = true;

    // Clean up and unmap everything now
    fReusableScratchStorageBuffers.clear();

    // Retired buffers may still be mapped; unmap before dropping the refs.
    for (auto& [buffer, _] : fUsedBuffers) {
        if (buffer->isMapped()) {
            buffer->unmap();
        }
    }
    fUsedBuffers.clear();

    // Reset each per-type buffer slot so future requests start from scratch (and fail fast).
    for (auto& info : fCurrentBuffers) {
        if (info.fBuffer && info.fBuffer->isMapped()) {
            info.fBuffer->unmap();
        }
        info.fBuffer = nullptr;
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}
298 
// Hands ownership of all buffers written this frame to 'recording': clear tasks are queued,
// pooled scratch buffers and used draw buffers become recording resource refs, and any
// transfer buffers get copy tasks that move their CPU-written contents into the GPU buffers.
// Afterwards the manager is reset and ready to record a new frame.
void DrawBufferManager::transferToRecording(Recording* recording) {
    // We could allow this to be called when the mapping has failed, since the transfer will be a
    // no-op, but in practice, the caller will want to check the error state as soon as possible to
    // limit any unnecessary resource preparation from other tasks.
    SkASSERT(!fMappingFailed);

    if (!fClearList.empty()) {
        recording->priv().addTask(ClearBuffersTask::Make(std::move(fClearList)));
    }

    // Transfer the buffers in the reuse pool to the recording.
    // TODO: Allow reuse across different Recordings?
    for (auto& buffer : fReusableScratchStorageBuffers) {
        recording->priv().addResourceRef(std::move(buffer));
    }
    fReusableScratchStorageBuffers.clear();

    // Buffers that filled up during recording and were retired by prepareBindBuffer().
    for (auto& [buffer, transferBuffer] : fUsedBuffers) {
        if (transferBuffer) {
            SkASSERT(buffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            size_t copySize = buffer->size();
            recording->priv().addTask(
                    CopyBufferToBufferTask::Make(transferBuffer.fBuffer,
                                                 transferBuffer.fOffset,
                                                 std::move(buffer),
                                                 /*dstOffset=*/0,
                                                 copySize));
        } else {
            // Directly-mapped buffer: unmap before the GPU consumes it.
            if (buffer->isMapped()) {
                buffer->unmap();
            }
            recording->priv().addResourceRef(std::move(buffer));
        }
    }
    fUsedBuffers.clear();

    // The current draw buffers have not been added to fUsedBuffers,
    // so we need to handle them as well.
    for (auto& info : fCurrentBuffers) {
        if (!info.fBuffer) {
            continue;
        }
        if (info.fTransferBuffer) {
            // A transfer buffer should always be mapped at this stage
            SkASSERT(info.fBuffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            recording->priv().addTask(
                    CopyBufferToBufferTask::Make(info.fTransferBuffer.fBuffer,
                                                 info.fTransferBuffer.fOffset,
                                                 info.fBuffer,
                                                 /*dstOffset=*/0,
                                                 info.fBuffer->size()));
        } else {
            if (info.fBuffer->isMapped()) {
                info.fBuffer->unmap();
            }
            recording->priv().addResourceRef(std::move(info.fBuffer));
        }
        // Reset the slot for the next frame's allocations.
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}
366 
prepareMappedBindBuffer(BufferInfo * info,size_t requiredBytes,std::string_view label)367 std::pair<void*, BindBufferInfo> DrawBufferManager::prepareMappedBindBuffer(
368         BufferInfo* info,
369         size_t requiredBytes,
370         std::string_view label) {
371     BindBufferInfo bindInfo = this->prepareBindBuffer(info,
372                                                       requiredBytes,
373                                                       std::move(label),
374                                                       /*supportCpuUpload=*/true);
375     if (!bindInfo) {
376         // prepareBindBuffer() already called onFailedBuffer()
377         SkASSERT(fMappingFailed);
378         return {nullptr, {}};
379     }
380 
381     // If there's a transfer buffer, its mapped pointer should already have been validated
382     SkASSERT(!info->fTransferBuffer || info->fTransferMapPtr);
383     void* mapPtr = info->fTransferBuffer ? info->fTransferMapPtr : info->fBuffer->map();
384     if (!mapPtr) {
385         // Mapping a direct draw buffer failed
386         this->onFailedBuffer();
387         return {nullptr, {}};
388     }
389 
390     mapPtr = SkTAddOffset<void>(mapPtr, static_cast<ptrdiff_t>(bindInfo.fOffset));
391     return {mapPtr, bindInfo};
392 }
393 
prepareBindBuffer(BufferInfo * info,size_t requiredBytes,std::string_view label,bool supportCpuUpload,ClearBuffer cleared)394 BindBufferInfo DrawBufferManager::prepareBindBuffer(BufferInfo* info,
395                                                     size_t requiredBytes,
396                                                     std::string_view label,
397                                                     bool supportCpuUpload,
398                                                     ClearBuffer cleared) {
399     SkASSERT(info);
400     SkASSERT(requiredBytes);
401 
402     if (fMappingFailed) {
403         return {};
404     }
405 
406     // A transfer buffer is not necessary if the caller does not intend to upload CPU data to it.
407     bool useTransferBuffer = supportCpuUpload && !fCaps->drawBufferCanBeMapped();
408 
409     if (info->fBuffer &&
410         !can_fit(requiredBytes, info->fBuffer->size(), info->fOffset, info->fStartAlignment)) {
411         fUsedBuffers.emplace_back(std::move(info->fBuffer), info->fTransferBuffer);
412         info->fTransferBuffer = {};
413     }
414 
415     if (!info->fBuffer) {
416         // This buffer can be GPU-only if
417         //     a) the caller does not intend to ever upload CPU data to the buffer; or
418         //     b) CPU data will get uploaded to fBuffer only via a transfer buffer
419         AccessPattern accessPattern = (useTransferBuffer || !supportCpuUpload)
420                                               ? AccessPattern::kGpuOnly
421                                               : AccessPattern::kHostVisible;
422         size_t bufferSize = sufficient_block_size(requiredBytes, info->fBlockSize);
423         info->fBuffer = fResourceProvider->findOrCreateBuffer(bufferSize,
424                                                               info->fType,
425                                                               accessPattern,
426                                                               std::move(label));
427         info->fOffset = 0;
428         if (!info->fBuffer) {
429             this->onFailedBuffer();
430             return {};
431         }
432     }
433 
434     if (useTransferBuffer && !info->fTransferBuffer) {
435         std::tie(info->fTransferMapPtr, info->fTransferBuffer) =
436                 fUploadManager->makeBindInfo(info->fBuffer->size(),
437                                              fCaps->requiredTransferBufferAlignment(),
438                                              "TransferForDataBuffer");
439 
440         if (!info->fTransferBuffer) {
441             this->onFailedBuffer();
442             return {};
443         }
444         SkASSERT(info->fTransferMapPtr);
445     }
446 
447     info->fOffset = SkAlignTo(info->fOffset, info->fStartAlignment);
448     BindBufferInfo bindInfo{info->fBuffer.get(), info->fOffset};
449     info->fOffset += requiredBytes;
450 
451     if (cleared == ClearBuffer::kYes) {
452         fClearList.push_back({bindInfo.fBuffer, bindInfo.fOffset, requiredBytes});
453     }
454 
455     return bindInfo;
456 }
457 
findReusableSbo(size_t bufferSize)458 sk_sp<Buffer> DrawBufferManager::findReusableSbo(size_t bufferSize) {
459     SkASSERT(bufferSize);
460     SkASSERT(!fMappingFailed);
461 
462     for (int i = 0; i < fReusableScratchStorageBuffers.size(); ++i) {
463         sk_sp<Buffer>* buffer = &fReusableScratchStorageBuffers[i];
464         if ((*buffer)->size() >= bufferSize) {
465             auto found = std::move(*buffer);
466             // Fill the hole left by the move (if necessary) and shrink the pool.
467             if (i < fReusableScratchStorageBuffers.size() - 1) {
468                 *buffer = std::move(fReusableScratchStorageBuffers.back());
469             }
470             fReusableScratchStorageBuffers.pop_back();
471             return found;
472         }
473     }
474     return nullptr;
475 }
476 
477 // ------------------------------------------------------------------------------------------------
478 // StaticBufferManager
479 
// Stages static vertex/index data into transfer buffers; finalize() later copies the staged
// data into GPU-only buffers owned by the GlobalCache.
StaticBufferManager::StaticBufferManager(ResourceProvider* resourceProvider,
                                         const Caps* caps)
        : fResourceProvider(resourceProvider)
        , fUploadManager(resourceProvider, caps)
        , fRequiredTransferAlignment(caps->requiredTransferBufferAlignment())
        , fVertexBufferInfo(BufferType::kVertex, caps)
        , fIndexBufferInfo(BufferType::kIndex, caps) {}
StaticBufferManager::~StaticBufferManager() = default;
488 
// Static data is always uploaded through transfer buffers, so the starting alignment always
// folds in the transfer-buffer alignment (useTransferBuffers=true).
StaticBufferManager::BufferInfo::BufferInfo(BufferType type, const Caps* caps)
        : fBufferType(type)
        , fAlignment(starting_alignment(type, /*useTransferBuffers=*/true, caps))
        , fTotalRequiredBytes(0) {}
493 
getVertexWriter(size_t size,BindBufferInfo * binding)494 VertexWriter StaticBufferManager::getVertexWriter(size_t size, BindBufferInfo* binding) {
495     void* data = this->prepareStaticData(&fVertexBufferInfo, size, binding);
496     return VertexWriter{data, size};
497 }
498 
getIndexWriter(size_t size,BindBufferInfo * binding)499 VertexWriter StaticBufferManager::getIndexWriter(size_t size, BindBufferInfo* binding) {
500     void* data = this->prepareStaticData(&fIndexBufferInfo, size, binding);
501     return VertexWriter{data, size};
502 }
503 
// Reserves 'size' bytes (rounded up to the buffer type's alignment) in a transfer buffer and
// records a pending copy into the eventual static buffer. Returns the CPU-writable staging
// pointer for the caller to fill, or nullptr on zero size / prior failure. '*target' is
// remembered and receives the final static-buffer binding in createAndUpdateBindings().
void* StaticBufferManager::prepareStaticData(BufferInfo* info,
                                             size_t size,
                                             BindBufferInfo* target) {
    // Zero-out the target binding in the event of any failure in actually transferring data later.
    SkASSERT(target);
    *target = {nullptr, 0};
    if (!size || fMappingFailed) {
        return nullptr;
    }

    // Both the transfer buffer and static buffers are aligned to the max required alignment for
    // the pair of buffer types involved (transfer cpu->gpu and either index or vertex). Copies
    // must also copy an aligned amount of bytes.
    size = SkAlignTo(size, info->fAlignment);

    auto [transferMapPtr, transferBindInfo] =
            fUploadManager.makeBindInfo(size,
                                        fRequiredTransferAlignment,
                                        "TransferForStaticBuffer");
    if (!transferMapPtr) {
        SKGPU_LOG_E("Failed to create or map transfer buffer that initializes static GPU data.");
        fMappingFailed = true;
        return nullptr;
    }

    // Remember the staged range so finalize() can copy it and patch 'target'.
    info->fData.push_back({transferBindInfo, target, size});
    info->fTotalRequiredBytes += size;
    return transferMapPtr;
}
533 
// Creates the single GPU-only static buffer for this buffer type, patches every recorded
// BindBufferInfo target to point at its suballocation, and queues one copy task per staged
// range. The finished buffer is handed to the GlobalCache. Returns false on any failure.
bool StaticBufferManager::BufferInfo::createAndUpdateBindings(
        ResourceProvider* resourceProvider,
        Context* context,
        QueueManager* queueManager,
        GlobalCache* globalCache,
        std::string_view label) const {
    if (!fTotalRequiredBytes) {
        return true; // No buffer needed
    }

    sk_sp<Buffer> staticBuffer = resourceProvider->findOrCreateBuffer(
            fTotalRequiredBytes, fBufferType, AccessPattern::kGpuOnly, std::move(label));
    if (!staticBuffer) {
        SKGPU_LOG_E("Failed to create static buffer for type %d of size %zu bytes.\n",
                    (int) fBufferType, fTotalRequiredBytes);
        return false;
    }

    size_t offset = 0;
    for (const CopyRange& data : fData) {
        // Each copy range's size should be aligned to the max of the required buffer alignment and
        // the transfer alignment, so we can just increment the offset into the static buffer.
        SkASSERT(offset % fAlignment == 0);
        // Patch the caller-visible binding recorded by prepareStaticData().
        data.fTarget->fBuffer = staticBuffer.get();
        data.fTarget->fOffset = offset;

        auto copyTask = CopyBufferToBufferTask::Make(
                data.fSource.fBuffer, data.fSource.fOffset,
                sk_ref_sp(data.fTarget->fBuffer), data.fTarget->fOffset,
                data.fSize);
        if (!queueManager->addTask(copyTask.get(), context)) {
            SKGPU_LOG_E("Failed to copy data to static buffer.\n");
            return false;
        }

        offset += data.fSize;
    }

    SkASSERT(offset == fTotalRequiredBytes);
    // The GlobalCache keeps the static buffer alive for the SharedContext's lifetime.
    globalCache->addStaticResource(std::move(staticBuffer));
    return true;
}
576 
// Creates the final static vertex/index buffers, queues the staged copies, and resets this
// manager. Returns kNoWork when nothing was staged, kFailure on any earlier mapping failure
// or buffer/task failure, and kSuccess otherwise.
StaticBufferManager::FinishResult StaticBufferManager::finalize(Context* context,
                                                                QueueManager* queueManager,
                                                                GlobalCache* globalCache) {
    if (fMappingFailed) {
        return FinishResult::kFailure;
    }

    const size_t totalRequiredBytes = fVertexBufferInfo.fTotalRequiredBytes +
                                      fIndexBufferInfo.fTotalRequiredBytes;
    // Keep steady-state static data under the documented cap (see kMaxStaticDataSize above).
    SkASSERT(totalRequiredBytes <= kMaxStaticDataSize);
    if (!totalRequiredBytes) {
        return FinishResult::kNoWork;
    }

    if (!fVertexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                   context,
                                                   queueManager,
                                                   globalCache,
                                                   "StaticVertexBuffer")) {
        return FinishResult::kFailure;
    }
    if (!fIndexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                  context,
                                                  queueManager,
                                                  globalCache,
                                                  "StaticIndexBuffer")) {
        return FinishResult::kFailure;
    }
    // Keep the transfer buffers alive until the queued copy tasks have executed.
    queueManager->addUploadBufferManagerRefs(&fUploadManager);

    // Reset the static buffer manager since the Recording's copy tasks now manage ownership of
    // the transfer buffers and the GlobalCache owns the final static buffers.
    fVertexBufferInfo.reset();
    fIndexBufferInfo.reset();

    return FinishResult::kSuccess;
}
614 
615 } // namespace skgpu::graphite
616