/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnResourceProvider.h"

#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/gpu/graphite/dawn/DawnTypes.h"
#include "include/private/base/SkAlign.h"
#include "src/gpu/graphite/ComputePipeline.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnCommandBuffer.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnGraphiteTypesPriv.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"
#include "src/sksl/SkSLCompiler.h"

namespace skgpu::graphite {

namespace {

constexpr int kBufferBindingSizeAlignment = 16;
constexpr int kMaxNumberOfCachedBufferBindGroups = 1024;
constexpr int kMaxNumberOfCachedTextureBindGroups = 4096;

wgpu::ShaderModule create_shader_module(const wgpu::Device& device, const char* source) {
#ifdef WGPU_BREAKING_CHANGE_DROP_DESCRIPTOR
    wgpu::ShaderSourceWGSL wgslDesc;
#else
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
#endif
    wgslDesc.code = source;
    wgpu::ShaderModuleDescriptor descriptor;
    descriptor.nextInChain = &wgslDesc;
    return device.CreateShaderModule(&descriptor);
}
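
// A usage sketch (illustrative only, not part of this file): compiling a
// trivial WGSL fragment shader with the helper above. The returned module is
// only valid if the device raised no validation error during creation.
//
//   wgpu::ShaderModule fs = create_shader_module(
//           device,
//           "@fragment fn main() -> @location(0) vec4<f32> { return vec4(1.0); }");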

wgpu::RenderPipeline create_blit_render_pipeline(const DawnSharedContext* sharedContext,
                                                 const char* label,
                                                 wgpu::ShaderModule vsModule,
                                                 wgpu::ShaderModule fsModule,
                                                 wgpu::TextureFormat renderPassColorFormat,
                                                 wgpu::TextureFormat renderPassDepthStencilFormat,
                                                 int numSamples) {
    wgpu::RenderPipelineDescriptor descriptor;
    descriptor.label = label;
    descriptor.layout = nullptr;
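    // Note: a null layout requests Dawn's automatically generated pipeline
    // layout, which is derived from the bindings declared in the shader modules.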

    wgpu::ColorTargetState colorTarget;
    colorTarget.format = renderPassColorFormat;
    colorTarget.blend = nullptr;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    wgpu::DepthStencilState depthStencil;
    if (renderPassDepthStencilFormat != wgpu::TextureFormat::Undefined) {
        depthStencil.format = renderPassDepthStencilFormat;
        depthStencil.depthWriteEnabled = false;
        depthStencil.depthCompare = wgpu::CompareFunction::Always;

        descriptor.depthStencil = &depthStencil;
    }

    wgpu::FragmentState fragment;
    fragment.module = std::move(fsModule);
    fragment.entryPoint = "main";
    fragment.constantCount = 0;
    fragment.constants = nullptr;
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    descriptor.vertex.module = std::move(vsModule);
    descriptor.vertex.entryPoint = "main";
    descriptor.vertex.constantCount = 0;
    descriptor.vertex.constants = nullptr;
    descriptor.vertex.bufferCount = 0;
    descriptor.vertex.buffers = nullptr;

    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;

    descriptor.multisample.count = numSamples;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    auto pipeline = sharedContext->device().CreateRenderPipeline(&descriptor);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return pipeline;
}

template <size_t NumEntries>
using BindGroupKey = typename DawnResourceProvider::BindGroupKey<NumEntries>;
using UniformBindGroupKey = BindGroupKey<DawnResourceProvider::kNumUniformEntries>;

UniformBindGroupKey make_ubo_bind_group_key(
        const std::array<std::pair<const DawnBuffer*, uint32_t>,
                         DawnResourceProvider::kNumUniformEntries>& boundBuffersAndSizes) {
    UniformBindGroupKey uniqueKey;
    {
        // Each entry in the bind group needs 2 uint32_t in the key:
        //  - the buffer's unique ID: 32 bits.
        //  - the buffer's binding size: 32 bits.
        // We need a total of 4 entries in the uniform buffer bind group.
        // Unused entries will be assigned zero values.
        UniformBindGroupKey::Builder builder(&uniqueKey);

        for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
            const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
            const uint32_t bindingSize = boundBuffersAndSizes[i].second;
            if (boundBuffer) {
                builder[2 * i] = boundBuffer->uniqueID().asUInt();
                builder[2 * i + 1] = bindingSize;
            } else {
                builder[2 * i] = 0;
                builder[2 * i + 1] = 0;
            }
        }

        builder.finish();
    }

    return uniqueKey;
}
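
// For example (illustrative values only): buffers with unique IDs 7 and 9 bound
// in slots 0 and 1 with binding sizes 256 and 64, and slots 2 and 3 unused,
// produce the key [7, 256, 9, 64, 0, 0, 0, 0].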

BindGroupKey<1> make_texture_bind_group_key(const DawnSampler* sampler,
                                            const DawnTexture* texture) {
    BindGroupKey<1> uniqueKey;
    {
        BindGroupKey<1>::Builder builder(&uniqueKey);

        builder[0] = sampler->uniqueID().asUInt();
        builder[1] = texture->uniqueID().asUInt();

        builder.finish();
    }

    return uniqueKey;
}
} // namespace


// Wraps a Dawn buffer, and tracks the intrinsic blocks residing in this buffer.
class DawnResourceProvider::IntrinsicBuffer final {
public:
    static constexpr int kNumSlots = 8;

    IntrinsicBuffer(sk_sp<DawnBuffer> dawnBuffer) : fDawnBuffer(std::move(dawnBuffer)) {}
    ~IntrinsicBuffer() = default;

    sk_sp<DawnBuffer> buffer() const { return fDawnBuffer; }

    // Track that 'intrinsicValues' is stored in the buffer at the 'offset'.
    void trackIntrinsic(UniformDataBlock intrinsicValues, uint32_t offset) {
        fCachedIntrinsicValues.set(UniformDataBlock::Make(intrinsicValues, &fUniformData), offset);
    }

    // Find the offset of 'intrinsicValues' in the buffer. If not found, return nullptr.
    uint32_t* findIntrinsic(UniformDataBlock intrinsicValues) const {
        return fCachedIntrinsicValues.find(intrinsicValues);
    }

    int slotsUsed() const { return fCachedIntrinsicValues.count(); }

    void updateAccessTime() {
        fLastAccess = skgpu::StdSteadyClock::now();
    }
    skgpu::StdSteadyClock::time_point lastAccessTime() const {
        return fLastAccess;
    }

private:
    skia_private::THashMap<UniformDataBlock, uint32_t, UniformDataBlock::Hash>
            fCachedIntrinsicValues;
    SkArenaAlloc fUniformData{0};

    sk_sp<DawnBuffer> fDawnBuffer;
    skgpu::StdSteadyClock::time_point fLastAccess;

    SK_DECLARE_INTERNAL_LLIST_INTERFACE(IntrinsicBuffer);
};

// DawnResourceProvider::IntrinsicConstantsManager
// ----------------------------------------------------------------------------

/**
 * Since Dawn does not currently provide push constants, this helper class manages rotating through
 * buffers and writing each new occurrence of a set of intrinsic uniforms into the current buffer.
 */
class DawnResourceProvider::IntrinsicConstantsManager {
public:
    explicit IntrinsicConstantsManager(DawnResourceProvider* resourceProvider)
            : fResourceProvider(resourceProvider) {}

    ~IntrinsicConstantsManager() {
        auto alwaysTrue = [](IntrinsicBuffer* buffer) { return true; };
        this->purgeBuffersIf(alwaysTrue);

        SkASSERT(fIntrinsicBuffersLRU.isEmpty());
    }

    // Find or create a bind buffer info for the given intrinsic values used in the given command
    // buffer.
    BindBufferInfo add(DawnCommandBuffer* cb, UniformDataBlock intrinsicValues);

    void purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
        auto bufferNotUsedSince = [purgeTime, this](IntrinsicBuffer* buffer) {
            // We always keep the current buffer as it is likely to be used again soon.
            return buffer != fCurrentBuffer && buffer->lastAccessTime() < purgeTime;
        };
        this->purgeBuffersIf(bufferNotUsedSince);
    }

    void freeGpuResources() { this->purgeResourcesNotUsedSince(skgpu::StdSteadyClock::now()); }

private:
    // The max number of intrinsic buffers to keep around in the cache.
    static constexpr uint32_t kMaxNumBuffers = 16;

    // Traverse the intrinsic buffers and purge the ones that match the 'pred'.
    template<typename T> void purgeBuffersIf(T pred);

    DawnResourceProvider* const fResourceProvider;
    // The current buffer being filled up, as well as how much of it has been written to.
    IntrinsicBuffer* fCurrentBuffer = nullptr;

    // All cached intrinsic buffers, in LRU order.
    SkTInternalLList<IntrinsicBuffer> fIntrinsicBuffersLRU;
    // The number of intrinsic buffers currently in the cache.
    uint32_t fNumBuffers = 0;
};

// Find or create a bind buffer info for the given intrinsic values used in the given command
// buffer.
BindBufferInfo DawnResourceProvider::IntrinsicConstantsManager::add(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    uint32_t* offset = nullptr;
    // Find the buffer that contains the given intrinsic values.
    while (curr != nullptr) {
        offset = curr->findIntrinsic(intrinsicValues);
        if (offset != nullptr) {
            break;
        }
        curr = iter.next();
    }
    // If we found the buffer, we can return the bind buffer info directly.
    if (curr != nullptr && offset != nullptr) {
        // Move the buffer to the head of the LRU list.
        fIntrinsicBuffersLRU.remove(curr);
        fIntrinsicBuffersLRU.addToHead(curr);
        // Track the dawn buffer's usage by the command buffer.
        cb->trackResource(curr->buffer());
        curr->updateAccessTime();
        return {curr->buffer().get(), *offset, SkTo<uint32_t>(intrinsicValues.size())};
    }

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating intrinsic constants faster. Metal has a setVertexBytes method to quickly send
    // intrinsic constants to the vertex shader without any buffer, but Dawn has no similar
    // capability. So we have to use WriteBuffer(), which is not allowed to be called while there
    // is an active render pass.
    SkASSERT(!cb->hasActivePassEncoder());

    const Caps* caps = fResourceProvider->dawnSharedContext()->caps();
    const uint32_t stride =
            SkAlignTo(intrinsicValues.size(), caps->requiredUniformBufferAlignment());
    // In either of the following cases, we need to create a new buffer:
    //  (1) There is no current buffer.
    //  (2) The current buffer is full.
    if (!fCurrentBuffer || fCurrentBuffer->slotsUsed() == IntrinsicBuffer::kNumSlots) {
        // We can just replace the current buffer; any prior buffer was already tracked in the LRU
        // list and the intrinsic constants were written directly to the Dawn queue.
        DawnResourceProvider* resourceProvider = fResourceProvider;
        auto dawnBuffer =
                resourceProvider->findOrCreateDawnBuffer(stride * IntrinsicBuffer::kNumSlots,
                                                         BufferType::kUniform,
                                                         AccessPattern::kGpuOnly,
                                                         "IntrinsicConstantBuffer");
        if (!dawnBuffer) {
            // If we failed to create a GPU buffer to hold the intrinsic uniforms, we will fail the
            // Recording being inserted, so return an empty bind info.
            return {};
        }

        fCurrentBuffer = new IntrinsicBuffer(dawnBuffer);
        fIntrinsicBuffersLRU.addToHead(fCurrentBuffer);
        fNumBuffers++;
        // If we have too many buffers, remove the least recently used one.
        if (fNumBuffers > kMaxNumBuffers) {
            auto* tail = fIntrinsicBuffersLRU.tail();
            fIntrinsicBuffersLRU.remove(tail);
            delete tail;
            fNumBuffers--;
        }
    }

    SkASSERT(fCurrentBuffer && fCurrentBuffer->slotsUsed() < IntrinsicBuffer::kNumSlots);
    uint32_t newOffset = (fCurrentBuffer->slotsUsed()) * stride;
    fResourceProvider->dawnSharedContext()->queue().WriteBuffer(
            fCurrentBuffer->buffer()->dawnBuffer(),
            newOffset,
            intrinsicValues.data(),
            intrinsicValues.size());

    // Track the intrinsic values in the buffer.
    fCurrentBuffer->trackIntrinsic(intrinsicValues, newOffset);

    cb->trackResource(fCurrentBuffer->buffer());
    fCurrentBuffer->updateAccessTime();

    return {fCurrentBuffer->buffer().get(), newOffset, SkTo<uint32_t>(intrinsicValues.size())};
}
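
// A sizing sketch (illustrative numbers): with 32 bytes of intrinsic uniforms
// and a required uniform-buffer alignment of 256 bytes, the stride is
// SkAlignTo(32, 256) = 256, so each IntrinsicBuffer allocates
// 256 * kNumSlots = 2048 bytes and can cache eight distinct uniform blocks.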

template <typename T> void DawnResourceProvider::IntrinsicConstantsManager::purgeBuffersIf(T pred) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    while (curr != nullptr) {
        auto* next = iter.next();
        if (pred(curr)) {
            fIntrinsicBuffersLRU.remove(curr);
            fNumBuffers--;
            delete curr;
        }
        curr = next;
    }
}

// DawnResourceProvider::IntrinsicConstantsManager
// ----------------------------------------------------------------------------


DawnResourceProvider::DawnResourceProvider(SharedContext* sharedContext,
                                           SingleOwner* singleOwner,
                                           uint32_t recorderID,
                                           size_t resourceBudget)
        : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget)
        , fUniformBufferBindGroupCache(kMaxNumberOfCachedBufferBindGroups)
        , fSingleTextureSamplerBindGroups(kMaxNumberOfCachedTextureBindGroups) {
    fIntrinsicConstantsManager = std::make_unique<IntrinsicConstantsManager>(this);
}

DawnResourceProvider::~DawnResourceProvider() = default;

wgpu::RenderPipeline DawnResourceProvider::findOrCreateBlitWithDrawPipeline(
        const RenderPassDesc& renderPassDesc) {
    uint32_t renderPassKey =
            this->dawnSharedContext()->dawnCaps()->getRenderPassDescKeyForPipeline(renderPassDesc);
    wgpu::RenderPipeline pipeline = fBlitWithDrawPipelines[renderPassKey];
    if (!pipeline) {
        static constexpr char kVertexShaderText[] = R"(
            var<private> fullscreenTriPositions : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));

            @vertex
            fn main(@builtin(vertex_index) vertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4(fullscreenTriPositions[vertexIndex], 1.0, 1.0);
            }
        )";
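
        // The three positions above form a single "fullscreen triangle": its
        // clip-space corners (-1,-1), (-1,3), and (3,-1) fully contain the
        // [-1,1] viewport square, so the blit needs no vertex or index buffer.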

        static constexpr char kFragmentShaderText[] = R"(
            @group(0) @binding(0) var colorMap: texture_2d<f32>;

            @fragment
            fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {
                var coords : vec2<i32> = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));
                return textureLoad(colorMap, coords, 0);
            }
        )";

        auto vsModule = create_shader_module(dawnSharedContext()->device(), kVertexShaderText);
        auto fsModule = create_shader_module(dawnSharedContext()->device(), kFragmentShaderText);

        pipeline = create_blit_render_pipeline(
                dawnSharedContext(),
                /*label=*/"BlitWithDraw",
                std::move(vsModule),
                std::move(fsModule),
                /*renderPassColorFormat=*/
                TextureInfos::GetDawnViewFormat(renderPassDesc.fColorAttachment.fTextureInfo),
                /*renderPassDepthStencilFormat=*/
                renderPassDesc.fDepthStencilAttachment.fTextureInfo.isValid()
                        ? TextureInfos::GetDawnViewFormat(
                                  renderPassDesc.fDepthStencilAttachment.fTextureInfo)
                        : wgpu::TextureFormat::Undefined,
                /*numSamples=*/renderPassDesc.fColorAttachment.fTextureInfo.numSamples());

        if (pipeline) {
            fBlitWithDrawPipelines.set(renderPassKey, pipeline);
        }
    }

    return pipeline;
}

sk_sp<Texture> DawnResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) {
    // Convert to smart pointers. The wgpu::Texture and wgpu::TextureView constructors increment
    // the ref count.
    wgpu::Texture dawnTexture = BackendTextures::GetDawnTexturePtr(texture);
    wgpu::TextureView dawnTextureView = BackendTextures::GetDawnTextureViewPtr(texture);
    SkASSERT(!dawnTexture || !dawnTextureView);

    if (!dawnTexture && !dawnTextureView) {
        return {};
    }

    if (dawnTexture) {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTexture));
    } else {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTextureView));
    }
}

sk_sp<DawnTexture> DawnResourceProvider::findOrCreateDiscardableMSAALoadTexture(
        SkISize dimensions, const TextureInfo& msaaInfo) {
    SkASSERT(msaaInfo.isValid());

    // Derive the load texture's info from the MSAA texture's info.
    DawnTextureInfo dawnMsaaLoadTextureInfo;
    SkAssertResult(TextureInfos::GetDawnTextureInfo(msaaInfo, &dawnMsaaLoadTextureInfo));
    dawnMsaaLoadTextureInfo.fSampleCount = 1;
    dawnMsaaLoadTextureInfo.fUsage |= wgpu::TextureUsage::TextureBinding;

#if !defined(__EMSCRIPTEN__)
    // The MSAA texture can be a transient attachment (memoryless), but the load texture cannot,
    // because the load texture's contents must be retained between the two passes of a load:
    //  - first pass: the resolve texture is blitted to the load texture.
    //  - second pass: the actual render pass is started and the load texture is blitted to the
    //    MSAA texture.
    dawnMsaaLoadTextureInfo.fUsage &= (~wgpu::TextureUsage::TransientAttachment);
#endif

    auto texture = this->findOrCreateDiscardableMSAAAttachment(
            dimensions, TextureInfos::MakeDawn(dawnMsaaLoadTextureInfo));

    return sk_sp<DawnTexture>(static_cast<DawnTexture*>(texture.release()));
}

sk_sp<GraphicsPipeline> DawnResourceProvider::createGraphicsPipeline(
        const RuntimeEffectDictionary* runtimeDict,
        const UniqueKey& pipelineKey,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc,
        SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags,
        uint32_t compilationID) {
    return DawnGraphicsPipeline::Make(this->dawnSharedContext(),
                                      this,
                                      runtimeDict,
                                      pipelineKey,
                                      pipelineDesc,
                                      renderPassDesc,
                                      pipelineCreationFlags,
                                      compilationID);
}

sk_sp<ComputePipeline> DawnResourceProvider::createComputePipeline(
        const ComputePipelineDesc& desc) {
    return DawnComputePipeline::Make(this->dawnSharedContext(), desc);
}

sk_sp<Texture> DawnResourceProvider::createTexture(SkISize dimensions,
                                                   const TextureInfo& info,
                                                   skgpu::Budgeted budgeted) {
    return DawnTexture::Make(this->dawnSharedContext(),
                             dimensions,
                             info,
                             budgeted);
}

sk_sp<Buffer> DawnResourceProvider::createBuffer(size_t size,
                                                 BufferType type,
                                                 AccessPattern accessPattern) {
    return DawnBuffer::Make(this->dawnSharedContext(), size, type, accessPattern);
}

sk_sp<Sampler> DawnResourceProvider::createSampler(const SamplerDesc& samplerDesc) {
    return DawnSampler::Make(this->dawnSharedContext(), samplerDesc);
}

BackendTexture DawnResourceProvider::onCreateBackendTexture(SkISize dimensions,
                                                            const TextureInfo& info) {
    wgpu::Texture texture = DawnTexture::MakeDawnTexture(this->dawnSharedContext(),
                                                         dimensions,
                                                         info);
    if (!texture) {
        return {};
    }

    return BackendTextures::MakeDawn(texture.MoveToCHandle());
}

void DawnResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) {
    SkASSERT(texture.isValid());
    SkASSERT(texture.backend() == BackendApi::kDawn);

    // Automatically release the pointers in wgpu::TextureView's & wgpu::Texture's dtors.
    // Acquire() won't increment the ref count.
    wgpu::TextureView::Acquire(BackendTextures::GetDawnTextureViewPtr(texture));
    // We need to explicitly call Destroy() here since that is the recommended way to delete
    // a Dawn texture predictably, versus just dropping a ref and relying on garbage collection.
    //
    // Additionally this helps to work around an issue where Skia may have cached a BindGroup that
    // references the underlying texture. Skia currently doesn't destroy BindGroups when its use of
    // the texture goes away; thus a ref to the texture remains on the BindGroup and memory is
    // never cleaned up unless we call Destroy() here.
    wgpu::Texture::Acquire(BackendTextures::GetDawnTexturePtr(texture)).Destroy();
}

DawnSharedContext* DawnResourceProvider::dawnSharedContext() const {
    return static_cast<DawnSharedContext*>(fSharedContext);
}

sk_sp<DawnBuffer> DawnResourceProvider::findOrCreateDawnBuffer(size_t size,
                                                               BufferType type,
                                                               AccessPattern accessPattern,
                                                               std::string_view label) {
    sk_sp<Buffer> buffer = this->findOrCreateBuffer(size, type, accessPattern, std::move(label));
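    // The downcast below assumes this provider only ever creates DawnBuffers,
    // so any cached Buffer returned by findOrCreateBuffer() is a DawnBuffer.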
    DawnBuffer* ptr = static_cast<DawnBuffer*>(buffer.release());
    return sk_sp<DawnBuffer>(ptr);
}

const wgpu::BindGroupLayout& DawnResourceProvider::getOrCreateUniformBuffersBindGroupLayout() {
    if (fUniformBuffersBindGroupLayout) {
        return fUniformBuffersBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 4> entries;
    entries[0].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;
    entries[0].buffer.hasDynamicOffset = true;
    entries[0].buffer.minBindingSize = 0;

    entries[1].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
    entries[1].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[1].buffer.hasDynamicOffset = true;
    entries[1].buffer.minBindingSize = 0;

    entries[2].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
    entries[2].visibility = wgpu::ShaderStage::Fragment;
    entries[2].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[2].buffer.hasDynamicOffset = true;
    entries[2].buffer.minBindingSize = 0;

    // The gradient buffer is only used when storage buffers are preferred; otherwise large
    // gradients fall back to a texture. When not in use, the binding type is set to uniform to
    // satisfy the binding-type restrictions of devices without storage buffer support.
    entries[3].binding = DawnGraphicsPipeline::kGradientBufferIndex;
    entries[3].visibility = wgpu::ShaderStage::Fragment;
    entries[3].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[3].buffer.hasDynamicOffset = true;
    entries[3].buffer.minBindingSize = 0;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Uniform buffers bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fUniformBuffersBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fUniformBuffersBindGroupLayout;
}

const wgpu::BindGroupLayout&
DawnResourceProvider::getOrCreateSingleTextureSamplerBindGroupLayout() {
    if (fSingleTextureSamplerBindGroupLayout) {
        return fSingleTextureSamplerBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].sampler.type = wgpu::SamplerBindingType::Filtering;

    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].texture.sampleType = wgpu::TextureSampleType::Float;
    entries[1].texture.viewDimension = wgpu::TextureViewDimension::e2D;
    entries[1].texture.multisampled = false;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Single texture + sampler bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fSingleTextureSamplerBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fSingleTextureSamplerBindGroupLayout;
}

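// Returns a tiny placeholder buffer used to occupy bind group slots that have
// no real buffer bound. Rationale (per WebGPU binding rules): every entry
// declared in the bind group layout must reference a valid resource, so unused
// uniform/storage slots point at this shared dummy buffer instead.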
const wgpu::Buffer& DawnResourceProvider::getOrCreateNullBuffer() {
    if (!fNullBuffer) {
        wgpu::BufferDescriptor desc;
        if (fSharedContext->caps()->setBackendLabels()) {
            desc.label = "UnusedBufferSlot";
        }
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform |
                     wgpu::BufferUsage::Storage;
        desc.size = kBufferBindingSizeAlignment;
        desc.mappedAtCreation = false;

        fNullBuffer = this->dawnSharedContext()->device().CreateBuffer(&desc);
        SkASSERT(fNullBuffer);
    }

    return fNullBuffer;
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateUniformBuffersBindGroup(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, kNumUniformEntries>&
                boundBuffersAndSizes) {
    auto key = make_ubo_bind_group_key(boundBuffersAndSizes);
    auto* existingBindGroup = fUniformBufferBindGroupCache.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    // Translate to a wgpu::BindGroupDescriptor.
    std::array<wgpu::BindGroupEntry, kNumUniformEntries> entries;

    constexpr uint32_t kBindingIndices[] = {
        DawnGraphicsPipeline::kIntrinsicUniformBufferIndex,
        DawnGraphicsPipeline::kRenderStepUniformBufferIndex,
        DawnGraphicsPipeline::kPaintUniformBufferIndex,
        DawnGraphicsPipeline::kGradientBufferIndex,
    };

    for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
        const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
        const uint32_t bindingSize = boundBuffersAndSizes[i].second;

        entries[i].binding = kBindingIndices[i];
        entries[i].offset = 0;
        if (boundBuffer) {
            entries[i].buffer = boundBuffer->dawnBuffer();
            entries[i].size = SkAlignTo(bindingSize, kBufferBindingSizeAlignment);
        } else {
            entries[i].buffer = this->getOrCreateNullBuffer();
            entries[i].size = wgpu::kWholeSize;
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateUniformBuffersBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fUniformBufferBindGroupCache.insert(key, bindGroup);
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateSingleTextureSamplerBindGroup(
        const DawnSampler* sampler, const DawnTexture* texture) {
    auto key = make_texture_bind_group_key(sampler, texture);
    auto* existingBindGroup = fSingleTextureSamplerBindGroups.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    std::array<wgpu::BindGroupEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].sampler = sampler->dawnSampler();
    entries[1].binding = 1;
    entries[1].textureView = texture->sampleTextureView();

    wgpu::BindGroupDescriptor desc;
    desc.layout = getOrCreateSingleTextureSamplerBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fSingleTextureSamplerBindGroups.insert(key, bindGroup);
}

void DawnResourceProvider::onFreeGpuResources() {
    fIntrinsicConstantsManager->freeGpuResources();
    // The wgpu::Textures and wgpu::Buffers held by the BindGroups should be explicitly destroyed
    // when the DawnTextures and DawnBuffers are destroyed, but removing the bind groups themselves
    // helps reduce CPU memory periodically.
    fSingleTextureSamplerBindGroups.reset();
    fUniformBufferBindGroupCache.reset();
}

void DawnResourceProvider::onPurgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    fIntrinsicConstantsManager->purgeResourcesNotUsedSince(purgeTime);
}

BindBufferInfo DawnResourceProvider::findOrCreateIntrinsicBindBufferInfo(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    return fIntrinsicConstantsManager->add(cb, intrinsicValues);
}

} // namespace skgpu::graphite