/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnResourceProvider.h"

#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/private/base/SkAlign.h"
#include "src/gpu/graphite/ComputePipeline.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"
#include "src/sksl/SkSLCompiler.h"

namespace skgpu::graphite {

namespace {

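// kBufferBindingSizeAlignment: buffer binding sizes are rounded up to this value, which is
// also the size of the placeholder buffer bound to unused buffer slots (see
// getOrCreateNullBuffer()). The two cache limits below bound the number of retained
// uniform-buffer and single-texture/sampler bind groups.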
constexpr int kBufferBindingSizeAlignment = 16;
constexpr int kMaxNumberOfCachedBufferBindGroups = 32;
constexpr int kMaxNumberOfCachedTextureBindGroups = 4096;

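// Compiles a WGSL source string into a shader module on the given device.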
wgpu::ShaderModule create_shader_module(const wgpu::Device& device, const char* source) {
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
    wgslDesc.code = source;
    wgpu::ShaderModuleDescriptor descriptor;
    descriptor.nextInChain = &wgslDesc;
    return device.CreateShaderModule(&descriptor);
}

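// Builds the pipeline used for blit-with-draw: a fullscreen-triangle vertex stage and a
// fragment stage that loads texels directly, with blending disabled. Returns nullptr if
// creation fails while scoped error checking is enabled.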
wgpu::RenderPipeline create_blit_render_pipeline(const DawnSharedContext* sharedContext,
                                                 const char* label,
                                                 wgpu::ShaderModule vsModule,
                                                 wgpu::ShaderModule fsModule,
                                                 wgpu::TextureFormat renderPassColorFormat,
                                                 wgpu::TextureFormat renderPassDepthStencilFormat,
                                                 int numSamples) {
    wgpu::RenderPipelineDescriptor descriptor;
    descriptor.label = label;
    descriptor.layout = nullptr;

    wgpu::ColorTargetState colorTarget;
    colorTarget.format = renderPassColorFormat;
    colorTarget.blend = nullptr;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    wgpu::DepthStencilState depthStencil;
    if (renderPassDepthStencilFormat != wgpu::TextureFormat::Undefined) {
        depthStencil.format = renderPassDepthStencilFormat;
        depthStencil.depthWriteEnabled = false;
        depthStencil.depthCompare = wgpu::CompareFunction::Always;

        descriptor.depthStencil = &depthStencil;
    }

    wgpu::FragmentState fragment;
    fragment.module = std::move(fsModule);
    fragment.entryPoint = "main";
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    descriptor.vertex.module = std::move(vsModule);
    descriptor.vertex.entryPoint = "main";
    descriptor.vertex.constantCount = 0;
    descriptor.vertex.constants = nullptr;
    descriptor.vertex.bufferCount = 0;
    descriptor.vertex.buffers = nullptr;

    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;

    descriptor.multisample.count = numSamples;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    auto pipeline = sharedContext->device().CreateRenderPipeline(&descriptor);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return pipeline;
}

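// Derives the cache key for a uniform-buffer bind group from each bound buffer's unique ID and
// binding size (zeroes for unused slots).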
UniqueKey make_ubo_bind_group_key(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, 4>& boundBuffersAndSizes) {
    static const UniqueKey::Domain kBufferBindGroupDomain = UniqueKey::GenerateDomain();

    UniqueKey uniqueKey;
    {
        // Each entry in the bind group needs 2 uint32_t in the key:
        // - buffer's unique ID: 32 bits.
        // - buffer's binding size: 32 bits.
        // We need a total of 4 entries in the uniform buffer bind group.
        // Unused entries will be assigned zero values.
        UniqueKey::Builder builder(
                &uniqueKey, kBufferBindGroupDomain, 8, "GraphicsPipelineBufferBindGroup");

        for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
            const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
            const uint32_t bindingSize = boundBuffersAndSizes[i].second;
            if (boundBuffer) {
                builder[2 * i] = boundBuffer->uniqueID().asUInt();
                builder[2 * i + 1] = bindingSize;
            } else {
                builder[2 * i] = 0;
                builder[2 * i + 1] = 0;
            }
        }

        builder.finish();
    }

    return uniqueKey;
}

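// Derives the cache key for a single texture + sampler bind group from the two resources'
// unique IDs.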
UniqueKey make_texture_bind_group_key(const DawnSampler* sampler, const DawnTexture* texture) {
    static const UniqueKey::Domain kTextureBindGroupDomain = UniqueKey::GenerateDomain();

    UniqueKey uniqueKey;
    {
        UniqueKey::Builder builder(&uniqueKey,
                                   kTextureBindGroupDomain,
                                   2,
                                   "GraphicsPipelineSingleTextureSamplerBindGroup");

        builder[0] = sampler->uniqueID().asUInt();
        builder[1] = texture->uniqueID().asUInt();

        builder.finish();
    }

    return uniqueKey;
}

}  // namespace

DawnResourceProvider::DawnResourceProvider(SharedContext* sharedContext,
                                           SingleOwner* singleOwner,
                                           uint32_t recorderID,
                                           size_t resourceBudget)
        : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget)
        , fUniformBufferBindGroupCache(kMaxNumberOfCachedBufferBindGroups)
        , fSingleTextureSamplerBindGroups(kMaxNumberOfCachedTextureBindGroups) {}

DawnResourceProvider::~DawnResourceProvider() = default;

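// Returns the pipeline that implements a blit as a fullscreen draw, cached per compatible
// RenderPassDesc. The vertex stage emits a fullscreen triangle and the fragment stage copies
// texels from the bound color map with textureLoad().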
wgpu::RenderPipeline DawnResourceProvider::findOrCreateBlitWithDrawPipeline(
        const RenderPassDesc& renderPassDesc) {
    uint64_t renderPassKey =
            this->dawnSharedContext()->dawnCaps()->getRenderPassDescKeyForPipeline(renderPassDesc);
    wgpu::RenderPipeline pipeline = fBlitWithDrawPipelines[renderPassKey];
    if (!pipeline) {
        static constexpr char kVertexShaderText[] = R"(
            var<private> fullscreenTriPositions : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));

            @vertex
            fn main(@builtin(vertex_index) vertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4(fullscreenTriPositions[vertexIndex], 1.0, 1.0);
            }
        )";

        static constexpr char kFragmentShaderText[] = R"(
            @group(0) @binding(0) var colorMap: texture_2d<f32>;

            @fragment
            fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {
                var coords : vec2<i32> = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));
                return textureLoad(colorMap, coords, 0);
            }
        )";

        auto vsModule = create_shader_module(dawnSharedContext()->device(), kVertexShaderText);
        auto fsModule = create_shader_module(dawnSharedContext()->device(), kFragmentShaderText);

        pipeline = create_blit_render_pipeline(
                dawnSharedContext(),
                /*label=*/"BlitWithDraw",
                std::move(vsModule),
                std::move(fsModule),
                /*renderPassColorFormat=*/
                renderPassDesc.fColorAttachment.fTextureInfo.dawnTextureSpec().getViewFormat(),
                /*renderPassDepthStencilFormat=*/
                renderPassDesc.fDepthStencilAttachment.fTextureInfo.isValid()
                        ? renderPassDesc.fDepthStencilAttachment.fTextureInfo.dawnTextureSpec()
                                  .getViewFormat()
                        : wgpu::TextureFormat::Undefined,
                /*numSamples=*/renderPassDesc.fColorAttachment.fTextureInfo.numSamples());

        if (pipeline) {
            fBlitWithDrawPipelines.set(renderPassKey, pipeline);
        }
    }

    return pipeline;
}

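// A wrapped BackendTexture supplies either a wgpu::Texture or a wgpu::TextureView, never both.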
sk_sp<Texture> DawnResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) {
    // Convert to smart pointers. The wgpu::Texture/TextureView constructors increment the
    // ref count.
    wgpu::Texture dawnTexture = texture.getDawnTexturePtr();
    wgpu::TextureView dawnTextureView = texture.getDawnTextureViewPtr();
    SkASSERT(!dawnTexture || !dawnTextureView);

    if (!dawnTexture && !dawnTextureView) {
        return {};
    }

    if (dawnTexture) {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTexture));
    } else {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTextureView));
    }
}

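// Returns a single-sampled, bindable texture used to carry the resolve texture's contents into
// a render pass that loads a discardable MSAA attachment.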
sk_sp<DawnTexture> DawnResourceProvider::findOrCreateDiscardableMSAALoadTexture(
        SkISize dimensions, const TextureInfo& msaaInfo) {
    SkASSERT(msaaInfo.isValid());

    // Derive the load texture's info from the MSAA texture's info.
    DawnTextureInfo dawnMsaaLoadTextureInfo;
    msaaInfo.getDawnTextureInfo(&dawnMsaaLoadTextureInfo);
    dawnMsaaLoadTextureInfo.fSampleCount = 1;
    dawnMsaaLoadTextureInfo.fUsage |= wgpu::TextureUsage::TextureBinding;

#if !defined(__EMSCRIPTEN__)
    // The MSAA texture can be a transient attachment (memoryless), but the load texture cannot
    // be, because its contents must be retained between the two passes that implement loading:
    // - first pass: the resolve texture is blitted to the load texture.
    // - second pass: the actual render pass starts and the load texture is blitted to the MSAA
    //   texture.
    dawnMsaaLoadTextureInfo.fUsage &= (~wgpu::TextureUsage::TransientAttachment);
#endif

    auto texture = this->findOrCreateDiscardableMSAAAttachment(dimensions, dawnMsaaLoadTextureInfo);

    return sk_sp<DawnTexture>(static_cast<DawnTexture*>(texture.release()));
}

sk_sp<GraphicsPipeline> DawnResourceProvider::createGraphicsPipeline(
        const RuntimeEffectDictionary* runtimeDict,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc) {
    return DawnGraphicsPipeline::Make(this->dawnSharedContext(),
                                      this,
                                      runtimeDict,
                                      pipelineDesc,
                                      renderPassDesc);
}

sk_sp<ComputePipeline> DawnResourceProvider::createComputePipeline(
        const ComputePipelineDesc& desc) {
    return DawnComputePipeline::Make(this->dawnSharedContext(), desc);
}

sk_sp<Texture> DawnResourceProvider::createTexture(SkISize dimensions,
                                                   const TextureInfo& info,
                                                   skgpu::Budgeted budgeted) {
    return DawnTexture::Make(this->dawnSharedContext(),
                             dimensions,
                             info,
                             budgeted);
}

sk_sp<Buffer> DawnResourceProvider::createBuffer(size_t size,
                                                 BufferType type,
                                                 AccessPattern accessPattern) {
    return DawnBuffer::Make(this->dawnSharedContext(), size, type, accessPattern);
}

sk_sp<Sampler> DawnResourceProvider::createSampler(const SamplerDesc& samplerDesc) {
    return DawnSampler::Make(this->dawnSharedContext(),
                             samplerDesc.samplingOptions(),
                             samplerDesc.tileModeX(),
                             samplerDesc.tileModeY());
}

BackendTexture DawnResourceProvider::onCreateBackendTexture(SkISize dimensions,
                                                            const TextureInfo& info) {
    wgpu::Texture texture = DawnTexture::MakeDawnTexture(this->dawnSharedContext(),
                                                         dimensions,
                                                         info);
    if (!texture) {
        return {};
    }

    return BackendTexture(texture.MoveToCHandle());
}

void DawnResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) {
    SkASSERT(texture.isValid());
    SkASSERT(texture.backend() == BackendApi::kDawn);

    // Acquire() does not increment the ref count, so the wgpu::TextureView and wgpu::Texture
    // dtors release the pointers automatically.
    wgpu::TextureView::Acquire(texture.getDawnTextureViewPtr());
    // We need to explicitly call Destroy() here since that is the recommended way to delete
    // a Dawn texture predictably, versus just dropping a ref and relying on garbage collection.
    //
    // Additionally this helps to work around an issue where Skia may have cached a BindGroup
    // that references the underlying texture. Skia currently doesn't destroy BindGroups when its
    // use of the texture goes away, thus a ref to the texture remains on the BindGroup and the
    // memory is never cleaned up unless we call Destroy() here.
    wgpu::Texture::Acquire(texture.getDawnTexturePtr()).Destroy();
}

DawnSharedContext* DawnResourceProvider::dawnSharedContext() const {
    return static_cast<DawnSharedContext*>(fSharedContext);
}

sk_sp<DawnBuffer> DawnResourceProvider::findOrCreateDawnBuffer(size_t size,
                                                               BufferType type,
                                                               AccessPattern accessPattern,
                                                               std::string_view label) {
    sk_sp<Buffer> buffer = this->findOrCreateBuffer(size, type, accessPattern, std::move(label));
    DawnBuffer* ptr = static_cast<DawnBuffer*>(buffer.release());
    return sk_sp<DawnBuffer>(ptr);
}

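// Lazily creates the bind group layout used by graphics pipelines for the four buffer slots:
// intrinsic, render-step, and paint uniforms plus the gradient buffer. The last three use
// read-only storage bindings when the device prefers storage buffers.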
const wgpu::BindGroupLayout& DawnResourceProvider::getOrCreateUniformBuffersBindGroupLayout() {
    if (fUniformBuffersBindGroupLayout) {
        return fUniformBuffersBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 4> entries;
    entries[0].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;
    entries[0].buffer.hasDynamicOffset = true;
    entries[0].buffer.minBindingSize = 0;

    entries[1].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
    entries[1].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[1].buffer.hasDynamicOffset = true;
    entries[1].buffer.minBindingSize = 0;

    entries[2].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
    entries[2].visibility = wgpu::ShaderStage::Fragment;
    entries[2].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[2].buffer.hasDynamicOffset = true;
    entries[2].buffer.minBindingSize = 0;

    // The gradient buffer is only used when storage buffers are preferred; otherwise large
    // gradients fall back to a texture. When unused, declare the binding as a uniform buffer
    // to satisfy binding-type restrictions on devices without SSBO support.
    entries[3].binding = DawnGraphicsPipeline::kGradientBufferIndex;
    entries[3].visibility = wgpu::ShaderStage::Fragment;
    entries[3].buffer.type = fSharedContext->caps()->storageBufferPreferred()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[3].buffer.hasDynamicOffset = true;
    entries[3].buffer.minBindingSize = 0;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Uniform buffers bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fUniformBuffersBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fUniformBuffersBindGroupLayout;
}

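// Lazily creates the layout for bind groups pairing one filtering sampler with one
// non-multisampled, float-sampleable 2D texture.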
const wgpu::BindGroupLayout&
DawnResourceProvider::getOrCreateSingleTextureSamplerBindGroupLayout() {
    if (fSingleTextureSamplerBindGroupLayout) {
        return fSingleTextureSamplerBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].sampler.type = wgpu::SamplerBindingType::Filtering;

    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].texture.sampleType = wgpu::TextureSampleType::Float;
    entries[1].texture.viewDimension = wgpu::TextureViewDimension::e2D;
    entries[1].texture.multisampled = false;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Single texture + sampler bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fSingleTextureSamplerBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fSingleTextureSamplerBindGroupLayout;
}

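// Returns the placeholder buffer bound to unused buffer slots, since every binding declared in
// the layout must reference a valid buffer.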
const wgpu::Buffer& DawnResourceProvider::getOrCreateNullBuffer() {
    if (!fNullBuffer) {
        wgpu::BufferDescriptor desc;
        if (fSharedContext->caps()->setBackendLabels()) {
            desc.label = "UnusedBufferSlot";
        }
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform |
                     wgpu::BufferUsage::Storage;
        desc.size = kBufferBindingSizeAlignment;
        desc.mappedAtCreation = false;

        fNullBuffer = this->dawnSharedContext()->device().CreateBuffer(&desc);
        SkASSERT(fNullBuffer);
    }

    return fNullBuffer;
}

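// Lazily creates the GPU-only uniform buffer that holds sizeof(float[4]) bytes of intrinsic
// constants.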
const sk_sp<DawnBuffer>& DawnResourceProvider::getOrCreateIntrinsicConstantBuffer() {
    if (!fIntrinsicConstantBuffer) {
        fIntrinsicConstantBuffer = findOrCreateDawnBuffer(sizeof(float[4]),
                                                          BufferType::kUniform,
                                                          AccessPattern::kGpuOnly,
                                                          "IntrinsicConstantBuffer");
        SkASSERT(fIntrinsicConstantBuffer);
    }

    return fIntrinsicConstantBuffer;
}

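// Looks up or creates a cached bind group for up to four bound uniform/storage buffers. Unused
// slots are bound to the shared null buffer.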
const wgpu::BindGroup& DawnResourceProvider::findOrCreateUniformBuffersBindGroup(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, 4>& boundBuffersAndSizes) {
    auto key = make_ubo_bind_group_key(boundBuffersAndSizes);
    auto* existingBindGroup = fUniformBufferBindGroupCache.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    // Translate to a wgpu::BindGroupDescriptor.
    std::array<wgpu::BindGroupEntry, 4> entries;

    constexpr uint32_t kBindingIndices[] = {
            DawnGraphicsPipeline::kIntrinsicUniformBufferIndex,
            DawnGraphicsPipeline::kRenderStepUniformBufferIndex,
            DawnGraphicsPipeline::kPaintUniformBufferIndex,
            DawnGraphicsPipeline::kGradientBufferIndex,
    };

    for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
        const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
        const uint32_t bindingSize = boundBuffersAndSizes[i].second;

        entries[i].binding = kBindingIndices[i];
        entries[i].offset = 0;
        if (boundBuffer) {
            entries[i].buffer = boundBuffer->dawnBuffer();
            entries[i].size = SkAlignTo(bindingSize, kBufferBindingSizeAlignment);
        } else {
            entries[i].buffer = this->getOrCreateNullBuffer();
            entries[i].size = wgpu::kWholeSize;
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateUniformBuffersBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fUniformBufferBindGroupCache.insert(key, bindGroup);
}

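// Looks up or creates a cached bind group pairing 'sampler' with 'texture' for the single
// texture + sampler layout.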
const wgpu::BindGroup& DawnResourceProvider::findOrCreateSingleTextureSamplerBindGroup(
        const DawnSampler* sampler, const DawnTexture* texture) {
    auto key = make_texture_bind_group_key(sampler, texture);
    auto* existingBindGroup = fSingleTextureSamplerBindGroups.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    std::array<wgpu::BindGroupEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].sampler = sampler->dawnSampler();
    entries[1].binding = 1;
    entries[1].textureView = texture->sampleTextureView();

    wgpu::BindGroupDescriptor desc;
    desc.layout = getOrCreateSingleTextureSamplerBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fSingleTextureSamplerBindGroups.insert(key, bindGroup);
}

}  // namespace skgpu::graphite