/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnResourceProvider.h"

#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/gpu/graphite/dawn/DawnGraphiteTypes.h"
#include "include/private/base/SingleOwner.h"
#include "include/private/base/SkAlign.h"
#include "src/gpu/graphite/ComputePipeline.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/dawn/DawnBuffer.h"
#include "src/gpu/graphite/dawn/DawnCommandBuffer.h"
#include "src/gpu/graphite/dawn/DawnComputePipeline.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
#include "src/gpu/graphite/dawn/DawnGraphiteUtils.h"
#include "src/gpu/graphite/dawn/DawnSampler.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnTexture.h"
#include "src/sksl/SkSLCompiler.h"

namespace skgpu::graphite {

namespace {

constexpr int kBufferBindingSizeAlignment = 16;
constexpr int kMaxNumberOfCachedBufferBindGroups = 1024;
constexpr int kMaxNumberOfCachedTextureBindGroups = 4096;
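// Buffer binding sizes handed to Dawn are rounded up to kBufferBindingSizeAlignment (see
// findOrCreateUniformBuffersBindGroup below), and the placeholder "null" buffer bound to unused
// slots is also this size. The two cache limits cap the LRU bind-group caches owned by the
// resource provider.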

wgpu::ShaderModule create_shader_module(const wgpu::Device& device, const char* source) {
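    // Note: Emscripten's WebGPU bindings still use the older wgpu::ShaderModuleWGSLDescriptor
    // name for what newer native Dawn calls wgpu::ShaderSourceWGSL.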
#if defined(__EMSCRIPTEN__)
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
#else
    wgpu::ShaderSourceWGSL wgslDesc;
#endif
    wgslDesc.code = source;
    wgpu::ShaderModuleDescriptor descriptor;
    descriptor.nextInChain = &wgslDesc;
    return device.CreateShaderModule(&descriptor);
}

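// Creates the pipeline used for blit-with-draw: a fullscreen draw that reads the source texture.
// The pipeline layout is left as nullptr so that Dawn derives an implicit layout from the shader
// modules.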
wgpu::RenderPipeline create_blit_render_pipeline(const DawnSharedContext* sharedContext,
                                                 const char* label,
                                                 wgpu::ShaderModule vsModule,
                                                 wgpu::ShaderModule fsModule,
                                                 wgpu::TextureFormat renderPassColorFormat,
                                                 wgpu::TextureFormat renderPassDepthStencilFormat,
                                                 int numSamples) {
    wgpu::RenderPipelineDescriptor descriptor;
    descriptor.label = label;
    descriptor.layout = nullptr;

    wgpu::ColorTargetState colorTarget;
    colorTarget.format = renderPassColorFormat;
    colorTarget.blend = nullptr;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    wgpu::DepthStencilState depthStencil;
    if (renderPassDepthStencilFormat != wgpu::TextureFormat::Undefined) {
        depthStencil.format = renderPassDepthStencilFormat;
        depthStencil.depthWriteEnabled = false;
        depthStencil.depthCompare = wgpu::CompareFunction::Always;

        descriptor.depthStencil = &depthStencil;
    }

    wgpu::FragmentState fragment;
    fragment.module = std::move(fsModule);
    fragment.entryPoint = "main";
    fragment.constantCount = 0;
    fragment.constants = nullptr;
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    descriptor.vertex.module = std::move(vsModule);
    descriptor.vertex.entryPoint = "main";
    descriptor.vertex.constantCount = 0;
    descriptor.vertex.constants = nullptr;
    descriptor.vertex.bufferCount = 0;
    descriptor.vertex.buffers = nullptr;

    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;

    descriptor.multisample.count = numSamples;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    auto pipeline = sharedContext->device().CreateRenderPipeline(&descriptor);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return pipeline;
}

template <size_t NumEntries>
using BindGroupKey = typename DawnResourceProvider::BindGroupKey<NumEntries>;
using UniformBindGroupKey = BindGroupKey<DawnResourceProvider::kNumUniformEntries>;

UniformBindGroupKey make_ubo_bind_group_key(
        const std::array<std::pair<const DawnBuffer*, uint32_t>,
                         DawnResourceProvider::kNumUniformEntries>& boundBuffersAndSizes) {
    UniformBindGroupKey uniqueKey;
    {
        // Each entry in the bind group needs 2 uint32_t in the key:
        //  - the buffer's unique ID: 32 bits.
        //  - the buffer's binding size: 32 bits.
        // We need a total of 4 entries in the uniform buffer bind group.
        // Unused entries will be assigned zero values.
        UniformBindGroupKey::Builder builder(&uniqueKey);

        for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
            const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
            const uint32_t bindingSize = boundBuffersAndSizes[i].second;
            if (boundBuffer) {
                builder[2 * i] = boundBuffer->uniqueID().asUInt();
                builder[2 * i + 1] = bindingSize;
            } else {
                builder[2 * i] = 0;
                builder[2 * i + 1] = 0;
            }
        }

        builder.finish();
    }

    return uniqueKey;
}

BindGroupKey<1> make_texture_bind_group_key(const DawnSampler* sampler,
                                            const DawnTexture* texture) {
    BindGroupKey<1> uniqueKey;
    {
        BindGroupKey<1>::Builder builder(&uniqueKey);

        builder[0] = sampler->uniqueID().asUInt();
        builder[1] = texture->uniqueID().asUInt();

        builder.finish();
    }

    return uniqueKey;
}
}  // namespace

// Wraps a Dawn buffer and tracks the intrinsic uniform blocks residing in this buffer.
class DawnResourceProvider::IntrinsicBuffer final {
public:
    static constexpr int kNumSlots = 8;

    IntrinsicBuffer(sk_sp<DawnBuffer> dawnBuffer) : fDawnBuffer(std::move(dawnBuffer)) {}
    ~IntrinsicBuffer() = default;

    sk_sp<DawnBuffer> buffer() const { return fDawnBuffer; }

    // Track that 'intrinsicValues' is stored in the buffer at 'offset'.
    void trackIntrinsic(UniformDataBlock intrinsicValues, uint32_t offset) {
        fCachedIntrinsicValues.set(UniformDataBlock::Make(intrinsicValues, &fUniformData), offset);
    }

    // Find the offset of 'intrinsicValues' in the buffer. Returns nullptr if not found.
    uint32_t* findIntrinsic(UniformDataBlock intrinsicValues) const {
        return fCachedIntrinsicValues.find(intrinsicValues);
    }

    int slotsUsed() const { return fCachedIntrinsicValues.count(); }

    void updateAccessTime() {
        fLastAccess = skgpu::StdSteadyClock::now();
    }
    skgpu::StdSteadyClock::time_point lastAccessTime() const {
        return fLastAccess;
    }

private:
    skia_private::THashMap<UniformDataBlock, uint32_t, UniformDataBlock::Hash>
            fCachedIntrinsicValues;
    SkArenaAlloc fUniformData{0};

    sk_sp<DawnBuffer> fDawnBuffer;
    skgpu::StdSteadyClock::time_point fLastAccess;

    SK_DECLARE_INTERNAL_LLIST_INTERFACE(IntrinsicBuffer);
};

// DawnResourceProvider::IntrinsicConstantsManager
// ----------------------------------------------------------------------------

/**
 * Since Dawn does not currently provide push constants, this helper class manages rotating through
 * buffers and writing each new occurrence of a set of intrinsic uniforms into the current buffer.
 */
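// Typical flow (a sketch, not the only entry point): DawnCommandBuffer requests a BindBufferInfo
// via DawnResourceProvider::findOrCreateIntrinsicBindBufferInfo(), which forwards to add() below.
// This must happen outside of an active render pass, since the values are uploaded with
// wgpu::Queue::WriteBuffer().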
class DawnResourceProvider::IntrinsicConstantsManager {
public:
    explicit IntrinsicConstantsManager(DawnResourceProvider* resourceProvider)
            : fResourceProvider(resourceProvider) {}

    ~IntrinsicConstantsManager() {
        auto alwaysTrue = [](IntrinsicBuffer* buffer) { return true; };
        this->purgeBuffersIf(alwaysTrue);

        SkASSERT(fIntrinsicBuffersLRU.isEmpty());
    }

    // Find or create a bind buffer info for the given intrinsic values used in the given command
    // buffer.
    BindBufferInfo add(DawnCommandBuffer* cb, UniformDataBlock intrinsicValues);

    void purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
        auto bufferNotUsedSince = [purgeTime, this](IntrinsicBuffer* buffer) {
            // We always keep the current buffer as it is likely to be used again soon.
            return buffer != fCurrentBuffer && buffer->lastAccessTime() < purgeTime;
        };
        this->purgeBuffersIf(bufferNotUsedSince);
    }

    void freeGpuResources() { this->purgeResourcesNotUsedSince(skgpu::StdSteadyClock::now()); }

private:
    // The max number of intrinsic buffers to keep around in the cache.
    static constexpr uint32_t kMaxNumBuffers = 16;

    // Traverse the intrinsic buffers and purge the ones that match 'pred'.
    template <typename T> void purgeBuffersIf(T pred);

    DawnResourceProvider* const fResourceProvider;
    // The current buffer being filled up; each IntrinsicBuffer tracks how many of its slots have
    // been written.
    IntrinsicBuffer* fCurrentBuffer = nullptr;

    // All cached intrinsic buffers, in LRU order.
    SkTInternalLList<IntrinsicBuffer> fIntrinsicBuffersLRU;
    // The number of intrinsic buffers currently in the cache.
    uint32_t fNumBuffers = 0;
};

// Find or create a bind buffer info for the given intrinsic values used in the given command
// buffer.
BindBufferInfo DawnResourceProvider::IntrinsicConstantsManager::add(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    uint32_t* offset = nullptr;
    // Find the buffer that contains the given intrinsic values.
    while (curr != nullptr) {
        offset = curr->findIntrinsic(intrinsicValues);
        if (offset != nullptr) {
            break;
        }
        curr = iter.next();
    }
    // If we found the buffer, we can return the bind buffer info directly.
    if (curr != nullptr && offset != nullptr) {
        // Move the buffer to the head of the LRU list.
        fIntrinsicBuffersLRU.remove(curr);
        fIntrinsicBuffersLRU.addToHead(curr);
        // Track the dawn buffer's usage by the command buffer.
        cb->trackResource(curr->buffer());
        curr->updateAccessTime();
        return {curr->buffer().get(), *offset, SkTo<uint32_t>(intrinsicValues.size())};
    }

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating intrinsic constants faster. Metal has a setVertexBytes method to quickly send
    // intrinsic constants to the vertex shader without any buffer, but Dawn has no similar
    // capability. So we have to use WriteBuffer(), which is not allowed to be called while there
    // is an active render pass.
    SkASSERT(!cb->hasActivePassEncoder());

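    // Each slot in an IntrinsicBuffer must begin at a legal uniform-buffer offset, so the
    // per-slot stride is the value size rounded up to the required alignment.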
    const Caps* caps = fResourceProvider->dawnSharedContext()->caps();
    const uint32_t stride =
            SkAlignTo(intrinsicValues.size(), caps->requiredUniformBufferAlignment());
    // In any one of the following cases, we need to create a new buffer:
    //  (1) There is no current buffer.
    //  (2) The current buffer is full.
    if (!fCurrentBuffer || fCurrentBuffer->slotsUsed() == IntrinsicBuffer::kNumSlots) {
        // We can just replace the current buffer; any prior buffer was already tracked in the LRU
        // list and the intrinsic constants were written directly to the Dawn queue.
        DawnResourceProvider* resourceProvider = fResourceProvider;
        auto dawnBuffer =
                resourceProvider->findOrCreateDawnBuffer(stride * IntrinsicBuffer::kNumSlots,
                                                         BufferType::kUniform,
                                                         AccessPattern::kGpuOnly,
                                                         "IntrinsicConstantBuffer");
        if (!dawnBuffer) {
            // If we failed to create a GPU buffer to hold the intrinsic uniforms, we will fail the
            // Recording being inserted, so return an empty bind info.
            return {};
        }

        fCurrentBuffer = new IntrinsicBuffer(dawnBuffer);
        fIntrinsicBuffersLRU.addToHead(fCurrentBuffer);
        fNumBuffers++;
        // If we have too many buffers, remove the least recently used one.
        if (fNumBuffers > kMaxNumBuffers) {
            auto* tail = fIntrinsicBuffersLRU.tail();
            fIntrinsicBuffersLRU.remove(tail);
            delete tail;
            fNumBuffers--;
        }
    }

    SkASSERT(fCurrentBuffer && fCurrentBuffer->slotsUsed() < IntrinsicBuffer::kNumSlots);
    uint32_t newOffset = fCurrentBuffer->slotsUsed() * stride;
    fResourceProvider->dawnSharedContext()->queue().WriteBuffer(
            fCurrentBuffer->buffer()->dawnBuffer(),
            newOffset,
            intrinsicValues.data(),
            intrinsicValues.size());

    // Track the intrinsic values in the buffer.
    fCurrentBuffer->trackIntrinsic(intrinsicValues, newOffset);

    cb->trackResource(fCurrentBuffer->buffer());
    fCurrentBuffer->updateAccessTime();

    return {fCurrentBuffer->buffer().get(), newOffset, SkTo<uint32_t>(intrinsicValues.size())};
}

template <typename T>
void DawnResourceProvider::IntrinsicConstantsManager::purgeBuffersIf(T pred) {
    using Iter = SkTInternalLList<IntrinsicBuffer>::Iter;
    Iter iter;
    auto* curr = iter.init(fIntrinsicBuffersLRU, Iter::kHead_IterStart);
    while (curr != nullptr) {
        auto* next = iter.next();
        if (pred(curr)) {
            fIntrinsicBuffersLRU.remove(curr);
            fNumBuffers--;
            delete curr;
        }
        curr = next;
    }
}

// DawnResourceProvider
// ----------------------------------------------------------------------------

DawnResourceProvider::DawnResourceProvider(SharedContext* sharedContext,
                                           SingleOwner* singleOwner,
                                           uint32_t recorderID,
                                           size_t resourceBudget)
        : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget)
        , fUniformBufferBindGroupCache(kMaxNumberOfCachedBufferBindGroups)
        , fSingleTextureSamplerBindGroups(kMaxNumberOfCachedTextureBindGroups)
        , fSingleOwner(singleOwner) {
    fIntrinsicConstantsManager = std::make_unique<IntrinsicConstantsManager>(this);

    // fSingleOwner is only used in debug asserts; referencing it here avoids unused-member
    // warnings in release builds.
    (void)fSingleOwner;
}

DawnResourceProvider::~DawnResourceProvider() = default;

wgpu::RenderPipeline DawnResourceProvider::findOrCreateBlitWithDrawPipeline(
        const RenderPassDesc& renderPassDesc, bool srcIsMSAA) {
    uint32_t renderPassKey = this->dawnSharedContext()->dawnCaps()->getRenderPassDescKeyForPipeline(
            renderPassDesc, srcIsMSAA);
    wgpu::RenderPipeline pipeline = fBlitWithDrawPipelines[renderPassKey];
    if (!pipeline) {
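        // The vertex shader emits a single oversized triangle that covers the whole viewport in
        // NDC; clipping trims it to the render target, so no vertex buffer is needed.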
        static constexpr char kVertexShaderText[] =
                "var<private> fullscreenTriPositions : array<vec2<f32>, 3> = array<vec2<f32>, 3>("
                "vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));"

                "@vertex "
                "fn main(@builtin(vertex_index) vertexIndex : u32) -> @builtin(position) vec4<f32> {"
                "return vec4(fullscreenTriPositions[vertexIndex], 1.0, 1.0);"
                "}";

        static constexpr char kFragmentShaderText[] =
                "@group(0) @binding(0) var colorMap: texture_2d<f32>;"

                "@fragment "
                "fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {"
                "let coords = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));"
                "return textureLoad(colorMap, coords, 0);"
                "}";

        static constexpr char kFragmentShaderMSAAText[] =
                "@group(0) @binding(0) var colorMap: texture_multisampled_2d<f32>;"

                "@fragment "
                "fn main(@builtin(position) fragPosition : vec4<f32>) -> @location(0) vec4<f32> {"
                "let coords = vec2<i32>(i32(fragPosition.x), i32(fragPosition.y));"
                "var sum = vec4f(0.0);"
                "let sampleCount = textureNumSamples(colorMap);"
                "for (var i: u32 = 0; i < sampleCount; i = i + 1) {"
                "sum += textureLoad(colorMap, coords, i);"
                "}"
                "return sum / f32(sampleCount);"
                "}";

        auto vsModule = create_shader_module(dawnSharedContext()->device(), kVertexShaderText);
        auto fsModule =
                create_shader_module(dawnSharedContext()->device(),
                                     srcIsMSAA ? kFragmentShaderMSAAText : kFragmentShaderText);

        const auto& colorTexInfo = renderPassDesc.fColorAttachment.fTextureInfo;
        const auto& dsTexInfo = renderPassDesc.fDepthStencilAttachment.fTextureInfo;

        pipeline = create_blit_render_pipeline(
                dawnSharedContext(),
                /*label=*/"BlitWithDraw",
                std::move(vsModule),
                std::move(fsModule),
                /*renderPassColorFormat=*/
                TextureInfoPriv::Get<DawnTextureInfo>(colorTexInfo).getViewFormat(),
                /*renderPassDepthStencilFormat=*/
                dsTexInfo.isValid()
                        ? TextureInfoPriv::Get<DawnTextureInfo>(dsTexInfo).getViewFormat()
                        : wgpu::TextureFormat::Undefined,
                /*numSamples=*/renderPassDesc.fColorAttachment.fTextureInfo.numSamples());

        if (pipeline) {
            fBlitWithDrawPipelines.set(renderPassKey, pipeline);
        }
    }

    return pipeline;
}

sk_sp<Texture> DawnResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) {
    // Convert to smart pointers. The wgpu::Texture and wgpu::TextureView constructors increment
    // the ref count of the underlying handle.
    wgpu::Texture dawnTexture = BackendTextures::GetDawnTexturePtr(texture);
    wgpu::TextureView dawnTextureView = BackendTextures::GetDawnTextureViewPtr(texture);
    SkASSERT(!dawnTexture || !dawnTextureView);

    if (!dawnTexture && !dawnTextureView) {
        return {};
    }

    if (dawnTexture) {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTexture));
    } else {
        return DawnTexture::MakeWrapped(this->dawnSharedContext(),
                                        texture.dimensions(),
                                        texture.info(),
                                        std::move(dawnTextureView));
    }
}

sk_sp<DawnTexture> DawnResourceProvider::findOrCreateDiscardableMSAALoadTexture(
        SkISize dimensions, const TextureInfo& msaaInfo) {
    SkASSERT(msaaInfo.isValid());

    // Derive the load texture's info from the MSAA texture's info.
    DawnTextureInfo dawnMsaaLoadTextureInfo = TextureInfoPriv::Get<DawnTextureInfo>(msaaInfo);
    dawnMsaaLoadTextureInfo.fSampleCount = 1;
    dawnMsaaLoadTextureInfo.fUsage |= wgpu::TextureUsage::TextureBinding;

#if !defined(__EMSCRIPTEN__)
    // The MSAA texture can be a transient attachment (memoryless), but the load texture cannot
    // be, because the load texture must retain its contents between the two passes of a load:
    //  - first pass: the resolve texture is blitted to the load texture.
    //  - second pass: the actual render pass is started and the load texture is blitted to the
    //    MSAA texture.
    dawnMsaaLoadTextureInfo.fUsage &= (~wgpu::TextureUsage::TransientAttachment);
#endif

    auto texture = this->findOrCreateDiscardableMSAAAttachment(
            dimensions, TextureInfos::MakeDawn(dawnMsaaLoadTextureInfo));

    return sk_sp<DawnTexture>(static_cast<DawnTexture*>(texture.release()));
}

sk_sp<GraphicsPipeline> DawnResourceProvider::createGraphicsPipeline(
        const RuntimeEffectDictionary* runtimeDict,
        const UniqueKey& pipelineKey,
        const GraphicsPipelineDesc& pipelineDesc,
        const RenderPassDesc& renderPassDesc,
        SkEnumBitMask<PipelineCreationFlags> pipelineCreationFlags,
        uint32_t compilationID) {
    return DawnGraphicsPipeline::Make(this->dawnSharedContext(),
                                      this,
                                      runtimeDict,
                                      pipelineKey,
                                      pipelineDesc,
                                      renderPassDesc,
                                      pipelineCreationFlags,
                                      compilationID);
}

sk_sp<ComputePipeline> DawnResourceProvider::createComputePipeline(
        const ComputePipelineDesc& desc) {
    return DawnComputePipeline::Make(this->dawnSharedContext(), desc);
}

sk_sp<Texture> DawnResourceProvider::createTexture(SkISize dimensions, const TextureInfo& info) {
    return DawnTexture::Make(this->dawnSharedContext(), dimensions, info);
}

sk_sp<Buffer> DawnResourceProvider::createBuffer(size_t size,
                                                 BufferType type,
                                                 AccessPattern accessPattern) {
    return DawnBuffer::Make(this->dawnSharedContext(), size, type, accessPattern);
}

sk_sp<Sampler> DawnResourceProvider::createSampler(const SamplerDesc& samplerDesc) {
    return DawnSampler::Make(this->dawnSharedContext(), samplerDesc);
}

BackendTexture DawnResourceProvider::onCreateBackendTexture(SkISize dimensions,
                                                            const TextureInfo& info) {
    wgpu::Texture texture = DawnTexture::MakeDawnTexture(this->dawnSharedContext(),
                                                         dimensions,
                                                         info);
    if (!texture) {
        return {};
    }

    return BackendTextures::MakeDawn(texture.MoveToCHandle());
}

void DawnResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) {
    SkASSERT(texture.isValid());
    SkASSERT(texture.backend() == BackendApi::kDawn);

    // The pointers are released automatically in the wgpu::TextureView and wgpu::Texture dtors;
    // Acquire() does not increment the ref count.
    wgpu::TextureView::Acquire(BackendTextures::GetDawnTextureViewPtr(texture));
    // We need to explicitly call Destroy() here since that is the recommended way to delete a
    // Dawn texture predictably, versus just dropping a ref and relying on garbage collection.
    //
    // Additionally this helps to work around an issue where Skia may have cached a BindGroup that
    // references the underlying texture. Skia currently doesn't destroy BindGroups when its use of
    // the texture goes away, thus a ref to the texture remains on the BindGroup and memory is
    // never cleaned up unless we call Destroy() here.
    wgpu::Texture::Acquire(BackendTextures::GetDawnTexturePtr(texture)).Destroy();
}

DawnSharedContext* DawnResourceProvider::dawnSharedContext() const {
    return static_cast<DawnSharedContext*>(fSharedContext);
}

sk_sp<DawnBuffer> DawnResourceProvider::findOrCreateDawnBuffer(size_t size,
                                                               BufferType type,
                                                               AccessPattern accessPattern,
                                                               std::string_view label) {
    sk_sp<Buffer> buffer = this->findOrCreateBuffer(size, type, accessPattern, std::move(label));
    DawnBuffer* ptr = static_cast<DawnBuffer*>(buffer.release());
    return sk_sp<DawnBuffer>(ptr);
}

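// The layout created below must stay in sync with the kNumUniformEntries slots (intrinsic,
// render-step, paint, and gradient buffers) that findOrCreateUniformBuffersBindGroup() binds.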
const wgpu::BindGroupLayout& DawnResourceProvider::getOrCreateUniformBuffersBindGroupLayout() {
    SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

    if (fUniformBuffersBindGroupLayout) {
        return fUniformBuffersBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 4> entries;
    entries[0].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
    entries[0].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;
    entries[0].buffer.hasDynamicOffset = true;
    entries[0].buffer.minBindingSize = 0;

    entries[1].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
    entries[1].visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
    entries[1].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[1].buffer.hasDynamicOffset = true;
    entries[1].buffer.minBindingSize = 0;

    entries[2].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
    entries[2].visibility = wgpu::ShaderStage::Fragment;
    entries[2].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[2].buffer.hasDynamicOffset = true;
    entries[2].buffer.minBindingSize = 0;

    // The gradient buffer is only used when storage buffers are preferred; otherwise large
    // gradients fall back to a texture. When it is not in use, declare the binding as a uniform
    // buffer to satisfy the binding-type restrictions of devices without SSBO support.
    entries[3].binding = DawnGraphicsPipeline::kGradientBufferIndex;
    entries[3].visibility = wgpu::ShaderStage::Fragment;
    entries[3].buffer.type = fSharedContext->caps()->storageBufferSupport()
                                     ? wgpu::BufferBindingType::ReadOnlyStorage
                                     : wgpu::BufferBindingType::Uniform;
    entries[3].buffer.hasDynamicOffset = true;
    entries[3].buffer.minBindingSize = 0;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Uniform buffers bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fUniformBuffersBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fUniformBuffersBindGroupLayout;
}

const wgpu::BindGroupLayout&
DawnResourceProvider::getOrCreateSingleTextureSamplerBindGroupLayout() {
    SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

    if (fSingleTextureSamplerBindGroupLayout) {
        return fSingleTextureSamplerBindGroupLayout;
    }

    std::array<wgpu::BindGroupLayoutEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].sampler.type = wgpu::SamplerBindingType::Filtering;

    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].texture.sampleType = wgpu::TextureSampleType::Float;
    entries[1].texture.viewDimension = wgpu::TextureViewDimension::e2D;
    entries[1].texture.multisampled = false;

    wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
    if (fSharedContext->caps()->setBackendLabels()) {
        groupLayoutDesc.label = "Single texture + sampler bind group layout";
    }

    groupLayoutDesc.entryCount = entries.size();
    groupLayoutDesc.entries = entries.data();
    fSingleTextureSamplerBindGroupLayout =
            this->dawnSharedContext()->device().CreateBindGroupLayout(&groupLayoutDesc);

    return fSingleTextureSamplerBindGroupLayout;
}

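// A minimal 16-byte placeholder buffer bound to unused uniform/storage slots so that every
// pipeline can share the single bind group layout above.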
const wgpu::Buffer& DawnResourceProvider::getOrCreateNullBuffer() {
    if (!fNullBuffer) {
        wgpu::BufferDescriptor desc;
        if (fSharedContext->caps()->setBackendLabels()) {
            desc.label = "UnusedBufferSlot";
        }
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform |
                     wgpu::BufferUsage::Storage;
        desc.size = kBufferBindingSizeAlignment;
        desc.mappedAtCreation = false;

        fNullBuffer = this->dawnSharedContext()->device().CreateBuffer(&desc);
        SkASSERT(fNullBuffer);
    }

    return fNullBuffer;
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateUniformBuffersBindGroup(
        const std::array<std::pair<const DawnBuffer*, uint32_t>, kNumUniformEntries>&
                boundBuffersAndSizes) {
    SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

    auto key = make_ubo_bind_group_key(boundBuffersAndSizes);
    auto* existingBindGroup = fUniformBufferBindGroupCache.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    // Translate to a wgpu::BindGroupDescriptor.
    std::array<wgpu::BindGroupEntry, kNumUniformEntries> entries;

    constexpr uint32_t kBindingIndices[] = {
            DawnGraphicsPipeline::kIntrinsicUniformBufferIndex,
            DawnGraphicsPipeline::kRenderStepUniformBufferIndex,
            DawnGraphicsPipeline::kPaintUniformBufferIndex,
            DawnGraphicsPipeline::kGradientBufferIndex,
    };

    for (uint32_t i = 0; i < boundBuffersAndSizes.size(); ++i) {
        const DawnBuffer* boundBuffer = boundBuffersAndSizes[i].first;
        const uint32_t bindingSize = boundBuffersAndSizes[i].second;

        entries[i].binding = kBindingIndices[i];
        entries[i].offset = 0;
        if (boundBuffer) {
            entries[i].buffer = boundBuffer->dawnBuffer();
            entries[i].size = SkAlignTo(bindingSize, kBufferBindingSizeAlignment);
        } else {
            entries[i].buffer = this->getOrCreateNullBuffer();
            entries[i].size = wgpu::kWholeSize;
        }
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateUniformBuffersBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fUniformBufferBindGroupCache.insert(key, bindGroup);
}

const wgpu::BindGroup& DawnResourceProvider::findOrCreateSingleTextureSamplerBindGroup(
        const DawnSampler* sampler, const DawnTexture* texture) {
    SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

    auto key = make_texture_bind_group_key(sampler, texture);
    auto* existingBindGroup = fSingleTextureSamplerBindGroups.find(key);
    if (existingBindGroup) {
        // Cache hit.
        return *existingBindGroup;
    }

    std::array<wgpu::BindGroupEntry, 2> entries;

    entries[0].binding = 0;
    entries[0].sampler = sampler->dawnSampler();
    entries[1].binding = 1;
    entries[1].textureView = texture->sampleTextureView();

    wgpu::BindGroupDescriptor desc;
    desc.layout = this->getOrCreateSingleTextureSamplerBindGroupLayout();
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    const auto& device = this->dawnSharedContext()->device();
    auto bindGroup = device.CreateBindGroup(&desc);

    return *fSingleTextureSamplerBindGroups.insert(key, bindGroup);
}

void DawnResourceProvider::onFreeGpuResources() {
    SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

    fIntrinsicConstantsManager->freeGpuResources();
    // The wgpu::Textures and wgpu::Buffers held by the BindGroups should be explicitly destroyed
    // when the corresponding DawnTextures and DawnBuffers are destroyed, but removing the bind
    // groups themselves helps reduce CPU memory periodically.
    fSingleTextureSamplerBindGroups.reset();
    fUniformBufferBindGroupCache.reset();
}

void DawnResourceProvider::onPurgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    fIntrinsicConstantsManager->purgeResourcesNotUsedSince(purgeTime);
}

BindBufferInfo DawnResourceProvider::findOrCreateIntrinsicBindBufferInfo(
        DawnCommandBuffer* cb, UniformDataBlock intrinsicValues) {
    return fIntrinsicConstantsManager->add(cb, intrinsicValues);
}

}  // namespace skgpu::graphite