• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2022 Google LLC
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/graphite/dawn/DawnCommandBuffer.h"
9 
10 #include "src/gpu/graphite/Log.h"
11 #include "src/gpu/graphite/TextureProxy.h"
12 #include "src/gpu/graphite/dawn/DawnBuffer.h"
13 #include "src/gpu/graphite/dawn/DawnCaps.h"
14 #include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
15 #include "src/gpu/graphite/dawn/DawnQueueManager.h"
16 #include "src/gpu/graphite/dawn/DawnResourceProvider.h"
17 #include "src/gpu/graphite/dawn/DawnSampler.h"
18 #include "src/gpu/graphite/dawn/DawnSharedContext.h"
19 #include "src/gpu/graphite/dawn/DawnTexture.h"
20 #include "src/gpu/graphite/dawn/DawnUtilsPriv.h"
21 
22 namespace skgpu::graphite {
23 
24 namespace {
25 using IntrinsicConstant = float[4];
26 
clamp_ubo_binding_size(uint64_t offset,uint64_t bufferSize)27 uint64_t clamp_ubo_binding_size(uint64_t offset, uint64_t bufferSize) {
28     // Dawn's limit
29     constexpr uint32_t kMaxUniformBufferBindingSize = 64 * 1024;
30 
31     SkASSERT(offset <= bufferSize);
32     auto remainSize = bufferSize - offset;
33     if (remainSize > kMaxUniformBufferBindingSize) {
34         return kMaxUniformBufferBindingSize;
35     }
36 
37     return wgpu::kWholeSize;
38 }
39 }  // namespace
40 
Make(const DawnSharedContext * sharedContext,DawnResourceProvider * resourceProvider)41 std::unique_ptr<DawnCommandBuffer> DawnCommandBuffer::Make(const DawnSharedContext* sharedContext,
42                                                            DawnResourceProvider* resourceProvider) {
43     std::unique_ptr<DawnCommandBuffer> cmdBuffer(
44             new DawnCommandBuffer(sharedContext, resourceProvider));
45     if (!cmdBuffer->setNewCommandBufferResources()) {
46         return {};
47     }
48     return cmdBuffer;
49 }
50 
// Stores the (non-owned) shared context and resource provider. Encoder
// creation is deferred to setNewCommandBufferResources(), which Make() calls.
DawnCommandBuffer::DawnCommandBuffer(const DawnSharedContext* sharedContext,
                                     DawnResourceProvider* resourceProvider)
        : fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}
55 
~DawnCommandBuffer()56 DawnCommandBuffer::~DawnCommandBuffer() {}
57 
finishEncoding()58 wgpu::CommandBuffer DawnCommandBuffer::finishEncoding() {
59     SkASSERT(fCommandEncoder);
60     wgpu::CommandBuffer cmdBuffer = fCommandEncoder.Finish();
61 
62     fCommandEncoder = nullptr;
63 
64     return cmdBuffer;
65 }
66 
onResetCommandBuffer()67 void DawnCommandBuffer::onResetCommandBuffer() {
68     fActiveGraphicsPipeline = nullptr;
69     fActiveRenderPassEncoder = nullptr;
70     fActiveComputePassEncoder = nullptr;
71     fCommandEncoder = nullptr;
72 
73     for (auto& bufferSlot : fBoundUniformBuffers) {
74         bufferSlot = nullptr;
75     }
76     fBoundUniformBuffersDirty = true;
77 }
78 
// Acquires a fresh wgpu::CommandEncoder for a new recording. Always returns
// true today; the bool return leaves room for fallible setup.
bool DawnCommandBuffer::setNewCommandBufferResources() {
    SkASSERT(!fCommandEncoder);
    fCommandEncoder = fSharedContext->device().CreateCommandEncoder();
    SkASSERT(fCommandEncoder);
    return true;
}
85 
onAddRenderPass(const RenderPassDesc & renderPassDesc,const Texture * colorTexture,const Texture * resolveTexture,const Texture * depthStencilTexture,SkRect viewport,const std::vector<std::unique_ptr<DrawPass>> & drawPasses)86 bool DawnCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
87                                         const Texture* colorTexture,
88                                         const Texture* resolveTexture,
89                                         const Texture* depthStencilTexture,
90                                         SkRect viewport,
91                                         const std::vector<std::unique_ptr<DrawPass>>& drawPasses) {
92     // Update viewport's constant buffer before starting a render pass.
93     this->preprocessViewport(viewport);
94 
95     if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
96         return false;
97     }
98 
99     this->setViewport(viewport);
100 
101     for (size_t i = 0; i < drawPasses.size(); ++i) {
102         this->addDrawPass(drawPasses[i].get());
103     }
104 
105     this->endRenderPass();
106     return true;
107 }
108 
// Records one compute pass: bind pipeline, bind each buffer resource, issue a
// single dispatch, end the pass. Always returns true.
bool DawnCommandBuffer::onAddComputePass(const ComputePassDesc& computePassDesc,
                                         const ComputePipeline* pipeline,
                                         const std::vector<ResourceBinding>& bindings) {
    this->beginComputePass();
    this->bindComputePipeline(pipeline);
    for (const ResourceBinding& binding : bindings) {
        this->bindBuffer(binding.fBuffer.fBuffer, binding.fBuffer.fOffset, binding.fIndex);
    }
    this->dispatchThreadgroups(computePassDesc.fGlobalDispatchSize,
                               computePassDesc.fLocalDispatchSize);
    this->endComputePass();
    return true;
}
122 
// Translates a RenderPassDesc plus attachment textures into a
// wgpu::RenderPassDescriptor and begins the render pass encoder. When the
// resolve attachment requests LoadOp::kLoad, the resolve contents are first
// loaded into the MSAA attachment via an extra blit-with-draw pass; returns
// false only if that MSAA-load setup fails.
bool DawnCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                        const Texture* colorTexture,
                                        const Texture* resolveTexture,
                                        const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    // These tables are indexed by the graphite LoadOp/StoreOp enum values;
    // the static_asserts pin the expected ordering.
    constexpr static wgpu::LoadOp wgpuLoadActionMap[]{
            wgpu::LoadOp::Load,
            wgpu::LoadOp::Clear,
            wgpu::LoadOp::Clear  // Don't care
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(wgpuLoadActionMap) == kLoadOpCount);

    constexpr static wgpu::StoreOp wgpuStoreActionMap[]{wgpu::StoreOp::Store,
                                                        wgpu::StoreOp::Discard};
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(wgpuStoreActionMap) == kStoreOpCount);

    // Note: the descriptor holds pointers into these two locals, so they must
    // outlive the BeginRenderPass calls below (they do — same scope).
    wgpu::RenderPassDescriptor wgpuRenderPass = {};
    wgpu::RenderPassColorAttachment wgpuColorAttachment;
    wgpu::RenderPassDepthStencilAttachment wgpuDepthStencilAttachment;

    // Set up color attachment.
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolve = false;
    if (colorTexture) {
        wgpuRenderPass.colorAttachments = &wgpuColorAttachment;
        wgpuRenderPass.colorAttachmentCount = 1;

        // TODO: check Texture matches RenderPassDesc
        const auto* dawnColorTexture = static_cast<const DawnTexture*>(colorTexture);
        SkASSERT(dawnColorTexture->dawnTextureView());
        wgpuColorAttachment.view = dawnColorTexture->dawnTextureView();

        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        wgpuColorAttachment.clearValue = {
                clearColor[0], clearColor[1], clearColor[2], clearColor[3]};
        wgpuColorAttachment.loadOp = wgpuLoadActionMap[static_cast<int>(colorInfo.fLoadOp)];
        wgpuColorAttachment.storeOp = wgpuStoreActionMap[static_cast<int>(colorInfo.fStoreOp)];

        // Set up resolve attachment
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            const auto* dawnResolveTexture = static_cast<const DawnTexture*>(resolveTexture);
            SkASSERT(dawnResolveTexture->dawnTextureView());
            wgpuColorAttachment.resolveTarget = dawnResolveTexture->dawnTextureView();

            // Inclusion of a resolve texture implies the client wants to finish the
            // renderpass with a resolve.
            SkASSERT(wgpuColorAttachment.storeOp == wgpu::StoreOp::Discard);

            // But it also means we have to load the resolve texture into the MSAA color attachment
            loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        }
    }

    // Set up stencil/depth attachment
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        const auto* dawnDepthStencilTexture = static_cast<const DawnTexture*>(depthStencilTexture);
        auto format = dawnDepthStencilTexture->textureInfo().dawnTextureSpec().fFormat;
        SkASSERT(DawnFormatIsDepthOrStencil(format));

        // TODO: check Texture matches RenderPassDesc
        SkASSERT(dawnDepthStencilTexture->dawnTextureView());
        wgpuDepthStencilAttachment.view = dawnDepthStencilTexture->dawnTextureView();

        // The single graphite LoadOp/StoreOp pair is applied to whichever of
        // the depth/stencil aspects the format actually has.
        if (DawnFormatIsDepth(format)) {
            wgpuDepthStencilAttachment.depthClearValue = renderPassDesc.fClearDepth;
            wgpuDepthStencilAttachment.depthLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.depthStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        if (DawnFormatIsStencil(format)) {
            wgpuDepthStencilAttachment.stencilClearValue = renderPassDesc.fClearStencil;
            wgpuDepthStencilAttachment.stencilLoadOp =
                    wgpuLoadActionMap[static_cast<int>(depthStencilInfo.fLoadOp)];
            wgpuDepthStencilAttachment.stencilStoreOp =
                    wgpuStoreActionMap[static_cast<int>(depthStencilInfo.fStoreOp)];
        }

        wgpuRenderPass.depthStencilAttachment = &wgpuDepthStencilAttachment;
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    if (loadMSAAFromResolve) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.

        if (!this->loadMSAAFromResolveAndBeginRenderPassEncoder(
                    renderPassDesc,
                    wgpuRenderPass,
                    static_cast<const DawnTexture*>(colorTexture))) {
            return false;
        }
    }
    else {
        fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPass);
    }

    return true;
}
237 
// Loads the resolve texture's contents into the MSAA color attachment and
// then begins the real render pass. Two blit-with-draw hops are used:
//   resolve texture -> intermediate "MSAA load" texture (its own mini pass),
//   MSAA load texture -> MSAA attachment (drawn as the first thing in the
//   caller's pass, which this function leaves active on success).
// Returns false if the intermediate texture or blit pipeline can't be made.
bool DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder(
        const RenderPassDesc& frontendRenderPassDesc,
        const wgpu::RenderPassDescriptor& wgpuRenderPassDesc,
        const DawnTexture* msaaTexture) {
    SkASSERT(!fActiveRenderPassEncoder);

    // Copy from resolve texture to an intermediate texture. Using blit with draw
    // pipeline because the resolveTexture might be created from a swapchain, and it
    // is possible that only its texture view is available. So onCopyTextureToTexture()
    // which operates on wgpu::Texture instead of wgpu::TextureView cannot be used in that case.
    auto msaaLoadTexture = fResourceProvider->findOrCreateDiscardableMSAALoadTexture(
            msaaTexture->dimensions(), msaaTexture->textureInfo());
    if (!msaaLoadTexture) {
        SKGPU_LOG_E("DawnCommandBuffer::loadMSAAFromResolveAndBeginRenderPassEncoder: "
                    "Can't create MSAA Load Texture.");
        return false;
    }

    // Creating intermediate render pass (copy from resolve texture -> MSAA load texture)
    RenderPassDesc intermediateRenderPassDesc = {};
    intermediateRenderPassDesc.fColorAttachment.fLoadOp = LoadOp::kDiscard;
    intermediateRenderPassDesc.fColorAttachment.fStoreOp = StoreOp::kStore;
    intermediateRenderPassDesc.fColorAttachment.fTextureInfo =
            frontendRenderPassDesc.fColorResolveAttachment.fTextureInfo;

    wgpu::RenderPassColorAttachment wgpuIntermediateColorAttachment;
    // Dawn doesn't support actual DontCare so use LoadOp::Clear.
    wgpuIntermediateColorAttachment.loadOp = wgpu::LoadOp::Clear;
    wgpuIntermediateColorAttachment.clearValue = {1, 1, 1, 1};
    wgpuIntermediateColorAttachment.storeOp = wgpu::StoreOp::Store;
    wgpuIntermediateColorAttachment.view = msaaLoadTexture->dawnTextureView();

    wgpu::RenderPassDescriptor wgpuIntermediateRenderPassDesc;
    wgpuIntermediateRenderPassDesc.colorAttachmentCount = 1;
    wgpuIntermediateRenderPassDesc.colorAttachments = &wgpuIntermediateColorAttachment;

    fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuIntermediateRenderPassDesc);

    // The source is the caller's resolve target; colorAttachments[0] is valid
    // because the caller only reaches here with a color+resolve setup.
    if (!this->doBlitWithDraw(
                intermediateRenderPassDesc,
                /*sourceTextureView=*/wgpuRenderPassDesc.colorAttachments[0].resolveTarget,
                msaaTexture->dimensions().width(),
                msaaTexture->dimensions().height())) {
        return false;
    }

    fActiveRenderPassEncoder.End();

    // Start actual render pass (blit from MSAA load texture -> MSAA texture)
    fActiveRenderPassEncoder = fCommandEncoder.BeginRenderPass(&wgpuRenderPassDesc);

    return this->doBlitWithDraw(frontendRenderPassDesc,
                                /*sourceTextureView=*/msaaLoadTexture->dawnTextureView(),
                                msaaTexture->dimensions().width(),
                                msaaTexture->dimensions().height());
}
294 
// Copies `sourceTextureView` into the currently-active render pass's color
// attachment by drawing a fullscreen triangle with a dedicated blit pipeline.
// Expects fActiveRenderPassEncoder to already be recording; returns false if
// the blit pipeline can't be created.
bool DawnCommandBuffer::doBlitWithDraw(const RenderPassDesc& frontendRenderPassDesc,
                                       const wgpu::TextureView& sourceTextureView,
                                       int width,
                                       int height) {
    auto loadPipeline = fResourceProvider->findOrCreateBlitWithDrawPipeline(frontendRenderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to blit with draw");
        return false;
    }

    SkASSERT(fActiveRenderPassEncoder);

    fActiveRenderPassEncoder.SetPipeline(loadPipeline);

    // The load msaa pipeline takes no uniforms, no vertex/instance attributes and only uses
    // one texture that does not require a sampler.

    // TODO: b/260368758
    // cache single texture's bind group creation.
    wgpu::BindGroupEntry entry;
    entry.binding = 0;
    entry.textureView = sourceTextureView;

    wgpu::BindGroupDescriptor desc;
    desc.layout = loadPipeline.GetBindGroupLayout(0);
    desc.entryCount = 1;
    desc.entries = &entry;

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

    fActiveRenderPassEncoder.SetBindGroup(0, bindGroup);

    // Blit covers the full target, so scissor/viewport span the whole size.
    fActiveRenderPassEncoder.SetScissorRect(0, 0, width, height);
    fActiveRenderPassEncoder.SetViewport(0, 0, width, height, 0, 1);

    // Fullscreen triangle
    fActiveRenderPassEncoder.Draw(3);

    return true;
}
335 
// Ends the active render pass and clears the encoder handle so subsequent
// asserts can verify no pass is in flight.
void DawnCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.End();
    fActiveRenderPassEncoder = nullptr;
}
341 
addDrawPass(const DrawPass * drawPass)342 void DawnCommandBuffer::addDrawPass(const DrawPass* drawPass) {
343     drawPass->addResourceRefs(this);
344     for (auto [type, cmdPtr] : drawPass->commands()) {
345         switch (type) {
346             case DrawPassCommands::Type::kBindGraphicsPipeline: {
347                 auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
348                 this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
349                 break;
350             }
351             case DrawPassCommands::Type::kSetBlendConstants: {
352                 auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
353                 this->setBlendConstants(sbc->fBlendConstants);
354                 break;
355             }
356             case DrawPassCommands::Type::kBindUniformBuffer: {
357                 auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
358                 this->bindUniformBuffer(bub->fInfo, bub->fSlot);
359                 break;
360             }
361             case DrawPassCommands::Type::kBindDrawBuffers: {
362                 auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
363                 this->bindDrawBuffers(
364                         bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
365                 break;
366             }
367             case DrawPassCommands::Type::kBindTexturesAndSamplers: {
368                 auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
369                 bindTextureAndSamplers(*drawPass, *bts);
370                 break;
371             }
372             case DrawPassCommands::Type::kSetScissor: {
373                 auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
374                 const SkIRect& rect = ss->fScissor;
375                 this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
376                 break;
377             }
378             case DrawPassCommands::Type::kDraw: {
379                 auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
380                 this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
381                 break;
382             }
383             case DrawPassCommands::Type::kDrawIndexed: {
384                 auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
385                 this->drawIndexed(
386                         draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
387                 break;
388             }
389             case DrawPassCommands::Type::kDrawInstanced: {
390                 auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
391                 this->drawInstanced(draw->fType,
392                                     draw->fBaseVertex,
393                                     draw->fVertexCount,
394                                     draw->fBaseInstance,
395                                     draw->fInstanceCount);
396                 break;
397             }
398             case DrawPassCommands::Type::kDrawIndexedInstanced: {
399                 auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
400                 this->drawIndexedInstanced(draw->fType,
401                                            draw->fBaseIndex,
402                                            draw->fIndexCount,
403                                            draw->fBaseVertex,
404                                            draw->fBaseInstance,
405                                            draw->fInstanceCount);
406                 break;
407             }
408             case DrawPassCommands::Type::kDrawIndirect: {
409                 auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
410                 this->drawIndirect(draw->fType);
411                 break;
412             }
413             case DrawPassCommands::Type::kDrawIndexedIndirect: {
414                 auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
415                 this->drawIndexedIndirect(draw->fType);
416                 break;
417             }
418         }
419     }
420 }
421 
// Makes `graphicsPipeline` the active pipeline on the current render pass.
// A pipeline change invalidates the uniform bind group (its layout may
// differ), so the dirty flag forces a rebuild at the next draw.
void DawnCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    fActiveGraphicsPipeline = static_cast<const DawnGraphicsPipeline*>(graphicsPipeline);
    fActiveRenderPassEncoder.SetPipeline(fActiveGraphicsPipeline->dawnRenderPipeline());
    fBoundUniformBuffersDirty = true;
}
427 
bindUniformBuffer(const BindBufferInfo & info,UniformSlot slot)428 void DawnCommandBuffer::bindUniformBuffer(const BindBufferInfo& info, UniformSlot slot) {
429     SkASSERT(fActiveRenderPassEncoder);
430 
431     auto dawnBuffer = static_cast<const DawnBuffer*>(info.fBuffer);
432 
433     unsigned int bufferIndex = 0;
434     switch (slot) {
435         case UniformSlot::kRenderStep:
436             bufferIndex = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
437             break;
438         case UniformSlot::kPaint:
439             bufferIndex = DawnGraphicsPipeline::kPaintUniformBufferIndex;
440             break;
441         default:
442             SkASSERT(false);
443     }
444 
445     fBoundUniformBuffers[bufferIndex] = dawnBuffer;
446     fBoundUniformBufferOffsets[bufferIndex] = static_cast<uint32_t>(info.fOffset);
447 
448     fBoundUniformBuffersDirty = true;
449 }
450 
bindDrawBuffers(const BindBufferInfo & vertices,const BindBufferInfo & instances,const BindBufferInfo & indices,const BindBufferInfo & indirect)451 void DawnCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
452                                         const BindBufferInfo& instances,
453                                         const BindBufferInfo& indices,
454                                         const BindBufferInfo& indirect) {
455     SkASSERT(fActiveRenderPassEncoder);
456 
457     if (vertices.fBuffer) {
458         auto dawnBuffer = static_cast<const DawnBuffer*>(vertices.fBuffer)->dawnBuffer();
459         fActiveRenderPassEncoder.SetVertexBuffer(
460                 DawnGraphicsPipeline::kVertexBufferIndex, dawnBuffer, vertices.fOffset);
461     }
462     if (instances.fBuffer) {
463         auto dawnBuffer = static_cast<const DawnBuffer*>(instances.fBuffer)->dawnBuffer();
464         fActiveRenderPassEncoder.SetVertexBuffer(
465                 DawnGraphicsPipeline::kInstanceBufferIndex, dawnBuffer, instances.fOffset);
466     }
467     if (indices.fBuffer) {
468         auto dawnBuffer = static_cast<const DawnBuffer*>(indices.fBuffer)->dawnBuffer();
469         fActiveRenderPassEncoder.SetIndexBuffer(
470                 dawnBuffer, wgpu::IndexFormat::Uint16, indices.fOffset);
471     }
472     if (indirect.fBuffer) {
473         fCurrentIndirectBuffer = static_cast<const DawnBuffer*>(indirect.fBuffer)->dawnBuffer();
474         fCurrentIndirectBufferOffset = indirect.fOffset;
475     } else {
476         fCurrentIndirectBuffer = nullptr;
477         fCurrentIndirectBufferOffset = 0;
478     }
479 }
480 
// Builds and binds the texture/sampler bind group for the active pipeline.
// A fresh bind group is created on every call (see TODO below about caching).
void DawnCommandBuffer::bindTextureAndSamplers(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    SkASSERT(fActiveRenderPassEncoder);
    SkASSERT(fActiveGraphicsPipeline);

    // TODO: optimize for single texture.
    // Two entries per texture unit: one sampler + one texture view.
    std::vector<wgpu::BindGroupEntry> entries(2 * command.fNumTexSamplers);

    for (int i = 0; i < command.fNumTexSamplers; ++i) {
        const auto* texture =
                static_cast<const DawnTexture*>(drawPass.getTexture(command.fTextureIndices[i]));
        const auto* sampler =
                static_cast<const DawnSampler*>(drawPass.getSampler(command.fSamplerIndices[i]));
        auto& wgpuTextureView = texture->dawnTextureView();
        auto& wgpuSampler = sampler->dawnSampler();

        // Assuming shader generator assigns binding slot to sampler then texture,
        // then the next sampler and texture, and so on, we need to use
        // 2 * i as base binding index of the sampler and texture.
        // TODO: https://b.corp.google.com/issues/259457090:
        // Better configurable way of assigning samplers and textures' bindings.
        entries[2 * i].binding = 2 * i;
        entries[2 * i].sampler = wgpuSampler;

        entries[2 * i + 1].binding = 2 * i + 1;
        entries[2 * i + 1].textureView = wgpuTextureView;
    }

    wgpu::BindGroupDescriptor desc;
    desc.layout = fActiveGraphicsPipeline->dawnRenderPipeline().GetBindGroupLayout(
            DawnGraphicsPipeline::kTextureBindGroupIndex);
    desc.entryCount = entries.size();
    desc.entries = entries.data();

    auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

    fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kTextureBindGroupIndex, bindGroup);
}
519 
// Rebuilds and binds the uniform-buffer bind group when a bound uniform
// buffer or the active pipeline changed since the last draw. Entry 0 is
// always the intrinsic constant buffer; the render-step and paint uniform
// buffer entries are appended only when the active pipeline uses them AND a
// buffer is actually bound in that slot.
void DawnCommandBuffer::syncUniformBuffers() {
    if (fBoundUniformBuffersDirty) {
        fBoundUniformBuffersDirty = false;
        // At most 3 entries: intrinsic + render-step + paint.
        std::array<wgpu::BindGroupEntry, 3> entries;
        uint32_t numBuffers = 0;

        entries[numBuffers].binding = DawnGraphicsPipeline::kIntrinsicUniformBufferIndex;
        entries[numBuffers].buffer = fInstrinsicConstantBuffer;
        entries[numBuffers].offset = 0;
        entries[numBuffers].size = sizeof(IntrinsicConstant);
        ++numBuffers;

        if (fActiveGraphicsPipeline->hasStepUniforms() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex]) {
            auto boundBuffer =
                    fBoundUniformBuffers[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];

            entries[numBuffers].binding = DawnGraphicsPipeline::kRenderStepUniformBufferIndex;
            entries[numBuffers].buffer = boundBuffer->dawnBuffer();

            entries[numBuffers].offset =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kRenderStepUniformBufferIndex];

            // Clamp to Dawn's maximum uniform binding size (see helper above).
            entries[numBuffers].size =
                    clamp_ubo_binding_size(entries[numBuffers].offset, boundBuffer->size());

            ++numBuffers;
        }

        if (fActiveGraphicsPipeline->hasFragment() &&
            fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex]) {
            auto boundBuffer = fBoundUniformBuffers[DawnGraphicsPipeline::kPaintUniformBufferIndex];

            entries[numBuffers].binding = DawnGraphicsPipeline::kPaintUniformBufferIndex;
            entries[numBuffers].buffer = boundBuffer->dawnBuffer();

            entries[numBuffers].offset =
                    fBoundUniformBufferOffsets[DawnGraphicsPipeline::kPaintUniformBufferIndex];

            entries[numBuffers].size =
                    clamp_ubo_binding_size(entries[numBuffers].offset, boundBuffer->size());

            ++numBuffers;
        }

        wgpu::BindGroupDescriptor desc;
        desc.layout = fActiveGraphicsPipeline->dawnRenderPipeline().GetBindGroupLayout(
                DawnGraphicsPipeline::kUniformBufferBindGroupIndex);
        desc.entryCount = numBuffers;
        desc.entries = entries.data();

        auto bindGroup = fSharedContext->device().CreateBindGroup(&desc);

        fActiveRenderPassEncoder.SetBindGroup(DawnGraphicsPipeline::kUniformBufferBindGroupIndex,
                                              bindGroup);
    }
}
577 
// Forwards the scissor rectangle to the active render pass encoder.
void DawnCommandBuffer::setScissor(unsigned int left,
                                   unsigned int top,
                                   unsigned int width,
                                   unsigned int height) {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.SetScissorRect(left, top, width, height);
}
585 
// Computes the device-space -> NDC transform (rtAdjust) for `viewport` and
// uploads it to the lazily-created intrinsic constant buffer. Must run before
// any pass begins on this encoder, because WriteBuffer is not allowed while a
// render or compute pass is active.
void DawnCommandBuffer::preprocessViewport(const SkRect& viewport) {
    // Dawn's framebuffer space has (0, 0) at the top left. This agrees with Skia's device coords.
    // However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming all
    // surfaces we have are TopLeft origin).
    const float x = viewport.x();
    const float y = viewport.y();
    const float invTwoW = 2.f / viewport.width();
    const float invTwoH = 2.f / viewport.height();
    const IntrinsicConstant rtAdjust = {invTwoW, -invTwoH, -1.f - x * invTwoW, 1.f + y * invTwoH};

    // Lazily create the (uniform, CopyDst) buffer that holds the constants;
    // it is reused for the lifetime of this command buffer.
    if (!fInstrinsicConstantBuffer) {
        wgpu::BufferDescriptor desc;
#if defined(SK_DEBUG)
        desc.label = "CommandBufferInstrinsicConstant";
#endif
        desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
        desc.size = sizeof(IntrinsicConstant);
        desc.mappedAtCreation = false;
        fInstrinsicConstantBuffer = fSharedContext->device().CreateBuffer(&desc);
        SkASSERT(fInstrinsicConstantBuffer);
    }

    // TODO: https://b.corp.google.com/issues/259267703
    // Make updating instrinsic constants faster. Metal has setVertexBytes method
    // to quickly sending instrinsic constants to vertex shader without any buffer. But Dawn doesn't
    // have similar capability. So we have to use WriteBuffer(), and this method is not allowed to
    // be called when there is an active render pass.
    SkASSERT(!fActiveRenderPassEncoder);
    SkASSERT(!fActiveComputePassEncoder);

    fCommandEncoder.WriteBuffer(fInstrinsicConstantBuffer,
                                0,
                                reinterpret_cast<const uint8_t*>(rtAdjust),
                                sizeof(rtAdjust));
}
621 
// Forwards the viewport to the active render pass encoder with the full
// [0, 1] depth range.
void DawnCommandBuffer::setViewport(const SkRect& viewport) {
    SkASSERT(fActiveRenderPassEncoder);
    fActiveRenderPassEncoder.SetViewport(
            viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
}
627 
setBlendConstants(float * blendConstants)628 void DawnCommandBuffer::setBlendConstants(float* blendConstants) {
629     SkASSERT(fActiveRenderPassEncoder);
630     wgpu::Color blendConst = {
631             blendConstants[0], blendConstants[1], blendConstants[2], blendConstants[3]};
632     fActiveRenderPassEncoder.SetBlendConstant(&blendConst);
633 }
634 
// Records a non-indexed, non-instanced draw. Uniform bind groups are flushed
// lazily here, right before the draw call is encoded.
void DawnCommandBuffer::draw(PrimitiveType type,
                             unsigned int baseVertex,
                             unsigned int vertexCount) {
    SkASSERT(fActiveRenderPassEncoder);
    // Topology is baked into the pipeline; `type` is only sanity-checked.
    SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);

    this->syncUniformBuffers();

    fActiveRenderPassEncoder.Draw(vertexCount, /*instanceCount=*/1, baseVertex);
}
645 
drawIndexed(PrimitiveType type,unsigned int baseIndex,unsigned int indexCount,unsigned int baseVertex)646 void DawnCommandBuffer::drawIndexed(PrimitiveType type,
647                                     unsigned int baseIndex,
648                                     unsigned int indexCount,
649                                     unsigned int baseVertex) {
650     SkASSERT(fActiveRenderPassEncoder);
651     SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
652 
653     this->syncUniformBuffers();
654 
655     fActiveRenderPassEncoder.DrawIndexed(indexCount, /*instanceCount=*/1, baseIndex, baseVertex);
656 }
657 
drawInstanced(PrimitiveType type,unsigned int baseVertex,unsigned int vertexCount,unsigned int baseInstance,unsigned int instanceCount)658 void DawnCommandBuffer::drawInstanced(PrimitiveType type,
659                                       unsigned int baseVertex,
660                                       unsigned int vertexCount,
661                                       unsigned int baseInstance,
662                                       unsigned int instanceCount) {
663     SkASSERT(fActiveRenderPassEncoder);
664     SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
665 
666     this->syncUniformBuffers();
667 
668     fActiveRenderPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
669 }
670 
drawIndexedInstanced(PrimitiveType type,unsigned int baseIndex,unsigned int indexCount,unsigned int baseVertex,unsigned int baseInstance,unsigned int instanceCount)671 void DawnCommandBuffer::drawIndexedInstanced(PrimitiveType type,
672                                              unsigned int baseIndex,
673                                              unsigned int indexCount,
674                                              unsigned int baseVertex,
675                                              unsigned int baseInstance,
676                                              unsigned int instanceCount) {
677     SkASSERT(fActiveRenderPassEncoder);
678     SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
679 
680     this->syncUniformBuffers();
681 
682     fActiveRenderPassEncoder.DrawIndexed(
683             indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
684 }
685 
drawIndirect(PrimitiveType type)686 void DawnCommandBuffer::drawIndirect(PrimitiveType type) {
687     SkASSERT(fActiveRenderPassEncoder);
688     SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
689     SkASSERT(fCurrentIndirectBuffer);
690 
691     this->syncUniformBuffers();
692 
693     fActiveRenderPassEncoder.DrawIndirect(fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
694 }
695 
drawIndexedIndirect(PrimitiveType type)696 void DawnCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
697     SkASSERT(fActiveRenderPassEncoder);
698     SkASSERT(fActiveGraphicsPipeline->primitiveType() == type);
699     SkASSERT(fCurrentIndirectBuffer);
700 
701     this->syncUniformBuffers();
702 
703     fActiveRenderPassEncoder.DrawIndexedIndirect(fCurrentIndirectBuffer,
704                                                  fCurrentIndirectBufferOffset);
705 }
706 
beginComputePass()707 void DawnCommandBuffer::beginComputePass() { SkASSERT(false); }
708 
bindComputePipeline(const ComputePipeline * computePipeline)709 void DawnCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
710     // TODO: https://b.corp.google.com/issues/260341543
711     SkASSERT(false);
712 }
713 
bindBuffer(const Buffer * buffer,unsigned int offset,unsigned int index)714 void DawnCommandBuffer::bindBuffer(const Buffer* buffer, unsigned int offset, unsigned int index) {
715     // TODO: https://b.corp.google.com/issues/260341543
716     SkASSERT(false);
717 }
718 
dispatchThreadgroups(const WorkgroupSize & globalSize,const WorkgroupSize & localSize)719 void DawnCommandBuffer::dispatchThreadgroups(const WorkgroupSize& globalSize,
720                                              const WorkgroupSize& localSize) {
721     // TODO: https://b.corp.google.com/issues/260341543
722     SkASSERT(false);
723 }
724 
endComputePass()725 void DawnCommandBuffer::endComputePass() {
726     // TODO: https://b.corp.google.com/issues/260341543
727     SkASSERT(false);
728 }
729 
onCopyBufferToBuffer(const Buffer * srcBuffer,size_t srcOffset,const Buffer * dstBuffer,size_t dstOffset,size_t size)730 bool DawnCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
731                                              size_t srcOffset,
732                                              const Buffer* dstBuffer,
733                                              size_t dstOffset,
734                                              size_t size) {
735     SkASSERT(!fActiveRenderPassEncoder);
736     SkASSERT(!fActiveComputePassEncoder);
737 
738     auto& wgpuBufferSrc = static_cast<const DawnBuffer*>(srcBuffer)->dawnBuffer();
739     auto& wgpuBufferDst = static_cast<const DawnBuffer*>(dstBuffer)->dawnBuffer();
740 
741     fCommandEncoder.CopyBufferToBuffer(wgpuBufferSrc, srcOffset, wgpuBufferDst, dstOffset, size);
742     return true;
743 }
744 
onCopyTextureToBuffer(const Texture * texture,SkIRect srcRect,const Buffer * buffer,size_t bufferOffset,size_t bufferRowBytes)745 bool DawnCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
746                                               SkIRect srcRect,
747                                               const Buffer* buffer,
748                                               size_t bufferOffset,
749                                               size_t bufferRowBytes) {
750     SkASSERT(!fActiveRenderPassEncoder);
751     SkASSERT(!fActiveComputePassEncoder);
752 
753     auto& wgpuTexture = static_cast<const DawnTexture*>(texture)->dawnTexture();
754     auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
755 
756     wgpu::ImageCopyTexture src;
757     src.texture = wgpuTexture;
758     src.origin.x = srcRect.x();
759     src.origin.y = srcRect.y();
760 
761     wgpu::ImageCopyBuffer dst;
762     dst.buffer = wgpuBuffer;
763     dst.layout.offset = bufferOffset;
764     // Dawn requires buffer's alignment to be multiples of 256.
765     // https://b.corp.google.com/issues/259264489
766     SkASSERT((bufferRowBytes & 0xFF) == 0);
767     dst.layout.bytesPerRow = bufferRowBytes;
768 
769     wgpu::Extent3D copySize = {
770             static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};
771     fCommandEncoder.CopyTextureToBuffer(&src, &dst, &copySize);
772 
773     return true;
774 }
775 
onCopyBufferToTexture(const Buffer * buffer,const Texture * texture,const BufferTextureCopyData * copyData,int count)776 bool DawnCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
777                                               const Texture* texture,
778                                               const BufferTextureCopyData* copyData,
779                                               int count) {
780     SkASSERT(!fActiveRenderPassEncoder);
781     SkASSERT(!fActiveComputePassEncoder);
782 
783     auto& wgpuTexture = static_cast<const DawnTexture*>(texture)->dawnTexture();
784     auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
785 
786     wgpu::ImageCopyBuffer src;
787     src.buffer = wgpuBuffer;
788 
789     wgpu::ImageCopyTexture dst;
790     dst.texture = wgpuTexture;
791 
792     for (int i = 0; i < count; ++i) {
793         src.layout.offset = copyData[i].fBufferOffset;
794         // Dawn requires buffer's alignment to be multiples of 256.
795         // https://b.corp.google.com/issues/259264489
796         SkASSERT((copyData[i].fBufferRowBytes & 0xFF) == 0);
797         src.layout.bytesPerRow = copyData[i].fBufferRowBytes;
798 
799         dst.origin.x = copyData[i].fRect.x();
800         dst.origin.y = copyData[i].fRect.y();
801 
802         wgpu::Extent3D copySize = {static_cast<uint32_t>(copyData[i].fRect.width()),
803                                    static_cast<uint32_t>(copyData[i].fRect.height()),
804                                    1};
805         fCommandEncoder.CopyBufferToTexture(&src, &dst, &copySize);
806     }
807 
808     return true;
809 }
810 
onCopyTextureToTexture(const Texture * src,SkIRect srcRect,const Texture * dst,SkIPoint dstPoint)811 bool DawnCommandBuffer::onCopyTextureToTexture(const Texture* src,
812                                                SkIRect srcRect,
813                                                const Texture* dst,
814                                                SkIPoint dstPoint) {
815     SkASSERT(!fActiveRenderPassEncoder);
816     SkASSERT(!fActiveComputePassEncoder);
817 
818     auto& wgpuTextureSrc = static_cast<const DawnTexture*>(src)->dawnTexture();
819     auto& wgpuTextureDst = static_cast<const DawnTexture*>(dst)->dawnTexture();
820 
821     wgpu::ImageCopyTexture srcArgs;
822     srcArgs.texture = wgpuTextureSrc;
823     srcArgs.origin.x = srcRect.fLeft;
824     srcArgs.origin.y = srcRect.fTop;
825 
826     wgpu::ImageCopyTexture dstArgs;
827     dstArgs.texture = wgpuTextureDst;
828     dstArgs.origin.x = dstPoint.fX;
829     dstArgs.origin.y = dstPoint.fY;
830 
831     wgpu::Extent3D copySize = {
832             static_cast<uint32_t>(srcRect.width()), static_cast<uint32_t>(srcRect.height()), 1};
833 
834     fCommandEncoder.CopyTextureToTexture(&srcArgs, &dstArgs, &copySize);
835 
836     return true;
837 }
838 
onSynchronizeBufferToCpu(const Buffer * buffer,bool * outDidResultInWork)839 bool DawnCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
840     return true;
841 }
842 
onClearBuffer(const Buffer * buffer,size_t offset,size_t size)843 bool DawnCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
844     SkASSERT(!fActiveRenderPassEncoder);
845     SkASSERT(!fActiveComputePassEncoder);
846 
847     auto& wgpuBuffer = static_cast<const DawnBuffer*>(buffer)->dawnBuffer();
848     fCommandEncoder.ClearBuffer(wgpuBuffer, offset, size);
849 
850     return true;
851 }
852 
853 }  // namespace skgpu::graphite
854