/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/dawn/GrDawnOpsRenderPass.h"

#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/dawn/GrDawnAttachment.h"
#include "src/gpu/dawn/GrDawnBuffer.h"
#include "src/gpu/dawn/GrDawnGpu.h"
#include "src/gpu/dawn/GrDawnProgramBuilder.h"
#include "src/gpu/dawn/GrDawnRenderTarget.h"
#include "src/gpu/dawn/GrDawnTexture.h"
#include "src/gpu/dawn/GrDawnUtil.h"
#include "src/sksl/SkSLCompiler.h"

////////////////////////////////////////////////////////////////////////////////

static wgpu::LoadOp to_dawn_load_op(GrLoadOp loadOp) {
    switch (loadOp) {
        case GrLoadOp::kLoad:
            return wgpu::LoadOp::Load;
        case GrLoadOp::kDiscard:
            // Use LoadOp::Load to emulate DontCare.
            // Dawn doesn't have DontCare, for security reasons.
            // Load should be equivalent to DontCare for desktop; Clear would
            // probably be better for tilers. If Dawn does add DontCare
            // as an extension, use it here.
            return wgpu::LoadOp::Load;
        case GrLoadOp::kClear:
            return wgpu::LoadOp::Clear;
        default:
            SK_ABORT("Invalid LoadOp");
    }
}

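// The ops render pass records into its own wgpu::CommandEncoder; a Dawn render pass is begun
// immediately at construction using the color and stencil load ops requested by the caller.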
GrDawnOpsRenderPass::GrDawnOpsRenderPass(GrDawnGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
                                         const LoadAndStoreInfo& colorInfo,
                                         const StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
        , fColorInfo(colorInfo) {
    fEncoder = fGpu->device().CreateCommandEncoder();
    wgpu::LoadOp colorOp = to_dawn_load_op(colorInfo.fLoadOp);
    wgpu::LoadOp stencilOp = to_dawn_load_op(stencilInfo.fLoadOp);
    fPassEncoder = beginRenderPass(colorOp, stencilOp);
}

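// Builds a wgpu::RenderPassDescriptor for the current render target (one color attachment plus
// an optional depth/stencil attachment) and begins a new render pass on fEncoder with the given
// load ops. The target texture's mipmaps are marked dirty since the pass will write to it.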
wgpu::RenderPassEncoder GrDawnOpsRenderPass::beginRenderPass(wgpu::LoadOp colorOp,
                                                             wgpu::LoadOp stencilOp) {
    if (GrTexture* tex = fRenderTarget->asTexture()) {
        tex->markMipmapsDirty();
    }
    auto stencilAttachment = static_cast<GrDawnAttachment*>(fRenderTarget->getStencilAttachment());

    const float* c = fColorInfo.fClearColor.data();

    wgpu::RenderPassColorAttachment colorAttachment;
    colorAttachment.view = static_cast<GrDawnRenderTarget*>(fRenderTarget)->textureView();
    colorAttachment.resolveTarget = nullptr;
    colorAttachment.clearColor = { c[0], c[1], c[2], c[3] };
    colorAttachment.loadOp = colorOp;
    colorAttachment.storeOp = wgpu::StoreOp::Store;
    wgpu::RenderPassColorAttachment* colorAttachments = { &colorAttachment };
    wgpu::RenderPassDescriptor renderPassDescriptor;
    renderPassDescriptor.colorAttachmentCount = 1;
    renderPassDescriptor.colorAttachments = colorAttachments;
    // Declared outside the if-block so the pointer stored in the descriptor remains valid when
    // BeginRenderPass is called below.
    wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
    if (stencilAttachment) {
        depthStencilAttachment.view = stencilAttachment->view();
        depthStencilAttachment.depthLoadOp = stencilOp;
        depthStencilAttachment.stencilLoadOp = stencilOp;
        depthStencilAttachment.clearDepth = 1.0f;
        depthStencilAttachment.clearStencil = 0;
        depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
        depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
        renderPassDescriptor.depthStencilAttachment = &depthStencilAttachment;
    } else {
        renderPassDescriptor.depthStencilAttachment = nullptr;
    }
    return fEncoder.BeginRenderPass(&renderPassDescriptor);
}

GrDawnOpsRenderPass::~GrDawnOpsRenderPass() {
}

GrGpu* GrDawnOpsRenderPass::gpu() { return fGpu; }

void GrDawnOpsRenderPass::submit() {
    fGpu->appendCommandBuffer(fEncoder.Finish());
}

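// Both clear overrides end the current render pass and begin a new one whose load op clears the
// relevant attachment while loading the other (Dawn expresses clears via load ops rather than a
// separate clear command inside a pass).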
void GrDawnOpsRenderPass::onClearStencilClip(const GrScissorState& scissor,
                                             bool insideStencilMask) {
    SkASSERT(!scissor.enabled());
    fPassEncoder.EndPass();
    fPassEncoder = beginRenderPass(wgpu::LoadOp::Load, wgpu::LoadOp::Clear);
}

void GrDawnOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    SkASSERT(!scissor.enabled());
    fPassEncoder.EndPass();
    fPassEncoder = beginRenderPass(wgpu::LoadOp::Clear, wgpu::LoadOp::Load);
}

////////////////////////////////////////////////////////////////////////////////

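// Submits the work recorded so far to the GPU before performing the deferred texture upload,
// presumably so the upload is ordered after the previously recorded draws.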
void GrDawnOpsRenderPass::inlineUpload(GrOpFlushState* state,
                                       GrDeferredTextureUploadFn& upload) {
    fGpu->submitToGpu(false);
    state->doUpload(upload);
}

////////////////////////////////////////////////////////////////////////////////

// Our caps require there to be a single reference value for both faces. However, our stencil
// object asserts if the correct face getter is not queried.
static uint16_t get_stencil_ref(const GrProgramInfo& info) {
    GrStencilSettings stencilSettings = info.nonGLStencilSettings();
    if (stencilSettings.isTwoSided()) {
        SkASSERT(stencilSettings.postOriginCCWFace(info.origin()).fRef ==
                 stencilSettings.postOriginCWFace(info.origin()).fRef);
        return stencilSettings.postOriginCCWFace(info.origin()).fRef;
    } else {
        return stencilSettings.singleSidedFace().fRef;
    }
}

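// Applies the per-program state that accompanies a pipeline bind: the uniform bind group at
// group index 0, the stencil reference, the blend constant, and, when the scissor test is
// disabled, a scissor rect covering the whole target (Dawn's scissor cannot be turned off, so
// "disabled" means full bounds).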
void GrDawnOpsRenderPass::applyState(GrDawnProgram* program, const GrProgramInfo& programInfo) {
    auto bindGroup = program->setUniformData(fGpu, fRenderTarget, programInfo);
    fPassEncoder.SetPipeline(program->fRenderPipeline);
    fPassEncoder.SetBindGroup(0, bindGroup, 0, nullptr);
    if (programInfo.isStencilEnabled()) {
        fPassEncoder.SetStencilReference(get_stencil_ref(programInfo));
    }
    const GrPipeline& pipeline = programInfo.pipeline();
    GrXferProcessor::BlendInfo blendInfo = pipeline.getXferProcessor().getBlendInfo();
    const float* c = blendInfo.fBlendConstant.vec();
    wgpu::Color color{c[0], c[1], c[2], c[3]};
    fPassEncoder.SetBlendConstant(&color);
    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full render target bounds.
        SkIRect rect = SkIRect::MakeWH(fRenderTarget->width(), fRenderTarget->height());
        fPassEncoder.SetScissorRect(rect.x(), rect.y(), rect.width(), rect.height());
    }
}

void GrDawnOpsRenderPass::onEnd() {
    fPassEncoder.EndPass();
}

bool GrDawnOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
                                         const SkRect& drawBounds) {
    fCurrentProgram = fGpu->getOrCreateRenderPipeline(fRenderTarget, programInfo);
    if (!fCurrentProgram) {
        return false;
    }
    this->applyState(fCurrentProgram.get(), programInfo);
    return true;
}

void GrDawnOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    // Higher-level skgpu::v1::SurfaceDrawContext and clips should have already ensured draw
    // bounds are restricted to the render target.
    SkASSERT(SkIRect::MakeSize(fRenderTarget->dimensions()).contains(scissor));
    auto nativeScissorRect =
            GrNativeRect::MakeRelativeTo(fOrigin, fRenderTarget->height(), scissor);
    fPassEncoder.SetScissorRect(nativeScissorRect.fX, nativeScissorRect.fY,
                                nativeScissorRect.fWidth, nativeScissorRect.fHeight);
}

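// Binds the texture/sampler bind group at group index 1; group 0 (uniforms) was bound in
// applyState. setTextures() may return a null bind group, presumably when the program samples
// no textures, in which case nothing is bound.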
bool GrDawnOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                         const GrSurfaceProxy* const geomProcTextures[],
                                         const GrPipeline& pipeline) {
    auto bindGroup = fCurrentProgram->setTextures(fGpu, geomProc, pipeline, geomProcTextures);
    if (bindGroup) {
        fPassEncoder.SetBindGroup(1, bindGroup, 0, nullptr);
    }
    return true;
}

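// Vertex data goes in vertex-buffer slot 0 and instance data in slot 1, which is assumed to
// match the vertex buffer layouts set up when the render pipeline was built. Indices are 16-bit,
// hence wgpu::IndexFormat::Uint16.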
void GrDawnOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                        sk_sp<const GrBuffer> instanceBuffer,
                                        sk_sp<const GrBuffer> vertexBuffer,
                                        GrPrimitiveRestart) {
    if (vertexBuffer) {
        wgpu::Buffer vertex = static_cast<const GrDawnBuffer*>(vertexBuffer.get())->get();
        fPassEncoder.SetVertexBuffer(0, vertex);
    }
    if (instanceBuffer) {
        wgpu::Buffer instance = static_cast<const GrDawnBuffer*>(instanceBuffer.get())->get();
        fPassEncoder.SetVertexBuffer(1, instance);
    }
    if (indexBuffer) {
        wgpu::Buffer index = static_cast<const GrDawnBuffer*>(indexBuffer.get())->get();
        fPassEncoder.SetIndexBuffer(index, wgpu::IndexFormat::Uint16);
    }
}

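// The non-instanced draw entry points forward to their instanced counterparts with a single
// instance; every draw increments the GPU stats counter.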
void GrDawnOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
    this->onDrawInstanced(1, 0, vertexCount, baseVertex);
}

void GrDawnOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance,
                                          int vertexCount, int baseVertex) {
    fPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

void GrDawnOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
                                        uint16_t maxIndexValue, int baseVertex) {
    this->onDrawIndexedInstanced(indexCount, baseIndex, 1, 0, baseVertex);
}

void GrDawnOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                                 int baseInstance, int baseVertex) {
    fPassEncoder.DrawIndexed(indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}