/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
9
10 #include "src/gpu/GrOpFlushState.h"
11 #include "src/gpu/GrPipeline.h"
12 #include "src/gpu/GrRenderTarget.h"
13 #include "src/gpu/GrTexture.h"
14 #include "src/gpu/dawn/GrDawnAttachment.h"
15 #include "src/gpu/dawn/GrDawnBuffer.h"
16 #include "src/gpu/dawn/GrDawnGpu.h"
17 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
18 #include "src/gpu/dawn/GrDawnRenderTarget.h"
19 #include "src/gpu/dawn/GrDawnTexture.h"
20 #include "src/gpu/dawn/GrDawnUtil.h"
21 #include "src/sksl/SkSLCompiler.h"
22
23 ////////////////////////////////////////////////////////////////////////////////
24
to_dawn_load_op(GrLoadOp loadOp)25 static wgpu::LoadOp to_dawn_load_op(GrLoadOp loadOp) {
26 switch (loadOp) {
27 case GrLoadOp::kLoad:
28 return wgpu::LoadOp::Load;
29 case GrLoadOp::kDiscard:
30 // Use LoadOp::Load to emulate DontCare.
31 // Dawn doesn't have DontCare, for security reasons.
32 // Load should be equivalent to DontCare for desktop; Clear would
33 // probably be better for tilers. If Dawn does add DontCare
34 // as an extension, use it here.
35 return wgpu::LoadOp::Load;
36 case GrLoadOp::kClear:
37 return wgpu::LoadOp::Clear;
38 default:
39 SK_ABORT("Invalid LoadOp");
40 }
41 }
42
GrDawnOpsRenderPass(GrDawnGpu * gpu,GrRenderTarget * rt,GrSurfaceOrigin origin,const LoadAndStoreInfo & colorInfo,const StencilLoadAndStoreInfo & stencilInfo)43 GrDawnOpsRenderPass::GrDawnOpsRenderPass(GrDawnGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
44 const LoadAndStoreInfo& colorInfo,
45 const StencilLoadAndStoreInfo& stencilInfo)
46 : INHERITED(rt, origin)
47 , fGpu(gpu)
48 , fColorInfo(colorInfo) {
49 fEncoder = fGpu->device().CreateCommandEncoder();
50 wgpu::LoadOp colorOp = to_dawn_load_op(colorInfo.fLoadOp);
51 wgpu::LoadOp stencilOp = to_dawn_load_op(stencilInfo.fLoadOp);
52 fPassEncoder = beginRenderPass(colorOp, stencilOp);
53 }
54
beginRenderPass(wgpu::LoadOp colorOp,wgpu::LoadOp stencilOp)55 wgpu::RenderPassEncoder GrDawnOpsRenderPass::beginRenderPass(wgpu::LoadOp colorOp,
56 wgpu::LoadOp stencilOp) {
57 if (GrTexture* tex = fRenderTarget->asTexture()) {
58 tex->markMipmapsDirty();
59 }
60 auto stencilAttachment = static_cast<GrDawnAttachment*>(fRenderTarget->getStencilAttachment());
61
62 const float* c = fColorInfo.fClearColor.data();
63
64 wgpu::RenderPassColorAttachmentDescriptor colorAttachment;
65 colorAttachment.attachment = static_cast<GrDawnRenderTarget*>(fRenderTarget)->textureView();
66 colorAttachment.resolveTarget = nullptr;
67 colorAttachment.clearColor = { c[0], c[1], c[2], c[3] };
68 colorAttachment.loadOp = colorOp;
69 colorAttachment.storeOp = wgpu::StoreOp::Store;
70 wgpu::RenderPassColorAttachmentDescriptor* colorAttachments = { &colorAttachment };
71 wgpu::RenderPassDescriptor renderPassDescriptor;
72 renderPassDescriptor.colorAttachmentCount = 1;
73 renderPassDescriptor.colorAttachments = colorAttachments;
74 if (stencilAttachment) {
75 wgpu::RenderPassDepthStencilAttachmentDescriptor depthStencilAttachment;
76 depthStencilAttachment.attachment = stencilAttachment->view();
77 depthStencilAttachment.depthLoadOp = stencilOp;
78 depthStencilAttachment.stencilLoadOp = stencilOp;
79 depthStencilAttachment.clearDepth = 1.0f;
80 depthStencilAttachment.clearStencil = 0;
81 depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
82 depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
83 renderPassDescriptor.depthStencilAttachment = &depthStencilAttachment;
84 } else {
85 renderPassDescriptor.depthStencilAttachment = nullptr;
86 }
87 return fEncoder.BeginRenderPass(&renderPassDescriptor);
88 }
89
~GrDawnOpsRenderPass()90 GrDawnOpsRenderPass::~GrDawnOpsRenderPass() {
91 }
92
gpu()93 GrGpu* GrDawnOpsRenderPass::gpu() { return fGpu; }
94
submit()95 void GrDawnOpsRenderPass::submit() {
96 fGpu->appendCommandBuffer(fEncoder.Finish());
97 }
98
onClearStencilClip(const GrScissorState & scissor,bool insideStencilMask)99 void GrDawnOpsRenderPass::onClearStencilClip(const GrScissorState& scissor,
100 bool insideStencilMask) {
101 SkASSERT(!scissor.enabled());
102 fPassEncoder.EndPass();
103 fPassEncoder = beginRenderPass(wgpu::LoadOp::Load, wgpu::LoadOp::Clear);
104 }
105
onClear(const GrScissorState & scissor,std::array<float,4> color)106 void GrDawnOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
107 SkASSERT(!scissor.enabled());
108 fPassEncoder.EndPass();
109 fPassEncoder = beginRenderPass(wgpu::LoadOp::Clear, wgpu::LoadOp::Load);
110 }
111
112 ////////////////////////////////////////////////////////////////////////////////
113
inlineUpload(GrOpFlushState * state,GrDeferredTextureUploadFn & upload)114 void GrDawnOpsRenderPass::inlineUpload(GrOpFlushState* state,
115 GrDeferredTextureUploadFn& upload) {
116 fGpu->submitToGpu(false);
117 state->doUpload(upload);
118 }
119
120 ////////////////////////////////////////////////////////////////////////////////
121
applyState(GrDawnProgram * program,const GrProgramInfo & programInfo)122 void GrDawnOpsRenderPass::applyState(GrDawnProgram* program, const GrProgramInfo& programInfo) {
123 auto bindGroup = program->setUniformData(fGpu, fRenderTarget, programInfo);
124 fPassEncoder.SetPipeline(program->fRenderPipeline);
125 fPassEncoder.SetBindGroup(0, bindGroup, 0, nullptr);
126 if (programInfo.isStencilEnabled()) {
127 fPassEncoder.SetStencilReference(programInfo.userStencilSettings()->fCCWFace.fRef);
128 }
129 const GrPipeline& pipeline = programInfo.pipeline();
130 GrXferProcessor::BlendInfo blendInfo = pipeline.getXferProcessor().getBlendInfo();
131 const float* c = blendInfo.fBlendConstant.vec();
132 wgpu::Color color{c[0], c[1], c[2], c[3]};
133 fPassEncoder.SetBlendColor(&color);
134 if (!programInfo.pipeline().isScissorTestEnabled()) {
135 // "Disable" scissor by setting it to the full pipeline bounds.
136 SkIRect rect = SkIRect::MakeWH(fRenderTarget->width(), fRenderTarget->height());
137 fPassEncoder.SetScissorRect(rect.x(), rect.y(), rect.width(), rect.height());
138 }
139 }
140
onEnd()141 void GrDawnOpsRenderPass::onEnd() {
142 fPassEncoder.EndPass();
143 }
144
onBindPipeline(const GrProgramInfo & programInfo,const SkRect & drawBounds)145 bool GrDawnOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
146 const SkRect& drawBounds) {
147 fCurrentProgram = fGpu->getOrCreateRenderPipeline(fRenderTarget, programInfo);
148 if (!fCurrentProgram) {
149 return false;
150 }
151 this->applyState(fCurrentProgram.get(), programInfo);
152 return true;
153 }
154
onSetScissorRect(const SkIRect & scissor)155 void GrDawnOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
156 // Higher-level GrSurfaceDrawContext and clips should have already ensured draw bounds are
157 // restricted to the render target.
158 SkASSERT(SkIRect::MakeSize(fRenderTarget->dimensions()).contains(scissor));
159 auto nativeScissorRect =
160 GrNativeRect::MakeRelativeTo(fOrigin, fRenderTarget->height(), scissor);
161 fPassEncoder.SetScissorRect(nativeScissorRect.fX, nativeScissorRect.fY,
162 nativeScissorRect.fWidth, nativeScissorRect.fHeight);
163 }
164
onBindTextures(const GrGeometryProcessor & geomProc,const GrSurfaceProxy * const geomProcTextures[],const GrPipeline & pipeline)165 bool GrDawnOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
166 const GrSurfaceProxy* const geomProcTextures[],
167 const GrPipeline& pipeline) {
168 auto bindGroup = fCurrentProgram->setTextures(fGpu, geomProc, pipeline, geomProcTextures);
169 if (bindGroup) {
170 fPassEncoder.SetBindGroup(1, bindGroup, 0, nullptr);
171 }
172 return true;
173 }
174
onBindBuffers(sk_sp<const GrBuffer> indexBuffer,sk_sp<const GrBuffer> instanceBuffer,sk_sp<const GrBuffer> vertexBuffer,GrPrimitiveRestart)175 void GrDawnOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
176 sk_sp<const GrBuffer> instanceBuffer,
177 sk_sp<const GrBuffer> vertexBuffer,
178 GrPrimitiveRestart) {
179 if (vertexBuffer) {
180 wgpu::Buffer vertex = static_cast<const GrDawnBuffer*>(vertexBuffer.get())->get();
181 fPassEncoder.SetVertexBuffer(0, vertex);
182 }
183 if (instanceBuffer) {
184 wgpu::Buffer instance = static_cast<const GrDawnBuffer*>(instanceBuffer.get())->get();
185 fPassEncoder.SetVertexBuffer(1, instance);
186 }
187 if (indexBuffer) {
188 wgpu::Buffer index = static_cast<const GrDawnBuffer*>(indexBuffer.get())->get();
189 fPassEncoder.SetIndexBuffer(index, wgpu::IndexFormat::Uint16);
190 }
191 }
192
onDraw(int vertexCount,int baseVertex)193 void GrDawnOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
194 this->onDrawInstanced(1, 0, vertexCount, baseVertex);
195 }
196
onDrawInstanced(int instanceCount,int baseInstance,int vertexCount,int baseVertex)197 void GrDawnOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance,
198 int vertexCount, int baseVertex) {
199 fPassEncoder.Draw(vertexCount, instanceCount, baseVertex, baseInstance);
200 fGpu->stats()->incNumDraws();
201 }
202
onDrawIndexed(int indexCount,int baseIndex,uint16_t minIndexValue,uint16_t maxIndexValue,int baseVertex)203 void GrDawnOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
204 uint16_t maxIndexValue, int baseVertex) {
205 this->onDrawIndexedInstanced(indexCount, baseIndex, 1, 0, baseVertex);
206 }
207
onDrawIndexedInstanced(int indexCount,int baseIndex,int instanceCount,int baseInstance,int baseVertex)208 void GrDawnOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
209 int baseInstance, int baseVertex) {
210 fPassEncoder.DrawIndexed(indexCount, instanceCount, baseIndex, baseVertex, baseInstance);
211 fGpu->stats()->incNumDraws();
212 }
213