/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpuCommandBuffer.h"

#include "GrFixedClip.h"
#include "GrMesh.h"
#include "GrOpFlushState.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrTexturePriv.h"
#include "GrVkCommandBuffer.h"
#include "GrVkGpu.h"
#include "GrVkPipeline.h"
#include "GrVkRenderPass.h"
#include "GrVkRenderTarget.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "SkRect.h"

void get_vk_load_store_ops(const GrGpuCommandBuffer::LoadAndStoreInfo& info,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (info.fLoadOp) {
        case GrGpuCommandBuffer::LoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrGpuCommandBuffer::LoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrGpuCommandBuffer::LoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (info.fStoreOp) {
        case GrGpuCommandBuffer::StoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrGpuCommandBuffer::StoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu,
                                           const LoadAndStoreInfo& colorInfo,
                                           const LoadAndStoreInfo& stencilInfo)
    : fGpu(gpu)
    , fRenderTarget(nullptr)
    , fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor))
    , fLastPipelineState(nullptr) {

    get_vk_load_store_ops(colorInfo, &fVkColorLoadOp, &fVkColorStoreOp);

    get_vk_load_store_ops(stencilInfo, &fVkStencilLoadOp, &fVkStencilStoreOp);

    fCurrentCmdInfo = -1;
}

void GrVkGpuCommandBuffer::init(GrVkRenderTarget* target) {
    SkASSERT(!fRenderTarget);
    fRenderTarget = target;

    GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;
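
    // If the target already carries a compatible-render-pass handle, look the render pass up by
    // that handle; otherwise ask the resource provider to find one based on the target itself.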
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*target,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }

    cbInfo.fColorClearValue.color.float32[0] = fClearColor.fRGBA[0];
    cbInfo.fColorClearValue.color.float32[1] = fClearColor.fRGBA[1];
    cbInfo.fColorClearValue.color.float32[2] = fClearColor.fRGBA[2];
    cbInfo.fColorClearValue.color.float32[3] = fClearColor.fRGBA[3];

    cbInfo.fBounds.setEmpty();
    cbInfo.fIsEmpty = true;
    cbInfo.fStartsWithClear = false;

    cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    cbInfo.currentCmdBuf()->begin(fGpu, target->framebuffer(), cbInfo.fRenderPass);
}

GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
        for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
            cbInfo.fCommandBuffers[j]->unref(fGpu);
        }
        cbInfo.fRenderPass->unref(fGpu);
    }
}

GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; }
GrRenderTarget* GrVkGpuCommandBuffer::renderTarget() { return fRenderTarget; }

void GrVkGpuCommandBuffer::end() {
    if (fCurrentCmdInfo >= 0) {
        fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
    }
}

void GrVkGpuCommandBuffer::onSubmit() {
    if (!fRenderTarget) {
        return;
    }
    // Change the layout of our render target so it can be used as the color attachment. Currently
    // we don't attach the resolve to the framebuffer, so there is no need to change its layout.
    GrVkImage* targetImage = fRenderTarget->msaaImage() ? fRenderTarget->msaaImage()
                                                        : fRenderTarget;

    targetImage->setImageLayout(fGpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                false);

    // If we are using a stencil attachment we also need to update its layout
    if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                  false);
    }

    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

        for (int j = 0; j < cbInfo.fPreDrawUploads.count(); ++j) {
            InlineUploadInfo& iuInfo = cbInfo.fPreDrawUploads[j];
            iuInfo.fFlushState->doUpload(iuInfo.fUpload);
        }

        // TODO: We can't add this optimization yet since many things create a scratch texture and
        // add the discard immediately, but then don't draw to it right away. This causes the
        // discard to be ignored and we get yelled at for loading uninitialized data. However, once
        // MDP lands, the discard will get reordered with the rest of the draw commands and we can
        // re-enable this.
#if 0
        if (cbInfo.fIsEmpty && !cbInfo.fStartsWithClear) {
            // We have submitted no actual draw commands to the command buffer, and we are not using
            // the render pass to do a clear, so there is no need to submit anything.
            continue;
        }
#endif
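        // Only submit the secondary command buffers if the accumulated draw bounds actually
        // overlap the render target; work that falls entirely outside the target is dropped.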
        if (cbInfo.fBounds.intersect(0, 0,
                                     SkIntToScalar(fRenderTarget->width()),
                                     SkIntToScalar(fRenderTarget->height()))) {
            SkIRect iBounds;
            cbInfo.fBounds.roundOut(&iBounds);

            fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
                                               &cbInfo.fColorClearValue, fRenderTarget, iBounds);
        }
    }
}

void GrVkGpuCommandBuffer::discard(GrRenderTarget* rt) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (cbInfo.fIsEmpty) {
        // Change the render pass so the color and stencil attachments are discarded on load instead
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fRenderTarget->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        cbInfo.fStartsWithClear = false;
    }
}

void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* rt, const GrFixedClip& clip,
                                              bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
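    // Clearing to "inside" sets the most significant stencil bit (e.g. 0x80 for an 8-bit stencil
    // buffer); clearing to "outside" sets the stencil to 0.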
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fRenderTarget->origin()) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(cbInfo.fRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}

void GrVkGpuCommandBuffer::onClear(GrRenderTarget* rt, const GrFixedClip& clip, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(!clip.hasWindowRectangles());

    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

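    // If nothing has been recorded yet and the clear is not scissored, fold the clear into the
    // render pass's color load op rather than recording a vkCmdClearAttachments call.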
    if (cbInfo.fIsEmpty && !clip.scissorEnabled()) {
        // We will change the render pass to do a clear load instead
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fRenderTarget->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        GrColorToRGBAFloat(color, cbInfo.fColorClearValue.color.float32);
        cbInfo.fStartsWithClear = true;

        // Update command buffer bounds
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        return;
    }

    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fRenderTarget->origin()) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
    return;
}

void GrVkGpuCommandBuffer::addAdditionalCommandBuffer() {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    cbInfo.currentCmdBuf()->end(fGpu);
    cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
}

void GrVkGpuCommandBuffer::addAdditionalRenderPass() {
    fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    fCurrentCmdInfo++;

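    // The new render pass must load the existing attachment contents, since earlier passes in
    // this command buffer may already have rendered to them.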
    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fRenderTarget->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*fRenderTarget,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }

    cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
    // It shouldn't matter what we set the clear color to here since we will assume loading of the
    // attachment.
    memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
    cbInfo.fBounds.setEmpty();
    cbInfo.fIsEmpty = true;
    cbInfo.fStartsWithClear = false;

    cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
}

void GrVkGpuCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload,
                                        GrRenderTarget* rt) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(rt);
    if (!fRenderTarget) {
        this->init(target);
    }
    if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
        this->addAdditionalRenderPass();
    }
    fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
                                        const GrBuffer* indexBuffer,
                                        const GrBuffer* vertexBuffer,
                                        const GrBuffer* instanceBuffer) {
    GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
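    // For example, a mesh with both vertex and instance attributes ends up with the vertex
    // buffer at binding 0 and the instance buffer at binding 1.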

    if (primProc.hasVertexAttribs()) {
        SkASSERT(vertexBuffer);
        SkASSERT(!vertexBuffer->isCPUBacked());
        SkASSERT(!vertexBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(vertexBuffer));
    }

    if (primProc.hasInstanceAttribs()) {
        SkASSERT(instanceBuffer);
        SkASSERT(!instanceBuffer->isCPUBacked());
        SkASSERT(!instanceBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(instanceBuffer));
    }

    if (indexBuffer) {
        SkASSERT(indexBuffer);
        SkASSERT(!indexBuffer->isMapped());
        SkASSERT(!indexBuffer->isCPUBacked());

        currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
    }
}

sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState(
                                                               const GrPipeline& pipeline,
                                                               const GrPrimitiveProcessor& primProc,
                                                               GrPrimitiveType primitiveType,
                                                               bool hasDynamicState) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(cbInfo.fRenderPass);

    sk_sp<GrVkPipelineState> pipelineState =
        fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline,
                                                                     primProc,
                                                                     primitiveType,
                                                                     *cbInfo.fRenderPass);
    if (!pipelineState) {
        return pipelineState;
    }

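    // If the caps require it, start a fresh secondary command buffer whenever the bound pipeline
    // state changes within a non-empty command buffer.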
    if (!cbInfo.fIsEmpty &&
        fLastPipelineState && fLastPipelineState != pipelineState.get() &&
        fGpu->vkCaps().newCBOnPipelineChange()) {
        this->addAdditionalCommandBuffer();
    }
    fLastPipelineState = pipelineState.get();

    pipelineState->setData(fGpu, primProc, pipeline);

    pipelineState->bind(fGpu, cbInfo.currentCmdBuf());

    GrRenderTarget* rt = pipeline.getRenderTarget();

    if (!pipeline.getScissorState().enabled()) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), rt,
                                                 SkIRect::MakeWH(rt->width(), rt->height()));
    } else if (!hasDynamicState) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), rt,
                                                 pipeline.getScissorState().rect());
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, cbInfo.currentCmdBuf(), rt);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, cbInfo.currentCmdBuf(), rt->config(),
                                               pipeline.getXferProcessor());

    return pipelineState;
}

static void set_texture_layout(GrVkTexture* vkTexture, GrVkGpu* gpu) {
    // TODO: If we ever decide to create the secondary command buffers ahead of time before we
    // are actually going to submit them, we will need to track the sampled images and delay
    // adding the layout change/barrier until we are ready to submit.
    vkTexture->setImageLayout(gpu,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                              false);
}

static void prepare_sampled_images(const GrResourceIOProcessor& processor, GrVkGpu* gpu) {
    for (int i = 0; i < processor.numTextureSamplers(); ++i) {
        const GrResourceIOProcessor::TextureSampler& sampler = processor.textureSampler(i);
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(sampler.peekTexture());

        // We may need to resolve the texture first if it is also a render target
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT) {
            gpu->onResolveRenderTarget(texRT);
        }

        const GrSamplerParams& params = sampler.params();
        // Check if we need to regenerate any mip maps
        if (GrSamplerParams::kMipMap_FilterMode == params.filterMode()) {
            if (vkTexture->texturePriv().mipMapsAreDirty()) {
                gpu->generateMipmap(vkTexture);
                vkTexture->texturePriv().dirtyMipMaps(false);
            }
        }
        set_texture_layout(vkTexture, gpu);
    }
}

void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
                                  const GrPrimitiveProcessor& primProc,
                                  const GrMesh meshes[],
                                  const GrPipeline::DynamicState dynamicStates[],
                                  int meshCount,
                                  const SkRect& bounds) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(pipeline.getRenderTarget());
    if (!fRenderTarget) {
        this->init(target);
    }
    SkASSERT(target == fRenderTarget);

    if (!meshCount) {
        return;
    }
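    // Make sure every texture that will be sampled (by the primitive processor, the fragment
    // processors, and any dst-copy texture) is in VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
    // before the draws are recorded.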
    prepare_sampled_images(primProc, fGpu);
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        prepare_sampled_images(*fp, fGpu);
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        set_texture_layout(static_cast<GrVkTexture*>(dstTexture), fGpu);
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
                                                                    primProc,
                                                                    primitiveType,
                                                                    SkToBool(dynamicStates));
    if (!pipelineState) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        if (mesh.primitiveType() != primitiveType) {
            // Technically we don't have to call this here (since there is a safety check in
            // pipelineState::setData), but it allows for quicker freeing of resources if the
            // pipelineState sits in a cache for a while.
            pipelineState->freeTempResources(fGpu);
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(pipeline,
                                                   primProc,
                                                   primitiveType,
                                                   SkToBool(dynamicStates));
            if (!pipelineState) {
                return;
            }
        }

        if (dynamicStates) {
            if (pipeline.getScissorState().enabled()) {
                GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                         target, dynamicStates[i].fScissorRect);
            }
        }

        SkASSERT(pipelineState);
        mesh.sendToGpu(primProc, this);
    }

    cbInfo.fBounds.join(bounds);
    cbInfo.fIsEmpty = false;

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState::setData), but it allows for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(fGpu);
}

void GrVkGpuCommandBuffer::sendInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
                                                  GrPrimitiveType,
                                                  const GrBuffer* vertexBuffer,
                                                  int vertexCount,
                                                  int baseVertex,
                                                  const GrBuffer* instanceBuffer,
                                                  int instanceCount,
                                                  int baseInstance) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    this->bindGeometry(primProc, nullptr, vertexBuffer, instanceBuffer);
    cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

void GrVkGpuCommandBuffer::sendIndexedInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
                                                         GrPrimitiveType,
                                                         const GrBuffer* indexBuffer,
                                                         int indexCount,
                                                         int baseIndex,
                                                         const GrBuffer* vertexBuffer,
                                                         int baseVertex,
                                                         const GrBuffer* instanceBuffer,
                                                         int instanceCount,
                                                         int baseInstance) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    this->bindGeometry(primProc, indexBuffer, vertexBuffer, instanceBuffer);
    cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
                                        baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}