/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/ganesh/vk/GrBackendDrawableInfo.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/effects/GrTextureEffect.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"

using namespace skia_private;

/////////////////////////////////////////////////////////////////////////////

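// Translates Ganesh load and store ops for an attachment into their Vulkan equivalents.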
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

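// Transitions the framebuffer's attachments (color, plus resolve and stencil when present) into
// the image layouts this render pass expects before recording begins.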
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // We need input access to do the shader read and color read access to do the attachment
            // load.
            VkAccessFlags dstAccess =
                    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
            VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              dstAccess,
                                              dstStages,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

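// Begins the Vulkan render pass: sets up attachment layouts, expands the render area to the
// device's render-area granularity, and, when requested, runs the initial load-from-resolve
// subpass.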
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

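// Builds the Vulkan load/store ops for the color, resolve, and stencil attachments, finds a
// matching GrVkRenderPass from the resource provider, and starts the first render pass
// (recording into a secondary command buffer unless the caps prefer primary command buffers).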
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass, and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer
        // since its lifetime matches the lifetime for which we need to keep the
        // GrManagedResources on the GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const TArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check once here that the GrVkGpu has a valid current command buffer instead of checking
    // each time we access it. If the command buffer is valid here, it should remain valid
    // throughout the use of this render pass, since nothing should trigger a submit while this
    // render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.size(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general it
    // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
    // there are situations where higher up we couldn't discard the previous ops and set a clear
    // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

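// Starts a follow-up render pass after the current one had to be ended mid-stream (e.g. for an
// inline upload or an external drawable). The new pass must load the results of the previous
// one, either directly or via a load-from-resolve subpass.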
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

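// Vulkan does not allow transfer (copy) commands inside a render pass, so an inline upload ends
// the current render pass, performs the upload, and then begins an additional render pass that
// loads what has been rendered so far.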
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures'
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin, fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart());  // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (vertexBuffer) {
        SkDEBUGCODE(auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get()));
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (instanceBuffer) {
        SkDEBUGCODE(auto* gpuInstanceBuffer =
                            static_cast<const GrGpuBuffer*>(instanceBuffer.get()));
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (indexBuffer) {
        SkDEBUGCODE(auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get()));
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
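    // The device may cap how many draws a single indirect draw command can issue
    // (maxDrawIndirectDrawCount), so issue the draws in batches no larger than that limit.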
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

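// Executes a client-supplied SkDrawable by handing it the current secondary command buffer to
// record into. Since the drawable can record arbitrary Vulkan commands, our cached command
// buffer state is invalidated afterwards.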
void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fFromSwapchainOrAndroidWindow =
            fFramebuffer->colorAttachment()->vkImageInfo().fPartOfSwapchainOrAndroidWindow;
#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}