1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
9
10 #include "include/core/SkDrawable.h"
11 #include "include/core/SkRect.h"
12 #include "include/core/SkSize.h"
13 #include "include/gpu/ganesh/GrBackendSurface.h"
14 #include "include/gpu/ganesh/GrDirectContext.h"
15 #include "include/gpu/ganesh/GrTypes.h"
16 #include "include/gpu/ganesh/vk/GrBackendDrawableInfo.h"
17 #include "include/gpu/ganesh/vk/GrVkTypes.h"
18 #include "include/private/base/SkAssert.h"
19 #include "include/private/base/SkTo.h"
20 #include "include/private/gpu/ganesh/GrTypesPriv.h"
21 #include "src/gpu/GpuRefCnt.h"
22 #include "src/gpu/ganesh/GrAttachment.h"
23 #include "src/gpu/ganesh/GrBackendUtils.h"
24 #include "src/gpu/ganesh/GrBuffer.h"
25 #include "src/gpu/ganesh/GrCaps.h"
26 #include "src/gpu/ganesh/GrDirectContextPriv.h"
27 #include "src/gpu/ganesh/GrDrawIndirectCommand.h"
28 #include "src/gpu/ganesh/GrGpuBuffer.h"
29 #include "src/gpu/ganesh/GrNativeRect.h"
30 #include "src/gpu/ganesh/GrOpFlushState.h"
31 #ifdef SKIA_OHOS
32 #include "src/gpu/ganesh/GrPerfMonitorReporter.h"
33 #endif
34 #include "src/gpu/ganesh/GrPipeline.h"
35 #include "src/gpu/ganesh/GrProgramInfo.h"
36 #include "src/gpu/ganesh/GrRenderTarget.h"
37 #include "src/gpu/ganesh/GrScissorState.h"
38 #include "src/gpu/ganesh/GrSurfaceProxy.h"
39 #include "src/gpu/ganesh/GrTexture.h"
40 #include "src/gpu/ganesh/effects/GrTextureEffect.h"
41 #include "src/gpu/ganesh/vk/GrVkCaps.h"
42 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
43 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
44 #include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
45 #include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
46 #include "src/gpu/ganesh/vk/GrVkGpu.h"
47 #include "src/gpu/ganesh/vk/GrVkImage.h"
48 #include "src/gpu/ganesh/vk/GrVkPipeline.h"
49 #include "src/gpu/ganesh/vk/GrVkPipelineState.h"
50 #include "src/gpu/ganesh/vk/GrVkRenderPass.h"
51 #include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
52 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
53 #include "src/gpu/ganesh/vk/GrVkTexture.h"
54
55 #include <algorithm>
56 #include <cstring>
57 #include <functional>
58 #include <utility>
59
60 class GrGpu;
61
62 using namespace skia_private;
63
64 /////////////////////////////////////////////////////////////////////////////
65
get_vk_load_store_ops(GrLoadOp loadOpIn,GrStoreOp storeOpIn,VkAttachmentLoadOp * loadOp,VkAttachmentStoreOp * storeOp)66 void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
67 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
68 switch (loadOpIn) {
69 case GrLoadOp::kLoad:
70 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
71 break;
72 case GrLoadOp::kClear:
73 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
74 break;
75 case GrLoadOp::kDiscard:
76 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
77 break;
78 default:
79 SK_ABORT("Invalid LoadOp");
80 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
81 }
82
83 switch (storeOpIn) {
84 case GrStoreOp::kStore:
85 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
86 break;
87 case GrStoreOp::kDiscard:
88 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
89 break;
90 default:
91 SK_ABORT("Invalid StoreOp");
92 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
93 }
94 }
95
GrVkOpsRenderPass(GrVkGpu * gpu)96 GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}
97
// Transitions the framebuffer's color, resolve, and stencil attachments into
// the image layouts needed before the render pass begins. The color layout
// depends on whether it will also be read as an input attachment; the resolve
// layout depends on whether we are loading the MSAA attachment from it.
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // We need input access to do the shader read and color read access to do the attachment
            // load.
            VkAccessFlags dstAccess =
                    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
            VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              dstAccess,
                                              dstStages,
                                              false);
        } else {
            // Not loading from the resolve: it is only written as a color attachment at the
            // end of the render pass.
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}
165
// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
adjust_bounds_to_granularity(SkIRect * dstBounds,const SkIRect & srcBounds,const VkExtent2D & granularity,int maxWidth,int maxHeight)169 void adjust_bounds_to_granularity(SkIRect* dstBounds,
170 const SkIRect& srcBounds,
171 const VkExtent2D& granularity,
172 int maxWidth,
173 int maxHeight) {
174 // Adjust Width
175 if ((0 != granularity.width && 1 != granularity.width)) {
176 // Start with the right side of rect so we know if we end up going pass the maxWidth.
177 int rightAdj = srcBounds.fRight % granularity.width;
178 if (rightAdj != 0) {
179 rightAdj = granularity.width - rightAdj;
180 }
181 dstBounds->fRight = srcBounds.fRight + rightAdj;
182 if (dstBounds->fRight > maxWidth) {
183 dstBounds->fRight = maxWidth;
184 dstBounds->fLeft = 0;
185 } else {
186 dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
187 }
188 } else {
189 dstBounds->fLeft = srcBounds.fLeft;
190 dstBounds->fRight = srcBounds.fRight;
191 }
192
193 // Adjust height
194 if ((0 != granularity.height && 1 != granularity.height)) {
195 // Start with the bottom side of rect so we know if we end up going pass the maxHeight.
196 int bottomAdj = srcBounds.fBottom % granularity.height;
197 if (bottomAdj != 0) {
198 bottomAdj = granularity.height - bottomAdj;
199 }
200 dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
201 if (dstBounds->fBottom > maxHeight) {
202 dstBounds->fBottom = maxHeight;
203 dstBounds->fTop = 0;
204 } else {
205 dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
206 }
207 } else {
208 dstBounds->fTop = srcBounds.fTop;
209 dstBounds->fBottom = srcBounds.fBottom;
210 }
211 }
212
// Begins fCurrentRenderPass on the GPU. Transitions attachment layouts,
// computes the native render-area (expanded to the full attachment when the
// caps workaround requires it) adjusted to the device's granularity, and,
// when requested, loads the resolve attachment into the MSAA attachment in
// the first subpass. Returns false (and drops the render pass) on failure.
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    // A resolve load is recorded on the primary buffer, so the secondary buffer
    // (if any) only covers the first subpass when we are not loading from resolve.
    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        // Close the secondary CB (if open) so it can be recycled, then drop the pass.
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}
259
// Non-wrapped setup path: translates the Ganesh load/store infos into Vulkan
// ops, looks up a compatible GrVkRenderPass, optionally allocates and begins
// a secondary command buffer (when the caps prefer one), and finally begins
// the render pass with the requested clear color. Returns false on any
// lookup/allocation failure.
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}
303
initWrapped()304 bool GrVkOpsRenderPass::initWrapped() {
305 SkASSERT(fFramebuffer->isExternal());
306 fCurrentRenderPass = fFramebuffer->externalRenderPass();
307 SkASSERT(fCurrentRenderPass);
308 fCurrentRenderPass->ref();
309
310 fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
311 if (!fCurrentSecondaryCommandBuffer) {
312 return false;
313 }
314 return true;
315 }
316
GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    // reset() releases the secondary command buffer and our render pass ref.
    this->reset();
}
320
gpu()321 GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }
322
currentCommandBuffer()323 GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
324 if (fCurrentSecondaryCommandBuffer) {
325 return fCurrentSecondaryCommandBuffer.get();
326 }
327 // We checked this when we setup the GrVkOpsRenderPass and it should not have changed while we
328 // are still using this object.
329 SkASSERT(fGpu->currentCommandBuffer());
330 return fGpu->currentCommandBuffer();
331 }
332
// First-subpass helper: copies the resolve attachment's contents into the
// MSAA color attachment, then advances to the main subpass and fixes up our
// tracked layout for the resolve image.
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    // "nexSubpass" (sic) is the name as declared on GrVkCommandBuffer.
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
347
// Finalizes the recorded pass: returns a wrapped secondary command buffer to
// the external framebuffer, or submits our own secondary buffer (if any) and
// ends the render pass on the primary command buffer. No-ops when set() never
// ran or the render pass was lost (device lost).
void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass the ownership of the GrVkSecondaryCommandBuffer to the external framebuffer
        // since its lifetime matches the lifetime we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}
372
// One-time configuration of this ops render pass: records the target, origin
// and bounds, transitions every instantiated sampled texture to the shader
// read layout, stores the framebuffer and resolve/self-dependency settings,
// then defers to initWrapped() for external framebuffers or init() otherwise.
// Returns false if the gpu has no valid command buffer or init fails.
bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const TArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check to make sure the GrVkGpu has a valid current command buffer instead of each time we
    // access it. If the command buffer is valid here should be valid throughout the use of the
    // render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    // Move all instantiated sampled textures into the shader read layout up front,
    // before the render pass begins.
    for (int i = 0; i < sampledProxies.size(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}
428
// Returns this object to its idle state: recycles the secondary command
// buffer back to the gpu's command pool, drops our ref on the render pass,
// and clears all per-pass bookkeeping so the object can be reused.
void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}
454
// True when the framebuffer (and thus its render pass and secondary command
// buffer) was provided externally rather than created by Ganesh.
bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}
458
459 ////////////////////////////////////////////////////////////////////////////////
460
// Clears the stencil attachment inside the active render pass using
// vkCmdClearAttachments. The clear rect is the scissor (flipped for
// bottom-left origins) or the whole attachment; 'insideStencilMask' selects
// the top stencil bit as the clear value, otherwise we clear to 0.
void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}
513
// Clears the color attachment inside the active render pass using
// vkCmdClearAttachments, restricted to the scissor rect (flipped for
// bottom-left origins) or the whole attachment when no scissor is set.
void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor then in general it
    // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
    // there are situations where higher up we couldn't discard the previous ops and set a clear
    // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}
559
560 ////////////////////////////////////////////////////////////////////////////////
561
// Starts a continuation render pass after the original was split (e.g. for an
// inline upload). The new pass LOADs the previous attachment contents; when a
// resolve attachment exists we instead reload the MSAA attachment from the
// resolve in an extra first subpass. On failure fCurrentRenderPass is left
// null and later calls bail via their device-lost checks.
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        // The color (MSAA) attachment contents are reconstructed from the resolve,
        // so they don't need to be loaded or stored themselves.
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    // The clear value is unused since every attachment LOADs (or is DONT_CARE).
    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}
628
// Executes a deferred texture upload mid-pass. The upload cannot be recorded
// inside the active render pass, so we end the current pass (submitting the
// secondary CB if present), run the upload, then start a continuation render
// pass that reloads the previous attachment contents.
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}
646
647 ////////////////////////////////////////////////////////////////////////////////
648
// Called when draw recording finishes. Closes our secondary command buffer;
// a wrapped (externally owned) one is left open for the client to end.
void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}
654
// Finds (or creates) and binds the VkPipeline/pipeline-state for 'programInfo',
// then sets the dynamic viewport, scissor (when the pipeline doesn't scissor),
// blend-constant state, and uploads the uniforms. Also caches the intersection
// of the pass bounds with 'drawBounds' for later scissor clamping. Returns
// false if the pipeline state can't be created or uniforms fail to bind.
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}
703
onSetScissorRect(const SkIRect & scissor)704 void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
705 SkIRect combinedScissorRect;
706 if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
707 combinedScissorRect = SkIRect::MakeEmpty();
708 }
709 GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
710 fFramebuffer->colorAttachment()->dimensions(),
711 fOrigin, combinedScissorRect);
712 }
713
714 #ifdef SK_DEBUG
// Debug-only sanity check: a sampled texture must be in the
// SHADER_READ_ONLY layout, and a protected texture may only be sampled into
// a protected target within a protected context.
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
720 #endif
721
// Binds all sampled textures for the geometry processor and pipeline. When
// the pass self-depends for dst reads, additionally binds the color
// attachment as an input attachment. Returns false on descriptor failure.
bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    // Verify every sampled texture is already in the expected layout.
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}
753
// Binds the vertex, instance, and index buffers for subsequent draws. Vertex
// and instance buffers take the 0-based bindings assigned by GrVkPipeline
// (vertex first, then instance). Primitive restart is not supported on this
// backend, so 'primRestart' must be kNo.
void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (vertexBuffer) {
        SkDEBUGCODE(auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get()));
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (instanceBuffer) {
        SkDEBUGCODE(auto* gpuInstanceBuffer =
                            static_cast<const GrGpuBuffer*>(instanceBuffer.get()));
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (indexBuffer) {
        SkDEBUGCODE(auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get()));
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}
797
onDrawInstanced(int instanceCount,int baseInstance,int vertexCount,int baseVertex)798 void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
799 int baseInstance,
800 int vertexCount, int baseVertex) {
801 if (!fCurrentRenderPass) {
802 SkASSERT(fGpu->isDeviceLost());
803 return;
804 }
805 SkASSERT(fCurrentPipelineState);
806 this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
807 fGpu->stats()->incNumDraws();
808 fCurrentCBIsEmpty = false;
809 }
810
onDrawIndexedInstanced(int indexCount,int baseIndex,int instanceCount,int baseInstance,int baseVertex)811 void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
812 int baseInstance, int baseVertex) {
813 if (!fCurrentRenderPass) {
814 SkASSERT(fGpu->isDeviceLost());
815 return;
816 }
817 SkASSERT(fCurrentPipelineState);
818 this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
819 baseIndex, baseVertex, baseInstance);
820 fGpu->stats()->incNumDraws();
821 fCurrentCBIsEmpty = false;
822 }
823
onDrawIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)824 void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
825 int drawCount) {
826 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
827 if (!fCurrentRenderPass) {
828 SkASSERT(fGpu->isDeviceLost());
829 return;
830 }
831 const GrVkCaps& caps = fGpu->vkCaps();
832 SkASSERT(caps.nativeDrawIndirectSupport());
833 SkASSERT(fCurrentPipelineState);
834
835 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
836 uint32_t remainingDraws = drawCount;
837 const size_t stride = sizeof(GrDrawIndirectCommand);
838 while (remainingDraws >= 1) {
839 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
840 this->currentCommandBuffer()->drawIndirect(
841 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
842 remainingDraws -= currDrawCount;
843 offset += stride * currDrawCount;
844 fGpu->stats()->incNumDraws();
845 }
846 fCurrentCBIsEmpty = false;
847 }
848
onDrawIndexedIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)849 void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
850 int drawCount) {
851 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
852 if (!fCurrentRenderPass) {
853 SkASSERT(fGpu->isDeviceLost());
854 return;
855 }
856 const GrVkCaps& caps = fGpu->vkCaps();
857 SkASSERT(caps.nativeDrawIndirectSupport());
858 SkASSERT(fCurrentPipelineState);
859 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
860 uint32_t remainingDraws = drawCount;
861 const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
862 while (remainingDraws >= 1) {
863 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
864 this->currentCommandBuffer()->drawIndexedIndirect(
865 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
866 remainingDraws -= currDrawCount;
867 offset += stride * currDrawCount;
868 fGpu->stats()->incNumDraws();
869 }
870 fCurrentCBIsEmpty = false;
871 }
872
873 ////////////////////////////////////////////////////////////////////////////////
874
onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable)875 void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
876 if (!fCurrentRenderPass) {
877 SkASSERT(fGpu->isDeviceLost());
878 return;
879 }
880
881 VkRect2D bounds;
882 bounds.offset = { 0, 0 };
883 bounds.extent = { 0, 0 };
884
885 if (!fCurrentSecondaryCommandBuffer) {
886 fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
887 this->addAdditionalRenderPass(true);
888 // We may have failed to start a new render pass
889 if (!fCurrentRenderPass) {
890 SkASSERT(fGpu->isDeviceLost());
891 return;
892 }
893 }
894 SkASSERT(fCurrentSecondaryCommandBuffer);
895
896 GrVkDrawableInfo vkInfo;
897 vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
898 vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
899 SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
900 vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
901 vkInfo.fDrawBounds = &bounds;
902 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
903 vkInfo.fFromSwapchainOrAndroidWindow =
904 fFramebuffer->colorAttachment()->vkImageInfo().fPartOfSwapchainOrAndroidWindow;
905 #endif //SK_BUILD_FOR_ANDROID_FRAMEWORK
906
907 GrBackendDrawableInfo info(vkInfo);
908
909 // After we draw into the command buffer via the drawable, cached state we have may be invalid.
910 this->currentCommandBuffer()->invalidateState();
911 // Also assume that the drawable produced output.
912 fCurrentCBIsEmpty = false;
913
914 drawable->draw(info);
915 fGpu->addDrawable(std::move(drawable));
916 }
917