1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/GrVkOpsRenderPass.h"
9
10 #include "include/core/SkDrawable.h"
11 #include "include/core/SkRect.h"
12 #include "include/gpu/GrBackendDrawableInfo.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "src/gpu/GrBackendUtils.h"
15 #include "src/gpu/GrDirectContextPriv.h"
16 #include "src/gpu/GrOpFlushState.h"
17 #include "src/gpu/GrPipeline.h"
18 #include "src/gpu/GrRenderTarget.h"
19 #include "src/gpu/vk/GrVkAttachment.h"
20 #include "src/gpu/vk/GrVkBuffer.h"
21 #include "src/gpu/vk/GrVkCommandBuffer.h"
22 #include "src/gpu/vk/GrVkCommandPool.h"
23 #include "src/gpu/vk/GrVkFramebuffer.h"
24 #include "src/gpu/vk/GrVkGpu.h"
25 #include "src/gpu/vk/GrVkPipeline.h"
26 #include "src/gpu/vk/GrVkRenderPass.h"
27 #include "src/gpu/vk/GrVkRenderTarget.h"
28 #include "src/gpu/vk/GrVkResourceProvider.h"
29 #include "src/gpu/vk/GrVkSemaphore.h"
30 #include "src/gpu/vk/GrVkTexture.h"
31
32 /////////////////////////////////////////////////////////////////////////////
33
get_vk_load_store_ops(GrLoadOp loadOpIn,GrStoreOp storeOpIn,VkAttachmentLoadOp * loadOp,VkAttachmentStoreOp * storeOp)34 void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
35 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
36 switch (loadOpIn) {
37 case GrLoadOp::kLoad:
38 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
39 break;
40 case GrLoadOp::kClear:
41 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
42 break;
43 case GrLoadOp::kDiscard:
44 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
45 break;
46 default:
47 SK_ABORT("Invalid LoadOp");
48 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
49 }
50
51 switch (storeOpIn) {
52 case GrStoreOp::kStore:
53 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
54 break;
55 case GrStoreOp::kDiscard:
56 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
57 break;
58 default:
59 SK_ABORT("Invalid StoreOp");
60 *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
61 }
62 }
63
// A GrVkOpsRenderPass is permanently tied to the GrVkGpu that created it.
GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}
65
// Transitions the framebuffer's attachments into the image layouts the
// upcoming render pass expects: color (GENERAL or COLOR_ATTACHMENT_OPTIMAL),
// the resolve attachment (if present), and the stencil attachment (if present).
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkAttachment* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // The initial load subpass reads the resolve attachment as an input
            // attachment, so it must be in SHADER_READ_ONLY layout.
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}
127
// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
adjust_bounds_to_granularity(SkIRect * dstBounds,const SkIRect & srcBounds,const VkExtent2D & granularity,int maxWidth,int maxHeight)131 void adjust_bounds_to_granularity(SkIRect* dstBounds,
132 const SkIRect& srcBounds,
133 const VkExtent2D& granularity,
134 int maxWidth,
135 int maxHeight) {
136 // Adjust Width
137 if ((0 != granularity.width && 1 != granularity.width)) {
138 // Start with the right side of rect so we know if we end up going pass the maxWidth.
139 int rightAdj = srcBounds.fRight % granularity.width;
140 if (rightAdj != 0) {
141 rightAdj = granularity.width - rightAdj;
142 }
143 dstBounds->fRight = srcBounds.fRight + rightAdj;
144 if (dstBounds->fRight > maxWidth) {
145 dstBounds->fRight = maxWidth;
146 dstBounds->fLeft = 0;
147 } else {
148 dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
149 }
150 } else {
151 dstBounds->fLeft = srcBounds.fLeft;
152 dstBounds->fRight = srcBounds.fRight;
153 }
154
155 // Adjust height
156 if ((0 != granularity.height && 1 != granularity.height)) {
157 // Start with the bottom side of rect so we know if we end up going pass the maxHeight.
158 int bottomAdj = srcBounds.fBottom % granularity.height;
159 if (bottomAdj != 0) {
160 bottomAdj = granularity.height - bottomAdj;
161 }
162 dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
163 if (dstBounds->fBottom > maxHeight) {
164 dstBounds->fBottom = maxHeight;
165 dstBounds->fTop = 0;
166 } else {
167 dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
168 }
169 } else {
170 dstBounds->fTop = srcBounds.fTop;
171 dstBounds->fBottom = srcBounds.fBottom;
172 }
173 }
174
// Begins fCurrentRenderPass on the gpu, using a render area adjusted to the
// device's render-area granularity. On failure, ends any secondary command
// buffer, clears fCurrentRenderPass, and returns false.
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    // When loading from the resolve attachment, the first subpass is the load
    // subpass and is recorded inline; otherwise the first subpass may execute
    // our secondary command buffer (if we have one).
    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    // Per caps, some devices must load the entire image when using discardable
    // MSAA with a resolve attachment.
    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    // Convert the bounds from Ganesh's origin convention into Vulkan's
    // top-left native space.
    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        // Failure: close out the secondary command buffer and mark this render
        // pass object unusable so later ops become no-ops.
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}
221
// Translates the load/store info for each attachment into Vulkan ops, looks up
// a matching GrVkRenderPass from the resource provider, optionally creates a
// secondary command buffer (per caps), and begins the pass. Returns false on
// any failure, leaving fCurrentRenderPass null.
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        // Record pass contents into a secondary command buffer when caps say so.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}
265
initWrapped()266 bool GrVkOpsRenderPass::initWrapped() {
267 SkASSERT(fFramebuffer->isExternal());
268 fCurrentRenderPass = fFramebuffer->externalRenderPass();
269 SkASSERT(fCurrentRenderPass);
270 fCurrentRenderPass->ref();
271
272 fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
273 if (!fCurrentSecondaryCommandBuffer) {
274 return false;
275 }
276 return true;
277 }
278
// Releases any render pass / secondary command buffer state still held.
GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}
282
// Returns the owning gpu as the base GrGpu type.
GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }
284
currentCommandBuffer()285 GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
286 if (fCurrentSecondaryCommandBuffer) {
287 return fCurrentSecondaryCommandBuffer.get();
288 }
289 // We checked this when we setup the GrVkOpsRenderPass and it should not have changed while we
290 // are still using this object.
291 SkASSERT(fGpu->currentCommandBuffer());
292 return fGpu->currentCommandBuffer();
293 }
294
// Executes the load-from-resolve subpass: draws the resolve attachment's
// contents into the MSAA color attachment, then advances to the main subpass.
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    // NOTE(review): "nexSubpass" looks like a typo of "nextSubpass" in the
    // GrVkCommandBuffer API -- confirm against that header before renaming.
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
309
// Finalizes this render pass: submits our secondary command buffer (if any)
// into the primary and ends the render pass on the gpu. No-ops if set() never
// succeeded (no render target) or the render pass was dropped (device lost).
void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass the ownership of the GrVkSecondaryCommandBuffer to the external framebuffer
        // since its lifetime matches the lifetime we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}
334
// One-time setup of this render pass object: validates the gpu's command
// buffer, moves sampled textures into SHADER_READ_ONLY layout, records the
// framebuffer/bounds/flags, then initializes either the wrapped external state
// or a new internal render pass. Returns false on failure.
bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check to make sure the GrVkGpu has a valid current command buffer instead of each time we
    // access it. If the command buffer is valid here should be valid throughout the use of the
    // render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    // Transition every instantiated sampled texture into the layout the
    // shaders will read it in, before the render pass begins.
    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkAttachment* texture = vkTex->textureAttachment();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    // The draw bounds must lie within the color attachment (or be empty).
    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}
390
reset()391 void GrVkOpsRenderPass::reset() {
392 if (fCurrentSecondaryCommandBuffer) {
393 // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
394 // secondary command buffer from since we haven't submitted any work yet.
395 SkASSERT(fGpu->cmdPool());
396 fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
397 }
398 if (fCurrentRenderPass) {
399 fCurrentRenderPass->unref();
400 fCurrentRenderPass = nullptr;
401 }
402 fCurrentCBIsEmpty = true;
403
404 fRenderTarget = nullptr;
405 fFramebuffer.reset();
406
407 fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
408
409 fLoadFromResolve = LoadFromResolve::kNo;
410 fOverridePipelinesForResolveLoad = false;
411
412 #ifdef SK_DEBUG
413 fIsActive = false;
414 #endif
415 }
416
// True when the framebuffer (and thus the render pass and secondary command
// buffer) was imported from outside of Ganesh.
bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}
420
421 ////////////////////////////////////////////////////////////////////////////////
422
onClearStencilClip(const GrScissorState & scissor,bool insideStencilMask)423 void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
424 if (!fCurrentRenderPass) {
425 SkASSERT(fGpu->isDeviceLost());
426 return;
427 }
428
429 GrAttachment* sb = fFramebuffer->stencilAttachment();
430 // this should only be called internally when we know we have a
431 // stencil buffer.
432 SkASSERT(sb);
433 int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
434
435 // The contract with the callers does not guarantee that we preserve all bits in the stencil
436 // during this clear. Thus we will clear the entire stencil to the desired value.
437
438 VkClearDepthStencilValue vkStencilColor;
439 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
440 if (insideStencilMask) {
441 vkStencilColor.stencil = (1 << (stencilBitCount - 1));
442 } else {
443 vkStencilColor.stencil = 0;
444 }
445
446 VkClearRect clearRect;
447 // Flip rect if necessary
448 SkIRect vkRect;
449 if (!scissor.enabled()) {
450 vkRect.setXYWH(0, 0, sb->width(), sb->height());
451 } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
452 vkRect = scissor.rect();
453 } else {
454 vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
455 scissor.rect().fRight, sb->height() - scissor.rect().fTop);
456 }
457
458 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
459 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
460
461 clearRect.baseArrayLayer = 0;
462 clearRect.layerCount = 1;
463
464 uint32_t stencilIndex;
465 SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));
466
467 VkClearAttachment attachment;
468 attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
469 attachment.colorAttachment = 0; // this value shouldn't matter
470 attachment.clearValue.depthStencil = vkStencilColor;
471
472 this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
473 fCurrentCBIsEmpty = false;
474 }
475
onClear(const GrScissorState & scissor,std::array<float,4> color)476 void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
477 if (!fCurrentRenderPass) {
478 SkASSERT(fGpu->isDeviceLost());
479 return;
480 }
481
482 VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};
483
484 // If we end up in a situation where we are calling clear without a scissior then in general it
485 // means we missed an opportunity higher up the stack to set the load op to be a clear. However,
486 // there are situations where higher up we couldn't discard the previous ops and set a clear
487 // load op (e.g. if we needed to execute a wait op). Thus we also have the empty check here.
488 // TODO: Make the waitOp a RenderTask instead so we can clear out the GrOpsTask for a clear. We
489 // can then reenable this assert assuming we can't get messed up by a waitOp.
490 //SkASSERT(!fCurrentCBIsEmpty || scissor);
491
492 auto dimensions = fFramebuffer->colorAttachment()->dimensions();
493 // We always do a sub rect clear with clearAttachments since we are inside a render pass
494 VkClearRect clearRect;
495 // Flip rect if necessary
496 SkIRect vkRect;
497 if (!scissor.enabled()) {
498 vkRect.setSize(dimensions);
499 } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
500 vkRect = scissor.rect();
501 } else {
502 vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
503 scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
504 }
505 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
506 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
507 clearRect.baseArrayLayer = 0;
508 clearRect.layerCount = 1;
509
510 uint32_t colorIndex;
511 SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));
512
513 VkClearAttachment attachment;
514 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
515 attachment.colorAttachment = colorIndex;
516 attachment.clearValue.color = vkColor;
517
518 this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
519 fCurrentCBIsEmpty = false;
520 return;
521 }
522
523 ////////////////////////////////////////////////////////////////////////////////
524
// Starts a brand-new render pass on the same target after the original pass
// was broken up (e.g. for an inline upload or an external drawable). All
// attachments use LOAD/STORE so earlier results are preserved; when a resolve
// attachment exists, the MSAA contents must be reloaded from it. On failure
// fCurrentRenderPass is left null and subsequent ops become no-ops.
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        // The MSAA color contents come from the load-from-resolve subpass, so
        // the color attachment's own load/store ops don't matter.
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    // Re-fetch the framebuffer since loadFromResolve may differ from the value
    // the current framebuffer was created with.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    // Every attachment loads its previous contents, so the clear color is unused.
    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}
591
// Performs a deferred texture upload in the middle of this ops render pass.
// Copies cannot be recorded while a render pass is active, so we end the
// current pass, run the upload, and start a replacement pass that loads the
// results of the first one.
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}
609
610 ////////////////////////////////////////////////////////////////////////////////
611
// Ends recording on our secondary command buffer, if we own one. A wrapped
// (client-provided) secondary command buffer is not ended by us.
void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}
617
// Finds (or creates) a pipeline state compatible with the current render pass,
// binds it, then sets uniforms and dynamic scissor/viewport/blend-constant
// state. Returns false if pipeline creation or uniform binding fails.
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    // Cache the draw bounds (clamped to the render target bounds); later
    // scissor updates are clipped against these.
    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();

    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}
667
onSetScissorRect(const SkIRect & scissor)668 void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
669 SkIRect combinedScissorRect;
670 if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
671 combinedScissorRect = SkIRect::MakeEmpty();
672 }
673 GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
674 fFramebuffer->colorAttachment()->dimensions(),
675 fOrigin, combinedScissorRect);
676 }
677
678 #ifdef SK_DEBUG
check_sampled_texture(GrTexture * tex,GrAttachment * colorAttachment,GrVkGpu * gpu)679 void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
680 SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
681 auto vkTex = static_cast<GrVkTexture*>(tex)->textureAttachment();
682 SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
683 }
684 #endif
685
// Binds all textures sampled by the geometry processor and pipeline, plus the
// color attachment as an input attachment when self-dependent blending needs
// it. Returns false if any descriptor set binding fails.
bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    // Verify every sampled texture is already in SHADER_READ_ONLY layout
    // (set() transitions them before the pass begins).
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}
717
onBindBuffers(sk_sp<const GrBuffer> indexBuffer,sk_sp<const GrBuffer> instanceBuffer,sk_sp<const GrBuffer> vertexBuffer,GrPrimitiveRestart primRestart)718 void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
719 sk_sp<const GrBuffer> instanceBuffer,
720 sk_sp<const GrBuffer> vertexBuffer,
721 GrPrimitiveRestart primRestart) {
722 SkASSERT(GrPrimitiveRestart::kNo == primRestart);
723 if (!fCurrentRenderPass) {
724 SkASSERT(fGpu->isDeviceLost());
725 return;
726 }
727 SkASSERT(fCurrentPipelineState);
728 SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.
729
730 GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
731 SkASSERT(currCmdBuf);
732
733 // There is no need to put any memory barriers to make sure host writes have finished here.
734 // When a command buffer is submitted to a queue, there is an implicit memory barrier that
735 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
736 // an active RenderPass.
737
738 // Here our vertex and instance inputs need to match the same 0-based bindings they were
739 // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
740 uint32_t binding = 0;
741 if (auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get())) {
742 SkASSERT(!gpuVertexBuffer->isCpuBuffer());
743 SkASSERT(!gpuVertexBuffer->isMapped());
744 currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
745 }
746 if (auto* gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer.get())) {
747 SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
748 SkASSERT(!gpuInstanceBuffer->isMapped());
749 currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
750 }
751 if (auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get())) {
752 SkASSERT(!gpuIndexBuffer->isCpuBuffer());
753 SkASSERT(!gpuIndexBuffer->isMapped());
754 currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
755 }
756 }
757
// Records an instanced (non-indexed) draw using the currently bound pipeline
// state and input buffers.
void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}
770
// Records an instanced indexed draw using the currently bound pipeline state,
// index buffer, and input buffers.
void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}
783
onDrawIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)784 void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
785 int drawCount) {
786 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
787 if (!fCurrentRenderPass) {
788 SkASSERT(fGpu->isDeviceLost());
789 return;
790 }
791 const GrVkCaps& caps = fGpu->vkCaps();
792 SkASSERT(caps.nativeDrawIndirectSupport());
793 SkASSERT(fCurrentPipelineState);
794
795 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
796 uint32_t remainingDraws = drawCount;
797 const size_t stride = sizeof(GrDrawIndirectCommand);
798 while (remainingDraws >= 1) {
799 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
800 this->currentCommandBuffer()->drawIndirect(
801 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
802 remainingDraws -= currDrawCount;
803 offset += stride * currDrawCount;
804 fGpu->stats()->incNumDraws();
805 }
806 fCurrentCBIsEmpty = false;
807 }
808
onDrawIndexedIndirect(const GrBuffer * drawIndirectBuffer,size_t offset,int drawCount)809 void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
810 int drawCount) {
811 SkASSERT(!drawIndirectBuffer->isCpuBuffer());
812 if (!fCurrentRenderPass) {
813 SkASSERT(fGpu->isDeviceLost());
814 return;
815 }
816 const GrVkCaps& caps = fGpu->vkCaps();
817 SkASSERT(caps.nativeDrawIndirectSupport());
818 SkASSERT(fCurrentPipelineState);
819 const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
820 uint32_t remainingDraws = drawCount;
821 const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
822 while (remainingDraws >= 1) {
823 uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
824 this->currentCommandBuffer()->drawIndexedIndirect(
825 fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
826 remainingDraws -= currDrawCount;
827 offset += stride * currDrawCount;
828 fGpu->stats()->incNumDraws();
829 }
830 fCurrentCBIsEmpty = false;
831 }
832
833 ////////////////////////////////////////////////////////////////////////////////
834
// Hands the render pass to a client-supplied drawable, which records its own
// commands into our secondary command buffer via the GrVkDrawableInfo we fill
// out. Any cached command buffer state is invalidated afterwards.
void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        // The drawable needs a secondary command buffer to record into, so end
        // the current pass and restart one that is forced to use a secondary CB.
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = fFramebuffer->colorAttachment()->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}
878