/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/effects/GrTextureEffect.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

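// Translates Skia's API-neutral GrLoadOp/GrStoreOp pair into the Vulkan attachment
// load/store ops used when building a render pass.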
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

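// Transitions the color, resolve, and stencil attachments into the image layouts the upcoming
// render pass expects, issuing the necessary barriers via setImageLayout.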
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // We need input access to do the shader read and color read access to do the
            // attachment load.
            VkAccessFlags dstAccess =
                    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
            VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              dstAccess,
                                              dstStages,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout.
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements hold for the y and height components.
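// For example, with a 64x64 granularity, a 512x512 attachment, and srcBounds of
// LTRB (10, 20, 100, 200): the right edge rounds up to 128 and the left edge snaps down to 0;
// the bottom rounds up to 256 and the top snaps down to 0, giving dstBounds of (0, 0, 128, 256).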
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust Width
    if (0 != granularity.width && 1 != granularity.width) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if (0 != granularity.height && 1 != granularity.height) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

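// Sets the attachment layouts, computes granularity-aligned render area bounds, and begins the
// render pass on the GPU's command buffer. Returns false (and drops the render pass) if the
// pass could not be started.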
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

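// Translates the color/resolve/stencil load-store infos into Vulkan ops, finds a matching
// render pass, optionally opens a secondary command buffer, and begins the render pass with
// the requested clear color.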
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

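// Records the initial subpass that loads the resolve attachment's contents into the MSAA color
// attachment, advances to the main subpass, and updates our layout tracking for the resolve
// attachment.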
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

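// Submits any recorded secondary command buffer and ends the render pass on the primary command
// buffer. For wrapped (external) secondary command buffers, ownership is returned to the
// external framebuffer instead of submitting.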
void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer
        // since its lifetime matches how long we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

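// One-time setup for a pass over the given render target: validates the current command buffer,
// transitions any sampled textures to SHADER_READ_ONLY_OPTIMAL, records the framebuffer and
// bounds, then defers to initWrapped() for external framebuffers or init() otherwise.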
bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check that the GrVkGpu has a valid current command buffer here instead of each time we
    // access it. If the command buffer is valid here, it should remain valid for the duration of
    // the render pass since nothing should trigger a submit while this render pass is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

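// Releases all per-pass state so the object can be reused: recycles any secondary command
// buffer, unrefs the render pass, and clears the framebuffer, bounds, and resolve/dependency
// flags.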
void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general
    // it means we missed an opportunity higher up the stack to set the load op to be a clear.
    // However, there are situations where higher up we couldn't discard the previous ops and set
    // a clear load op (e.g. if we needed to execute a wait op). Thus we also have the empty check
    // here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub rect clear with clearAttachments since we are inside a render pass
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

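// Begins a follow-up render pass over the same target after the original pass was interrupted
// (e.g. by an inline upload or an external drawable). The new pass must load the attachments
// that the interrupted pass already wrote.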
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

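// Vulkan does not allow transfer commands inside a render pass, so we end the current pass,
// perform the upload on the primary command buffer, and then begin a continuation render pass.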
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures'
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment so the shader can read the
        // destination color when blending.
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get())) {
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (auto* gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer.get())) {
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get())) {
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

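    // The device caps bound how many draws a single vkCmdDrawIndirect call may consume (without
    // Vulkan's multiDrawIndirect feature, drawCount must be 1), so we issue the draws in chunks
    // of at most maxDrawCount. onDrawIndexedIndirect below chunks the same way.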
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

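// Hands a secondary command buffer to an external SkDrawable so the client can record its own
// Vulkan commands into the current render pass. Cached pipeline state is invalidated afterwards
// since the drawable may have changed it.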
void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fFromSwapchainOrAndroidWindow =
            fFramebuffer->colorAttachment()->vkImageInfo().fPartOfSwapchainOrAndroidWindow;
#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}