/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/effects/GrTextureEffect.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

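// Translates Ganesh's backend-agnostic load and store ops into the Vulkan attachment ops used
// when looking up a GrVkRenderPass. Note that GrLoadOp::kDiscard maps to
// VK_ATTACHMENT_LOAD_OP_DONT_CARE, which tells the driver the attachment's previous contents
// need not be preserved.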
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

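// Transitions each of the framebuffer's attachments into the image layout the render pass
// expects. When the color attachment will also be read as an input attachment (a self-dependent
// pass used for blending), it must be in VK_IMAGE_LAYOUT_GENERAL rather than
// COLOR_ATTACHMENT_OPTIMAL.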
void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change the layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could drop
        // the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment, we also need to update its layout.
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
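// For example, with a 64x64 granularity and a 1000x500 attachment, srcBounds of
// {L=70, T=20, R=100, B=400} expands to {L=64, T=0, R=128, B=448}: each edge is pushed out to
// the next multiple of 64, clamped to the attachment's size.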
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust width
    if (0 != granularity.width && 1 != granularity.width) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if (0 != granularity.height && 1 != granularity.height) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

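// Sets the attachment layouts, aligns the render area to the render pass granularity, and begins
// the render pass on the GPU. If this pass loads from the resolve attachment, the resolve-to-MSAA
// copy is recorded as the first subpass before we return.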
bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
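    // (This granularity ultimately comes from Vulkan's vkGetRenderAreaGranularity query for the
    // render pass.)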
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass, and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

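// When a render pass loads from the resolve attachment, it begins with an extra subpass that
// draws the resolve data into the MSAA color attachment (reading it as an input attachment).
// This records that load and then advances the command buffer to the main subpass.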
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer,
        // since its lifetime matches how long we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

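// A GrVkOpsRenderPass object is reused across flushes; set() configures it for a single render
// pass. It records the target and bounds, moves any sampled textures to
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL up front, and then either wraps an externally
// provided secondary command buffer (initWrapped) or finds and begins a render pass (init).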
bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check once here that the GrVkGpu has a valid current command buffer instead of checking
    // on every access. If the command buffer is valid now, it should remain valid for the
    // duration of this render pass, since nothing should trigger a submit while the render pass
    // is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from, since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
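    // Ganesh uses the top bit of the stencil buffer for the clip; e.g. with an 8-bit stencil,
    // "inside the clip" clears to 0x80 (1 << 7) and "outside" clears to 0.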
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general
    // it means we missed an opportunity higher up the stack to set the load op to be a clear.
    // However, there are situations where higher up we couldn't discard the previous ops and set
    // a clear load op (e.g. if we needed to execute a wait op). Thus we also have the empty check
    // here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then re-enable this assert, assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub-rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

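// Inline uploads and external drawables force us to end the current render pass mid-flush.
// This starts a follow-on render pass over the same target that loads what the previous pass
// stored, so later ops still see the earlier ops' results.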
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the layout of the
    // upload's textures back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy), they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // Bind the color attachment as an input attachment so the shader can read the
        // destination color for blending.
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart()); // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to insert any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
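    // For example, a draw that uses both streams binds the vertex buffer at binding 0 and the
    // instance buffer at binding 1; the index buffer is bound separately and has no binding slot.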
    uint32_t binding = 0;
    if (auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get())) {
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (auto* gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer.get())) {
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get())) {
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
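    // Vulkan's maxDrawIndirectCount limit caps how many commands a single indirect draw may
    // consume (it is 1 when the multiDrawIndirect feature is unsupported), so issue the draws
    // in chunks of at most maxDrawCount.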
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };
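    // The draw bounds start out empty; vkInfo.fDrawBounds below points at them so the client's
    // drawable can report back the area it actually renders.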

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass.
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = fFramebuffer->colorAttachment()->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif  // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}