/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

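// Maps the Ganesh load/store ops for an attachment onto their Vulkan equivalents.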
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                             const SkPMColor4f& clearColor) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
                          &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
                          &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;

    // Change the layout of our render target so it can be used as the color attachment.
    // TODO: If we know that we will never be blending or loading the attachment we could drop the
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
    targetImage->setImageLayout(fGpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                false);

    // If we are using a stencil attachment we also need to update its layout.
    if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        // We need both the write and read access bits since we may load and store the stencil.
        // The initial load happens in VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }

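    // The resource provider caches render passes. If this target already carries a valid
    // compatible-render-pass handle we can look the pass up by handle; otherwise we look it up
    // keyed off the render target itself.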
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    if (!fCurrentRenderPass) {
        return false;
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = clearColor[0];
    vkClearColor.color.float32[1] = clearColor[1];
    vkClearColor.color.float32[2] = clearColor[2];
    vkClearColor.color.float32[3] = clearColor[3];

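    // Some devices are faster when we record straight into the primary command buffer (see
    // GrVkCaps); on the rest we record this pass into a secondary command buffer that is then
    // executed inside the render pass.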
    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->getFramebuffer(), fCurrentRenderPass);
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
                               SkToBool(fCurrentSecondaryCommandBuffer))) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }
    return true;
}

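// A wrapped render target draws into a render pass and secondary command buffer supplied by an
// external client, so instead of creating our own we adopt those here.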
bool GrVkOpsRenderPass::initWrapped() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    SkASSERT(vkRT->wrapsSecondaryCommandBuffer());
    fCurrentRenderPass = vkRT->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer.reset(
            GrVkSecondaryCommandBuffer::Create(vkRT->getExternalSecondaryCommandBuffer()));
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    fCurrentSecondaryCommandBuffer->begin(fGpu, nullptr, fCurrentRenderPass);
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

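// While a secondary command buffer is active all commands are recorded into it; otherwise they go
// directly into the GPU's primary command buffer.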
GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    return fGpu->currentCommandBuffer();
}

void GrVkOpsRenderPass::end() {
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the special wrapped
        // GrVkRenderTarget since its lifetime matches how long we need to keep the
        // GrVkResources on the GrVkSecondaryCommandBuffer alive.
        static_cast<GrVkRenderTarget*>(fRenderTarget)->addWrappedGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

bool GrVkOpsRenderPass::set(GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    this->INHERITED::set(rt, origin);

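    // Transition every instantiated texture this pass will sample to
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL up front; layout transitions need barriers that we
    // cannot issue once the render pass has begun.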
    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            vkTex->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(bounds.isEmpty() || SkIRect::MakeWH(rt->width(), rt->height()).contains(bounds));
    fBounds = bounds;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, stencilInfo, colorInfo.fClearColor);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref(fGpu);
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    return vkRT->wrapsSecondaryCommandBuffer();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
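    // Ganesh reserves the most significant stencil bit for the clip, so clearing to "inside the
    // stencil mask" means setting just that bit.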
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip the rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // The parent class should never let us get here with window rectangles.
    SkASSERT(!clip.hasWindowRectangles());

    VkClearColorValue vkColor = {{color.fR, color.fG, color.fB, color.fA}};

    // If we end up in a situation where we are calling clear without a scissor, then in general it
    // means we missed an opportunity higher up the stack to set the load op to be a clear.
    // However, there are situations where higher up we couldn't discard the previous ops and set a
    // clear load op (e.g. if we needed to execute a wait op). Thus we also have the empty check
    // here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the GrOpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || clip.scissorEnabled());

    // We always do a sub rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip the rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

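    // This continuation pass must not wipe out what earlier passes already rendered, so both
    // attachments load their existing contents and store the results.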
    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref(fGpu);
    if (rpHandle.isValid()) {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    if (!fCurrentRenderPass) {
        return;
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->getFramebuffer(), fCurrentRenderPass);
    }

    // We use the same fBounds as the whole GrVkOpsRenderPass since we have no way of tracking the
    // bounds in GrOpsTask for parts before and after inline uploads separately.
    if (!fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
                               SkToBool(fCurrentSecondaryCommandBuffer))) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
    }
}

void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
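    // Vulkan disallows transfer (copy) commands inside a render pass, so end the current pass,
    // perform the upload, and then begin a new pass that loads what has been rendered so far.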
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that, after the upload, we need to set the upload textures'
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::bindGeometry(const GrGpuBuffer* indexBuffer,
                                     const GrGpuBuffer* vertexBuffer,
                                     const GrGpuBuffer* instanceBuffer) {
    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    // There is no need to put any memory barriers here to make sure host writes have finished.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;

    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(vertexBuffer));
    }

    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(instanceBuffer));
    }
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isMapped());

        currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
    }
}

#ifdef SK_DEBUG
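// Debug-only validation that every texture we are about to sample is already in
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL (see the transitions done in set()), and that
// protected content is only sampled into a protected render target on a protected context.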
void check_sampled_texture(GrTexture* tex, GrRenderTarget* rt, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (rt->isProtected() && gpu->protectedContext()));
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

void check_sampled_textures(const GrProgramInfo& programInfo, GrRenderTarget* rt, GrVkGpu* gpu) {
    if (programInfo.hasDynamicPrimProcTextures()) {
        for (int m = 0; m < programInfo.numDynamicStateArrays(); ++m) {
            auto dynamicPrimProcTextures = programInfo.dynamicPrimProcTextures(m);

            for (int s = 0; s < programInfo.primProc().numTextureSamplers(); ++s) {
                auto texture = dynamicPrimProcTextures[s]->peekTexture();
                check_sampled_texture(texture, rt, gpu);
            }
        }
    } else if (programInfo.hasFixedPrimProcTextures()) {
        auto fixedPrimProcTextures = programInfo.fixedPrimProcTextures();

        for (int s = 0; s < programInfo.primProc().numTextureSamplers(); ++s) {
            auto texture = fixedPrimProcTextures[s]->peekTexture();
            check_sampled_texture(texture, rt, gpu);
        }
    }

    GrFragmentProcessor::PipelineTextureSamplerRange textureSamplerRange(programInfo.pipeline());
    for (auto [sampler, fp] : textureSamplerRange) {
        check_sampled_texture(sampler.peekTexture(), rt, gpu);
    }
    if (GrTexture* dstTexture = programInfo.pipeline().peekDstTexture()) {
        check_sampled_texture(dstTexture, rt, gpu);
    }
}
#endif

bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

#ifdef SK_DEBUG
    check_sampled_textures(programInfo, fRenderTarget, fGpu);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);
#endif

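    // Intersect the draw's bounds with the render pass bounds. This becomes the dynamic scissor
    // whenever the bound pipeline does not carry a scissor of its own.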
    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();

    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB)) {
        return false;
    }

    // Check whether we need to bind textures between each GrMesh. If not we can bind them all now.
    if (!programInfo.hasDynamicPrimProcTextures()) {
        auto proxies = programInfo.hasFixedPrimProcTextures() ? programInfo.fixedPrimProcTextures()
                                                              : nullptr;
        if (!fCurrentPipelineState->setAndBindTextures(
                fGpu, programInfo.primProc(), programInfo.pipeline(), proxies, currentCB)) {
            return false;
        }
    }

    if (!programInfo.pipeline().isScissorEnabled()) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, currentCB, fRenderTarget, fOrigin,
                                                 fCurrentPipelineBounds);
    } else if (!programInfo.hasDynamicScissors()) {
        SkASSERT(programInfo.hasFixedScissor());

        SkIRect combinedScissorRect;
        if (!combinedScissorRect.intersect(fCurrentPipelineBounds, programInfo.fixedScissor())) {
            combinedScissorRect = SkIRect::MakeEmpty();
        }
        GrVkPipeline::SetDynamicScissorRectState(fGpu, currentCB, fRenderTarget, fOrigin,
                                                 combinedScissorRect);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, fRenderTarget);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().outputSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onDrawMeshes(const GrProgramInfo& programInfo, const GrMesh meshes[],
                                     int meshCount) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    SkASSERT(fCurrentPipelineState);
    SkASSERT(meshCount); // guaranteed by GrOpsRenderPass::draw

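    // Scissor rects and primitive-processor textures are the only state that can vary per mesh;
    // everything else was already bound in onBindPipeline.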
    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];

        if (programInfo.hasDynamicScissors()) {
            SkIRect combinedScissorRect;
            if (!combinedScissorRect.intersect(fCurrentPipelineBounds,
                                               programInfo.dynamicScissor(i))) {
                combinedScissorRect = SkIRect::MakeEmpty();
            }
            GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                                     fRenderTarget, fOrigin, combinedScissorRect);
        }
        if (programInfo.hasDynamicPrimProcTextures()) {
            auto meshProxies = programInfo.dynamicPrimProcTextures(i);
            if (!fCurrentPipelineState->setAndBindTextures(fGpu, programInfo.primProc(),
                                                           programInfo.pipeline(), meshProxies,
                                                           this->currentCommandBuffer())) {
                if (fGpu->isDeviceLost()) {
                    return;
                } else {
                    continue;
                }
            }
        }
        mesh.sendToGpu(programInfo.primitiveType(), this);
    }

    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::sendInstancedMeshToGpu(GrPrimitiveType, const GrMesh& mesh, int vertexCount,
                                               int baseVertex, int instanceCount,
                                               int baseInstance) {
    SkASSERT(!mesh.vertexBuffer() || !mesh.vertexBuffer()->isCpuBuffer());
    SkASSERT(!mesh.instanceBuffer() || !mesh.instanceBuffer()->isCpuBuffer());
    auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(mesh.vertexBuffer());
    auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(mesh.instanceBuffer());
    this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

void GrVkOpsRenderPass::sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrMesh& mesh,
                                                      int indexCount, int baseIndex, int baseVertex,
                                                      int instanceCount, int baseInstance) {
    SkASSERT(mesh.primitiveRestart() == GrPrimitiveRestart::kNo);
    SkASSERT(!mesh.vertexBuffer() || !mesh.vertexBuffer()->isCpuBuffer());
    SkASSERT(!mesh.instanceBuffer() || !mesh.instanceBuffer()->isCpuBuffer());
    SkASSERT(!mesh.indexBuffer()->isCpuBuffer());
    auto gpuIndexBuffer = static_cast<const GrGpuBuffer*>(mesh.indexBuffer());
    auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(mesh.vertexBuffer());
    auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(mesh.instanceBuffer());
    this->bindGeometry(gpuIndexBuffer, gpuVertexBuffer, gpuInstanceBuffer);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(fRenderTarget);

    GrVkImage* targetImage = target->msaaImage() ? target->msaaImage() : target;

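    // Start with empty draw bounds; the external drawable is expected to fill these in (via
    // vkInfo.fDrawBounds below) with the region it actually renders to.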
    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

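    // The drawable records its commands into our secondary command buffer, so if this pass has
    // been recording directly into the primary command buffer we must restart it with a
    // secondary one.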
    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass.
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = targetImage->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = targetImage->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif  // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, any cached state we have may be
    // invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}