/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpuCommandBuffer.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

GrVkPrimaryCommandBufferTask::~GrVkPrimaryCommandBufferTask() = default;
GrVkPrimaryCommandBufferTask::GrVkPrimaryCommandBufferTask() = default;

namespace {

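// These tasks record work (inline uploads, surface copies, buffer transfers) that must
// execute on the primary command buffer outside of any render pass. They are queued up
// while ops are being recorded and replayed, in order, by the submit() methods below.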
class InlineUpload : public GrVkPrimaryCommandBufferTask {
public:
    InlineUpload(GrOpFlushState* state, const GrDeferredTextureUploadFn& upload)
            : fFlushState(state), fUpload(upload) {}

    void execute(const Args& args) override { fFlushState->doUpload(fUpload); }

private:
    GrOpFlushState* fFlushState;
    GrDeferredTextureUploadFn fUpload;
};

class Copy : public GrVkPrimaryCommandBufferTask {
public:
    Copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint, bool shouldDiscardDst)
            : fSrc(src)
            , fSrcRect(srcRect)
            , fDstPoint(dstPoint)
            , fShouldDiscardDst(shouldDiscardDst) {}

    void execute(const Args& args) override {
        args.fGpu->copySurface(args.fSurface, fSrc.get(), fSrcRect, fDstPoint, fShouldDiscardDst);
    }

private:
    using Src = GrPendingIOResource<GrSurface, kRead_GrIOType>;
    Src fSrc;
    SkIRect fSrcRect;
    SkIPoint fDstPoint;
    bool fShouldDiscardDst;
};

class TransferFrom : public GrVkPrimaryCommandBufferTask {
public:
    TransferFrom(const SkIRect& srcRect, GrColorType surfaceColorType, GrColorType bufferColorType,
                 GrGpuBuffer* transferBuffer, size_t offset)
            : fTransferBuffer(sk_ref_sp(transferBuffer))
            , fOffset(offset)
            , fSrcRect(srcRect)
            , fSurfaceColorType(surfaceColorType)
            , fBufferColorType(bufferColorType) {}

    void execute(const Args& args) override {
        args.fGpu->transferPixelsFrom(args.fSurface, fSrcRect.fLeft, fSrcRect.fTop,
                                      fSrcRect.width(), fSrcRect.height(), fSurfaceColorType,
                                      fBufferColorType, fTransferBuffer.get(), fOffset);
    }

private:
    sk_sp<GrGpuBuffer> fTransferBuffer;
    size_t fOffset;
    SkIRect fSrcRect;
    GrColorType fSurfaceColorType;
    GrColorType fBufferColorType;
};

}  // anonymous namespace

/////////////////////////////////////////////////////////////////////////////

void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
                                       const SkIPoint& dstPoint) {
    SkASSERT(!src->isProtected() || (fTexture->isProtected() && fGpu->protectedContext()));
    fTasks.emplace<Copy>(src, srcRect, dstPoint, false);
}

void GrVkGpuTextureCommandBuffer::transferFrom(const SkIRect& srcRect, GrColorType surfaceColorType,
                                               GrColorType bufferColorType,
                                               GrGpuBuffer* transferBuffer, size_t offset) {
    fTasks.emplace<TransferFrom>(srcRect, surfaceColorType, bufferColorType, transferBuffer,
                                 offset);
}

void GrVkGpuTextureCommandBuffer::insertEventMarker(const char* msg) {
    // TODO: does Vulkan have a correlate?
}

void GrVkGpuTextureCommandBuffer::submit() {
    GrVkPrimaryCommandBufferTask::Args taskArgs{fGpu, fTexture};
    for (auto& task : fTasks) {
        task.execute(taskArgs);
    }
}

////////////////////////////////////////////////////////////////////////////////

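// Translates Skia's load/store ops into their Vulkan attachment equivalents. Note that
// both GrLoadOp::kDiscard and GrStoreOp::kDiscard map to the Vulkan DONT_CARE ops, which
// leave the affected attachment contents undefined rather than performing an explicit
// discard.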
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu) : fGpu(gpu) {}

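// Sets up the first CommandBufferInfo: finds (or creates) a render pass matching the
// requested load/store ops and begins recording a secondary command buffer against it.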
void GrVkGpuRTCommandBuffer::init() {
    GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }

    cbInfo.fColorClearValue.color.float32[0] = fClearColor[0];
    cbInfo.fColorClearValue.color.float32[1] = fClearColor[1];
    cbInfo.fColorClearValue.color.float32[2] = fClearColor[2];
    cbInfo.fColorClearValue.color.float32[3] = fClearColor[3];

    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fBounds = SkRect::MakeWH(vkRT->width(), vkRT->height());
    } else {
        cbInfo.fBounds.setEmpty();
    }

    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
    } else if (VK_ATTACHMENT_LOAD_OP_LOAD == fVkColorLoadOp &&
               VK_ATTACHMENT_STORE_OP_STORE == fVkColorStoreOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    } else if (VK_ATTACHMENT_LOAD_OP_DONT_CARE == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
    }

    cbInfo.fCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}

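// Wrapped case: the render target wraps an externally supplied secondary command buffer
// and render pass, so we adopt those rather than creating our own.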
void GrVkGpuRTCommandBuffer::initWrapped() {
    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    SkASSERT(vkRT->wrapsSecondaryCommandBuffer());
    cbInfo.fRenderPass = vkRT->externalRenderPass();
    cbInfo.fRenderPass->ref();

    cbInfo.fBounds.setEmpty();
    cbInfo.fCommandBuffer.reset(
            GrVkSecondaryCommandBuffer::Create(vkRT->getExternalSecondaryCommandBuffer()));
    cbInfo.currentCmdBuf()->begin(fGpu, nullptr, cbInfo.fRenderPass);
}

GrVkGpuRTCommandBuffer::~GrVkGpuRTCommandBuffer() {
    this->reset();
}

GrGpu* GrVkGpuRTCommandBuffer::gpu() { return fGpu; }

void GrVkGpuRTCommandBuffer::end() {
    if (fCurrentCmdInfo >= 0) {
        fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
    }
}

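// Replays the recorded pre-command tasks, performs the deferred image layout
// transitions, and then submits each secondary command buffer inside its render pass.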
void GrVkGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
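    // When the target is multisampled the MSAA image, not the render target's own image,
    // is the color attachment, so the layout transitions below must be applied to it.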
    GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;
    GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();
    auto currPreCmd = fPreCommandBufferTasks.begin();

    GrVkPrimaryCommandBufferTask::Args taskArgs{fGpu, fRenderTarget};
    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

        for (int c = 0; c < cbInfo.fNumPreCmds; ++c, ++currPreCmd) {
            currPreCmd->execute(taskArgs);
        }

        // TODO: Many things create a scratch texture which adds the discard immediately, but then
        // don't draw to it right away. This causes the discard to be ignored and we get yelled at
        // for loading uninitialized data. However, once MDB lands with reordering, the discard will
        // get reordered with the rest of the draw commands and we can remove the discard check.
        if (cbInfo.fIsEmpty &&
            cbInfo.fLoadStoreState != LoadStoreState::kStartsWithClear &&
            cbInfo.fLoadStoreState != LoadStoreState::kStartsWithDiscard) {
            // We have submitted no actual draw commands to the command buffer, and we are not
            // using the render pass to do a clear, so there is no need to submit anything.
            continue;
        }

        // We don't want to actually submit the secondary command buffer if it is wrapped.
        if (this->wrapsSecondaryCommandBuffer()) {
            // If we have any sampled images set their layout now.
            for (int j = 0; j < cbInfo.fSampledTextures.count(); ++j) {
                cbInfo.fSampledTextures[j]->setImageLayout(
                        fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
            }

            // There should have only been one secondary command buffer in the wrapped case so it
            // is safe to just return here.
            SkASSERT(fCommandBufferInfos.count() == 1);
            return;
        }

        // Make sure if we only have a discard load that we execute the discard on the whole image.
        // TODO: Once we improve our tracking of discards so that we never end up flushing a discard
        // call with no actual ops, remove this.
        if (cbInfo.fIsEmpty && cbInfo.fLoadStoreState == LoadStoreState::kStartsWithDiscard) {
            cbInfo.fBounds = SkRect::MakeWH(vkRT->width(), vkRT->height());
        }

        if (cbInfo.fBounds.intersect(0, 0,
                                     SkIntToScalar(fRenderTarget->width()),
                                     SkIntToScalar(fRenderTarget->height()))) {
            // Make sure we do the following layout changes after all copies, uploads, or any other
            // pre-work is done, since we may change the layouts in the pre-work. Also, since the
            // draws will be submitted in different render passes, we need to guard against
            // write-after-write hazards.

            // Change layout of our render target so it can be used as the color attachment.
            // TODO: If we know that we will never be blending or loading the attachment we could
            // drop the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
            targetImage->setImageLayout(fGpu,
                                        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                        VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                        false);

            // If we are using a stencil attachment we also need to update its layout.
            if (stencil) {
                GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
                // We need the write and read access bits since we may load and store the stencil.
                // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
                // wait there.
                vkStencil->setImageLayout(fGpu,
                                          VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                          VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                          false);
            }

            // If we have any sampled images set their layout now.
            for (int j = 0; j < cbInfo.fSampledTextures.count(); ++j) {
                cbInfo.fSampledTextures[j]->setImageLayout(
                        fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
            }

            SkIRect iBounds;
            cbInfo.fBounds.roundOut(&iBounds);

            fGpu->submitSecondaryCommandBuffer(std::move(cbInfo.fCommandBuffer), cbInfo.fRenderPass,
                                               &cbInfo.fColorClearValue, vkRT, fOrigin, iBounds);
        }
    }
    SkASSERT(currPreCmd == fPreCommandBufferTasks.end());
}

void GrVkGpuRTCommandBuffer::set(GrRenderTarget* rt, GrSurfaceOrigin origin,
                                 const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
                                 const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fCommandBufferInfos.empty());
    SkASSERT(-1 == fCurrentCmdInfo);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());
    SkASSERT(!fLastPipelineState);

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    this->INHERITED::set(rt, origin);

    if (this->wrapsSecondaryCommandBuffer()) {
        this->initWrapped();
        return;
    }

    fClearColor = colorInfo.fClearColor;

    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
                          &fVkColorLoadOp, &fVkColorStoreOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
                          &fVkStencilLoadOp, &fVkStencilStoreOp);

    this->init();
}

void GrVkGpuRTCommandBuffer::reset() {
    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
        if (cbInfo.fCommandBuffer) {
            cbInfo.fCommandBuffer.release()->recycle(fGpu);
        }
        cbInfo.fRenderPass->unref(fGpu);
    }
    fCommandBufferInfos.reset();
    fPreCommandBufferTasks.reset();

    fCurrentCmdInfo = -1;

    fLastPipelineState = nullptr;
    fRenderTarget = nullptr;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkGpuRTCommandBuffer::wrapsSecondaryCommandBuffer() const {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    return vkRT->wrapsSecondaryCommandBuffer();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpuRTCommandBuffer::insertEventMarker(const char* msg) {
    // TODO: does Vulkan have a correlate?
}

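// Clears the stencil clip using vkCmdClearAttachments, which (unlike the
// vkCmdClear*Image commands) may be recorded inside an active render pass.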
void GrVkGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
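    // Skia reserves the most significant stencil bit for the clip, so clearing to
    // "inside the mask" sets that bit and zeroes the rest.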
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(cbInfo.fRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds.
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}

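// Color clears take one of two paths: an unscissored clear on an otherwise empty command
// buffer is folded into the render pass as VK_ATTACHMENT_LOAD_OP_CLEAR, while anything
// else is recorded as a vkCmdClearAttachments call inside the render pass.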
void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // The parent class should never let us get here with no render target.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    VkClearColorValue vkColor = {{color.fR, color.fG, color.fB, color.fA}};

    if (cbInfo.fIsEmpty && !clip.scissorEnabled()) {
        // Change the render pass to do a clear load.
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        // Preserve the stencil buffer's load & store settings.
        GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
                vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        cbInfo.fColorClearValue.color = {{color.fR, color.fG, color.fB, color.fA}};
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
        // Update command buffer bounds.
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        return;
    }

    // We always do a sub-rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds.
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}

////////////////////////////////////////////////////////////////////////////////

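// Ends the current secondary command buffer and opens a new one whose render pass loads
// and stores both attachments, preserving earlier results across the split. This is what
// lets pre-commands (uploads, copies, transfers) run between draws at submit time.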
void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    fCurrentCmdInfo++;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;

    cbInfo.fCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
    // It shouldn't matter what we set the clear color to here since we will assume loading of the
    // attachment.
    memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
    cbInfo.fBounds.setEmpty();

    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}

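// Inline uploads must run on the primary command buffer outside of any render pass, so
// if draws have already been recorded we split the render pass here; the upload then
// executes between the two passes in submission order.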
void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state,
                                          GrDeferredTextureUploadFn& upload) {
    if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
        this->addAdditionalRenderPass();
    }

    fPreCommandBufferTasks.emplace<InlineUpload>(state, upload);
    ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;
}

void GrVkGpuRTCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
                                  const SkIPoint& dstPoint) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (!cbInfo.fIsEmpty || LoadStoreState::kStartsWithClear == cbInfo.fLoadStoreState) {
        this->addAdditionalRenderPass();
    }

    fPreCommandBufferTasks.emplace<Copy>(
            src, srcRect, dstPoint, LoadStoreState::kStartsWithDiscard == cbInfo.fLoadStoreState);
    ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;

    if (LoadStoreState::kLoadAndStore != cbInfo.fLoadStoreState) {
        // Change the render pass to do a load and store so we don't lose the results of our copy.
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
        SkASSERT(!src->isProtected() || (fRenderTarget->isProtected() && fGpu->protectedContext()));
        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
                vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    }
}

void GrVkGpuRTCommandBuffer::transferFrom(const SkIRect& srcRect, GrColorType surfaceColorType,
                                          GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                                          size_t offset) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (!cbInfo.fIsEmpty) {
        this->addAdditionalRenderPass();
    }
    fPreCommandBufferTasks.emplace<TransferFrom>(srcRect, surfaceColorType, bufferColorType,
                                                 transferBuffer, offset);
    ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpuRTCommandBuffer::bindGeometry(const GrGpuBuffer* indexBuffer,
                                          const GrGpuBuffer* vertexBuffer,
                                          const GrGpuBuffer* instanceBuffer) {
    GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;

    if (vertexBuffer) {
        SkASSERT(vertexBuffer);
        SkASSERT(!vertexBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(vertexBuffer));
    }

    if (instanceBuffer) {
        SkASSERT(instanceBuffer);
        SkASSERT(!instanceBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(instanceBuffer));
    }
    if (indexBuffer) {
        SkASSERT(indexBuffer);
        SkASSERT(!indexBuffer->isMapped());

        currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
    }
}

GrVkPipelineState* GrVkGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        const GrPipeline::DynamicStateArrays* dynamicStateArrays,
        GrPrimitiveType primitiveType) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(cbInfo.fRenderPass);

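    // Pipeline lookup/creation is keyed on a compatible VkRenderPass. Vulkan render pass
    // compatibility depends on attachment formats and sample counts but ignores
    // load/store ops, so pipelines are shared across all of our per-pass variants.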
    VkRenderPass compatibleRenderPass = cbInfo.fRenderPass->vkRenderPass();

    const GrTextureProxy* const* primProcProxies = nullptr;
    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        primProcProxies = dynamicStateArrays->fPrimitiveProcessorTextures;
    } else if (fixedDynamicState) {
        primProcProxies = fixedDynamicState->fPrimitiveProcessorTextures;
    }

    SkASSERT(SkToBool(primProcProxies) == SkToBool(primProc.numTextureSamplers()));

    GrVkPipelineState* pipelineState =
            fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget, fOrigin,
                                                                         pipeline,
                                                                         primProc,
                                                                         primProcProxies,
                                                                         primitiveType,
                                                                         compatibleRenderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    fLastPipelineState = pipelineState;

    pipelineState->bindPipeline(fGpu, cbInfo.currentCmdBuf());

    pipelineState->setAndBindUniforms(fGpu, fRenderTarget, fOrigin,
                                      primProc, pipeline, cbInfo.currentCmdBuf());

    // Check whether we need to bind textures between each GrMesh. If not we can bind them all now.
    bool setTextures = !(dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures);
    if (setTextures) {
        pipelineState->setAndBindTextures(fGpu, primProc, pipeline, primProcProxies,
                                          cbInfo.currentCmdBuf());
    }

    if (!pipeline.isScissorEnabled()) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                 fRenderTarget, fOrigin,
                                                 SkIRect::MakeWH(fRenderTarget->width(),
                                                                 fRenderTarget->height()));
    } else if (!dynamicStateArrays || !dynamicStateArrays->fScissorRects) {
        SkASSERT(fixedDynamicState);
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget,
                                                 fOrigin,
                                                 fixedDynamicState->fScissorRect);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, cbInfo.currentCmdBuf(),
                                               pipeline.outputSwizzle(),
                                               pipeline.getXferProcessor());

    return pipelineState;
}

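// Records the draws for a set of meshes: prepare every sampled texture (resolving and
// noting it for later layout transitions), bind pipeline state once per primitive type,
// then emit each mesh, rebinding scissor and textures per mesh only when dynamic state
// arrays require it.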
void GrVkGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                    const GrPipeline& pipeline,
                                    const GrPipeline::FixedDynamicState* fixedDynamicState,
                                    const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                    const GrMesh meshes[],
                                    int meshCount,
                                    const SkRect& bounds) {
    if (!meshCount) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    auto prepareSampledImage = [&](GrTexture* texture, GrSamplerState::Filter filter) {
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
        // We may need to resolve the texture first if it is also a render target.
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT && texRT->needsResolve()) {
            fGpu->resolveRenderTargetNoFlush(texRT);
            // TEMPORARY: MSAA resolve will have dirtied mipmaps. This goes away once we switch
            // to resolving MSAA from the opList as well.
            if (GrSamplerState::Filter::kMipMap == filter &&
                (vkTexture->width() != 1 || vkTexture->height() != 1)) {
                SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
                SkASSERT(vkTexture->texturePriv().mipMapsAreDirty());
                fGpu->regenerateMipMapLevels(vkTexture);
            }
        }

        // Ensure mip maps were all resolved ahead of time by the opList.
        if (GrSamplerState::Filter::kMipMap == filter &&
            (vkTexture->width() != 1 || vkTexture->height() != 1)) {
            SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
            SkASSERT(!vkTexture->texturePriv().mipMapsAreDirty());
        }
        cbInfo.fSampledTextures.push_back(vkTexture);

        SkASSERT(!texture->isProtected() ||
                 (fRenderTarget->isProtected() && fGpu->protectedContext()));
    };

    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        for (int m = 0, i = 0; m < meshCount; ++m) {
            for (int s = 0; s < primProc.numTextureSamplers(); ++s, ++i) {
                auto texture = dynamicStateArrays->fPrimitiveProcessorTextures[i]->peekTexture();
                prepareSampledImage(texture, primProc.textureSampler(s).samplerState().filter());
            }
        }
    } else {
        for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
            auto texture = fixedDynamicState->fPrimitiveProcessorTextures[i]->peekTexture();
            prepareSampledImage(texture, primProc.textureSampler(i).samplerState().filter());
        }
    }
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
            prepareSampledImage(sampler.peekTexture(), sampler.samplerState().filter());
        }
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        cbInfo.fSampledTextures.push_back(sk_ref_sp(static_cast<GrVkTexture*>(dstTexture)));
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrVkPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                              dynamicStateArrays, primitiveType);
    if (!pipelineState) {
        return;
    }

    bool dynamicScissor =
            pipeline.isScissorEnabled() && dynamicStateArrays && dynamicStateArrays->fScissorRects;
    bool dynamicTextures = dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures;

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        if (mesh.primitiveType() != primitiveType) {
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   dynamicStateArrays, primitiveType);
            if (!pipelineState) {
                return;
            }
        }

        if (dynamicScissor) {
            GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget,
                                                     fOrigin,
                                                     dynamicStateArrays->fScissorRects[i]);
        }
        if (dynamicTextures) {
            GrTextureProxy* const* meshProxies = dynamicStateArrays->fPrimitiveProcessorTextures +
                                                 primProc.numTextureSamplers() * i;
            pipelineState->setAndBindTextures(fGpu, primProc, pipeline, meshProxies,
                                              cbInfo.currentCmdBuf());
        }
        SkASSERT(pipelineState);
        mesh.sendToGpu(this);
    }

    cbInfo.fBounds.join(bounds);
    cbInfo.fIsEmpty = false;
}

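// Non-indexed draw dispatch from GrMesh; this lands on vkCmdDraw in the current
// secondary command buffer.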
void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType,
                                                    const GrBuffer* vertexBuffer,
                                                    int vertexCount,
                                                    int baseVertex,
                                                    const GrBuffer* instanceBuffer,
                                                    int instanceCount,
                                                    int baseInstance) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
    SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
    auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
    auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
    this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer);
    cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
                                                           const GrBuffer* indexBuffer,
                                                           int indexCount,
                                                           int baseIndex,
                                                           const GrBuffer* vertexBuffer,
                                                           int baseVertex,
                                                           const GrBuffer* instanceBuffer,
                                                           int instanceCount,
                                                           int baseInstance,
                                                           GrPrimitiveRestart restart) {
    SkASSERT(restart == GrPrimitiveRestart::kNo);
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
    SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
    SkASSERT(!indexBuffer->isCpuBuffer());
    auto gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer);
    auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
    auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
    this->bindGeometry(gpuIndexBuffer, gpuVertexBuffer, gpuInstanceBuffer);
    cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
                                        baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpuRTCommandBuffer::executeDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(fRenderTarget);

    GrVkImage* targetImage = target->msaaImage() ? target->msaaImage() : target;

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };
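    // The drawable reports the region it touched through fDrawBounds (pointing at the
    // bounds variable above); a zero extent is treated below as "unknown" and
    // conservatively expands to the full target bounds.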

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = cbInfo.currentCmdBuf()->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = cbInfo.fRenderPass->vkRenderPass();
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = targetImage->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = targetImage->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif  // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    cbInfo.currentCmdBuf()->invalidateState();
    // Also assume that the drawable produced output.
    cbInfo.fIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));

    if (bounds.extent.width == 0 || bounds.extent.height == 0) {
        cbInfo.fBounds.join(target->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::MakeXYWH(bounds.offset.x, bounds.offset.y,
                                             bounds.extent.width, bounds.extent.height));
    }
}