/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkPipelineState.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}
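
// Illustrative sketch (not part of this file): the sentinel values written above can never match
// real state, so the memcmp-based caches in setViewport(), setScissor(), and setBlendConstants()
// below are guaranteed to miss and re-record the corresponding Vulkan commands. Assuming a
// GrVkCommandBuffer* `cb` and a GrVkGpu* `gpu` are in scope:
//
//     cb->invalidateState();
//     VkViewport vp = {0.f, 0.f, 256.f, 256.f, 0.f, 1.f};
//     cb->setViewport(gpu, 0, 1, &vp);  // cache miss: cached width is -1.f
//     cb->setViewport(gpu, 0, 1, &vp);  // cache hit: vkCmdSetViewport is skipped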

void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    SkASSERT(fTrackedResources.empty());
    SkASSERT(fTrackedRecycledResources.empty());
    SkASSERT(fTrackedGpuBuffers.empty());
    SkASSERT(fTrackedGpuSurfaces.empty());
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    GrVkGpu* vkGpu = (GrVkGpu*)gpu;
    GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(vkGpu);
}

void GrVkCommandBuffer::releaseResources() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive || this->isWrapped());
    fTrackedResources.clear();
    fTrackedRecycledResources.clear();

    fTrackedGpuBuffers.clear();
    fTrackedGpuSurfaces.clear();

    this->invalidateState();

    this->onReleaseResources();
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes, but they require us to add more
    // support for subpasses, which need self-dependencies to allow barriers inside them. Also, we
    // can never have buffer barriers inside of a render pass. For now we just assert that we are
    // not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier already in the current batch. If it does, we must submit
        // the first batch, because the Vulkan spec does not define a specific ordering for
        // barriers submitted in the same batch.
        // TODO: Look into whether we can gain anything by merging barriers together instead of
        // submitting the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(gpu, true);
    }
}
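
// Worked example (illustrative, not part of this file): the interval test above flushes the
// batch when two barriers touch intersecting mip ranges of the same image. Each range is the
// closed interval [baseMipLevel, baseMipLevel + levelCount - 1], and [a,b] intersects [c,d]
// exactly when max(a,c) <= min(b,d).
//
//     // A batched barrier covers mips 0..2; an incoming barrier covers mips 2..3.
//     uint32_t oldStart = 0, oldEnd = 2;
//     uint32_t newStart = 2, newEnd = 3;
//     bool overlap = std::max(newStart, oldStart) <= std::min(newEnd, oldEnd);
//     // overlap == true (2 <= 2): the pending batch is submitted before the new barrier is
//     // added, so the two barriers get a well-defined order.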

void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (fBufferBarriers.size() || fImageBarriers.size()) {
        // For images we can have barriers inside of render passes, but they require us to add
        // more support for subpasses, which need self-dependencies to allow barriers inside them.
        // Also, we can never have buffer barriers inside of a render pass. For now we just assert
        // that we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.size(), fBufferBarriers.begin(),
                fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.size());
    SkASSERT(!fImageBarriers.size());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = 0;
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer, /*offset=*/0,
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}
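
// Illustrative usage sketch (not part of this file; assumes `cb`, `gpu`, and GrBuffer-backed
// sk_sp handles `vtxBuffer`/`idxBuffer` exist): geometry is bound through the redundant-bind
// filters above, and indices are always 16-bit at offset 0.
//
//     cb->bindInputBuffer(gpu, /*binding=*/0, vtxBuffer);  // records vkCmdBindVertexBuffers
//     cb->bindInputBuffer(gpu, /*binding=*/0, vtxBuffer);  // no-op: same VkBuffer already bound
//     cb->bindIndexBuffer(gpu, idxBuffer);                 // records vkCmdBindIndexBuffer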

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(std::move(pipeline));
}

void GrVkCommandBuffer::pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                                      VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                      const void* values) {
    SkASSERT(fIsActive);
    // offset and size must each be a multiple of 4
    SkASSERT(!SkToBool(offset & 0x3));
    SkASSERT(!SkToBool(size & 0x3));
    GR_VK_CALL(gpu->vkInterface(), CmdPushConstants(fCmdBuffer,
                                                    layout,
                                                    stageFlags,
                                                    offset,
                                                    size,
                                                    values));
}
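
// Illustrative sketch (not part of this file): vkCmdPushConstants requires both offset and size
// to be multiples of 4, which the asserts above enforce. Assuming a pipeline layout `layout`
// whose push constant range holds a 4x4 float matrix in the vertex stage:
//
//     float matrix[16] = {1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0,  0, 0, 0, 1};
//     cb->pushConstants(gpu, layout, VK_SHADER_STAGE_VERTEX_BIT,
//                       /*offset=*/0, /*size=*/sizeof(matrix), matrix);  // 64 bytes: valid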

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     sk_sp<const GrBuffer> indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
                                                   vkBuffer,
                                                   offset,
                                                   drawCount,
                                                   stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            sk_sp<const GrBuffer> indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
                                                          vkBuffer,
                                                          offset,
                                                          drawCount,
                                                          stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

////////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // commandBufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}
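
// Illustrative lifecycle sketch (not part of this file): the buffer is recorded with
// ONE_TIME_SUBMIT, so each use is begin -> record -> end -> submit. Assuming a
// GrVkPrimaryCommandBuffer* `pcb` and a GrVkGpu* `gpu`:
//
//     pcb->begin(gpu);                            // vkBeginCommandBuffer
//     // ... record copies, barriers, render passes ...
//     pcb->end(gpu, /*abandoningBuffer=*/false);  // vkEndCommandBuffer
//     // ... then submitToQueue() below, and eventually finished()/releaseResources()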

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    // If we are in the process of abandoning the context, then the GrResourceCache will have
    // freed all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we still
    // call end on the command buffer to keep all our state tracking consistent. However, the
    // Vulkan validation layers complain about calling end on a command buffer that contains
    // resources that have already been deleted. The Vulkan API does not require ending a command
    // buffer in order to delete it, so we just skip the Vulkan calls and update our own state
    // tracking.
    if (!abandoningBuffer) {
        this->submitPipelineBarriers(gpu);

        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               sk_sp<const GrVkFramebuffer> framebuffer,
                                               const VkClearValue clearValues[],
                                               const GrSurface* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    SkASSERT(framebuffer);

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    this->addResource(std::move(framebuffer));
    this->addGrSurface(sk_ref_sp(target));
    return true;
}
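
// Illustrative sketch (not part of this file): the caller supplies one VkClearValue per cleared
// attachment and `bounds` in framebuffer space. Assuming a render pass that clears a single
// color attachment, and `renderPass`, `framebuffer`, and `target` already in scope:
//
//     VkClearValue clear = {};
//     clear.color = {{0.f, 0.f, 0.f, 1.f}};        // opaque black
//     SkIRect bounds = SkIRect::MakeWH(256, 256);  // render area covering the whole target
//     pcb->beginRenderPass(gpu, renderPass, framebuffer, &clear, target, bounds,
//                          /*forSecondaryCB=*/false);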

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::nexSubpass(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;
    GR_VK_CALL(gpu->vkInterface(), CmdNextSubpass(fCmdBuffer, contents));
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows a secondary command buffer to be executed on a primary command
    // buffer as long as the pools the two buffers were allocated from were created for the same
    // queue family. However, we currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}
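
// Illustrative flow sketch (not part of this file): a secondary command buffer is recorded
// against a compatible render pass, then spliced into the primary one. Assuming `gpu`,
// `cmdPool`, `framebuffer`, and `renderPass` exist, and that `pcb` is inside a render pass
// begun with forSecondaryCB=true:
//
//     GrVkSecondaryCommandBuffer* scb = GrVkSecondaryCommandBuffer::Create(gpu, cmdPool);
//     scb->begin(gpu, framebuffer, renderPass);
//     // ... record draws into scb ...
//     scb->end(gpu);
//     pcb->executeCommands(gpu, std::unique_ptr<GrVkSecondaryCommandBuffer>(scb));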

static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST, so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.size();
    int waitCount = waitSemaphores.size();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores, so we can simply submit it to the
        // queue.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.size(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.size(), vkSignalSems.begin(),
                                    gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}
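
// Illustrative sketch (not part of this file): every wait semaphore is paired with a wait-stage
// entry, and VK_PIPELINE_STAGE_ALL_COMMANDS_BIT above is the conservative choice: nothing in
// this submission starts until the semaphore signals. A submission that waits on one semaphore
// and signals another might look like (the Resource* names are hypothetical):
//
//     SkTArray<GrVkSemaphore::Resource*> waits, signals;
//     waits.push_back(acquireSemResource);       // e.g. a swapchain-acquire wait
//     signals.push_back(renderDoneSemResource);  // signaled when this GPU work completes
//     bool ok = pcb->submitToQueue(gpu, queue, signals, waits);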

void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}
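
// Illustrative sketch (not part of this file): finished() is a non-blocking poll of the submit
// fence, while forceSync() blocks until it signals. A caller pumping completion might do:
//
//     if (pcb->finished(gpu)) {
//         pcb->releaseResources();  // the GPU is done, so tracked resources can be dropped
//     }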

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources() {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources();
    }
    this->callFinishedProcs();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.clear();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        vkBuffer->vkBuffer(),
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(srcImage->resource());
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 VkBuffer srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer,
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(dstImage->resource());
}

void GrVkPrimaryCommandBuffer::fillBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> buffer,
                                          VkDeviceSize offset,
                                          VkDeviceSize size,
                                          uint32_t data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    const GrVkBuffer* bufferVk = static_cast<GrVkBuffer*>(buffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdFillBuffer(fCmdBuffer,
                                                 bufferVk->vkBuffer(),
                                                 offset,
                                                 size,
                                                 data));
    this->addGrBuffer(std::move(buffer));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> srcBuffer,
                                          sk_sp<GrGpuBuffer> dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif

    const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
    const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcVk->vkBuffer(),
                                                 dstVk->vkBuffer(),
                                                 regionCount,
                                                 regions));
    this->addGrBuffer(std::move(srcBuffer));
    this->addGrBuffer(std::move(dstBuffer));
}
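
// Illustrative sketch (not part of this file): each VkBufferCopy must satisfy the debug checks
// above, i.e. a non-zero size that stays within both buffers. Assuming sk_sp<GrGpuBuffer>
// handles `src` (>= 512 bytes) and `dst` (>= 768 bytes):
//
//     VkBufferCopy region;
//     region.srcOffset = 0;     // within src->size()
//     region.dstOffset = 256;   // within dst->size()
//     region.size      = 512;   // srcOffset + size <= src->size(); dstOffset + size <= dst->size()
//     pcb->copyBuffer(gpu, src, dst, /*regionCount=*/1, &region);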

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            sk_sp<GrVkBuffer> dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addingWork(gpu);
    GR_VK_CALL(
            gpu->vkInterface(),
            CmdUpdateBuffer(
                    fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*)data));
    this->addGrBuffer(std::move(dstBuffer));
}
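
// Illustrative sketch (not part of this file): vkCmdUpdateBuffer caps dataSize at 65536 bytes,
// which the assert above mirrors. One way to honor the TODO and push larger payloads would be
// to split them into 64 KiB chunks (a sketch, assuming `data`, `dataSize`, and `dstOffset` are
// all 4-byte aligned; each call copies `dstBuffer` since updateBuffer takes the sk_sp by value):
//
//     const char* src = static_cast<const char*>(data);
//     for (VkDeviceSize done = 0; done < dataSize; done += 65536) {
//         VkDeviceSize chunk = std::min<VkDeviceSize>(65536, dataSize - done);
//         pcb->updateBuffer(gpu, dstBuffer, dstOffset + done, chunk, src + done);
//     }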

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(!fSecondaryCommandBuffers.size());
}

////////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // commandBufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, /*externalRenderPass=*/nullptr);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
        VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, externalRenderPass);
}

void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(!this->isWrapped());
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    VkCommandBufferInheritanceInfo inheritanceInfo;
    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.pNext = nullptr;
    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
    inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
    inheritanceInfo.occlusionQueryEnable = false;
    inheritanceInfo.queryFlags = 0;
    inheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
            VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));

    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!this->isWrapped());
    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fHasWork = false;
    fIsActive = false;
}

void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}
1000 
1001