1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/GrVkCommandBuffer.h"
9
10 #include "include/core/SkRect.h"
11 #include "src/gpu/vk/GrVkBuffer.h"
12 #include "src/gpu/vk/GrVkCommandPool.h"
13 #include "src/gpu/vk/GrVkFramebuffer.h"
14 #include "src/gpu/vk/GrVkGpu.h"
15 #include "src/gpu/vk/GrVkImage.h"
16 #include "src/gpu/vk/GrVkImageView.h"
17 #include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
20 #include "src/gpu/vk/GrVkRenderPass.h"
21 #include "src/gpu/vk/GrVkRenderTarget.h"
22 #include "src/gpu/vk/GrVkUtil.h"
23
void GrVkCommandBuffer::invalidateState() {
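    // Reset all cached bound-buffer and dynamic state so that subsequent bind/set calls are not
    // elided against stale values (e.g. after a secondary command buffer has executed).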
25 for (auto& boundInputBuffer : fBoundInputBuffers) {
26 boundInputBuffer = VK_NULL_HANDLE;
27 }
28 fBoundIndexBuffer = VK_NULL_HANDLE;
29
    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be greater than or equal to 0 to be valid
35
36 for (int i = 0; i < 4; ++i) {
37 fCachedBlendConstant[i] = -1.0;
38 }
39 }
40
void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
42 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
43 SkASSERT(!fIsActive);
44 SkASSERT(!fTrackedResources.count());
45 SkASSERT(!fTrackedRecycledResources.count());
46 SkASSERT(!fTrackedGpuBuffers.count());
47 SkASSERT(!fTrackedGpuSurfaces.count());
48 SkASSERT(cmdPool != VK_NULL_HANDLE);
49 SkASSERT(!this->isWrapped());
50
51 GrVkGpu* vkGpu = (GrVkGpu*)gpu;
52 GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));
53
54 this->onFreeGPUData(vkGpu);
55 }
56
void GrVkCommandBuffer::releaseResources() {
58 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
59 SkASSERT(!fIsActive || this->isWrapped());
60 for (int i = 0; i < fTrackedResources.count(); ++i) {
61 fTrackedResources[i]->notifyFinishedWithWorkOnGpu();
62 }
63 fTrackedResources.reset();
64 for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
65 fTrackedRecycledResources[i]->notifyFinishedWithWorkOnGpu();
66 }
67 fTrackedRecycledResources.reset();
68
69 fTrackedGpuBuffers.reset();
70 fTrackedGpuSurfaces.reset();
71
72 this->invalidateState();
73
74 this->onReleaseResources();
75 }
76
77 ////////////////////////////////////////////////////////////////////////////////
78 // CommandBuffer commands
79 ////////////////////////////////////////////////////////////////////////////////
80
void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
88 SkASSERT(!this->isWrapped());
89 SkASSERT(fIsActive);
90 #ifdef SK_DEBUG
    // Image barriers are allowed inside a render pass, but only in subpasses that declare a
    // self-dependency; buffer barriers are never allowed inside a render pass. For now we only
    // support the self-dependency case, so assert that any barrier issued inside a render pass
    // is a valid subpass self-dependency barrier.
95 bool isValidSubpassBarrier = false;
96 if (barrierType == kImageMemory_BarrierType) {
97 VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
98 isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
99 (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
100 (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
101 byRegion;
102 }
103 SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
104 #endif
105
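    // Barriers are not recorded into the command buffer immediately. They are batched in
    // fBufferBarriers/fImageBarriers and flushed as a single vkCmdPipelineBarrier call by
    // submitPipelineBarriers().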
106 if (barrierType == kBufferMemory_BarrierType) {
107 const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
108 fBufferBarriers.push_back(*barrierPtr);
109 } else {
110 SkASSERT(barrierType == kImageMemory_BarrierType);
111 const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check whether the new image barrier covers part of the same subresource
        // range as a barrier that is already in the current batch. If it does, we must submit
        // the existing batch first, because the Vulkan spec does not define an ordering between
        // barriers submitted in the same batch.
        // TODO: See if we can gain anything by merging barriers together instead of submitting
        // the old ones.
118 for (int i = 0; i < fImageBarriers.count(); ++i) {
119 VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
120 if (barrierPtr->image == currentBarrier.image) {
121 const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
122 const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
123 SkASSERT(newRange.aspectMask == oldRange.aspectMask);
124 SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
125 SkASSERT(newRange.layerCount == oldRange.layerCount);
126 uint32_t newStart = newRange.baseMipLevel;
127 uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
128 uint32_t oldStart = oldRange.baseMipLevel;
129 uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
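                // The closed mip ranges [newStart, newEnd] and [oldStart, oldEnd] overlap iff
                // the larger of the two starts is not past the smaller of the two ends.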
130 if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
131 this->submitPipelineBarriers(gpu);
132 break;
133 }
134 }
135 }
136 fImageBarriers.push_back(*barrierPtr);
137 }
138 fBarriersByRegion |= byRegion;
139 fSrcStageMask = fSrcStageMask | srcStageMask;
140 fDstStageMask = fDstStageMask | dstStageMask;
141
142 fHasWork = true;
143 if (resource) {
144 this->addResource(resource);
145 }
146 if (fActiveRenderPass) {
147 this->submitPipelineBarriers(gpu, true);
148 }
149 }
150
void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
152 SkASSERT(fIsActive);
153
154 // Currently we never submit a pipeline barrier without at least one memory barrier.
155 if (fBufferBarriers.count() || fImageBarriers.count()) {
156 // For images we can have barriers inside of render passes but they require us to add more
157 // support in subpasses which need self dependencies to have barriers inside them. Also, we
158 // can never have buffer barriers inside of a render pass. For now we will just assert that
159 // we are not in a render pass.
160 SkASSERT(!fActiveRenderPass || forSelfDependency);
161 SkASSERT(!this->isWrapped());
162 SkASSERT(fSrcStageMask && fDstStageMask);
163
164 VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
165 GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
166 fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
167 fBufferBarriers.count(), fBufferBarriers.begin(),
168 fImageBarriers.count(), fImageBarriers.begin()));
169 fBufferBarriers.reset();
170 fImageBarriers.reset();
171 fBarriersByRegion = false;
172 fSrcStageMask = 0;
173 fDstStageMask = 0;
174 }
175 SkASSERT(!fBufferBarriers.count());
176 SkASSERT(!fImageBarriers.count());
177 SkASSERT(!fBarriersByRegion);
178 SkASSERT(!fSrcStageMask);
179 SkASSERT(!fDstStageMask);
180 }
181
void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        sk_sp<const GrBuffer> buffer) {
184 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
185 SkASSERT(VK_NULL_HANDLE != vkBuffer);
186 SkASSERT(binding < kMaxInputBuffers);
187 // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
188 // to know if we can skip binding or not.
189 if (vkBuffer != fBoundInputBuffers[binding]) {
190 VkDeviceSize offset = 0;
191 GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
192 binding,
193 1,
194 &vkBuffer,
195 &offset));
196 fBoundInputBuffers[binding] = vkBuffer;
197 this->addGrBuffer(std::move(buffer));
198 }
199 }
200
void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
202 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
203 SkASSERT(VK_NULL_HANDLE != vkBuffer);
204 // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
205 // to know if we can skip binding or not.
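    // Skia's index data is always uint16_t, so the index type is fixed to VK_INDEX_TYPE_UINT16.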
206 if (vkBuffer != fBoundIndexBuffer) {
207 GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
208 vkBuffer, /*offset=*/0,
209 VK_INDEX_TYPE_UINT16));
210 fBoundIndexBuffer = vkBuffer;
211 this->addGrBuffer(std::move(buffer));
212 }
213 }
214
void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
220 SkASSERT(fIsActive);
221 SkASSERT(fActiveRenderPass);
222 SkASSERT(numAttachments > 0);
223 SkASSERT(numRects > 0);
224
225 this->addingWork(gpu);
226
227 #ifdef SK_DEBUG
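    // In debug builds, verify that each color clear actually targets the render pass's color
    // attachment index.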
228 for (int i = 0; i < numAttachments; ++i) {
229 if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
230 uint32_t testIndex;
231 SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
232 SkASSERT(testIndex == attachments[i].colorAttachment);
233 }
234 }
235 #endif
236 GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
237 numAttachments,
238 attachments,
239 numRects,
240 clearRects));
241 if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
242 this->invalidateState();
243 }
244 }
245
void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
253 SkASSERT(fIsActive);
254 GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
255 VK_PIPELINE_BIND_POINT_GRAPHICS,
256 layout,
257 firstSet,
258 setCount,
259 descriptorSets,
260 dynamicOffsetCount,
261 dynamicOffsets));
262 }
263
void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline) {
265 SkASSERT(fIsActive);
266 GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
267 VK_PIPELINE_BIND_POINT_GRAPHICS,
268 pipeline->pipeline()));
269 this->addResource(std::move(pipeline));
270 }
271
void GrVkCommandBuffer::pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                                      VkShaderStageFlags stageFlags, uint32_t offset,
                                      uint32_t size, const void* values) {
275 SkASSERT(fIsActive);
276 // offset and size must be a multiple of 4
277 SkASSERT(!SkToBool(offset & 0x3));
278 SkASSERT(!SkToBool(size & 0x3));
279 GR_VK_CALL(gpu->vkInterface(), CmdPushConstants(fCmdBuffer,
280 layout,
281 stageFlags,
282 offset,
283 size,
284 values));
285 }
286
void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
293 SkASSERT(fIsActive);
294 SkASSERT(fActiveRenderPass);
295 this->addingWork(gpu);
296 GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
297 indexCount,
298 instanceCount,
299 firstIndex,
300 vertexOffset,
301 firstInstance));
302 }
303
void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
309 SkASSERT(fIsActive);
310 SkASSERT(fActiveRenderPass);
311 this->addingWork(gpu);
312 GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
313 vertexCount,
314 instanceCount,
315 firstVertex,
316 firstInstance));
317 }
318
void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     sk_sp<const GrBuffer> indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
324 SkASSERT(fIsActive);
325 SkASSERT(fActiveRenderPass);
326 SkASSERT(!indirectBuffer->isCpuBuffer());
327 this->addingWork(gpu);
328 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
329 GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
330 vkBuffer,
331 offset,
332 drawCount,
333 stride));
334 this->addGrBuffer(std::move(indirectBuffer));
335 }
336
void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            sk_sp<const GrBuffer> indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
342 SkASSERT(fIsActive);
343 SkASSERT(fActiveRenderPass);
344 SkASSERT(!indirectBuffer->isCpuBuffer());
345 this->addingWork(gpu);
346 VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
347 GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
348 vkBuffer,
349 offset,
350 drawCount,
351 stride));
352 this->addGrBuffer(std::move(indirectBuffer));
353 }
354
void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
359 SkASSERT(fIsActive);
360 SkASSERT(1 == viewportCount);
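    // Only re-issue the viewport when it actually changes; fCachedViewport holds the last value
    // set on this command buffer (and is reset by invalidateState()).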
361 if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
362 GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
363 firstViewport,
364 viewportCount,
365 viewports));
366 fCachedViewport = viewports[0];
367 }
368 }
369
void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
374 SkASSERT(fIsActive);
375 SkASSERT(1 == scissorCount);
376 if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
377 GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
378 firstScissor,
379 scissorCount,
380 scissors));
381 fCachedScissor = scissors[0];
382 }
383 }
384
void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
387 SkASSERT(fIsActive);
388 if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
389 GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
390 memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
391 }
392 }
393
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
395 this->submitPipelineBarriers(gpu);
396 fHasWork = true;
397 }
398
399 ///////////////////////////////////////////////////////////////////////////////
400 // PrimaryCommandBuffer
401 ////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
403 // Should have ended any render pass we're in the middle of
404 SkASSERT(!fActiveRenderPass);
405 }
406
GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
409 const VkCommandBufferAllocateInfo cmdInfo = {
410 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
411 nullptr, // pNext
412 cmdPool, // commandPool
413 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
        1                                                // commandBufferCount
415 };
416
417 VkCommandBuffer cmdBuffer;
418 VkResult err;
419 GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
420 if (err) {
421 return nullptr;
422 }
423 return new GrVkPrimaryCommandBuffer(cmdBuffer);
424 }
425
void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
427 SkASSERT(!fIsActive);
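    // Each primary command buffer is recorded for exactly one submission, so it is begun with
    // the ONE_TIME_SUBMIT usage flag.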
428 VkCommandBufferBeginInfo cmdBufferBeginInfo;
429 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
430 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
431 cmdBufferBeginInfo.pNext = nullptr;
432 cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
433 cmdBufferBeginInfo.pInheritanceInfo = nullptr;
434
435 GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
436 fIsActive = true;
437 }
438
void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
440 SkASSERT(fIsActive);
441 SkASSERT(!fActiveRenderPass);
442
    // If we are in the process of abandoning the context then the GrResourceCache will have freed
    // all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we call end on the
    // command buffer to keep all our state tracking consistent. However, the Vulkan validation
    // layers complain about calling end on a command buffer that contains resources that have
    // already been deleted. Since the Vulkan API does not require a command buffer to be ended
    // before it is deleted, we simply skip the Vulkan calls and update our own state tracking.
449 if (!abandoningBuffer) {
450 this->submitPipelineBarriers(gpu);
451
452 GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
453 }
454 this->invalidateState();
455 fIsActive = false;
456 fHasWork = false;
457 }
458
bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               sk_sp<const GrVkFramebuffer> framebuffer,
                                               const VkClearValue clearValues[],
                                               const GrSurface* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
466 SkASSERT(fIsActive);
467 SkASSERT(!fActiveRenderPass);
468
469 SkASSERT(framebuffer);
470
471 this->addingWork(gpu);
472
473 VkRenderPassBeginInfo beginInfo;
474 VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
476 renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };
477
478 memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
479 beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
480 beginInfo.pNext = nullptr;
481 beginInfo.renderPass = renderPass->vkRenderPass();
482 beginInfo.framebuffer = framebuffer->framebuffer();
483 beginInfo.renderArea = renderArea;
484 beginInfo.clearValueCount = renderPass->clearValueCount();
485 beginInfo.pClearValues = clearValues;
486
487 VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
488 : VK_SUBPASS_CONTENTS_INLINE;
489
490 GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
491 fActiveRenderPass = renderPass;
492 this->addResource(renderPass);
493 this->addResource(std::move(framebuffer));
494 this->addGrSurface(sk_ref_sp(target));
495 return true;
496 }
497
void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
499 SkASSERT(fIsActive);
500 SkASSERT(fActiveRenderPass);
501 this->addingWork(gpu);
502 GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
503 fActiveRenderPass = nullptr;
504 }
505
506
void GrVkPrimaryCommandBuffer::nexSubpass(GrVkGpu* gpu, bool forSecondaryCB) {
508 SkASSERT(fIsActive);
509 SkASSERT(fActiveRenderPass);
510 VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
511 : VK_SUBPASS_CONTENTS_INLINE;
512 GR_VK_CALL(gpu->vkInterface(), CmdNextSubpass(fCmdBuffer, contents));
513 }
514
void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows a secondary command buffer to be executed on a primary command
    // buffer as long as the command pools they were allocated from were created with the same
    // queue family index. However, we currently always create them from the same pool.
520 SkASSERT(fIsActive);
521 SkASSERT(!buffer->fIsActive);
522 SkASSERT(fActiveRenderPass);
523 SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));
524
525 this->addingWork(gpu);
526
527 GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
528 fSecondaryCommandBuffers.push_back(std::move(buffer));
529 // When executing a secondary command buffer all state (besides render pass state) becomes
530 // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
531 this->invalidateState();
532 }
533
static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
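    // For protected contexts, chain a VkProtectedSubmitInfo onto the VkSubmitInfo so the work is
    // executed as a protected submission.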
545 VkProtectedSubmitInfo protectedSubmitInfo;
546 if (protectedContext == GrProtected::kYes) {
547 memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
548 protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
549 protectedSubmitInfo.pNext = nullptr;
550 protectedSubmitInfo.protectedSubmit = VK_TRUE;
551 }
552
553 VkSubmitInfo submitInfo;
554 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
555 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
556 submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
557 submitInfo.waitSemaphoreCount = waitCount;
558 submitInfo.pWaitSemaphores = waitSemaphores;
559 submitInfo.pWaitDstStageMask = waitStages;
560 submitInfo.commandBufferCount = commandBufferCount;
561 submitInfo.pCommandBuffers = commandBuffers;
562 submitInfo.signalSemaphoreCount = signalCount;
563 submitInfo.pSignalSemaphores = signalSemaphores;
564 VkResult result;
565 GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
566 return result == VK_SUCCESS;
567 }
568
bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
574 SkASSERT(!fIsActive);
575
576 VkResult err;
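    // Lazily create the submit fence the first time this command buffer is submitted; on later
    // submissions just reset it so it can be reused.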
577 if (VK_NULL_HANDLE == fSubmitFence) {
578 VkFenceCreateInfo fenceInfo;
579 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
580 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
581 GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
582 &fSubmitFence));
583 if (err) {
584 fSubmitFence = VK_NULL_HANDLE;
585 return false;
586 }
587 } else {
588 // This cannot return DEVICE_LOST so we assert we succeeded.
589 GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
590 SkASSERT(err == VK_SUCCESS);
591 }
592
593 int signalCount = signalSemaphores.count();
594 int waitCount = waitSemaphores.count();
595
596 bool submitted = false;
597
598 if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores, so we can simply submit it to the
        // queue.
601 submitted = submit_to_queue(
602 gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
603 gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
604 } else {
605 SkTArray<VkSemaphore> vkSignalSems(signalCount);
606 for (int i = 0; i < signalCount; ++i) {
607 if (signalSemaphores[i]->shouldSignal()) {
608 this->addResource(signalSemaphores[i]);
609 vkSignalSems.push_back(signalSemaphores[i]->semaphore());
610 }
611 }
612
613 SkTArray<VkSemaphore> vkWaitSems(waitCount);
614 SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
615 for (int i = 0; i < waitCount; ++i) {
616 if (waitSemaphores[i]->shouldWait()) {
617 this->addResource(waitSemaphores[i]);
618 vkWaitSems.push_back(waitSemaphores[i]->semaphore());
619 vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
620 }
621 }
622 submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.count(),
623 vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
624 vkSignalSems.count(), vkSignalSems.begin(),
625 gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
626 if (submitted) {
627 for (int i = 0; i < signalCount; ++i) {
628 signalSemaphores[i]->markAsSignaled();
629 }
630 for (int i = 0; i < waitCount; ++i) {
631 waitSemaphores[i]->markAsWaited();
632 }
633 }
634 }
635
636 if (!submitted) {
637 // Destroy the fence or else we will try to wait forever for it to finish.
638 GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
639 fSubmitFence = VK_NULL_HANDLE;
640 return false;
641 }
642 return true;
643 }
644
void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
646 if (fSubmitFence == VK_NULL_HANDLE) {
647 return;
648 }
649 GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
650 }
651
bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
653 SkASSERT(!fIsActive);
654 if (VK_NULL_HANDLE == fSubmitFence) {
655 return true;
656 }
657
658 VkResult err;
659 GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
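    // VK_ERROR_DEVICE_LOST is treated the same as success here so that cleanup of this command
    // buffer's resources can proceed after a lost device.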
660 switch (err) {
661 case VK_SUCCESS:
662 case VK_ERROR_DEVICE_LOST:
663 return true;
664
665 case VK_NOT_READY:
666 return false;
667
668 default:
669 SkDebugf("Error getting fence status: %d\n", err);
670 SK_ABORT("Got an invalid fence status");
671 return false;
672 }
673 }
674
void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
676 fFinishedProcs.push_back(std::move(finishedProc));
677 }
678
void GrVkPrimaryCommandBuffer::onReleaseResources() {
680 for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
681 fSecondaryCommandBuffers[i]->releaseResources();
682 }
683 this->callFinishedProcs();
684 }
685
void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
687 for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
688 fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
689 }
690 fSecondaryCommandBuffers.reset();
691 }
692
void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
700 SkASSERT(fIsActive);
701 SkASSERT(!fActiveRenderPass);
702 this->addingWork(gpu);
703 this->addResource(srcImage->resource());
704 this->addResource(dstImage->resource());
705 GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
706 srcImage->image(),
707 srcLayout,
708 dstImage->image(),
709 dstLayout,
710 copyRegionCount,
711 copyRegions));
712 }
713
void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
724 SkASSERT(fIsActive);
725 SkASSERT(!fActiveRenderPass);
726 this->addingWork(gpu);
727 this->addResource(srcResource);
728 this->addResource(dstResource);
729 GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
730 srcImage,
731 srcLayout,
732 dstImage,
733 dstLayout,
734 blitRegionCount,
735 blitRegions,
736 filter));
737 }
738
void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
745 this->blitImage(gpu,
746 srcImage.resource(),
747 srcImage.image(),
748 srcImage.currentLayout(),
749 dstImage.resource(),
750 dstImage.image(),
751 dstImage.currentLayout(),
752 blitRegionCount,
753 blitRegions,
754 filter);
755 }
756
757
void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
764 SkASSERT(fIsActive);
765 SkASSERT(!fActiveRenderPass);
766 this->addingWork(gpu);
767 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
768 GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
769 srcImage->image(),
770 srcLayout,
771 vkBuffer->vkBuffer(),
772 copyRegionCount,
773 copyRegions));
774 this->addResource(srcImage->resource());
775 this->addGrBuffer(std::move(dstBuffer));
776 }
777
void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 VkBuffer srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
784 SkASSERT(fIsActive);
785 SkASSERT(!fActiveRenderPass);
786 this->addingWork(gpu);
787
788 GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
789 srcBuffer,
790 dstImage->image(),
791 dstLayout,
792 copyRegionCount,
793 copyRegions));
794 this->addResource(dstImage->resource());
795 }
796
void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> srcBuffer,
                                          sk_sp<GrGpuBuffer> dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
802 SkASSERT(fIsActive);
803 SkASSERT(!fActiveRenderPass);
804 this->addingWork(gpu);
805 #ifdef SK_DEBUG
806 for (uint32_t i = 0; i < regionCount; ++i) {
807 const VkBufferCopy& region = regions[i];
808 SkASSERT(region.size > 0);
809 SkASSERT(region.srcOffset < srcBuffer->size());
810 SkASSERT(region.dstOffset < dstBuffer->size());
811 SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
812 SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
813 }
814 #endif
815
816 const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
817 const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());
818
819 GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
820 srcVk->vkBuffer(),
821 dstVk->vkBuffer(),
822 regionCount,
823 regions));
824 this->addGrBuffer(std::move(srcBuffer));
825 this->addGrBuffer(std::move(dstBuffer));
826 }
827
void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            sk_sp<GrVkBuffer> dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
833 SkASSERT(fIsActive);
834 SkASSERT(!fActiveRenderPass);
835 SkASSERT(0 == (dstOffset & 0x03)); // four byte aligned
836 // TODO: handle larger transfer sizes
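    // vkCmdUpdateBuffer only supports updates of at most 65536 bytes, and both the offset and
    // the size must be 4-byte aligned.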
837 SkASSERT(dataSize <= 65536);
838 SkASSERT(0 == (dataSize & 0x03)); // four byte aligned
839 this->addingWork(gpu);
840 GR_VK_CALL(
841 gpu->vkInterface(),
842 CmdUpdateBuffer(
843 fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*)data));
844 this->addGrBuffer(std::move(dstBuffer));
845 }
846
void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
852 SkASSERT(fIsActive);
853 SkASSERT(!fActiveRenderPass);
854 this->addingWork(gpu);
855 this->addResource(image->resource());
856 GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
857 image->image(),
858 image->currentLayout(),
859 color,
860 subRangeCount,
861 subRanges));
862 }
863
void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
869 SkASSERT(fIsActive);
870 SkASSERT(!fActiveRenderPass);
871 this->addingWork(gpu);
872 this->addResource(image->resource());
873 GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
874 image->image(),
875 image->currentLayout(),
876 color,
877 subRangeCount,
878 subRanges));
879 }
880
void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
886 SkASSERT(fIsActive);
887 SkASSERT(!fActiveRenderPass);
888
889 this->addingWork(gpu);
890 this->addResource(srcImage.resource());
891 this->addResource(dstImage.resource());
892
893 GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
894 srcImage.image(),
895 srcImage.currentLayout(),
896 dstImage.image(),
897 dstImage.currentLayout(),
898 regionCount,
899 regions));
900 }
901
void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
903 SkASSERT(!fActiveRenderPass);
904 // Destroy the fence, if any
905 if (VK_NULL_HANDLE != fSubmitFence) {
906 GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
907 }
908 SkASSERT(!fSecondaryCommandBuffers.count());
909 }
910
911 ///////////////////////////////////////////////////////////////////////////////
912 // SecondaryCommandBuffer
913 ////////////////////////////////////////////////////////////////////////////////
914
GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
917 SkASSERT(cmdPool);
918 const VkCommandBufferAllocateInfo cmdInfo = {
919 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
920 nullptr, // pNext
921 cmdPool->vkCommandPool(), // commandPool
922 VK_COMMAND_BUFFER_LEVEL_SECONDARY, // level
        1                                                // commandBufferCount
924 };
925
926 VkCommandBuffer cmdBuffer;
927 VkResult err;
928 GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
929 if (err) {
930 return nullptr;
931 }
932 return new GrVkSecondaryCommandBuffer(cmdBuffer, /*externalRenderPass=*/nullptr);
933 }
934
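// Wraps an externally provided VkCommandBuffer together with the render pass it will be used
// with. A wrapped secondary command buffer is not begun, ended, or freed by Skia; see recycle().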
GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
        VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
937 return new GrVkSecondaryCommandBuffer(cmdBuffer, externalRenderPass);
938 }
939
void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
942 SkASSERT(!fIsActive);
943 SkASSERT(!this->isWrapped());
944 SkASSERT(compatibleRenderPass);
945 fActiveRenderPass = compatibleRenderPass;
946
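    // The inheritance info tells Vulkan which render pass (and optionally which framebuffer)
    // this secondary command buffer will be executed inside of.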
947 VkCommandBufferInheritanceInfo inheritanceInfo;
948 memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
949 inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
950 inheritanceInfo.pNext = nullptr;
951 inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
952 inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
953 inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
954 inheritanceInfo.occlusionQueryEnable = false;
955 inheritanceInfo.queryFlags = 0;
956 inheritanceInfo.pipelineStatistics = 0;
957
958 VkCommandBufferBeginInfo cmdBufferBeginInfo;
959 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
960 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
961 cmdBufferBeginInfo.pNext = nullptr;
962 cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
963 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
964 cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
965
966 GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
967
968 fIsActive = true;
969 }
970
void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
972 SkASSERT(fIsActive);
973 SkASSERT(!this->isWrapped());
974 GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
975 this->invalidateState();
976 fHasWork = false;
977 fIsActive = false;
978 }
979
void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
981 if (this->isWrapped()) {
982 delete this;
983 } else {
984 cmdPool->recycleSecondaryCommandBuffer(this);
985 }
986 }
987
988