/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkPipelineState.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

using namespace skia_private;

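// Resets all cached bindings and dynamic state to invalid sentinel values so that the next
// bind/set calls are never skipped by the redundant-state checks below.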
void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be at least 0 to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}

void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    SkASSERT(fTrackedResources.empty());
    SkASSERT(fTrackedRecycledResources.empty());
    SkASSERT(fTrackedGpuBuffers.empty());
    SkASSERT(fTrackedGpuSurfaces.empty());
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    const GrVkGpu* vkGpu = (const GrVkGpu*)gpu;
    GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(vkGpu);
}

void GrVkCommandBuffer::releaseResources() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive || this->isWrapped());
    fTrackedResources.clear();
    fTrackedRecycledResources.clear();

    fTrackedGpuBuffers.clear();
    fTrackedGpuSurfaces.clear();

    this->invalidateState();

    this->onReleaseResources();
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
#ifdef SK_DEBUG
    // Image barriers can appear inside a render pass, but only in subpasses that declare a self
    // dependency; buffer barriers are never allowed inside a render pass. For now we simply
    // assert that any barrier recorded inside a render pass is a valid self-dependency image
    // barrier.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in the current batch. If so, we must
        // submit the existing batch first, because the Vulkan spec does not define a specific
        // ordering for barriers submitted in the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(gpu, true);
    }
}

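// Flushes all barriers accumulated by pipelineBarrier() with a single vkCmdPipelineBarrier call
// and resets the accumulated state. forSelfDependency is true only for image barriers recorded
// inside a render pass with a self-dependent subpass.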
void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (!fBufferBarriers.empty() || !fImageBarriers.empty()) {
        // Image barriers can appear inside a render pass, but only in subpasses that declare a
        // self dependency; buffer barriers are never allowed inside a render pass. For now we
        // simply assert that we are not in a render pass unless this is a self-dependency
        // submission.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        // TODO(https://crbug.com/1469231): The linked bug references a crash report from calling
        // CmdPipelineBarrier. The checks below were added to ensure that we are passing in buffer
        // counts >= 0, and in the case of >0, that the buffers are non-null. Evaluate whether this
        // change leads to a reduction in crash instances. If not, the issue may lie within the
        // driver itself and these checks can be removed.
        if (!fBufferBarriers.empty() && fBufferBarriers.begin() == nullptr) {
            fBufferBarriers.clear();  // Sets the size to 0
        }
        if (!fImageBarriers.empty() && fImageBarriers.begin() == nullptr) {
            fImageBarriers.clear();  // Sets the size to 0
        }

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.size(), fBufferBarriers.begin(),
                fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(fBufferBarriers.empty());
    SkASSERT(fImageBarriers.empty());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

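// Vertex and index buffer binds go through the cached-handle checks below so that rebinding the
// same VkBuffer is skipped.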
void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = 0;
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer, /*offset=*/0,
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(std::move(pipeline));
}

void GrVkCommandBuffer::pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                                      VkShaderStageFlags stageFlags, uint32_t offset,
                                      uint32_t size, const void* values) {
    SkASSERT(fIsActive);
    // offset and size must be a multiple of 4
    SkASSERT(!SkToBool(offset & 0x3));
    SkASSERT(!SkToBool(size & 0x3));
    GR_VK_CALL(gpu->vkInterface(), CmdPushConstants(fCmdBuffer,
                                                    layout,
                                                    stageFlags,
                                                    offset,
                                                    size,
                                                    values));
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     sk_sp<const GrBuffer> indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
                                                   vkBuffer,
                                                   offset,
                                                   drawCount,
                                                   stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            sk_sp<const GrBuffer> indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
                                                          vkBuffer,
                                                          offset,
                                                          drawCount,
                                                          stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

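// Called before recording any command that does actual work: flush pending pipeline barriers
// first and remember that this command buffer is no longer empty.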
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

////////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    // If we are in the process of abandoning the context then the GrResourceCache will have freed
    // all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we call end on the
    // command buffer to keep all our state tracking consistent. However, the Vulkan validation
    // layers complain about calling end on a command buffer that contains resources that have
    // already been deleted. The Vulkan API does not require ending a command buffer before
    // deleting it, so we just skip the Vulkan calls and update our own state tracking.
    if (!abandoningBuffer) {
        this->submitPipelineBarriers(gpu);

        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               sk_sp<const GrVkFramebuffer> framebuffer,
                                               const VkClearValue clearValues[],
                                               const GrSurface* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    SkASSERT(framebuffer);

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    this->addResource(std::move(framebuffer));
    this->addGrSurface(sk_ref_sp(target));
    return true;
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::nexSubpass(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;
    GR_VK_CALL(gpu->vkInterface(), CmdNextSubpass(fCmdBuffer, contents));
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // if the command pools they were created from were created with the same queue family.
    // However, we currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

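// Thin wrapper around vkQueueSubmit that fills out a VkSubmitInfo for a single command buffer
// and, for protected contexts, chains a VkProtectedSubmitInfo onto it.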
static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

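// Submits this primary command buffer to the queue, creating (or resetting) fSubmitFence so the
// caller can later poll or wait on completion. Only semaphores that still need to be signaled or
// waited on are forwarded to the submit.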
bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        TArray<GrVkSemaphore::Resource*>& signalSemaphores,
        TArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.size();
    int waitCount = waitSemaphores.size();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores, so we can submit it to the queue
        // directly.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                GrProtected(gpu->protectedContext()));
    } else {
        TArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        TArray<VkSemaphore> vkWaitSems(waitCount);
        TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                // We only block the fragment stage since client provided resources are not used
                // before the fragment stage. This allows the driver to begin vertex work while
                // waiting on the semaphore. We also add in the transfer stage for uses of clients
                // calling read or write pixels.
                vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                       VK_PIPELINE_STAGE_TRANSFER_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.size(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.size(), vkSignalSems.begin(),
                                    GrProtected(gpu->protectedContext()));
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources() {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources();
    }
    this->callFinishedProcs();
}

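// Hands every secondary command buffer recorded into this primary buffer back to the command
// pool for reuse, releasing this buffer's ownership of them.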
void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.clear();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}


void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        vkBuffer->vkBuffer(),
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(srcImage->resource());
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 VkBuffer srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer,
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(dstImage->resource());
}

void GrVkPrimaryCommandBuffer::fillBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> buffer,
                                          VkDeviceSize offset,
                                          VkDeviceSize size,
                                          uint32_t data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    const GrVkBuffer* bufferVk = static_cast<GrVkBuffer*>(buffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdFillBuffer(fCmdBuffer,
                                                 bufferVk->vkBuffer(),
                                                 offset,
                                                 size,
                                                 data));
    this->addGrBuffer(std::move(buffer));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> srcBuffer,
                                          sk_sp<GrGpuBuffer> dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif

    const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
    const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcVk->vkBuffer(),
                                                 dstVk->vkBuffer(),
                                                 regionCount,
                                                 regions));
    this->addGrBuffer(std::move(srcBuffer));
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            sk_sp<GrVkBuffer> dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));   // four byte aligned
    this->addingWork(gpu);
    GR_VK_CALL(
            gpu->vkInterface(),
            CmdUpdateBuffer(
                    fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*)data));
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(fSecondaryCommandBuffers.empty());
}

////////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, /*externalRenderPass=*/nullptr);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
        VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, externalRenderPass);
}

void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(!this->isWrapped());
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    VkCommandBufferInheritanceInfo inheritanceInfo;
    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.pNext = nullptr;
    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
    inheritanceInfo.subpass = 0;  // Currently only using 1 subpass for each render pass
    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
    inheritanceInfo.occlusionQueryEnable = false;
    inheritanceInfo.queryFlags = 0;
    inheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                               VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));

    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!this->isWrapped());
    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fHasWork = false;
    fIsActive = false;
}

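// Wrapped (externally allocated) secondary command buffers are simply deleted; pool-allocated
// ones are returned to the GrVkCommandPool so they can be reused.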
void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}