/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"

#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/vk/VulkanBuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/graphite/vk/VulkanTexture.h"

namespace skgpu::graphite {

std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make(
        const VulkanSharedContext* sharedContext,
        VulkanResourceProvider* resourceProvider) {
    // Create VkCommandPool
    VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
    if (sharedContext->isProtected() == Protected::kYes) {
        cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
    }
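    // Note: VK_COMMAND_POOL_CREATE_TRANSIENT_BIT hints to the driver that command buffers
    // allocated from this pool are short-lived, and VK_COMMAND_POOL_CREATE_PROTECTED_BIT is
    // required so that protected command buffers can be allocated for a protected context.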

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        cmdPoolCreateFlags,                          // flags
        sharedContext->queueIndex(),                 // queueFamilyIndex
    };
    auto interface = sharedContext->interface();
    VkResult result;
    VkCommandPool pool;
    VULKAN_CALL_RESULT(interface, result, CreateCommandPool(sharedContext->device(),
                                                            &cmdPoolInfo,
                                                            nullptr,
                                                            &pool));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        pool,                                             // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer primaryCmdBuffer;
    VULKAN_CALL_RESULT(interface, result, AllocateCommandBuffers(sharedContext->device(),
                                                                 &cmdInfo,
                                                                 &primaryCmdBuffer));
    if (result != VK_SUCCESS) {
        VULKAN_CALL(interface, DestroyCommandPool(sharedContext->device(), pool, nullptr));
        return nullptr;
    }

    return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool,
                                                                        primaryCmdBuffer,
                                                                        sharedContext,
                                                                        resourceProvider));
}
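
// A minimal usage sketch (illustrative only; 'sharedContext', 'resourceProvider', and 'queue'
// are assumed to come from the owning graphite context):
//
//     auto cmdBuffer = VulkanCommandBuffer::Make(sharedContext, resourceProvider);
//     if (cmdBuffer) {
//         // ... record copies / passes ...
//         cmdBuffer->submit(queue);
//     }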

VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool,
                                         VkCommandBuffer primaryCommandBuffer,
                                         const VulkanSharedContext* sharedContext,
                                         VulkanResourceProvider* resourceProvider)
        : fPool(pool)
        , fPrimaryCommandBuffer(primaryCommandBuffer)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {

    // TODO: Remove this line. It is only here to hide compiler warnings/errors about unused
    // member variables.
    (void) fResourceProvider;
    // When making a new command buffer, we automatically begin the command buffer
    this->begin();
}

VulkanCommandBuffer::~VulkanCommandBuffer() {}

void VulkanCommandBuffer::onResetCommandBuffer() {
    SkASSERT(!fActive);
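    // Resetting the pool implicitly resets every command buffer allocated from it, including
    // fPrimaryCommandBuffer, which must then be re-begun before recording new commands.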
    VULKAN_CALL_ERRCHECK(fSharedContext->interface(), ResetCommandPool(fSharedContext->device(),
                                                                       fPool,
                                                                       0));
}

bool VulkanCommandBuffer::setNewCommandBufferResources() {
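    // A command buffer that was reset via onResetCommandBuffer() is back in its initial state,
    // so begin() must be called again before any new commands can be recorded into it.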
    this->begin();
    return true;
}

void VulkanCommandBuffer::begin() {
    SkASSERT(!fActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
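    // ONE_TIME_SUBMIT tells the driver that this recording will be submitted once and then
    // reset or re-recorded, which pairs with the transient command pool created in Make().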
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    VULKAN_CALL_ERRCHECK(fSharedContext->interface(), BeginCommandBuffer(fPrimaryCommandBuffer,
                                                                         &cmdBufferBeginInfo));
    SkDEBUGCODE(fActive = true;)
}

void VulkanCommandBuffer::end() {
    SkASSERT(fActive);

    this->submitPipelineBarriers();

    VULKAN_CALL_ERRCHECK(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));

    SkDEBUGCODE(fActive = false;)
}

static bool submit_to_queue(const VulkanInterface* interface,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            Protected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == Protected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }
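    // For a protected context, chaining VkProtectedSubmitInfo into VkSubmitInfo::pNext below
    // marks the entire queue submission as a protected workload.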

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    VULKAN_CALL_RESULT(interface, result, QueueSubmit(queue, 1, &submitInfo, fence));
    if (result != VK_SUCCESS) {
        return false;
    }
    return true;
}

bool VulkanCommandBuffer::submit(VkQueue queue) {
    this->end();

    auto interface = fSharedContext->interface();
    auto device = fSharedContext->device();
    VkResult err;

    if (fSubmitFence == VK_NULL_HANDLE) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        VULKAN_CALL_RESULT(interface, err, CreateFence(device,
                                                       &fenceInfo,
                                                       nullptr,
                                                       &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        VULKAN_CALL_RESULT(interface, err, ResetFences(device, 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

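    // fSubmitFence is signaled by the GPU once this submission completes; isFinished() polls it
    // and waitUntilFinished() blocks on it below.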
    SkASSERT(fSubmitFence != VK_NULL_HANDLE);

    bool submitted = submit_to_queue(interface,
                                     queue,
                                     fSubmitFence,
                                     /*waitCount=*/0,
                                     /*waitSemaphores=*/nullptr,
                                     /*waitStages=*/nullptr,
                                     /*commandBufferCount=*/1,
                                     &fPrimaryCommandBuffer,
                                     /*signalCount=*/0,
                                     /*signalSemaphores=*/nullptr,
                                     fSharedContext->isProtected());
    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        VULKAN_CALL(interface, DestroyFence(device, fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

bool VulkanCommandBuffer::isFinished() {
    SkASSERT(!fActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
                               GetFenceStatus(fSharedContext->device(), fSubmitFence));
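    // VK_SUCCESS means the fence has signaled (the submitted work is done) and VK_NOT_READY means
    // it is still pending; a lost device is treated as finished so callers do not wait forever.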
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void VulkanCommandBuffer::waitUntilFinished() {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    VULKAN_CALL_ERRCHECK(fSharedContext->interface(), WaitForFences(fSharedContext->device(),
                                                                    1,
                                                                    &fSubmitFence,
                                                                    /*waitAll=*/true,
                                                                    /*timeout=*/UINT64_MAX));
}

bool VulkanCommandBuffer::onAddRenderPass(
        const RenderPassDesc&,
        const Texture* colorTexture,
        const Texture* resolveTexture,
        const Texture* depthStencilTexture,
        SkRect viewport,
        const std::vector<std::unique_ptr<DrawPass>>& drawPasses) {
    return false;
}

bool VulkanCommandBuffer::onAddComputePass(const ComputePassDesc&,
                                           const ComputePipeline*,
                                           const std::vector<ResourceBinding>& bindings) {
    return false;
}

bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                               size_t srcOffset,
                                               const Buffer* dstBuffer,
                                               size_t dstOffset,
                                               size_t size) {
    return false;
}

bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                                SkIRect srcRect,
                                                const Buffer* buffer,
                                                size_t bufferOffset,
                                                size_t bufferRowBytes) {
    this->submitPipelineBarriers();

    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture);
    VkBuffer dstBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();

    // Obtain the VkFormat of the source texture so we can determine bytes per block.
    VulkanTextureInfo srcTextureInfo;
    texture->textureInfo().getVulkanTextureInfo(&srcTextureInfo);
    size_t bytesPerBlock = VkFormatBytesPerBlock(srcTextureInfo.fFormat);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    // Vulkan expects bufferRowLength in texels, not bytes.
    region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock);
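    // For example, with a 4-byte-per-texel format such as VK_FORMAT_R8G8B8A8_UNORM and
    // bufferRowBytes of 1024, bufferRowLength becomes 256 texels.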
    region.bufferImageHeight = 0; // Tightly packed
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 };
    region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 };
    region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 };

    // Cast away const on the source texture so that its layout can be transitioned for the copy.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImageToBuffer(fPrimaryCommandBuffer,
                                     srcTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     dstBuffer,
                                     /*regionCount=*/1,
                                     &region));
    return true;
}

bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                                const Texture* texture,
                                                const BufferTextureCopyData* copyData,
                                                int count) {
    this->submitPipelineBarriers();

    VkBuffer srcBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture);

    // Obtain the VkFormat of the destination texture so we can determine bytes per block.
    VulkanTextureInfo dstTextureInfo;
    dstTexture->textureInfo().getVulkanTextureInfo(&dstTextureInfo);
    size_t bytesPerBlock = VkFormatBytesPerBlock(dstTextureInfo.fFormat);

    // Set up copy regions.
    SkTArray<VkBufferImageCopy> regions(count);
    for (int i = 0; i < count; ++i) {
        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = copyData[i].fBufferOffset;
        // copyData provides row length in bytes, but Vulkan expects bufferRowLength in texels.
        region.bufferRowLength = (uint32_t)(copyData[i].fBufferRowBytes/bytesPerBlock);
        region.bufferImageHeight = 0; // Tightly packed
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 };
        region.imageOffset = { copyData[i].fRect.left(),
                               copyData[i].fRect.top(),
                               /*z=*/0 };
        region.imageExtent = { (uint32_t)copyData[i].fRect.width(),
                               (uint32_t)copyData[i].fRect.height(),
                               /*depth=*/1 };
    }

    // Cast away const on the destination texture so that its layout can be transitioned for
    // the copy.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    VULKAN_CALL(fSharedContext->interface(),
            CmdCopyBufferToImage(fPrimaryCommandBuffer,
                                 srcBuffer,
                                 dstTexture->vkImage(),
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 regions.size(),
                                 regions.begin()));
    return true;
}

bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                                 SkIRect srcRect,
                                                 const Texture* dst,
                                                 SkIPoint dstPoint) {
    return false;
}

bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer*, bool* outDidResultInWork) {
    return false;
}

bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) {
    return false;
}

#ifdef SK_ENABLE_PIET_GPU
void VulkanCommandBuffer::onRenderPietScene(const skgpu::piet::Scene& scene,
                                            const Texture* target) {}
#endif

void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource,
                                                 VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 bool byRegion,
                                                 VkBufferMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 bool byRegion,
                                                 VkBufferMemoryBarrier* barrier) {
    // We don't pass a resource to the command buffer here. The command buffer only uses it to
    // hold a ref, and every place where we add a buffer memory barrier we are also recording some
    // other command with the buffer on the command buffer. Those other commands already cause the
    // command buffer to hold a ref to the buffer.
    this->pipelineBarrier(/*resource=*/nullptr,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource,
                                                VkPipelineStageFlags srcStageMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                VkImageMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kImageMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::pipelineBarrier(const Resource* resource,
                                          VkPipelineStageFlags srcStageMask,
                                          VkPipelineStageFlags dstStageMask,
                                          bool byRegion,
                                          BarrierType barrierType,
                                          void* barrier) {
    // TODO: Do we need to handle wrapped command buffers?
    // SkASSERT(!this->isWrapped());
    SkASSERT(fActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes but they require us to add more
    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
    // never have buffer barriers inside of a render pass. For now we will just assert that we are
    // not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
            (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
            (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
            byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check whether we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier already in the current batch. If it does, we must submit
        // the first batch, because the Vulkan spec does not define a specific ordering for
        // barriers submitted in the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
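        // For example, if the pending batch already contains a barrier on mip levels [0, 3] of an
        // image and the new barrier covers mips [2, 5], the ranges overlap at [2, 3], so the
        // pending batch is flushed before the new barrier is recorded.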
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers();
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    if (resource) {
        this->trackResource(sk_ref_sp(resource));
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(true);
    }
}

void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) {
    SkASSERT(fActive);

    // TODO: Do we need to handle SecondaryCommandBuffers as well?

    // Currently we never submit a pipeline barrier without at least one buffer or image barrier.
    if (fBufferBarriers.size() || fImageBarriers.size()) {
        // For images we can have barriers inside of render passes but they require us to add more
        // support in subpasses which need self dependencies to have barriers inside them. Also, we
        // can never have buffer barriers inside of a render pass. For now we will just assert that
        // we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        // TODO: Do we need to handle wrapped CommandBuffers?
        //  SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        VULKAN_CALL(fSharedContext->interface(),
                    CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask,
                                       dependencyFlags,
                                       /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr,
                                       fBufferBarriers.size(), fBufferBarriers.begin(),
                                       fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.size());
    SkASSERT(!fImageBarriers.size());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}


} // namespace skgpu::graphite