/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GpuRefCnt.h"
#include "src/gpu/ganesh/GrManagedResource.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

class GrVkFramebuffer;
class GrVkImage;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;

25 class GrVkCommandBuffer {
26 public:
~GrVkCommandBuffer()27     virtual ~GrVkCommandBuffer() {}
28 
29     void invalidateState();
30 
31     ////////////////////////////////////////////////////////////////////////////
32     // CommandBuffer commands
33     ////////////////////////////////////////////////////////////////////////////
34     enum BarrierType {
35         kBufferMemory_BarrierType,
36         kImageMemory_BarrierType
37     };
38 
39     void pipelineBarrier(const GrVkGpu* gpu,
40                          const GrManagedResource* resource,
41                          VkPipelineStageFlags srcStageMask,
42                          VkPipelineStageFlags dstStageMask,
43                          bool byRegion,
44                          BarrierType barrierType,
45                          void* barrier);
46 
47     void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, sk_sp<const GrBuffer> buffer);
48 
49     void bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer);
50 
51     void bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline);
52 
53     void bindDescriptorSets(const GrVkGpu* gpu,
54                             VkPipelineLayout layout,
55                             uint32_t firstSet,
56                             uint32_t setCount,
57                             const VkDescriptorSet* descriptorSets,
58                             uint32_t dynamicOffsetCount,
59                             const uint32_t* dynamicOffsets);
60 
61     void pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
62                        VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
63                        const void* values);
64 
65     void setViewport(const GrVkGpu* gpu,
66                      uint32_t firstViewport,
67                      uint32_t viewportCount,
68                      const VkViewport* viewports);
69 
70     void setScissor(const GrVkGpu* gpu,
71                     uint32_t firstScissor,
72                     uint32_t scissorCount,
73                     const VkRect2D* scissors);
74 
75     void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);
76 
77     // Commands that only work inside of a render pass
78     void clearAttachments(const GrVkGpu* gpu,
79                           int numAttachments,
80                           const VkClearAttachment* attachments,
81                           int numRects,
82                           const VkClearRect* clearRects);
83 
84     void drawIndexed(const GrVkGpu* gpu,
85                      uint32_t indexCount,
86                      uint32_t instanceCount,
87                      uint32_t firstIndex,
88                      int32_t vertexOffset,
89                      uint32_t firstInstance);
90 
91     void draw(const GrVkGpu* gpu,
92               uint32_t vertexCount,
93               uint32_t instanceCount,
94               uint32_t firstVertex,
95               uint32_t firstInstance);
96 
97     void drawIndirect(const GrVkGpu* gpu,
98                       sk_sp<const GrBuffer> indirectBuffer,
99                       VkDeviceSize offset,
100                       uint32_t drawCount,
101                       uint32_t stride);
102 
103     void drawIndexedIndirect(const GrVkGpu* gpu,
104                              sk_sp<const GrBuffer> indirectBuffer,
105                              VkDeviceSize offset,
106                              uint32_t drawCount,
107                              uint32_t stride);
108 
109     // Add ref-counted resource that will be tracked and released when this command buffer finishes
110     // execution
addResource(sk_sp<const GrManagedResource> resource)111     void addResource(sk_sp<const GrManagedResource> resource) {
112         SkASSERT(resource);
113         fTrackedResources.push_back(std::move(resource));
114     }
addResource(const GrManagedResource * resource)115     void addResource(const GrManagedResource* resource) {
116         this->addResource(sk_ref_sp(resource));
117     }
118 
119     // Add ref-counted resource that will be tracked and released when this command buffer finishes
120     // execution. When it is released, it will signal that the resource can be recycled for reuse.
addRecycledResource(gr_rp<const GrRecycledResource> resource)121     void addRecycledResource(gr_rp<const GrRecycledResource> resource) {
122         SkASSERT(resource);
123         fTrackedRecycledResources.push_back(std::move(resource));
124     }
125 
addRecycledResource(const GrRecycledResource * resource)126     void addRecycledResource(const GrRecycledResource* resource) {
127         this->addRecycledResource(gr_ref_rp<const GrRecycledResource>(resource));
128     }
129 
addGrBuffer(sk_sp<const GrBuffer> buffer)130     void addGrBuffer(sk_sp<const GrBuffer> buffer) {
131         fTrackedGpuBuffers.push_back(std::move(buffer));
132     }
133 
addGrSurface(sk_sp<const GrSurface> surface)134     void addGrSurface(sk_sp<const GrSurface> surface) {
135         fTrackedGpuSurfaces.push_back(std::move(surface));
136     }
137 
138     void releaseResources();
139 
140     void freeGPUData(const GrGpu* gpu, VkCommandPool pool) const;
141 
hasWork()142     bool hasWork() const { return fHasWork; }
143 
144 protected:
145     GrVkCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped = false)
fIsActive(isWrapped)146             : fIsActive(isWrapped) // All wrapped command buffers start as active
147             , fCmdBuffer(cmdBuffer)
148             , fIsWrapped(isWrapped) {
149         this->invalidateState();
150     }
151 
isWrapped()152     bool isWrapped() const { return fIsWrapped; }
153 
154     void addingWork(const GrVkGpu* gpu);
155 
156     void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);
157 
158 private:
159     static constexpr int kInitialTrackedResourcesCount = 32;
160 
161 protected:
162     template <typename T>
163     using TrackedResourceArray = skia_private::STArray<kInitialTrackedResourcesCount, T>;
164     TrackedResourceArray<sk_sp<const GrManagedResource>> fTrackedResources;
165     TrackedResourceArray<gr_rp<const GrRecycledResource>> fTrackedRecycledResources;
166     skia_private::STArray<16, sk_sp<const GrBuffer>> fTrackedGpuBuffers;
167     skia_private::STArray<16, gr_cb<const GrSurface>> fTrackedGpuSurfaces;
168 
169     // Tracks whether we are in the middle of a command buffer begin/end calls and thus can add
170     // new commands to the buffer;
171     bool                      fIsActive;
172     bool                      fHasWork = false;
173 
174     // Stores a pointer to the current active render pass (i.e. begin has been called but not
175     // end). A nullptr means there is no active render pass. The GrVKCommandBuffer does not own
176     // the render pass.
177     const GrVkRenderPass*     fActiveRenderPass = nullptr;
178 
179     VkCommandBuffer           fCmdBuffer;
180 
onReleaseResources()181     virtual void onReleaseResources() {}
182     virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;
183 
184     static constexpr uint32_t kMaxInputBuffers = 2;
185 
186     VkBuffer fBoundInputBuffers[kMaxInputBuffers];
187     VkBuffer fBoundIndexBuffer;
188 
189     // Cached values used for dynamic state updates
190     VkViewport fCachedViewport;
191     VkRect2D   fCachedScissor;
192     float      fCachedBlendConstant[4];
193 
194     // Tracking of memory barriers so that we can submit them all in a batch together.
195     skia_private::STArray<1, VkBufferMemoryBarrier> fBufferBarriers;
196     skia_private::STArray<2, VkImageMemoryBarrier> fImageBarriers;
197     bool fBarriersByRegion = false;
198     VkPipelineStageFlags fSrcStageMask = 0;
199     VkPipelineStageFlags fDstStageMask = 0;
200 
201     bool fIsWrapped;
202 };

class GrVkSecondaryCommandBuffer;

206 class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
207 public:
208     ~GrVkPrimaryCommandBuffer() override;
209 
210     static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);
211 
212     void begin(GrVkGpu* gpu);
213     void end(GrVkGpu* gpu, bool abandoningBuffer = false);
214 
215     // Begins render pass on this command buffer. The framebuffer from GrVkRenderTarget will be used
216     // in the render pass.
217     bool beginRenderPass(GrVkGpu* gpu,
218                          const GrVkRenderPass*,
219                          sk_sp<const GrVkFramebuffer>,
220                          const VkClearValue clearValues[],
221                          const GrSurface* target,
222                          const SkIRect& bounds,
223                          bool forSecondaryCB);
224     void endRenderPass(const GrVkGpu* gpu);
225 
226     void nexSubpass(GrVkGpu* gpu, bool forSecondaryCB);
227 
228     // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
229     // currently inside a render pass that is compatible with the one used to create the
230     // SecondaryCommandBuffer.
231     void executeCommands(const GrVkGpu* gpu,
232                          std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);
233 
234     // Commands that only work outside of a render pass
235     void clearColorImage(const GrVkGpu* gpu,
236                          GrVkImage* image,
237                          const VkClearColorValue* color,
238                          uint32_t subRangeCount,
239                          const VkImageSubresourceRange* subRanges);
240 
241     void clearDepthStencilImage(const GrVkGpu* gpu,
242                                 GrVkImage* image,
243                                 const VkClearDepthStencilValue* color,
244                                 uint32_t subRangeCount,
245                                 const VkImageSubresourceRange* subRanges);
246 
247     void copyImage(const GrVkGpu* gpu,
248                    GrVkImage* srcImage,
249                    VkImageLayout srcLayout,
250                    GrVkImage* dstImage,
251                    VkImageLayout dstLayout,
252                    uint32_t copyRegionCount,
253                    const VkImageCopy* copyRegions);
254 
255     void blitImage(const GrVkGpu* gpu,
256                    const GrManagedResource* srcResource,
257                    VkImage srcImage,
258                    VkImageLayout srcLayout,
259                    const GrManagedResource* dstResource,
260                    VkImage dstImage,
261                    VkImageLayout dstLayout,
262                    uint32_t blitRegionCount,
263                    const VkImageBlit* blitRegions,
264                    VkFilter filter);
265 
266     void blitImage(const GrVkGpu* gpu,
267                    const GrVkImage& srcImage,
268                    const GrVkImage& dstImage,
269                    uint32_t blitRegionCount,
270                    const VkImageBlit* blitRegions,
271                    VkFilter filter);
272 
273     void copyImageToBuffer(const GrVkGpu* gpu,
274                            GrVkImage* srcImage,
275                            VkImageLayout srcLayout,
276                            sk_sp<GrGpuBuffer> dstBuffer,
277                            uint32_t copyRegionCount,
278                            const VkBufferImageCopy* copyRegions);
279 
280     // All uses of copyBufferToImage are done with buffers from our staging manager. The staging
281     // manager will handle making sure the command buffer refs the buffer. Thus we just pass in the
282     // raw VkBuffer here and don't worry about refs.
283     void copyBufferToImage(const GrVkGpu* gpu,
284                            VkBuffer srcBuffer,
285                            GrVkImage* dstImage,
286                            VkImageLayout dstLayout,
287                            uint32_t copyRegionCount,
288                            const VkBufferImageCopy* copyRegions);
289 
290     void fillBuffer(GrVkGpu* gpu,
291                     sk_sp<GrGpuBuffer>,
292                     VkDeviceSize offset,
293                     VkDeviceSize size,
294                     uint32_t data);
295 
296     void copyBuffer(GrVkGpu* gpu,
297                     sk_sp<GrGpuBuffer> srcBuffer,
298                     sk_sp<GrGpuBuffer> dstBuffer,
299                     uint32_t regionCount,
300                     const VkBufferCopy* regions);
301 
302     void updateBuffer(GrVkGpu* gpu,
303                       sk_sp<GrVkBuffer> dstBuffer,
304                       VkDeviceSize dstOffset,
305                       VkDeviceSize dataSize,
306                       const void* data);
307 
308     void resolveImage(GrVkGpu* gpu,
309                       const GrVkImage& srcImage,
310                       const GrVkImage& dstImage,
311                       uint32_t regionCount,
312                       const VkImageResolve* regions);
313 
314     bool submitToQueue(GrVkGpu* gpu, VkQueue queue,
315                        skia_private::TArray<GrVkSemaphore::Resource*>& signalSemaphores,
316                        skia_private::TArray<GrVkSemaphore::Resource*>& waitSemaphores);
317 
318     void forceSync(GrVkGpu* gpu);
319 
320     bool finished(GrVkGpu* gpu);
321 
322     void addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc);
323 
callFinishedProcs()324     void callFinishedProcs() {
325         fFinishedProcs.clear();
326     }
327 
328     void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);
329 
330 private:
GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)331     explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
332         : INHERITED(cmdBuffer)
333         , fSubmitFence(VK_NULL_HANDLE) {}
334 
335     void onFreeGPUData(const GrVkGpu* gpu) const override;
336 
337     void onReleaseResources() override;
338 
339     skia_private::TArray<std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fSecondaryCommandBuffers;
340     VkFence                                                     fSubmitFence;
341     skia_private::TArray<sk_sp<skgpu::RefCntedCallback>>                    fFinishedProcs;
342 
343     using INHERITED = GrVkCommandBuffer;
344 };
346 class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
347 public:
348     static GrVkSecondaryCommandBuffer* Create(GrVkGpu* gpu, GrVkCommandPool* cmdPool);
349     // Used for wrapping an external secondary command buffer.
350     static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB,
351                                               const GrVkRenderPass* externalRenderPass);
352 
353     void begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
354                const GrVkRenderPass* compatibleRenderPass);
355     void end(GrVkGpu* gpu);
356 
357     void recycle(GrVkCommandPool* cmdPool);
358 
vkCommandBuffer()359     VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }
360 
361 private:
GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer,const GrVkRenderPass * externalRenderPass)362     explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer,
363                                         const GrVkRenderPass* externalRenderPass)
364             : INHERITED(cmdBuffer, SkToBool(externalRenderPass)) {
365         fActiveRenderPass = externalRenderPass;
366     }
367 
onFreeGPUData(const GrVkGpu * gpu)368     void onFreeGPUData(const GrVkGpu* gpu) const override {}
369 
370     // Used for accessing fIsActive (on GrVkCommandBuffer)
371     friend class GrVkPrimaryCommandBuffer;
372 
373     using INHERITED = GrVkCommandBuffer;
374 };

#endif