/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrManagedResource.h"
#include "src/gpu/GrRefCnt.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrVkFramebuffer;
class GrVkImage;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;

class GrVkCommandBuffer {
public:
    virtual ~GrVkCommandBuffer() {}

    void invalidateState();

    ////////////////////////////////////////////////////////////////////////////
    // CommandBuffer commands
    ////////////////////////////////////////////////////////////////////////////
    enum BarrierType {
        kBufferMemory_BarrierType,
        kImageMemory_BarrierType
    };

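    // Adds a buffer or image memory barrier for this command buffer. Barriers are not recorded
    // immediately; they are accumulated in fBufferBarriers/fImageBarriers and flushed as a batch
    // by submitPipelineBarriers().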
    void pipelineBarrier(const GrVkGpu* gpu,
                         const GrManagedResource* resource,
                         VkPipelineStageFlags srcStageMask,
                         VkPipelineStageFlags dstStageMask,
                         bool byRegion,
                         BarrierType barrierType,
                         void* barrier);

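    // Binds a vertex buffer to the given binding slot or binds the index buffer. The currently
    // bound handles are cached in fBoundInputBuffers/fBoundIndexBuffer so redundant rebinds can
    // be skipped.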
    void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, sk_sp<const GrBuffer> buffer);

    void bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer);

    void bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            VkPipelineLayout layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);

    void pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                       VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                       const void* values);

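    // Dynamic state setters. The most recently set values are cached in fCachedViewport,
    // fCachedScissor, and fCachedBlendConstant so unchanged state does not need to be re-issued.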
    void setViewport(const GrVkGpu* gpu,
                     uint32_t firstViewport,
                     uint32_t viewportCount,
                     const VkViewport* viewports);

    void setScissor(const GrVkGpu* gpu,
                    uint32_t firstScissor,
                    uint32_t scissorCount,
                    const VkRect2D* scissors);

    void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);

    // Commands that only work inside of a render pass
    void clearAttachments(const GrVkGpu* gpu,
                          int numAttachments,
                          const VkClearAttachment* attachments,
                          int numRects,
                          const VkClearRect* clearRects);

    void drawIndexed(const GrVkGpu* gpu,
                     uint32_t indexCount,
                     uint32_t instanceCount,
                     uint32_t firstIndex,
                     int32_t vertexOffset,
                     uint32_t firstInstance);

    void draw(const GrVkGpu* gpu,
              uint32_t vertexCount,
              uint32_t instanceCount,
              uint32_t firstVertex,
              uint32_t firstInstance);

    void drawIndirect(const GrVkGpu* gpu,
                      sk_sp<const GrBuffer> indirectBuffer,
                      VkDeviceSize offset,
                      uint32_t drawCount,
                      uint32_t stride);

    void drawIndexedIndirect(const GrVkGpu* gpu,
                             sk_sp<const GrBuffer> indirectBuffer,
                             VkDeviceSize offset,
                             uint32_t drawCount,
                             uint32_t stride);

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution
    void addResource(sk_sp<const GrManagedResource> resource) {
        SkASSERT(resource);
        resource->notifyQueuedForWorkOnGpu();
        fTrackedResources.push_back(std::move(resource));
    }

    void addResource(const GrManagedResource* resource) {
        this->addResource(sk_ref_sp(resource));
    }

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution. When it is released, it will signal that the resource can be recycled for reuse.
    void addRecycledResource(gr_rp<const GrRecycledResource> resource) {
        SkASSERT(resource);
        resource->notifyQueuedForWorkOnGpu();
        fTrackedRecycledResources.push_back(std::move(resource));
    }

    void addRecycledResource(const GrRecycledResource* resource) {
        this->addRecycledResource(gr_ref_rp<const GrRecycledResource>(resource));
    }

    void addGrBuffer(sk_sp<const GrBuffer> buffer) {
        fTrackedGpuBuffers.push_back(std::move(buffer));
    }

    void addGrSurface(sk_sp<const GrSurface> surface) {
        fTrackedGpuSurfaces.push_back(std::move(surface));
    }

    void releaseResources();

    void freeGPUData(const GrGpu* gpu, VkCommandPool pool) const;

    bool hasWork() const { return fHasWork; }

protected:
    GrVkCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped = false)
            : fIsActive(isWrapped) // All wrapped command buffers start as active
            , fCmdBuffer(cmdBuffer)
            , fIsWrapped(isWrapped) {
        this->invalidateState();
    }

    bool isWrapped() const { return fIsWrapped; }

    void addingWork(const GrVkGpu* gpu);

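    // Flushes any barriers accumulated by pipelineBarrier() into the command buffer as one batch.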
    void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);

private:
    static constexpr int kInitialTrackedResourcesCount = 32;

protected:
    template <typename T> using TrackedResourceArray = SkSTArray<kInitialTrackedResourcesCount, T>;
    TrackedResourceArray<sk_sp<const GrManagedResource>> fTrackedResources;
    TrackedResourceArray<gr_rp<const GrRecycledResource>> fTrackedRecycledResources;
    SkSTArray<16, sk_sp<const GrBuffer>> fTrackedGpuBuffers;
    SkSTArray<16, gr_cb<const GrSurface>> fTrackedGpuSurfaces;

    // Tracks whether we are in the middle of command buffer begin/end calls and thus can add
    // new commands to the buffer.
    bool                      fIsActive;
    bool                      fHasWork = false;

    // Stores a pointer to the current active render pass (i.e. begin has been called but not
    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
    // the render pass.
    const GrVkRenderPass*     fActiveRenderPass = nullptr;

    VkCommandBuffer           fCmdBuffer;

    virtual void onReleaseResources() {}
    virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;

    static constexpr uint32_t kMaxInputBuffers = 2;

    VkBuffer fBoundInputBuffers[kMaxInputBuffers];
    VkBuffer fBoundIndexBuffer;

    // Cached values used for dynamic state updates
    VkViewport fCachedViewport;
    VkRect2D   fCachedScissor;
    float      fCachedBlendConstant[4];

    // Tracking of memory barriers so that we can submit them all in a batch together.
    SkSTArray<1, VkBufferMemoryBarrier> fBufferBarriers;
    SkSTArray<2, VkImageMemoryBarrier> fImageBarriers;
    bool fBarriersByRegion = false;
    VkPipelineStageFlags fSrcStageMask = 0;
    VkPipelineStageFlags fDstStageMask = 0;

    bool fIsWrapped;
};

class GrVkSecondaryCommandBuffer;

class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
public:
    ~GrVkPrimaryCommandBuffer() override;

    static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);

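    // Begins/ends recording on this primary command buffer. begin() must be called before adding
    // any commands, and end() must be called before the buffer is submitted to a queue.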
    void begin(GrVkGpu* gpu);
    void end(GrVkGpu* gpu, bool abandoningBuffer = false);

    // Begins a render pass on this command buffer. The framebuffer from GrVkRenderTarget will be
    // used in the render pass.
    bool beginRenderPass(GrVkGpu* gpu,
                         const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue clearValues[],
                         const GrSurface* target,
                         const SkIRect& bounds,
                         bool forSecondaryCB);
    void endRenderPass(const GrVkGpu* gpu);

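    // Advances the active render pass to its next subpass.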
    void nexSubpass(GrVkGpu* gpu, bool forSecondaryCB);

    // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
    // currently inside a render pass that is compatible with the one used to create the
    // SecondaryCommandBuffer.
    void executeCommands(const GrVkGpu* gpu,
                         std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);

    // Commands that only work outside of a render pass
    void clearColorImage(const GrVkGpu* gpu,
                         GrVkImage* image,
                         const VkClearColorValue* color,
                         uint32_t subRangeCount,
                         const VkImageSubresourceRange* subRanges);

    void clearDepthStencilImage(const GrVkGpu* gpu,
                                GrVkImage* image,
                                const VkClearDepthStencilValue* color,
                                uint32_t subRangeCount,
                                const VkImageSubresourceRange* subRanges);

    void copyImage(const GrVkGpu* gpu,
                   GrVkImage* srcImage,
                   VkImageLayout srcLayout,
                   GrVkImage* dstImage,
                   VkImageLayout dstLayout,
                   uint32_t copyRegionCount,
                   const VkImageCopy* copyRegions);

    void blitImage(const GrVkGpu* gpu,
                   const GrManagedResource* srcResource,
                   VkImage srcImage,
                   VkImageLayout srcLayout,
                   const GrManagedResource* dstResource,
                   VkImage dstImage,
                   VkImageLayout dstLayout,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkImage& srcImage,
                   const GrVkImage& dstImage,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void copyImageToBuffer(const GrVkGpu* gpu,
                           GrVkImage* srcImage,
                           VkImageLayout srcLayout,
                           sk_sp<GrGpuBuffer> dstBuffer,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    // All uses of copyBufferToImage are done with buffers from our staging manager. The staging
    // manager will handle making sure the command buffer refs the buffer. Thus we just pass in the
    // raw VkBuffer here and don't worry about refs.
    void copyBufferToImage(const GrVkGpu* gpu,
                           VkBuffer srcBuffer,
                           GrVkImage* dstImage,
                           VkImageLayout dstLayout,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    void copyBuffer(GrVkGpu* gpu,
                    sk_sp<GrGpuBuffer> srcBuffer,
                    sk_sp<GrGpuBuffer> dstBuffer,
                    uint32_t regionCount,
                    const VkBufferCopy* regions);

    void updateBuffer(GrVkGpu* gpu,
                      sk_sp<GrVkBuffer> dstBuffer,
                      VkDeviceSize dstOffset,
                      VkDeviceSize dataSize,
                      const void* data);

    void resolveImage(GrVkGpu* gpu,
                      const GrVkImage& srcImage,
                      const GrVkImage& dstImage,
                      uint32_t regionCount,
                      const VkImageResolve* regions);

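    // Submits this command buffer to the given queue, waiting on the given wait semaphores and
    // signaling the given signal semaphores when execution completes.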
    bool submitToQueue(GrVkGpu* gpu, VkQueue queue,
                       SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
                       SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);

    void forceSync(GrVkGpu* gpu);

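    // Checks whether the GPU has finished executing the most recent submission of this buffer.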
    bool finished(GrVkGpu* gpu);

    void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);

    void callFinishedProcs() {
        fFinishedProcs.reset();
    }

    void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);

private:
    explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
        : INHERITED(cmdBuffer)
        , fSubmitFence(VK_NULL_HANDLE) {}

    void onFreeGPUData(const GrVkGpu* gpu) const override;

    void onReleaseResources() override;

    SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fSecondaryCommandBuffers;
    VkFence                                                     fSubmitFence;
    SkTArray<sk_sp<GrRefCntedCallback>>                         fFinishedProcs;

    using INHERITED = GrVkCommandBuffer;
};

class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
public:
    static GrVkSecondaryCommandBuffer* Create(GrVkGpu* gpu, GrVkCommandPool* cmdPool);
    // Used for wrapping an external secondary command buffer.
    static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB,
                                              const GrVkRenderPass* externalRenderPass);

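    // Recording happens against a framebuffer and a render pass that must be compatible with the
    // render pass the primary command buffer is inside when executeCommands() is called on it.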
    void begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
               const GrVkRenderPass* compatibleRenderPass);
    void end(GrVkGpu* gpu);

    void recycle(GrVkCommandPool* cmdPool);

    VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }

private:
    explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer,
                                        const GrVkRenderPass* externalRenderPass)
            : INHERITED(cmdBuffer, SkToBool(externalRenderPass)) {
        fActiveRenderPass = externalRenderPass;
    }

    void onFreeGPUData(const GrVkGpu* gpu) const override {}

    // Used for accessing fIsActive (on GrVkCommandBuffer)
    friend class GrVkPrimaryCommandBuffer;

    using INHERITED = GrVkCommandBuffer;
};

#endif