/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "include/core/SkBlurTypes.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrManagedResource.h"
#include "src/gpu/GrRefCnt.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrVkFramebuffer;
class GrVkImage;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;

class GrVkCommandBuffer {
public:
    virtual ~GrVkCommandBuffer() {}

    void invalidateState();

    ////////////////////////////////////////////////////////////////////////////
    // CommandBuffer commands
    ////////////////////////////////////////////////////////////////////////////
    enum BarrierType {
        kBufferMemory_BarrierType,
        kImageMemory_BarrierType
    };

    void pipelineBarrier(const GrVkGpu* gpu,
                         const GrManagedResource* resource,
                         VkPipelineStageFlags srcStageMask,
                         VkPipelineStageFlags dstStageMask,
                         bool byRegion,
                         BarrierType barrierType,
                         void* barrier);

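    // Illustrative use of the type-erased barrier parameter (a sketch, not taken from this
    // file; cmdBuffer, gpu, vkImage, and imageResource are placeholder names): for
    // kImageMemory_BarrierType the caller fills a VkImageMemoryBarrier and passes its address.
    //
    //   VkImageMemoryBarrier barrier = {};
    //   barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //   barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //   barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //   barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //   barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    //   barrier.image = vkImage;
    //   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //   cmdBuffer->pipelineBarrier(gpu, imageResource,
    //                              VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    //                              /*byRegion=*/false,
    //                              GrVkCommandBuffer::kImageMemory_BarrierType, &barrier);
    //
    // Barriers are batched (see fBufferBarriers/fImageBarriers below) and recorded when
    // submitPipelineBarriers() runs.
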
    void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, sk_sp<const GrBuffer> buffer);

    void bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer);

    void bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            VkPipelineLayout layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);

    void pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                       VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                       const void* values);

    void setViewport(const GrVkGpu* gpu,
                     uint32_t firstViewport,
                     uint32_t viewportCount,
                     const VkViewport* viewports);

    void setScissor(const GrVkGpu* gpu,
                    uint32_t firstScissor,
                    uint32_t scissorCount,
                    const VkRect2D* scissors);

    void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);

    // Commands that only work inside of a render pass
    void clearAttachments(const GrVkGpu* gpu,
                          int numAttachments,
                          const VkClearAttachment* attachments,
                          int numRects,
                          const VkClearRect* clearRects);

    void drawIndexed(const GrVkGpu* gpu,
                     uint32_t indexCount,
                     uint32_t instanceCount,
                     uint32_t firstIndex,
                     int32_t vertexOffset,
                     uint32_t firstInstance);

    void draw(const GrVkGpu* gpu,
              uint32_t vertexCount,
              uint32_t instanceCount,
              uint32_t firstVertex,
              uint32_t firstInstance);

    void drawIndirect(const GrVkGpu* gpu,
                      sk_sp<const GrBuffer> indirectBuffer,
                      VkDeviceSize offset,
                      uint32_t drawCount,
                      uint32_t stride);

    void drawIndexedIndirect(const GrVkGpu* gpu,
                             sk_sp<const GrBuffer> indirectBuffer,
                             VkDeviceSize offset,
                             uint32_t drawCount,
                             uint32_t stride);

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution
    void addResource(sk_sp<const GrManagedResource> resource) {
        SkASSERT(resource);
        fTrackedResources.push_back(std::move(resource));
    }
    void addResource(const GrManagedResource* resource) {
        this->addResource(sk_ref_sp(resource));
    }

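    // Illustrative usage (a sketch; cmdBuffer and vkImageResource are placeholder names): any
    // GPU object referenced by a recorded command should be added so it stays alive until the
    // command buffer has finished executing.
    //
    //   cmdBuffer->addResource(vkImageResource);  // held until releaseResources() runs
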
    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution. When it is released, it will signal that the resource can be recycled for reuse.
    void addRecycledResource(gr_rp<const GrRecycledResource> resource) {
        SkASSERT(resource);
        fTrackedRecycledResources.push_back(std::move(resource));
    }

    void addRecycledResource(const GrRecycledResource* resource) {
        this->addRecycledResource(gr_ref_rp<const GrRecycledResource>(resource));
    }

    void addGrBuffer(sk_sp<const GrBuffer> buffer) {
        fTrackedGpuBuffers.push_back(std::move(buffer));
    }

    void addGrSurface(sk_sp<const GrSurface> surface) {
        fTrackedGpuSurfaces.push_back(std::move(surface));
    }

    void releaseResources();

    void freeGPUData(const GrGpu* gpu, VkCommandPool pool) const;

    bool hasWork() const { return fHasWork; }

protected:
    GrVkCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped = false)
            : fIsActive(isWrapped) // All wrapped command buffers start as active
            , fCmdBuffer(cmdBuffer)
            , fIsWrapped(isWrapped) {
        this->invalidateState();
    }

    bool isWrapped() const { return fIsWrapped; }

    void addingWork(const GrVkGpu* gpu);

    void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);

private:
    static constexpr int kInitialTrackedResourcesCount = 32;

protected:
    template <typename T> using TrackedResourceArray = SkSTArray<kInitialTrackedResourcesCount, T>;
    TrackedResourceArray<sk_sp<const GrManagedResource>> fTrackedResources;
    TrackedResourceArray<gr_rp<const GrRecycledResource>> fTrackedRecycledResources;
    SkSTArray<16, sk_sp<const GrBuffer>> fTrackedGpuBuffers;
    SkSTArray<16, gr_cb<const GrSurface>> fTrackedGpuSurfaces;

    // Tracks whether we are in the middle of a command buffer begin/end call pair and thus can
    // add new commands to the buffer.
    bool                      fIsActive;
    bool                      fHasWork = false;

    // Stores a pointer to the currently active render pass (i.e. begin has been called but not
    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
    // the render pass.
    const GrVkRenderPass*     fActiveRenderPass = nullptr;

    VkCommandBuffer           fCmdBuffer;

    virtual void onReleaseResources() {}
    virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;

    static constexpr uint32_t kMaxInputBuffers = 2;

    VkBuffer fBoundInputBuffers[kMaxInputBuffers];
    VkBuffer fBoundIndexBuffer;

    // Cached values used for dynamic state updates
    VkViewport fCachedViewport;
    VkRect2D   fCachedScissor;
    float      fCachedBlendConstant[4];

    // Tracking of memory barriers so that we can submit them all in a batch together.
    SkSTArray<1, VkBufferMemoryBarrier> fBufferBarriers;
    SkSTArray<2, VkImageMemoryBarrier> fImageBarriers;
    bool fBarriersByRegion = false;
    VkPipelineStageFlags fSrcStageMask = 0;
    VkPipelineStageFlags fDstStageMask = 0;

    bool fIsWrapped;
};

class GrVkSecondaryCommandBuffer;

class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
public:
    ~GrVkPrimaryCommandBuffer() override;

    static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);

    void begin(GrVkGpu* gpu);
    void end(GrVkGpu* gpu, bool abandoningBuffer = false);

    // Begins a render pass on this command buffer. The framebuffer from the GrVkRenderTarget
    // will be used in the render pass.
    bool beginRenderPass(GrVkGpu* gpu,
                         const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue clearValues[],
                         const GrSurface* target,
                         const SkIRect& bounds,
                         bool forSecondaryCB);
    void endRenderPass(const GrVkGpu* gpu);

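    // Typical ordering (a sketch; gpu, renderPass, framebuffer, clearValues, target, and
    // bounds are placeholder names):
    //
    //   if (primary->beginRenderPass(gpu, renderPass, framebuffer, clearValues, target,
    //                                bounds, /*forSecondaryCB=*/false)) {
    //       // ... record draws, clears, etc. ...
    //       primary->endRenderPass(gpu);
    //   }
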
    void nexSubpass(GrVkGpu* gpu, bool forSecondaryCB);

    // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
    // currently inside a render pass that is compatible with the one used to create the
    // SecondaryCommandBuffer.
    void executeCommands(const GrVkGpu* gpu,
                         std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);

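    // Illustrative ordering (a sketch; names are placeholders): the secondary buffer is
    // recorded against a compatible render pass, then handed off here while this primary
    // buffer has an active render pass begun with forSecondaryCB set to true.
    //
    //   primary->beginRenderPass(gpu, renderPass, framebuffer, clearValues, target, bounds,
    //                            /*forSecondaryCB=*/true);
    //   primary->executeCommands(gpu, std::move(secondaryBuffer));
    //   primary->endRenderPass(gpu);
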
    // Commands that only work outside of a render pass
    void clearColorImage(const GrVkGpu* gpu,
                         GrVkImage* image,
                         const VkClearColorValue* color,
                         uint32_t subRangeCount,
                         const VkImageSubresourceRange* subRanges);

    void clearDepthStencilImage(const GrVkGpu* gpu,
                                GrVkImage* image,
                                const VkClearDepthStencilValue* color,
                                uint32_t subRangeCount,
                                const VkImageSubresourceRange* subRanges);

    void copyImage(const GrVkGpu* gpu,
                   GrVkImage* srcImage,
                   VkImageLayout srcLayout,
                   GrVkImage* dstImage,
                   VkImageLayout dstLayout,
                   uint32_t copyRegionCount,
                   const VkImageCopy* copyRegions);

    void blitImage(const GrVkGpu* gpu,
                   const GrManagedResource* srcResource,
                   VkImage srcImage,
                   VkImageLayout srcLayout,
                   const GrManagedResource* dstResource,
                   VkImage dstImage,
                   VkImageLayout dstLayout,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkImage& srcImage,
                   const GrVkImage& dstImage,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void copyImageToBuffer(const GrVkGpu* gpu,
                           GrVkImage* srcImage,
                           VkImageLayout srcLayout,
                           sk_sp<GrGpuBuffer> dstBuffer,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    // All uses of copyBufferToImage are done with buffers from our staging manager. The staging
    // manager will handle making sure the command buffer refs the buffer. Thus we just pass in the
    // raw VkBuffer here and don't worry about refs.
    void copyBufferToImage(const GrVkGpu* gpu,
                           VkBuffer srcBuffer,
                           GrVkImage* dstImage,
                           VkImageLayout dstLayout,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

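    // Illustrative usage (a sketch; stagingVkBuffer, dstImage, width, and height are
    // placeholder names): the caller fills one or more VkBufferImageCopy regions describing
    // the staging-buffer layout and the destination subresource.
    //
    //   VkBufferImageCopy region = {};
    //   region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    //   region.imageExtent = {width, height, 1};
    //   primary->copyBufferToImage(gpu, stagingVkBuffer, dstImage,
    //                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
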
    void copyBuffer(GrVkGpu* gpu,
                    sk_sp<GrGpuBuffer> srcBuffer,
                    sk_sp<GrGpuBuffer> dstBuffer,
                    uint32_t regionCount,
                    const VkBufferCopy* regions);

    void updateBuffer(GrVkGpu* gpu,
                      sk_sp<GrVkBuffer> dstBuffer,
                      VkDeviceSize dstOffset,
                      VkDeviceSize dataSize,
                      const void* data);

    void resolveImage(GrVkGpu* gpu,
                      const GrVkImage& srcImage,
                      const GrVkImage& dstImage,
                      uint32_t regionCount,
                      const VkImageResolve* regions);

    bool submitToQueue(GrVkGpu* gpu, VkQueue queue,
                       SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
                       SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);

    void forceSync(GrVkGpu* gpu);

    bool finished(GrVkGpu* gpu);

    void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);

    void callFinishedProcs() {
        fFinishedProcs.reset();
    }

    void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);

    void drawBlurImage(const GrVkGpu* gpu, const GrVkImage* image,
                       SkISize colorAttachmentDimensions, GrSurfaceOrigin rtOrigin,
                       const SkBlurArg& blurArg);

private:
    explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
        : INHERITED(cmdBuffer)
        , fSubmitFence(VK_NULL_HANDLE) {}

    void onFreeGPUData(const GrVkGpu* gpu) const override;

    void onReleaseResources() override;

    SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fSecondaryCommandBuffers;
    VkFence                                                     fSubmitFence;
    SkTArray<sk_sp<GrRefCntedCallback>>                         fFinishedProcs;

    using INHERITED = GrVkCommandBuffer;
};

class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
public:
    static GrVkSecondaryCommandBuffer* Create(GrVkGpu* gpu, GrVkCommandPool* cmdPool);
    // Used for wrapping an external secondary command buffer.
    static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB,
                                              const GrVkRenderPass* externalRenderPass);

    void begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
               const GrVkRenderPass* compatibleRenderPass);
    void end(GrVkGpu* gpu);

    void recycle(GrVkCommandPool* cmdPool);

    VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }

private:
    explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer,
                                        const GrVkRenderPass* externalRenderPass)
            : INHERITED(cmdBuffer, SkToBool(externalRenderPass)) {
        fActiveRenderPass = externalRenderPass;
    }

    void onFreeGPUData(const GrVkGpu* gpu) const override {}

    // Used for accessing fIsActive (on GrVkCommandBuffer)
    friend class GrVkPrimaryCommandBuffer;

    using INHERITED = GrVkCommandBuffer;
};

#endif