/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

8 #ifndef GrVkGpu_DEFINED
9 #define GrVkGpu_DEFINED
10 
11 #include "GrGpu.h"
12 #include "GrGpuFactory.h"
13 #include "vk/GrVkBackendContext.h"
14 #include "GrVkCaps.h"
15 #include "GrVkCopyManager.h"
16 #include "GrVkIndexBuffer.h"
17 #include "GrVkMemory.h"
18 #include "GrVkResourceProvider.h"
19 #include "GrVkSemaphore.h"
20 #include "GrVkVertexBuffer.h"
21 #include "GrVkUtil.h"
22 #include "vk/GrVkDefines.h"
23 
24 class GrPipeline;
25 
26 class GrVkBufferImpl;
27 class GrVkPipeline;
28 class GrVkPipelineState;
29 class GrVkPrimaryCommandBuffer;
30 class GrVkRenderPass;
31 class GrVkSecondaryCommandBuffer;
32 class GrVkTexture;
33 struct GrVkInterface;
34 
35 namespace SkSL {
36     class Compiler;
37 }
38 
39 class GrVkGpu : public GrGpu {
40 public:
41     static GrGpu* Create(GrBackendContext backendContext, const GrContextOptions& options,
42                          GrContext* context);
43 
44     ~GrVkGpu() override;
45 
46     void disconnect(DisconnectType) override;
47 
vkInterface()48     const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); }
vkCaps()49     const GrVkCaps& vkCaps() const { return *fVkCaps; }
50 
device()51     VkDevice device() const { return fDevice; }
queue()52     VkQueue  queue() const { return fQueue; }
cmdPool()53     VkCommandPool cmdPool() const { return fCmdPool; }
physicalDeviceMemoryProperties()54     VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
55         return fPhysDevMemProps;
56     }
57 
resourceProvider()58     GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
59 
currentCommandBuffer()60     GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }
61 
62     enum SyncQueue {
63         kForce_SyncQueue,
64         kSkip_SyncQueue
65     };
66 
67     bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
68                              GrPixelConfig readConfig, DrawPreference*,
69                              ReadPixelTempDrawInfo*) override;
70 
71     bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
72                               GrPixelConfig srcConfig, DrawPreference*,
73                               WritePixelTempDrawInfo*) override;
74 
75     bool onCopySurface(GrSurface* dst,
76                        GrSurface* src,
77                        const SkIRect& srcRect,
78                        const SkIPoint& dstPoint) override;
79 
80     void onQueryMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
81                                  int* effectiveSampleCnt, SamplePattern*) override;
82 
xferBarrier(GrRenderTarget *,GrXferBarrierType)83     void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
84 
85     GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
86                                                     GrPixelConfig config,
87                                                     bool isRenderTarget) override;
88     bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
89     void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;
90 
91     GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
92                                                                 int width,
93                                                                 int height) override;
94 
95     void clearStencil(GrRenderTarget* target) override;
96 
97     GrGpuCommandBuffer* createCommandBuffer(
98             const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
99             const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) override;
100 
101     void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
102                           VkPipelineStageFlags dstStageMask,
103                           bool byRegion,
104                           VkMemoryBarrier* barrier) const;
105     void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
106                                 VkPipelineStageFlags dstStageMask,
107                                 bool byRegion,
108                                 VkBufferMemoryBarrier* barrier) const;
109     void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
110                                VkPipelineStageFlags dstStageMask,
111                                bool byRegion,
112                                VkImageMemoryBarrier* barrier) const;
113 
shaderCompiler()114     SkSL::Compiler* shaderCompiler() const {
115         return fCompiler;
116     }
117 
onResolveRenderTarget(GrRenderTarget * target)118     void onResolveRenderTarget(GrRenderTarget* target) override {
119         this->internalResolveRenderTarget(target, true);
120     }
121 
122     void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
123                                       const GrVkRenderPass*,
124                                       const VkClearValue*,
125                                       GrVkRenderTarget*,
126                                       const SkIRect& bounds);
127 
128     void finishFlush() override;
129 
130     GrFence SK_WARN_UNUSED_RESULT insertFence() override;
131     bool waitFence(GrFence, uint64_t timeout) override;
132     void deleteFence(GrFence) const override;
133 
134     sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
135     sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
136                                             GrWrapOwnership ownership) override;
137     void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
138     void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
139 
140     sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
141 
142     void generateMipmap(GrVkTexture* tex);
143 
144     bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
145 
146     // Heaps
147     enum Heap {
148         kLinearImage_Heap = 0,
149         // We separate out small (i.e., <= 16K) images to reduce fragmentation
150         // in the main heap.
151         kOptimalImage_Heap,
152         kSmallOptimalImage_Heap,
153         // We have separate vertex and image heaps, because it's possible that
154         // a given Vulkan driver may allocate them separately.
155         kVertexBuffer_Heap,
156         kIndexBuffer_Heap,
157         kUniformBuffer_Heap,
158         kTexelBuffer_Heap,
159         kCopyReadBuffer_Heap,
160         kCopyWriteBuffer_Heap,
161 
162         kLastHeap = kCopyWriteBuffer_Heap
163     };
164     static const int kHeapCount = kLastHeap + 1;
165 
getHeap(Heap heap)166     GrVkHeap* getHeap(Heap heap) const { return fHeaps[heap].get(); }
167 
168 private:
169     GrVkGpu(GrContext* context, const GrContextOptions& options,
170             const GrVkBackendContext* backendContext);
171 
onResetContext(uint32_t resetBits)172     void onResetContext(uint32_t resetBits) override {}
173 
174     void destroyResources();
175 
176     sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
177                                      const GrMipLevel texels[], int mipLevelCount) override;
178 
179     sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
180                                           GrSurfaceOrigin,
181                                           GrBackendTextureFlags,
182                                           int sampleCnt,
183                                           GrWrapOwnership) override;
184     sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&,
185                                                     GrSurfaceOrigin) override;
186 
187     sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
188                                                              GrSurfaceOrigin,
189                                                              int sampleCnt) override;
190 
191     GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
192                              const void* data) override;
193 
onCreateInstancedRendering()194     gr_instanced::InstancedRendering* onCreateInstancedRendering() override { return nullptr; }
195 
196     bool onReadPixels(GrSurface* surface,
197                       int left, int top, int width, int height,
198                       GrPixelConfig,
199                       void* buffer,
200                       size_t rowBytes) override;
201 
202     bool onWritePixels(GrSurface* surface,
203                        int left, int top, int width, int height,
204                        GrPixelConfig config, const GrMipLevel texels[], int mipLevelCount) override;
205 
206     bool onTransferPixels(GrTexture*,
207                           int left, int top, int width, int height,
208                           GrPixelConfig config, GrBuffer* transferBuffer,
209                           size_t offset, size_t rowBytes) override;
210 
211     // Ends and submits the current command buffer to the queue and then creates a new command
212     // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
213     // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
214     // fSemaphoreToSignal, we will add those signal semaphores to the submission of this command
215     // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
216     // wait semaphores to the submission of this command buffer.
217     void submitCommandBuffer(SyncQueue sync);
218 
219     void internalResolveRenderTarget(GrRenderTarget* target, bool requiresSubmit);
220 
221     void copySurfaceAsCopyImage(GrSurface* dst,
222                                 GrSurface* src,
223                                 GrVkImage* dstImage,
224                                 GrVkImage* srcImage,
225                                 const SkIRect& srcRect,
226                                 const SkIPoint& dstPoint);
227 
228     void copySurfaceAsBlit(GrSurface* dst,
229                            GrSurface* src,
230                            GrVkImage* dstImage,
231                            GrVkImage* srcImage,
232                            const SkIRect& srcRect,
233                            const SkIPoint& dstPoint);
234 
235     void copySurfaceAsResolve(GrSurface* dst,
236                               GrSurface* src,
237                               const SkIRect& srcRect,
238                               const SkIPoint& dstPoint);
239 
240     // helpers for onCreateTexture and writeTexturePixels
241     bool uploadTexDataLinear(GrVkTexture* tex,
242                              int left, int top, int width, int height,
243                              GrPixelConfig dataConfig,
244                              const void* data,
245                              size_t rowBytes);
246     bool uploadTexDataOptimal(GrVkTexture* tex,
247                               int left, int top, int width, int height,
248                               GrPixelConfig dataConfig,
249                               const GrMipLevel texels[], int mipLevelCount);
250 
251     void resolveImage(GrSurface* dst,
252                       GrVkRenderTarget* src,
253                       const SkIRect& srcRect,
254                       const SkIPoint& dstPoint);
255 
256     sk_sp<const GrVkBackendContext> fBackendContext;
257     sk_sp<GrVkCaps>                 fVkCaps;
258 
259     // These Vulkan objects are provided by the client, and also stored in fBackendContext.
260     // They're copied here for convenient access.
261     VkDevice                                     fDevice;
262     VkQueue                                      fQueue;    // Must be Graphics queue
263 
264     // Created by GrVkGpu
265     GrVkResourceProvider                         fResourceProvider;
266     VkCommandPool                                fCmdPool;
267 
268     GrVkPrimaryCommandBuffer*                    fCurrentCmdBuffer;
269 
270     SkSTArray<1, const GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
271     SkSTArray<1, const GrVkSemaphore::Resource*> fSemaphoresToSignal;
272 
273     VkPhysicalDeviceMemoryProperties             fPhysDevMemProps;
274 
275     std::unique_ptr<GrVkHeap>                    fHeaps[kHeapCount];
276 
277     GrVkCopyManager                              fCopyManager;
278 
279 #ifdef SK_ENABLE_VK_LAYERS
280     // For reporting validation layer errors
281     VkDebugReportCallbackEXT               fCallback;
282 #endif
283 
284     // compiler used for compiling sksl into spirv. We only want to create the compiler once since
285     // there is significant overhead to the first compile of any compiler.
286     SkSL::Compiler* fCompiler;
287 
288     // We need a bool to track whether or not we've already disconnected all the gpu resources from
289     // vulkan context.
290     bool fDisconnected;
291 
292     typedef GrGpu INHERITED;
293 };
294 
295 #endif
296