• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkMSAALoadManager.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
// Forward declarations — only pointers/references to these types appear in
// this header, so the full definitions are not needed here.
class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;

namespace skgpu { struct VulkanInterface; }

namespace skgpu { class VulkanMemoryAllocator; }
39 class GrVkGpu : public GrGpu {
40 public:
41     static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrDirectContext*);
42 
43     ~GrVkGpu() override;
44 
45     void disconnect(DisconnectType) override;
disconnected()46     bool disconnected() const { return fDisconnected; }
47 
releaseUnlockedBackendObjects()48     void releaseUnlockedBackendObjects() override {
49         fResourceProvider.releaseUnlockedBackendObjects();
50     }
51 
52     GrThreadSafePipelineBuilder* pipelineBuilder() override;
53     sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;
54 
vkInterface()55     const skgpu::VulkanInterface* vkInterface() const { return fInterface.get(); }
vkCaps()56     const GrVkCaps& vkCaps() const { return *fVkCaps; }
57 
stagingBufferManager()58     GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
59     void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;
60 
isDeviceLost()61     bool isDeviceLost() const override { return fDeviceIsLost; }
62 
memoryAllocator()63     skgpu::VulkanMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
64 
physicalDevice()65     VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
device()66     VkDevice device() const { return fDevice; }
queue()67     VkQueue  queue() const { return fQueue; }
queueIndex()68     uint32_t  queueIndex() const { return fQueueIndex; }
cmdPool()69     GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
physicalDeviceProperties()70     const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
71         return fPhysDevProps;
72     }
physicalDeviceMemoryProperties()73     const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
74         return fPhysDevMemProps;
75     }
protectedContext()76     bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }
77 
resourceProvider()78     GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
79 
currentCommandBuffer()80     GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }
81 
82     void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
83 
84     bool setBackendTextureState(const GrBackendTexture&,
85                                 const skgpu::MutableTextureState&,
86                                 skgpu::MutableTextureState* previousState,
87                                 sk_sp<skgpu::RefCntedCallback> finishedCallback) override;
88 
89     bool setBackendRenderTargetState(const GrBackendRenderTarget&,
90                                      const skgpu::MutableTextureState&,
91                                      skgpu::MutableTextureState* previousState,
92                                      sk_sp<skgpu::RefCntedCallback> finishedCallback) override;
93 
94     void deleteBackendTexture(const GrBackendTexture&) override;
95 
96     bool compile(const GrProgramDesc&, const GrProgramInfo&) override;
97 
98 #if GR_TEST_UTILS
99     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
100 
101     GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
102                                                                GrColorType,
103                                                                int sampleCnt,
104                                                                GrProtected) override;
105     void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
106 
resetShaderCacheForTesting()107     void resetShaderCacheForTesting() const override {
108         fResourceProvider.resetShaderCacheForTesting();
109     }
110 #endif
111 
112     sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
113                                               SkISize dimensions, int numStencilSamples) override;
114 
getPreferredStencilFormat(const GrBackendFormat &)115     GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
116         return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat());
117     }
118 
119     sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
120                                            const GrBackendFormat& format,
121                                            int numSamples,
122                                            GrProtected isProtected,
123                                            GrMemoryless isMemoryless) override;
124 
125     void addBufferMemoryBarrier(const GrManagedResource*,
126                                 VkPipelineStageFlags srcStageMask,
127                                 VkPipelineStageFlags dstStageMask,
128                                 bool byRegion,
129                                 VkBufferMemoryBarrier* barrier) const;
130     void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
131                                 VkPipelineStageFlags dstStageMask,
132                                 bool byRegion,
133                                 VkBufferMemoryBarrier* barrier) const;
134     void addImageMemoryBarrier(const GrManagedResource*,
135                                VkPipelineStageFlags srcStageMask,
136                                VkPipelineStageFlags dstStageMask,
137                                bool byRegion,
138                                VkImageMemoryBarrier* barrier) const;
139 
140     bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
141                              const GrVkRenderPass& renderPass,
142                              GrAttachment* dst,
143                              GrVkImage* src,
144                              const SkIRect& srcRect);
145 
146     bool onRegenerateMipMapLevels(GrTexture* tex) override;
147 
148     void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;
149 
150     void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);
151 
152     void submit(GrOpsRenderPass*) override;
153 
154     GrFence SK_WARN_UNUSED_RESULT insertFence() override;
155     bool waitFence(GrFence) override;
156     void deleteFence(GrFence) override;
157 
158     std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
159     std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
160                                                       GrSemaphoreWrapType,
161                                                       GrWrapOwnership) override;
162     void insertSemaphore(GrSemaphore* semaphore) override;
163     void waitSemaphore(GrSemaphore* semaphore) override;
164 
165     // These match the definitions in SkDrawable, from whence they came
166     typedef void* SubmitContext;
167     typedef void (*SubmitProc)(SubmitContext submitContext);
168 
169     // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
170     // command buffer to the gpu.
171     void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
172 
checkFinishProcs()173     void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
174     void finishOutstandingGpuWork() override;
175 
176     std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
177 
178     bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
179                       VkDeviceSize size);
180 
181     bool zeroBuffer(sk_sp<GrGpuBuffer>);
182 
183     enum PersistentCacheKeyType : uint32_t {
184         kShader_PersistentCacheKeyType = 0,
185         kPipelineCache_PersistentCacheKeyType = 1,
186     };
187 
188     void storeVkPipelineCacheData() override;
189 
190     bool beginRenderPass(const GrVkRenderPass*,
191                          sk_sp<const GrVkFramebuffer>,
192                          const VkClearValue* colorClear,
193                          const GrSurface*,
194                          const SkIRect& renderPassBounds,
195                          bool forSecondaryCB);
196     void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);
197 
198     // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
199     // Vulkan call (and skgpu::VulkanMemoryAllocator call that returns VkResult) made on behalf of
200     // the GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
201     bool checkVkResult(VkResult);
202 
203 private:
204     enum SyncQueue {
205         kForce_SyncQueue,
206         kSkip_SyncQueue
207     };
208 
209     GrVkGpu(GrDirectContext*,
210             const GrVkBackendContext&,
211             const sk_sp<GrVkCaps> caps,
212             sk_sp<const skgpu::VulkanInterface>,
213             uint32_t instanceVersion,
214             uint32_t physicalDeviceVersion,
215             sk_sp<skgpu::VulkanMemoryAllocator>);
216 
217     void destroyResources();
218 
219     GrBackendTexture onCreateBackendTexture(SkISize dimensions,
220                                             const GrBackendFormat&,
221                                             GrRenderable,
222                                             GrMipmapped,
223                                             GrProtected,
224                                             std::string_view label) override;
225     GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
226                                                       const GrBackendFormat&,
227                                                       GrMipmapped,
228                                                       GrProtected) override;
229 
230     bool onClearBackendTexture(const GrBackendTexture&,
231                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
232                                std::array<float, 4> color) override;
233 
234     bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
235                                           sk_sp<skgpu::RefCntedCallback> finishedCallback,
236                                           const void* data,
237                                           size_t length) override;
238 
239     bool setBackendSurfaceState(GrVkImageInfo info,
240                                 sk_sp<skgpu::MutableTextureStateRef> currentState,
241                                 SkISize dimensions,
242                                 const skgpu::VulkanMutableTextureState& newState,
243                                 skgpu::MutableTextureState* previousState,
244                                 sk_sp<skgpu::RefCntedCallback> finishedCallback);
245 
246     sk_sp<GrTexture> onCreateTexture(SkISize,
247                                      const GrBackendFormat&,
248                                      GrRenderable,
249                                      int renderTargetSampleCnt,
250                                      skgpu::Budgeted,
251                                      GrProtected,
252                                      int mipLevelCount,
253                                      uint32_t levelClearMask,
254                                      std::string_view label) override;
255     sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
256                                                const GrBackendFormat&,
257                                                skgpu::Budgeted,
258                                                GrMipmapped,
259                                                GrProtected,
260                                                const void* data,
261                                                size_t dataSize) override;
262 
263     sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
264                                           GrWrapOwnership,
265                                           GrWrapCacheable,
266                                           GrIOType) override;
267     sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
268                                                     GrWrapOwnership,
269                                                     GrWrapCacheable) override;
270     sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
271                                                     int sampleCnt,
272                                                     GrWrapOwnership,
273                                                     GrWrapCacheable) override;
274     sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;
275 
276     sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
277                                                                 const GrVkDrawableInfo&) override;
278 
279     sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;
280 
281     bool onReadPixels(GrSurface*,
282                       SkIRect,
283                       GrColorType surfaceColorType,
284                       GrColorType dstColorType,
285                       void* buffer,
286                       size_t rowBytes) override;
287 
288     bool onWritePixels(GrSurface*,
289                        SkIRect,
290                        GrColorType surfaceColorType,
291                        GrColorType srcColorType,
292                        const GrMipLevel[],
293                        int mipLevelCount,
294                        bool prepForTexSampling) override;
295 
296     bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
297                                       size_t srcOffset,
298                                       sk_sp<GrGpuBuffer> dst,
299                                       size_t dstOffset,
300                                       size_t size) override;
301 
302     bool onTransferPixelsTo(GrTexture*,
303                             SkIRect,
304                             GrColorType textureColorType,
305                             GrColorType bufferColorType,
306                             sk_sp<GrGpuBuffer>,
307                             size_t offset,
308                             size_t rowBytes) override;
309 
310     bool onTransferPixelsFrom(GrSurface*,
311                               SkIRect,
312                               GrColorType surfaceColorType,
313                               GrColorType bufferColorType,
314                               sk_sp<GrGpuBuffer>,
315                               size_t offset) override;
316 
317     bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
318                        GrSurface* src, const SkIRect& srcRect,
319                        GrSamplerState::Filter) override;
320 
321     void addFinishedProc(GrGpuFinishedProc finishedProc,
322                          GrGpuFinishedContext finishedContext) override;
323 
324     void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);
325 
326     GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
327                                         bool useMSAASurface,
328                                         GrAttachment* stencil,
329                                         GrSurfaceOrigin,
330                                         const SkIRect&,
331                                         const GrOpsRenderPass::LoadAndStoreInfo&,
332                                         const GrOpsRenderPass::StencilLoadAndStoreInfo&,
333                                         const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
334                                         GrXferBarrierFlags renderPassXferBarriers) override;
335 
336     void prepareSurfacesForBackendAccessAndStateUpdates(
337             SkSpan<GrSurfaceProxy*> proxies,
338             SkSurface::BackendSurfaceAccess access,
339             const skgpu::MutableTextureState* newState) override;
340 
341     bool onSubmitToGpu(bool syncCpu) override;
342 
343     void onReportSubmitHistograms() override;
344 
345     // Ends and submits the current command buffer to the queue and then creates a new command
346     // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
347     // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
348     // fSemaphoreToSignal, we will add those signal semaphores to the submission of this command
349     // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
350     // wait semaphores to the submission of this command buffer.
351     bool submitCommandBuffer(SyncQueue sync);
352 
353     void copySurfaceAsCopyImage(GrSurface* dst,
354                                 GrSurface* src,
355                                 GrVkImage* dstImage,
356                                 GrVkImage* srcImage,
357                                 const SkIRect& srcRect,
358                                 const SkIPoint& dstPoint);
359 
360     void copySurfaceAsBlit(GrSurface* dst,
361                            GrSurface* src,
362                            GrVkImage* dstImage,
363                            GrVkImage* srcImage,
364                            const SkIRect& srcRect,
365                            const SkIRect& dstRect,
366                            GrSamplerState::Filter filter);
367 
368     void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
369                               const SkIPoint& dstPoint);
370 
371     // helpers for onCreateTexture and writeTexturePixels
372     bool uploadTexDataLinear(GrVkImage* tex,
373                              SkIRect rect,
374                              GrColorType colorType,
375                              const void* data,
376                              size_t rowBytes);
377     bool uploadTexDataOptimal(GrVkImage* tex,
378                               SkIRect rect,
379                               GrColorType colorType,
380                               const GrMipLevel texels[],
381                               int mipLevelCount);
382     bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
383                                  VkFormat vkFormat, SkISize dimensions, GrMipmapped mipmapped,
384                                  const void* data, size_t dataSize);
385     void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
386                       const SkIPoint& dstPoint);
387 
388     bool createVkImageForBackendSurface(VkFormat,
389                                         SkISize dimensions,
390                                         int sampleCnt,
391                                         GrTexturable,
392                                         GrRenderable,
393                                         GrMipmapped,
394                                         GrVkImageInfo*,
395                                         GrProtected);
396 
397     sk_sp<const skgpu::VulkanInterface>                   fInterface;
398     sk_sp<skgpu::VulkanMemoryAllocator>                   fMemoryAllocator;
399     sk_sp<GrVkCaps>                                       fVkCaps;
400     bool                                                  fDeviceIsLost = false;
401 
402     VkPhysicalDevice                                      fPhysicalDevice;
403     VkDevice                                              fDevice;
404     VkQueue                                               fQueue;    // Must be Graphics queue
405     uint32_t                                              fQueueIndex;
406 
407     // Created by GrVkGpu
408     GrVkResourceProvider                                  fResourceProvider;
409     GrStagingBufferManager                                fStagingBufferManager;
410 
411     GrVkMSAALoadManager                                   fMSAALoadManager;
412 
413     GrVkCommandPool*                                      fMainCmdPool;
414     // just a raw pointer; object's lifespan is managed by fCmdPool
415     GrVkPrimaryCommandBuffer*                             fMainCmdBuffer;
416 
417     SkSTArray<1, GrVkSemaphore::Resource*>                fSemaphoresToWaitOn;
418     SkSTArray<1, GrVkSemaphore::Resource*>                fSemaphoresToSignal;
419 
420     SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;
421 
422     VkPhysicalDeviceProperties                            fPhysDevProps;
423     VkPhysicalDeviceMemoryProperties                      fPhysDevMemProps;
424 
425     // We need a bool to track whether or not we've already disconnected all the gpu resources from
426     // vulkan context.
427     bool                                                  fDisconnected;
428 
429     GrProtected                                           fProtectedContext;
430 
431     std::unique_ptr<GrVkOpsRenderPass>                    fCachedOpsRenderPass;
432 
433     using INHERITED = GrGpu;
434 };
435 
436 #endif
437