/* (code-browser navigation chrome from the scraped page: Home / Line# / Scopes# / Navigate / Raw / Download) */
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/core/SkDrawable.h"
#include "include/core/SkRefCnt.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "include/private/gpu/vk/SkiaVulkan.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrXferProcessor.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkMSAALoadManager.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>

// Forward declarations: everything below is used by GrVkGpu only by pointer,
// reference, or smart pointer, so the full definitions are not needed here.
class GrAttachment;
class GrBackendSemaphore;
class GrDirectContext;
class GrGpuBuffer;
class GrManagedResource;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTarget;
class GrSemaphore;
class GrSurface;
class GrSurfaceProxy;
class GrTexture;
class GrThreadSafePipelineBuilder;
class GrVkBuffer;
class GrVkCommandBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkImage;
class GrVkOpsRenderPass;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
enum class SkTextureCompressionType;
struct GrContextOptions;
struct GrVkDrawableInfo;
struct GrVkImageInfo;
struct SkIPoint;
struct SkIRect;
struct SkISize;
struct SkImageInfo;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}

namespace skgpu {
class MutableTextureState;
class VulkanMemoryAllocator;
struct VulkanBackendContext;
struct VulkanInterface;
}  // namespace skgpu
84 
85 class GrVkGpu : public GrGpu {
86 public:
87     static std::unique_ptr<GrGpu> Make(const skgpu::VulkanBackendContext&,
88                                        const GrContextOptions&,
89                                        GrDirectContext*);
90 
91     ~GrVkGpu() override;
92 
93     void disconnect(DisconnectType) override;
disconnected()94     bool disconnected() const { return fDisconnected; }
95 
releaseUnlockedBackendObjects()96     void releaseUnlockedBackendObjects() override {
97         fResourceProvider.releaseUnlockedBackendObjects();
98     }
99 
100     GrThreadSafePipelineBuilder* pipelineBuilder() override;
101     sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;
102 
vkInterface()103     const skgpu::VulkanInterface* vkInterface() const { return fInterface.get(); }
vkCaps()104     const GrVkCaps& vkCaps() const { return *fVkCaps; }
105 
stagingBufferManager()106     GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
107     void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;
108 
isDeviceLost()109     bool isDeviceLost() const override { return fDeviceIsLost; }
110 
memoryAllocator()111     skgpu::VulkanMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
memoryAllocatorCacheImage()112     skgpu::VulkanMemoryAllocator* memoryAllocatorCacheImage() const { return fMemoryAllocatorCacheImage.get(); }
113 
physicalDevice()114     VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
device()115     VkDevice device() const { return fDevice; }
queue()116     VkQueue  queue() const { return fQueue; }
queueIndex()117     uint32_t  queueIndex() const { return fQueueIndex; }
cmdPool()118     GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
physicalDeviceProperties()119     const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
120         return fPhysDevProps;
121     }
physicalDeviceMemoryProperties()122     const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
123         return fPhysDevMemProps;
124     }
protectedContext()125     bool protectedContext() const { return fProtectedContext == skgpu::Protected::kYes; }
126 
resourceProvider()127     GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
128 
currentCommandBuffer()129     GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }
130 
131     void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
132 
133     bool setBackendTextureState(const GrBackendTexture&,
134                                 const skgpu::MutableTextureState&,
135                                 skgpu::MutableTextureState* previousState,
136                                 sk_sp<skgpu::RefCntedCallback> finishedCallback) override;
137 
138     bool setBackendRenderTargetState(const GrBackendRenderTarget&,
139                                      const skgpu::MutableTextureState&,
140                                      skgpu::MutableTextureState* previousState,
141                                      sk_sp<skgpu::RefCntedCallback> finishedCallback) override;
142 
143     void deleteBackendTexture(const GrBackendTexture&) override;
144 
145     bool compile(const GrProgramDesc&, const GrProgramInfo&) override;
146 
147 #if defined(GPU_TEST_UTILS)
148     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
149 
150     GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
151                                                                GrColorType,
152                                                                int sampleCnt,
153                                                                GrProtected) override;
154     void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
155 
resetShaderCacheForTesting()156     void resetShaderCacheForTesting() const override {
157         fResourceProvider.resetShaderCacheForTesting();
158     }
159 #endif
160 
161     sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
162                                               SkISize dimensions, int numStencilSamples) override;
163 
getPreferredStencilFormat(const GrBackendFormat &)164     GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
165         return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
166     }
167 
168     sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
169                                            const GrBackendFormat& format,
170                                            int numSamples,
171                                            GrProtected isProtected,
172                                            GrMemoryless isMemoryless) override;
173 
174     void addBufferMemoryBarrier(const GrManagedResource*,
175                                 VkPipelineStageFlags srcStageMask,
176                                 VkPipelineStageFlags dstStageMask,
177                                 bool byRegion,
178                                 VkBufferMemoryBarrier* barrier) const;
179     void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
180                                 VkPipelineStageFlags dstStageMask,
181                                 bool byRegion,
182                                 VkBufferMemoryBarrier* barrier) const;
183     void addImageMemoryBarrier(const GrManagedResource*,
184                                VkPipelineStageFlags srcStageMask,
185                                VkPipelineStageFlags dstStageMask,
186                                bool byRegion,
187                                VkImageMemoryBarrier* barrier) const;
188 
189     bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
190                              const GrVkRenderPass& renderPass,
191                              GrAttachment* dst,
192                              GrVkImage* src,
193                              const SkIRect& srcRect);
194 
195     bool onRegenerateMipMapLevels(GrTexture* tex) override;
196 
197     void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;
198 
199     void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);
200 
201     void submit(GrOpsRenderPass*) override;
202 
203     [[nodiscard]] std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned) override;
204     std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
205                                                       GrSemaphoreWrapType,
206                                                       GrWrapOwnership) override;
207     void insertSemaphore(GrSemaphore* semaphore) override;
208     void waitSemaphore(GrSemaphore* semaphore) override;
209 
210     // These match the definitions in SkDrawable, from whence they came
211     typedef void* SubmitContext;
212     typedef void (*SubmitProc)(SubmitContext submitContext);
213 
214     // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
215     // command buffer to the gpu.
216     void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
217 
checkFinishedCallbacks()218     void checkFinishedCallbacks() override { fResourceProvider.checkCommandBuffers(); }
219     void finishOutstandingGpuWork() override;
220 
221     std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
222 
223     bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
224                       VkDeviceSize size);
225 
226     bool zeroBuffer(sk_sp<GrGpuBuffer>);
227 
228     enum PersistentCacheKeyType : uint32_t {
229         kShader_PersistentCacheKeyType = 0,
230         kPipelineCache_PersistentCacheKeyType = 1,
231     };
232 
233     void storeVkPipelineCacheData() override;
234 
235     bool beginRenderPass(const GrVkRenderPass*,
236                          sk_sp<const GrVkFramebuffer>,
237                          const VkClearValue* colorClear,
238                          const GrSurface*,
239                          const SkIRect& renderPassBounds,
240                          bool forSecondaryCB);
241     void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);
242 
243     // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
244     // Vulkan call (and skgpu::VulkanMemoryAllocator call that returns VkResult) made on behalf of
245     // the GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
246     bool checkVkResult(VkResult);
247 
248 #ifdef SKIA_DFX_FOR_OHOS
249     void addAllocImageBytes(size_t bytes);
250     void removeAllocImageBytes(size_t bytes);
251     void addAllocBufferBytes(size_t bytes);
252     void removeAllocBufferBytes(size_t bytes);
253 #endif
254 
255     void vmaDefragment() override;
256     void dumpVmaStats(SkString *out) override;
257 private:
258     GrVkGpu(GrDirectContext*,
259             const skgpu::VulkanBackendContext&,
260             const sk_sp<GrVkCaps> caps,
261             sk_sp<const skgpu::VulkanInterface>,
262             uint32_t instanceVersion,
263             uint32_t physicalDeviceVersion,
264             sk_sp<skgpu::VulkanMemoryAllocator>,
265             sk_sp<skgpu::VulkanMemoryAllocator>);
266 
267     void destroyResources();
268 
269     GrBackendTexture onCreateBackendTexture(SkISize dimensions,
270                                             const GrBackendFormat&,
271                                             GrRenderable,
272                                             skgpu::Mipmapped,
273                                             GrProtected,
274                                             std::string_view label) override;
275     GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
276                                                       const GrBackendFormat&,
277                                                       skgpu::Mipmapped,
278                                                       GrProtected) override;
279 
280     bool onClearBackendTexture(const GrBackendTexture&,
281                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
282                                std::array<float, 4> color) override;
283 
284     bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
285                                           sk_sp<skgpu::RefCntedCallback> finishedCallback,
286                                           const void* data,
287                                           size_t length) override;
288 
289     bool setBackendSurfaceState(GrVkImageInfo info,
290                                 sk_sp<skgpu::MutableTextureState> currentState,
291                                 SkISize dimensions,
292                                 VkImageLayout newLayout,
293                                 uint32_t newQueueFamilyIndex,
294                                 skgpu::MutableTextureState* previousState,
295                                 sk_sp<skgpu::RefCntedCallback> finishedCallback);
296 
297     sk_sp<GrTexture> onCreateTexture(SkISize,
298                                      const GrBackendFormat&,
299                                      GrRenderable,
300                                      int renderTargetSampleCnt,
301                                      skgpu::Budgeted,
302                                      GrProtected,
303                                      int mipLevelCount,
304                                      uint32_t levelClearMask,
305                                      std::string_view label) override;
306     sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
307                                                const GrBackendFormat&,
308                                                skgpu::Budgeted,
309                                                skgpu::Mipmapped,
310                                                GrProtected,
311                                                const void* data,
312                                                size_t dataSize) override;
313     sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
314                                                const GrBackendFormat&,
315                                                skgpu::Budgeted,
316                                                skgpu::Mipmapped,
317                                                GrProtected,
318                                                OH_NativeBuffer* nativeBuffer,
319                                                size_t bufferSize) override;
320 
321     sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
322                                           GrWrapOwnership,
323                                           GrWrapCacheable,
324                                           GrIOType) override;
325     sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
326                                                     GrWrapOwnership,
327                                                     GrWrapCacheable) override;
328     sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
329                                                     int sampleCnt,
330                                                     GrWrapOwnership,
331                                                     GrWrapCacheable) override;
332     sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;
333 
334     sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
335                                                                 const GrVkDrawableInfo&) override;
336 
337     sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;
338 
339     bool onReadPixels(GrSurface*,
340                       SkIRect,
341                       GrColorType surfaceColorType,
342                       GrColorType dstColorType,
343                       void* buffer,
344                       size_t rowBytes) override;
345 
346     bool onWritePixels(GrSurface*,
347                        SkIRect,
348                        GrColorType surfaceColorType,
349                        GrColorType srcColorType,
350                        const GrMipLevel[],
351                        int mipLevelCount,
352                        bool prepForTexSampling) override;
353 
354     bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
355                                       size_t srcOffset,
356                                       sk_sp<GrGpuBuffer> dst,
357                                       size_t dstOffset,
358                                       size_t size) override;
359 
360     bool onTransferPixelsTo(GrTexture*,
361                             SkIRect,
362                             GrColorType textureColorType,
363                             GrColorType bufferColorType,
364                             sk_sp<GrGpuBuffer>,
365                             size_t offset,
366                             size_t rowBytes) override;
367 
368     bool onTransferPixelsFrom(GrSurface*,
369                               SkIRect,
370                               GrColorType surfaceColorType,
371                               GrColorType bufferColorType,
372                               sk_sp<GrGpuBuffer>,
373                               size_t offset) override;
374 
375     bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
376                        GrSurface* src, const SkIRect& srcRect,
377                        GrSamplerState::Filter) override;
378 
addFinishedCallback(skgpu::AutoCallback callback,std::optional<GrTimerQuery> timerQuery)379     void addFinishedCallback(skgpu::AutoCallback callback,
380                              std::optional<GrTimerQuery> timerQuery) override {
381         SkASSERT(!timerQuery);
382         this->addFinishedCallback(skgpu::RefCntedCallback::Make(std::move(callback)));
383     }
384 
385     void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);
386 
387     GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
388                                         bool useMSAASurface,
389                                         GrAttachment* stencil,
390                                         GrSurfaceOrigin,
391                                         const SkIRect&,
392                                         const GrOpsRenderPass::LoadAndStoreInfo&,
393                                         const GrOpsRenderPass::StencilLoadAndStoreInfo&,
394                                         const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
395                                         GrXferBarrierFlags renderPassXferBarriers) override;
396 
397     void prepareSurfacesForBackendAccessAndStateUpdates(
398             SkSpan<GrSurfaceProxy*> proxies,
399             SkSurfaces::BackendSurfaceAccess access,
400             const skgpu::MutableTextureState* newState) override;
401 
402     bool onSubmitToGpu(const GrSubmitInfo& info) override;
403 
404     void onReportSubmitHistograms() override;
405 
406     // Ends and submits the current command buffer to the queue and then creates a new command
407     // buffer and begins it. If fSync in the submitInfo is set to GrSyncCpu::kYes, the function will
408     // wait for all work in the queue to finish before returning. If this GrVkGpu object has any
409     // semaphores in fSemaphoreToSignal, we will add those signal semaphores to the submission of
410     // this command buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we
411     // will add those wait semaphores to the submission of this command buffer.
412     //
413     // If fMarkBoundary in submitInfo is GrMarkFrameBoundary::kYes, then we will mark the end of a
414     // frame if the VK_EXT_frame_boundary extension is available.
415     bool submitCommandBuffer(const GrSubmitInfo& submitInfo);
416 
417     void copySurfaceAsCopyImage(GrSurface* dst,
418                                 GrSurface* src,
419                                 GrVkImage* dstImage,
420                                 GrVkImage* srcImage,
421                                 const SkIRect& srcRect,
422                                 const SkIPoint& dstPoint);
423 
424     void copySurfaceAsBlit(GrSurface* dst,
425                            GrSurface* src,
426                            GrVkImage* dstImage,
427                            GrVkImage* srcImage,
428                            const SkIRect& srcRect,
429                            const SkIRect& dstRect,
430                            GrSamplerState::Filter filter);
431 
432     void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
433                               const SkIPoint& dstPoint);
434 
435     // helpers for onCreateTexture and writeTexturePixels
436     bool uploadTexDataLinear(GrVkImage* tex,
437                              SkIRect rect,
438                              GrColorType colorType,
439                              const void* data,
440                              size_t rowBytes);
441     bool uploadTexDataOptimal(GrVkImage* tex,
442                               SkIRect rect,
443                               GrColorType colorType,
444                               const GrMipLevel texels[],
445                               int mipLevelCount);
446     bool uploadTexDataCompressed(GrVkImage* tex,
447                                  SkTextureCompressionType compression,
448                                  VkFormat vkFormat,
449                                  SkISize dimensions,
450                                  skgpu::Mipmapped mipmapped,
451                                  const void* data,
452                                  size_t dataSize);
453     bool uploadTexDataCompressed(GrVkImage* tex, SkTextureCompressionType compression,
454                                  VkFormat vkFormat, SkISize dimensions, skgpu::Mipmapped mipMapped,
455                                  OH_NativeBuffer* nativeBuffer, size_t bufferSize);
456 
457     void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
458                       const SkIPoint& dstPoint);
459 
460     bool createVkImageForBackendSurface(VkFormat,
461                                         SkISize dimensions,
462                                         int sampleCnt,
463                                         GrTexturable,
464                                         GrRenderable,
465                                         skgpu::Mipmapped,
466                                         GrVkImageInfo*,
467                                         GrProtected);
468 
469     // checkVkResult dfx
470 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
471     void dumpDeviceFaultInfo(const std::string& errorCategory);
472     void dumpVkImageDfx(const std::string& errorCategory);
473 #endif
474     void reportVulkanError(const std::string& errorCategory);
475 
476     sk_sp<const skgpu::VulkanInterface>                   fInterface;
477     sk_sp<skgpu::VulkanMemoryAllocator>                   fMemoryAllocator;
478     sk_sp<skgpu::VulkanMemoryAllocator>                   fMemoryAllocatorCacheImage;
479     sk_sp<GrVkCaps>                                       fVkCaps;
480     bool                                                  fDeviceIsLost = false;
481 
482     VkPhysicalDevice                                      fPhysicalDevice;
483     VkDevice                                              fDevice;
484     VkQueue                                               fQueue;    // Must be Graphics queue
485     uint32_t                                              fQueueIndex;
486 
487     // Created by GrVkGpu
488     GrVkResourceProvider                                  fResourceProvider;
489     GrStagingBufferManager                                fStagingBufferManager;
490 
491     GrVkMSAALoadManager                                   fMSAALoadManager;
492 
493     GrVkCommandPool*                                      fMainCmdPool;
494     // just a raw pointer; object's lifespan is managed by fCmdPool
495     GrVkPrimaryCommandBuffer*                             fMainCmdBuffer;
496 
497     skia_private::STArray<1, GrVkSemaphore::Resource*>    fSemaphoresToWaitOn;
498     skia_private::STArray<1, GrVkSemaphore::Resource*>    fSemaphoresToSignal;
499 
500     skia_private::TArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;
501 
502     VkPhysicalDeviceProperties                            fPhysDevProps;
503     VkPhysicalDeviceMemoryProperties                      fPhysDevMemProps;
504 
505     // We need a bool to track whether or not we've already disconnected all the gpu resources from
506     // vulkan context.
507     bool                                                  fDisconnected;
508 
509     skgpu::Protected                                      fProtectedContext;
510 
511     std::unique_ptr<GrVkOpsRenderPass>                    fCachedOpsRenderPass;
512 
513     skgpu::VulkanDeviceLostContext                        fDeviceLostContext;
514     skgpu::VulkanDeviceLostProc                           fDeviceLostProc;
515 
516     using INHERITED = GrGpu;
517 };
518 
519 #endif
520