/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "include/gpu/vk/VulkanBackendContext.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/vk/GrVkCaps.h"
#include "src/gpu/ganesh/vk/GrVkMSAALoadManager.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;
class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkOpsRenderPass;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
enum class SkTextureCompressionType;

namespace skgpu {
class VulkanMemoryAllocator;
class VulkanMutableTextureState;
struct VulkanInterface;
}

class GrVkGpu : public GrGpu {
public:
    static std::unique_ptr<GrGpu> Make(const skgpu::VulkanBackendContext&,
                                       const GrContextOptions&,
                                       GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const skgpu::VulkanInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    skgpu::VulkanMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == skgpu::Protected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const skgpu::MutableTextureState&,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const skgpu::MutableTextureState&,
                                     skgpu::MutableTextureState* previousState,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback) override;
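
    // A minimal usage sketch (illustrative only, not part of this header): putting a
    // wrapped backend render target back into the present layout before returning it
    // to a swapchain. Assumes a valid `gpu` and `backendRT`, and assumes the
    // skgpu::MutableTextureStates::MakeVulkan helper declared in
    // include/gpu/vk/VulkanMutableTextureState.h.
    //
    //     skgpu::MutableTextureState newState = skgpu::MutableTextureStates::MakeVulkan(
    //             VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, gpu->queueIndex());
    //     gpu->setBackendRenderTargetState(backendRT, newState,
    //                                      /*previousState=*/nullptr,
    //                                      /*finishedCallback=*/nullptr);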

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if defined(GR_TEST_UTILS)
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    [[nodiscard]] std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
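
    // Illustrative sketch (hypothetical `gpu` and `backendSem`): wrapping an
    // externally created VkSemaphore so that the next submission waits on it.
    //
    //     std::unique_ptr<GrSemaphore> sem = gpu->wrapBackendSemaphore(
    //             backendSem, GrSemaphoreWrapType::kWillWait, kBorrow_GrWrapOwnership);
    //     gpu->waitSemaphore(sem.get());
    //     // The wait is attached to the command buffer at the next submitCommandBuffer().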

    // These match the definitions in SkDrawable, from whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    bool zeroBuffer(sk_sp<GrGpuBuffer>);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if the VkResult indicates success, and also checks for device loss or OOM.
    // Every Vulkan call (and every skgpu::VulkanMemoryAllocator call that returns a VkResult)
    // made on behalf of this GrVkGpu should be routed through this function so that we respond
    // to OOMs and lost devices.
    bool checkVkResult(VkResult);
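
    // Illustrative call pattern (a sketch; real call sites use the GR_VK_CALL*
    // macros from GrVkUtil.h, assumed here to dispatch directly through the
    // interface's function table):
    //
    //     VkResult result = GR_VK_CALL(gpu->vkInterface(),
    //                                  ResetFences(gpu->device(), 1, &fence));
    //     if (!gpu->checkVkResult(result)) {
    //         return false;  // device loss or OOM has been recorded on the GrVkGpu
    //     }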

private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*,
            const skgpu::VulkanBackendContext&,
            const sk_sp<GrVkCaps> caps,
            sk_sp<const skgpu::VulkanInterface>,
            uint32_t instanceVersion,
            uint32_t physicalDeviceVersion,
            sk_sp<skgpu::VulkanMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            skgpu::Mipmapped,
                                            GrProtected,
                                            std::string_view label) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      skgpu::Mipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<skgpu::MutableTextureState> currentState,
                                SkISize dimensions,
                                VkImageLayout newLayout,
                                uint32_t newQueueFamilyIndex,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     skgpu::Budgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask,
                                     std::string_view label) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               skgpu::Budgeted,
                                               skgpu::Mipmapped,
                                               GrProtected,
                                               const void* data,
                                               size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                      size_t srcOffset,
                                      sk_sp<GrGpuBuffer> dst,
                                      size_t dstOffset,
                                      size_t size) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                       GrSurface* src, const SkIRect& srcRect,
                       GrSamplerState::Filter) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) override;

    bool onSubmitToGpu(GrSyncCpu sync) override;

    void onReportSubmitHistograms() override;
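
    // Illustrative sketch (hypothetical FrameTracker type): addFinishedProc above is
    // the backend end of the public GrFlushInfo::fFinishedProc plumbing, so a client
    // registration typically looks like:
    //
    //     static void onFrameDone(GrGpuFinishedContext ctx) {
    //         static_cast<FrameTracker*>(ctx)->markDone();
    //     }
    //     GrFlushInfo info;
    //     info.fFinishedProc = onFrameDone;
    //     info.fFinishedContext = tracker;
    //     dContext->flush(info);  // the proc fires once the flushed GPU work completes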

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIRect& dstRect,
                           GrSamplerState::Filter filter);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType colorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType colorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex,
                                 SkTextureCompressionType compression,
                                 VkFormat vkFormat,
                                 SkISize dimensions,
                                 skgpu::Mipmapped mipmapped,
                                 const void* data,
                                 size_t dataSize);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        skgpu::Mipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const skgpu::VulkanInterface> fInterface;
    sk_sp<skgpu::VulkanMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue;  // Must be a graphics queue.
    uint32_t fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    skia_private::STArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
    skia_private::STArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;

    skia_private::TArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // Tracks whether we have already disconnected all the GPU resources from the Vulkan context.
    bool fDisconnected;

    skgpu::Protected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    skgpu::VulkanDeviceLostContext fDeviceLostContext;
    skgpu::VulkanDeviceLostProc fDeviceLostProc;

    using INHERITED = GrGpu;
};

#endif