/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/vk/GrVkCaps.h"
#include "src/gpu/vk/GrVkMSAALoadManager.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

class GrVkGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) override;

    void deleteBackendTexture(const GrBackendTexture&) override;
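
    // A minimal usage sketch for the mutable-state entry points above. This is
    // hypothetical caller code, not part of this class; clients normally reach
    // these through GrDirectContext::setBackendTextureState(), which forwards
    // here. The layout and queue family chosen are assumptions:
    //
    //   GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_GENERAL,
    //                                         VK_QUEUE_FAMILY_IGNORED);
    //   gpu->setBackendTextureState(backendTex, newState,
    //                               /*previousState=*/nullptr,
    //                               /*finishedCallback=*/nullptr);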

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
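
    // A minimal usage sketch for the semaphore hooks above (hypothetical
    // sequence; 'externalSemaphore' is an assumed VkSemaphore created by the
    // client). Wrapped wait semaphores are attached to the next queue
    // submission:
    //
    //   GrBackendSemaphore backendSem;
    //   backendSem.initVulkan(externalSemaphore);
    //   std::unique_ptr<GrSemaphore> sem = gpu->wrapBackendSemaphore(
    //           backendSem, GrSemaphoreWrapType::kWillWait, kBorrow_GrWrapOwnership);
    //   gpu->waitSemaphore(sem.get());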

    // These match the definitions in SkDrawable, from whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
    // Vulkan call (and GrVkMemoryAllocator call that returns VkResult) made on behalf of the
    // GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
    bool checkVkResult(VkResult);
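
    // A minimal sketch of the intended checkVkResult() call pattern. This is
    // hypothetical caller code; real call sites typically go through the
    // GR_VK_CALL_RESULT macro from GrVkUtil.h, which routes the returned
    // VkResult through this function:
    //
    //   VkResult result;
    //   GR_VK_CALL_RESULT(gpu, result, QueueWaitIdle(gpu->queue()));
    //   if (result != VK_SUCCESS) {
    //       // device-lost / OOM has already been recorded on the GrVkGpu
    //   }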

private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*, const GrVkBackendContext&, const sk_sp<GrVkCaps> caps,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion,
            sk_sp<GrVkMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                SkISize dimensions,
                                const GrVkSharedImageInfo& newInfo,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void onReportSubmitHistograms() override;
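
    // A condensed sketch of how the submit entry point above maps onto
    // submitCommandBuffer() below (consistent with the comment that follows,
    // but an illustration rather than the verbatim implementation):
    //
    //   bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
    //       return this->submitCommandBuffer(syncCpu ? kForce_SyncQueue
    //                                                : kSkip_SyncQueue);
    //   }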

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType colorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType colorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 const void* data, size_t dataSize);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        GrMipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const GrVkInterface>                 fInterface;
    sk_sp<GrVkMemoryAllocator>                 fMemoryAllocator;
    sk_sp<GrVkCaps>                            fVkCaps;
    bool                                       fDeviceIsLost = false;

    VkPhysicalDevice                           fPhysicalDevice;
    VkDevice                                   fDevice;
    VkQueue                                    fQueue;  // Must be Graphics queue
    uint32_t                                   fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider                       fResourceProvider;
    GrStagingBufferManager                     fStagingBufferManager;

    GrVkMSAALoadManager                        fMSAALoadManager;

    GrVkCommandPool*                           fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer*                  fMainCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*>     fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>     fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties                 fPhysDevProps;
    VkPhysicalDeviceMemoryProperties           fPhysDevMemProps;

    // Tracks whether we have already disconnected all the gpu resources from the Vulkan context.
    bool                                       fDisconnected;

    GrProtected                                fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass>         fCachedOpsRenderPass;

    using INHERITED = GrGpu;
};

#endif
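
// A minimal construction sketch (hypothetical client code; the handles are
// assumed to come from the client's own Vulkan initialization):
//
//   GrVkBackendContext backendContext;
//   backendContext.fInstance           = instance;
//   backendContext.fPhysicalDevice     = physicalDevice;
//   backendContext.fDevice             = device;
//   backendContext.fQueue              = graphicsQueue;
//   backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
//   backendContext.fGetProc            = getProc;
//   // Clients normally call GrDirectContext::MakeVulkan(backendContext),
//   // which ends up in GrVkGpu::Make().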