/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/vk/GrVkCaps.h"
#include "src/gpu/vk/GrVkMSAALoadManager.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkMemoryReclaimer.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

class GrVkGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
    GrVkMemoryAllocator* memoryAllocatorCacheImage() const {
        return fMemoryAllocatorCacheImage.get();
    }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) override;
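
    // Illustrative sketch (not part of the original header): the two mutable-state
    // setters above let a client ask Skia to transition a wrapped backend object's
    // VkImageLayout and queue family, e.g. before handing an image to an external
    // Vulkan consumer. `gpu` and `backendTex` are hypothetical:
    //
    //     GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    //                                           VK_QUEUE_FAMILY_IGNORED);
    //     gpu->setBackendTextureState(backendTex, newState,
    //                                 /*previousState=*/nullptr,
    //                                 /*finishedCallback=*/nullptr);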

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;

    // These match the definitions in SkDrawable, from whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
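
    // Illustrative sketch (not part of the original header) of the ownership hand-off
    // described above: the handler must stay alive until the recorded work reaches the
    // queue, so callers move it into GrVkGpu rather than deleting it themselves. The
    // call site below is hypothetical:
    //
    //     std::unique_ptr<SkDrawable::GpuDrawHandler> handler =
    //             skDrawable->snapGpuDrawHandler(GrBackendApi::kVulkan, matrix,
    //                                            clipBounds, bufferInfo);
    //     handler->draw(GrBackendDrawableInfo(vkDrawableInfo));
    //     gpu->addDrawable(std::move(handler));  // deleted after the next submit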

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
    // Vulkan call (and GrVkMemoryAllocator call that returns VkResult) made on behalf of the
    // GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
    bool checkVkResult(VkResult);
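
    // Illustrative sketch (not part of the original header): call sites typically route
    // raw Vulkan results through this check. Assuming the GR_VK_CALL_RESULT helper from
    // GrVkUtil.h, which invokes checkVkResult() internally; `fence` and `createInfo` are
    // hypothetical locals:
    //
    //     VkResult result;
    //     GR_VK_CALL_RESULT(this, result,
    //                       CreateFence(this->device(), &createInfo, nullptr, &fence));
    //     if (result != VK_SUCCESS) {
    //         return false;  // device-lost/OOM bookkeeping already happened in the check
    //     }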

    std::array<int, 2> GetHpsDimension(const SkBlurArg& blurArg) const override;

    void vmaDefragment() override { fMemoryAllocatorCacheImage->vmaDefragment(); }
    void dumpVmaStats(SkString* out) override;

#ifdef SKIA_DFX_FOR_OHOS
    void addAllocImageBytes(size_t bytes);
    void removeAllocImageBytes(size_t bytes);
    void addAllocBufferBytes(size_t bytes);
    void removeAllocBufferBytes(size_t bytes);
#endif

    // OH ISSUE: async memory reclaimer
    void setGpuMemoryAsyncReclaimerSwitch(bool enabled,
                                          const std::function<void()>& setThreadPriority) override;
    void flushGpuMemoryInWaitQueue() override;
    GrVkMemoryReclaimer* memoryReclaimer() const { return fMemoryReclaimer.get(); }

private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*, const GrVkBackendContext&, const sk_sp<GrVkCaps> caps,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion,
            sk_sp<GrVkMemoryAllocator>, sk_sp<GrVkMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                SkISize dimensions,
                                const GrVkSharedImageInfo& newInfo,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               OH_NativeBuffer* nativeBuffer,
                                               size_t bufferSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);
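
    // Rough pseudocode sketch (not part of the original header; details are inferred from
    // the comment above, and the exact internals live in GrVkGpu.cpp):
    //
    //     bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    //         fMainCmdBuffer->end(this);
    //         // Submission attaches fSemaphoresToSignal / fSemaphoresToWaitOn to the
    //         // VkSubmitInfo, then clears both arrays.
    //         ... submit fMainCmdBuffer to fQueue ...
    //         if (kForce_SyncQueue == sync) {
    //             ... block until the queue finishes all outstanding work ...
    //         }
    //         ... acquire a fresh primary command buffer from fMainCmdPool and begin it ...
    //     }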

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType colorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType colorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 const void* data, size_t dataSize);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 OH_NativeBuffer* nativeBuffer, size_t bufferSize);

    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        GrMipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const GrVkInterface> fInterface;
    sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkMemoryAllocator> fMemoryAllocatorCacheImage;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue;  // must be a graphics queue
    uint32_t fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // Tracks whether we've already disconnected all the gpu resources from the vulkan context.
    bool fDisconnected;

    GrProtected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    std::unique_ptr<GrVkMemoryReclaimer> fMemoryReclaimer;

    using INHERITED = GrGpu;

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    const int TRACE_LIMIT_TIME = 500;
#endif
};

#endif
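
// Illustrative sketch (not part of this header): GrVkGpu is normally created indirectly
// through GrDirectContext::MakeVulkan(backendContext), which routes to GrVkGpu::Make. A
// hypothetical, minimal GrVkBackendContext setup might look like:
//
//     GrVkBackendContext backendContext;
//     backendContext.fInstance = instance;
//     backendContext.fPhysicalDevice = physicalDevice;
//     backendContext.fDevice = device;
//     backendContext.fQueue = graphicsQueue;               // must be graphics-capable
//     backendContext.fGraphicsQueueIndex = queueFamilyIndex;
//     backendContext.fGetProc = getProc;                   // instance/device proc loader
//     sk_sp<GrDirectContext> context = GrDirectContext::MakeVulkan(backendContext);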