/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/vk/GrVkCaps.h"
#include "src/gpu/vk/GrVkMSAALoadManager.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

class GrVkGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) override;

    void deleteBackendTexture(const GrBackendTexture&) override;
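
    // A hedged usage sketch for setBackendTextureState (illustrative only; the
    // target layout shown is an assumption, not a requirement). A client that
    // needs a wrapped texture transitioned to VK_IMAGE_LAYOUT_GENERAL before
    // external use might call:
    //
    //   GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_GENERAL,
    //                                         VK_QUEUE_FAMILY_IGNORED);
    //   gpu->setBackendTextureState(backendTex, newState,
    //                               /*previousState=*/nullptr,
    //                               /*finishedCallback=*/nullptr);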
94 95 bool compile(const GrProgramDesc&, const GrProgramInfo&) override; 96 97 #if GR_TEST_UTILS 98 bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override; 99 100 GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions, 101 GrColorType, 102 int sampleCnt, 103 GrProtected) override; 104 void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override; 105 resetShaderCacheForTesting()106 void resetShaderCacheForTesting() const override { 107 fResourceProvider.resetShaderCacheForTesting(); 108 } 109 #endif 110 111 sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/, 112 SkISize dimensions, int numStencilSamples) override; 113 getPreferredStencilFormat(const GrBackendFormat &)114 GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override { 115 return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat()); 116 } 117 118 sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions, 119 const GrBackendFormat& format, 120 int numSamples, 121 GrProtected isProtected) override; 122 123 void addBufferMemoryBarrier(const GrManagedResource*, 124 VkPipelineStageFlags srcStageMask, 125 VkPipelineStageFlags dstStageMask, 126 bool byRegion, 127 VkBufferMemoryBarrier* barrier) const; 128 void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask, 129 VkPipelineStageFlags dstStageMask, 130 bool byRegion, 131 VkBufferMemoryBarrier* barrier) const; 132 void addImageMemoryBarrier(const GrManagedResource*, 133 VkPipelineStageFlags srcStageMask, 134 VkPipelineStageFlags dstStageMask, 135 bool byRegion, 136 VkImageMemoryBarrier* barrier) const; 137 138 bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer, 139 const GrVkRenderPass& renderPass, 140 GrAttachment* dst, 141 GrVkAttachment* src, 142 const SkIRect& srcRect); 143 144 bool onRegenerateMipMapLevels(GrTexture* tex) override; 145 146 void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override; 147 148 void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>); 149 150 void submit(GrOpsRenderPass*) override; 151 152 GrFence SK_WARN_UNUSED_RESULT insertFence() override; 153 bool waitFence(GrFence) override; 154 void deleteFence(GrFence) const override; 155 156 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override; 157 std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore, 158 GrResourceProvider::SemaphoreWrapType wrapType, GrWrapOwnership ownership) override; 159 void insertSemaphore(GrSemaphore* semaphore) override; 160 void waitSemaphore(GrSemaphore* semaphore) override; 161 162 // These match the definitions in SkDrawable, from whence they came 163 typedef void* SubmitContext; 164 typedef void (*SubmitProc)(SubmitContext submitContext); 165 166 // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary 167 // command buffer to the gpu. 
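
    // A minimal sketch of a callback matching SubmitProc (the function and the
    // use of the context are hypothetical, for illustration only):
    //
    //   static void MySubmitProc(GrVkGpu::SubmitContext submitContext) {
    //       // Invoked when the associated drawable's commands are submitted;
    //       // cast submitContext back to whatever state the client supplied.
    //   }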

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
    // Vulkan call (and GrVkMemoryAllocator call that returns VkResult) made on behalf of the
    // GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
    bool checkVkResult(VkResult);
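
    // A hedged usage sketch (illustrative; assumes the GR_VK_CALL convenience
    // macro from GrVkUtil.h). Call sites typically funnel raw Vulkan results
    // through checkVkResult so device-lost/OOM handling stays centralized:
    //
    //   VkResult result = GR_VK_CALL(this->vkInterface(), QueueWaitIdle(fQueue));
    //   if (!this->checkVkResult(result)) {
    //       return false;  // device lost or out of memory; already reported
    //   }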

private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*, const GrVkBackendContext&, const sk_sp<GrVkCaps> caps,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion,
            sk_sp<GrVkMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                SkISize dimensions,
                                const GrVkSharedImageInfo& newInfo,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                      GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType surfaceColorType, GrColorType srcColorType,
                       const GrMipLevel texels[], int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
                            GrColorType textureColorType, GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer> transferBuffer, size_t offset,
                            size_t rowBytes) override;
    bool onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                              GrColorType surfaceColorType, GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer> transferBuffer, size_t offset) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment*,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);
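
    // Illustrative note (hedged; this mirrors the typical implementation in
    // GrVkGpu.cpp but is not guaranteed by this header): onSubmitToGpu()
    // forwards to this helper, mapping its bool to the SyncQueue enum, e.g.
    //
    //   return this->submitCommandBuffer(syncCpu ? kForce_SyncQueue
    //                                            : kSkip_SyncQueue);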

    void copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                                GrVkImage* srcImage, const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage, GrVkImage* srcImage,
                           const SkIRect& srcRect, const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkAttachment* tex, int left, int top, int width, int height,
                             GrColorType colorType, const void* data, size_t rowBytes);
    bool uploadTexDataOptimal(GrVkAttachment* tex, int left, int top, int width, int height,
                              GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
    bool uploadTexDataCompressed(GrVkAttachment* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 const void* data, size_t dataSize);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        GrMipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const GrVkInterface> fInterface;
    sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue;  // Must be a graphics queue.
    uint32_t fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // Tracks whether we have already disconnected all the GPU resources from the Vulkan context.
    bool fDisconnected;

    GrProtected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    using INHERITED = GrGpu;
};

#endif