/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/vk/GrVkCaps.h"
#include "src/gpu/vk/GrVkMSAALoadManager.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrDirectContext;
class GrPipeline;

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkOpsRenderPass;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

class GrVkGpu : public GrGpu {
public:
    // Clients normally reach this factory indirectly, via GrDirectContext::MakeVulkan();
    // see the bring-up sketch at the end of this file.
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&,
                             GrDirectContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue  queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == GrProtected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) override;

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;
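    // A sketch of the typical call pattern for addImageMemoryBarrier(): fill out a
    // VkImageMemoryBarrier describing a layout transition and let the gpu record it with the
    // appropriate stage masks. The layouts, access masks, and the `gpu`/`image` variables below
    // are illustrative placeholders, not the exact values the backend uses:
    //
    //   VkImageMemoryBarrier barrier;
    //   memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    //   barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //   barrier.srcAccessMask = 0;
    //   barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //   barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    //   barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.image = image->image();
    //   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //   gpu->addImageMemoryBarrier(image->resource(),
    //                              VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
    //                              VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                              /*byRegion=*/false, &barrier);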
    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;

    // These match the definitions in SkDrawable, from whence they came.
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
                      VkDeviceSize offset, VkDeviceSize size);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if the VkResult indicates success, and also checks for device loss and OOM.
    // Every Vulkan call (and GrVkMemoryAllocator call that returns VkResult) made on behalf of
    // the GrVkGpu should be processed by this function so that we respond to OOMs and lost
    // devices.
    bool checkVkResult(VkResult);
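    // The convention above is applied at every call site. A sketch of the pattern, using the
    // GR_VK_CALL dispatch macro from GrVkUtil.h (the fence creation is just an illustrative
    // call; Skia wraps this pairing in a GR_VK_CALL_RESULT-style macro):
    //
    //   VkFenceCreateInfo createInfo;
    //   memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    //   createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    //   VkFence fence;
    //   VkResult result = GR_VK_CALL(this->vkInterface(),
    //                                CreateFence(fDevice, &createInfo, nullptr, &fence));
    //   if (!this->checkVkResult(result)) {
    //       return false;  // a device-lost or OOM condition has been recorded; bail out
    //   }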
private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*, const GrVkBackendContext&, const sk_sp<GrVkCaps> caps,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion,
            sk_sp<GrVkMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                SkISize dimensions,
                                const GrVkSharedImageInfo& newInfo,
                                GrBackendSurfaceMutableState* previousState,
                                sk_sp<GrRefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src,
                       const SkIRect& srcRect, const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;
    void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);
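    // Conceptually, the semaphore handling described above corresponds to a queue submission
    // like the following raw-Vulkan sketch (the real code routes this through
    // GrVkPrimaryCommandBuffer; `waitSemaphores`, `waitStages`, `signalSemaphores`, `cmdBuffer`,
    // and `fence` are placeholders for state gathered from fSemaphoresToWaitOn,
    // fSemaphoresToSignal, and fMainCmdBuffer):
    //
    //   VkSubmitInfo submitInfo;
    //   memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    //   submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    //   submitInfo.waitSemaphoreCount = waitSemaphores.count();
    //   submitInfo.pWaitSemaphores = waitSemaphores.begin();
    //   submitInfo.pWaitDstStageMask = waitStages.begin();  // one stage mask per wait semaphore
    //   submitInfo.commandBufferCount = 1;
    //   submitInfo.pCommandBuffers = &cmdBuffer;
    //   submitInfo.signalSemaphoreCount = signalSemaphores.count();
    //   submitInfo.pSignalSemaphores = signalSemaphores.begin();
    //   VkResult result = GR_VK_CALL(this->vkInterface(),
    //                                QueueSubmit(fQueue, 1, &submitInfo, fence));
    //   this->checkVkResult(result);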
    void copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                                GrVkImage* srcImage, const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                           GrVkImage* srcImage, const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkImage* tex, SkIRect rect, GrColorType colorType,
                             const void* data, size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex, SkIRect rect, GrColorType colorType,
                              const GrMipLevel texels[], int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex, SkImage::CompressionType compression,
                                 VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
                                 const void* data, size_t dataSize);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        GrMipmapped,
                                        GrVkImageInfo*,
                                        GrProtected);

    sk_sp<const GrVkInterface>                 fInterface;
    sk_sp<GrVkMemoryAllocator>                 fMemoryAllocator;
    sk_sp<GrVkCaps>                            fVkCaps;
    bool                                       fDeviceIsLost = false;

    VkPhysicalDevice                           fPhysicalDevice;
    VkDevice                                   fDevice;
    VkQueue                                    fQueue;  // Must be a graphics queue
    uint32_t                                   fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider                       fResourceProvider;
    GrStagingBufferManager                     fStagingBufferManager;

    GrVkMSAALoadManager                        fMSAALoadManager;

    GrVkCommandPool*                           fMainCmdPool;
    // Just a raw pointer; the object's lifespan is managed by fMainCmdPool.
    GrVkPrimaryCommandBuffer*                  fMainCmdBuffer;

    SkSTArray<1, GrVkSemaphore::Resource*>     fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>     fSemaphoresToSignal;

    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties                 fPhysDevProps;
    VkPhysicalDeviceMemoryProperties           fPhysDevMemProps;

    // Tracks whether we have already disconnected all the gpu resources from the Vulkan context.
    bool                                       fDisconnected;
    GrProtected                                fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass>         fCachedOpsRenderPass;

    using INHERITED = GrGpu;
};

#endif
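// Bring-up sketch (illustrative): client code normally reaches GrVkGpu::Make() indirectly, by
// filling out a GrVkBackendContext and calling GrDirectContext::MakeVulkan(). The Vulkan
// instance/device/queue creation is ordinary Vulkan boilerplate and is elided here; `instance`,
// `physicalDevice`, `device`, `graphicsQueue`, `graphicsQueueIndex`, and `getProc` are assumed
// to come from it.
//
//   GrVkBackendContext backendContext;
//   backendContext.fInstance = instance;
//   backendContext.fPhysicalDevice = physicalDevice;
//   backendContext.fDevice = device;
//   backendContext.fQueue = graphicsQueue;            // must be a graphics-capable queue
//   backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
//   backendContext.fGetProc = getProc;                // resolves instance and device functions
//   sk_sp<GrDirectContext> context = GrDirectContext::MakeVulkan(backendContext);
//   if (!context) {
//       // GrVkGpu::Make() rejected the device (unsupported version, missing features, ...).
//   }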