/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkResourceProvider_DEFINED
#define GrVkResourceProvider_DEFINED

#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/SkTArray.h"
#include "src/core/SkLRUCache.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrResourceHandle.h"
#include "src/gpu/vk/GrVkDescriptorPool.h"
#include "src/gpu/vk/GrVkDescriptorSetManager.h"
#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResource.h"
#include "src/gpu/vk/GrVkSampler.h"
#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
#include "src/gpu/vk/GrVkUtil.h"

#include <mutex>
#include <thread>

class GrVkCommandPool;
class GrVkGpu;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
class GrVkUniformHandler;

class GrVkResourceProvider {
public:
    GrVkResourceProvider(GrVkGpu* gpu);
    ~GrVkResourceProvider();

    // Set up any initial vk objects
    void init();

    GrVkPipeline* createPipeline(int numColorSamples,
                                 const GrPrimitiveProcessor& primProc,
                                 const GrPipeline& pipeline,
                                 const GrStencilSettings& stencil,
                                 GrSurfaceOrigin,
                                 VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                 int shaderStageCount,
                                 GrPrimitiveType primitiveType,
                                 VkRenderPass compatibleRenderPass,
                                 VkPipelineLayout layout);

    GR_DEFINE_RESOURCE_HANDLE_CLASS(CompatibleRPHandle);

    // Finds or creates a simple render pass that matches the target, increments the refcount,
    // and returns the render pass. The caller can optionally pass in a pointer to a
    // CompatibleRPHandle. If this is non-null, it will be set to a handle that can be used in
    // the future to quickly return a compatible GrVkRenderPass without needing to inspect a
    // GrVkRenderTarget (see the usage sketch below).
    const GrVkRenderPass* findCompatibleRenderPass(const GrVkRenderTarget& target,
                                                   CompatibleRPHandle* compatibleHandle = nullptr);
    // The CompatibleRPHandle must be a valid handle previously set by a call to
    // findCompatibleRenderPass(GrVkRenderTarget&, CompatibleRPHandle*).
    const GrVkRenderPass* findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle);

    const GrVkRenderPass* findCompatibleExternalRenderPass(VkRenderPass,
                                                           uint32_t colorAttachmentIndex);

    // Finds or creates a render pass that matches the target and LoadStoreOps, increments the
    // refcount, and returns the render pass. The caller can optionally pass in a pointer to a
    // CompatibleRPHandle. If this is non-null, it will be set to a handle that can be used in
    // the future to quickly return a GrVkRenderPass without needing to inspect a
    // GrVkRenderTarget.
    const GrVkRenderPass* findRenderPass(const GrVkRenderTarget& target,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps,
                                         CompatibleRPHandle* compatibleHandle = nullptr);
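
    // A minimal usage sketch of the CompatibleRPHandle workflow (illustrative only, not part of
    // this header's contract). It assumes a valid GrVkResourceProvider 'provider', a
    // GrVkRenderTarget 'target', and caller-built LoadStoreOps 'colorOps'/'stencilOps':
    //
    //   GrVkResourceProvider::CompatibleRPHandle rpHandle;
    //   const GrVkRenderPass* compatRP = provider.findCompatibleRenderPass(target, &rpHandle);
    //   // Later, reuse the handle instead of re-inspecting the render target:
    //   const GrVkRenderPass* rp = provider.findRenderPass(rpHandle, colorOps, stencilOps);
    //   // Both calls return a ref that the caller is responsible for releasing when done.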
    // The CompatibleRPHandle must be a valid handle previously set by a call to findRenderPass
    // or findCompatibleRenderPass.
    const GrVkRenderPass* findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps);

    GrVkCommandPool* findOrCreateCommandPool();

    void checkCommandBuffers();

    // We must add the finishedProc to all active command buffers since we may have flushed work
    // that the client cares about before they explicitly called flush, and the GPU may reorder
    // command execution. So we make sure all previously submitted work finishes before we call
    // the finishedProc.
    void addFinishedProcToActiveCommandBuffers(GrGpuFinishedProc finishedProc,
                                               GrGpuFinishedContext finishedContext);

    // Finds or creates a compatible GrVkDescriptorPool for the requested type and count.
    // The refcount is incremented and a pointer returned.
    // TODO: Currently this will just create a descriptor pool without holding onto a ref itself,
    //       so we currently do not reuse them. Reuse requires knowing if another draw is
    //       currently using the GrVkDescriptorPool, the ability to reset pools, and the ability
    //       to purge pools out of our cache of GrVkDescriptorPools.
    GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(VkDescriptorType type,
                                                             uint32_t count);

    // Finds or creates a compatible GrVkSampler based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSampler* findOrCreateCompatibleSampler(const GrSamplerState&,
                                               const GrVkYcbcrConversionInfo& ycbcrInfo);

    // Finds or creates a compatible GrVkSamplerYcbcrConversion based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSamplerYcbcrConversion* findOrCreateCompatibleSamplerYcbcrConversion(
            const GrVkYcbcrConversionInfo& ycbcrInfo);

    GrVkPipelineState* findOrCreateCompatiblePipelineState(
            GrRenderTarget*, GrSurfaceOrigin,
            const GrPipeline&,
            const GrPrimitiveProcessor&,
            const GrTextureProxy* const primProcProxies[],
            GrPrimitiveType,
            VkRenderPass compatibleRenderPass);

    void getSamplerDescriptorSetHandle(VkDescriptorType type,
                                       const GrVkUniformHandler&,
                                       GrVkDescriptorSetManager::Handle* handle);
    void getSamplerDescriptorSetHandle(VkDescriptorType type,
                                       const SkTArray<uint32_t>& visibilities,
                                       GrVkDescriptorSetManager::Handle* handle);

    // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does
    // not own the VkDescriptorSetLayout and thus should not delete it. This function should be
    // used when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getUniformDSLayout() const;

    // Returns the compatible VkDescriptorSetLayout to use for a specific sampler handle. The
    // caller does not own the VkDescriptorSetLayout and thus should not delete it. This function
    // should be used when the caller needs the layout to create a VkPipelineLayout (see the
    // sketch below).
    VkDescriptorSetLayout getSamplerDSLayout(const GrVkDescriptorSetManager::Handle&) const;
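
    // A hedged sketch (illustrative only) of how these layouts might feed into a
    // VkPipelineLayout. 'provider' (this resource provider), 'gpu' (a GrVkGpu*), and
    // 'samplerHandle' are assumed to exist; the set ordering shown is just an example:
    //
    //   VkDescriptorSetLayout dsLayouts[2] = { provider.getUniformDSLayout(),
    //                                          provider.getSamplerDSLayout(samplerHandle) };
    //   VkPipelineLayoutCreateInfo layoutCreateInfo{};
    //   layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    //   layoutCreateInfo.setLayoutCount = 2;
    //   layoutCreateInfo.pSetLayouts = dsLayouts;
    //   VkPipelineLayout pipelineLayout;
    //   GR_VK_CALL(gpu->vkInterface(),
    //              CreatePipelineLayout(gpu->device(), &layoutCreateInfo, nullptr,
    //                                   &pipelineLayout));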
    // Returns a GrVkDescriptorSet that can be used for uniform buffers. The GrVkDescriptorSet
    // is already reffed for the caller.
    const GrVkDescriptorSet* getUniformDescriptorSet();

    // Returns a GrVkDescriptorSet that can be used for sampler descriptors that are compatible
    // with the GrVkDescriptorSetManager::Handle passed in. The GrVkDescriptorSet is already
    // reffed for the caller.
    const GrVkDescriptorSet* getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle&);

    // Signals that the descriptor set passed in, which is compatible with the passed in handle,
    // can be reused by the next allocation request.
    void recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                              const GrVkDescriptorSetManager::Handle&);

    // Creates or finds free uniform buffer resources of size GrVkUniformBuffer::kStandardSize.
    // Anything larger will need to be created and released by the client.
    const GrVkResource* findOrCreateStandardUniformBufferResource();

    // Signals that the resource passed to it (which should be a uniform buffer resource)
    // can be reused by the next uniform buffer resource request.
    void recycleStandardUniformBufferResource(const GrVkResource*);

    void storePipelineCacheData();

    // Destroy any cached resources. To be called before destroying the VkDevice.
    // The assumption is that all queues are idle and all command buffers are finished.
    // For resource tracing to work properly, this should be called after unrefing all other
    // resource usages.
    // If deviceLost is true, then resources will not be checked to see if they've finished
    // before deleting (see section 4.2.4 of the Vulkan spec).
    void destroyResources(bool deviceLost);

    // Abandon any cached resources. To be used when the context/VkDevice is lost.
    // For resource tracing to work properly, this should be called after unrefing all other
    // resource usages.
    void abandonResources();

    void backgroundReset(GrVkCommandPool* pool);

    void reset(GrVkCommandPool* pool);

#if GR_TEST_UTILS
    void resetShaderCacheForTesting() const { fPipelineStateCache->release(); }
#endif

private:

#ifdef SK_DEBUG
#define GR_PIPELINE_STATE_CACHE_STATS
#endif

    class PipelineStateCache : public ::SkNoncopyable {
    public:
        PipelineStateCache(GrVkGpu* gpu);
        ~PipelineStateCache();

        void abandon();
        void release();
        GrVkPipelineState* refPipelineState(GrRenderTarget*, GrSurfaceOrigin,
                                            const GrPrimitiveProcessor&,
                                            const GrTextureProxy* const primProcProxies[],
                                            const GrPipeline&,
                                            GrPrimitiveType,
                                            VkRenderPass compatibleRenderPass);

    private:
        enum {
            // We may actually have kMaxEntries+1 PipelineStates in context because we create a
            // new PipelineState before evicting from the cache.
            kMaxEntries = 128,
        };

        struct Entry;

        struct DescHash {
            uint32_t operator()(const GrProgramDesc& desc) const {
                return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
            }
        };

        SkLRUCache<const GrVkPipelineStateBuilder::Desc, std::unique_ptr<Entry>, DescHash> fMap;

        GrVkGpu* fGpu;

#ifdef GR_PIPELINE_STATE_CACHE_STATS
        int fTotalRequests;
        int fCacheMisses;
#endif
    };
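
    // Illustrative-only sketch of the find-or-create flow refPipelineState() is expected to
    // follow against fMap above (the real implementation lives in a .cpp file; the Entry
    // contents and builder call are assumptions, not the actual code):
    //
    //   std::unique_ptr<Entry>* entry = fMap.find(desc);
    //   if (!entry) {
    //       // Cache miss: build a new GrVkPipelineState and insert it. SkLRUCache evicts the
    //       // least recently used entry once kMaxEntries is exceeded.
    //       entry = fMap.insert(desc, std::make_unique<Entry>(/* hypothetical fields */));
    //   }
    //   // Hand back a ref on the cached pipeline state.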
    class CompatibleRenderPassSet {
    public:
        // This will always construct the basic load store render pass (all attachments load and
        // store their data) so that there is at least one compatible VkRenderPass that can be
        // used with this set.
        CompatibleRenderPassSet(const GrVkGpu* gpu, const GrVkRenderTarget& target);

        bool isCompatible(const GrVkRenderTarget& target) const;

        GrVkRenderPass* getCompatibleRenderPass() const {
            // The first GrVkRenderPass should always exist since we create the basic load store
            // render pass on creation.
            SkASSERT(fRenderPasses[0]);
            return fRenderPasses[0];
        }

        GrVkRenderPass* getRenderPass(const GrVkGpu* gpu,
                                      const GrVkRenderPass::LoadStoreOps& colorOps,
                                      const GrVkRenderPass::LoadStoreOps& stencilOps);

        void releaseResources(GrVkGpu* gpu);
        void abandonResources();

    private:
        SkSTArray<4, GrVkRenderPass*> fRenderPasses;
        int fLastReturnedIndex;
    };

    VkPipelineCache pipelineCache();

    GrVkGpu* fGpu;

    // Central cache for creating pipelines
    VkPipelineCache fPipelineCache;

    SkSTArray<4, CompatibleRenderPassSet> fRenderPassArray;

    SkTArray<const GrVkRenderPass*> fExternalRenderPasses;

    // Array of command pools that we are waiting on
    SkSTArray<4, GrVkCommandPool*, true> fActiveCommandPools;

    // Array of available command pools that are not in flight
    SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools;

    // Array of available uniform buffer resources
    SkSTArray<16, const GrVkResource*, true> fAvailableUniformBufferResources;

    // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
    // GrVkPipelineStates
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key> fSamplers;

    // Stores GrVkSamplerYcbcrConversion objects that we've already created so we can reuse them.
    SkTDynamicHash<GrVkSamplerYcbcrConversion, GrVkSamplerYcbcrConversion::Key> fYcbcrConversions;

    // Cache of GrVkPipelineStates
    PipelineStateCache* fPipelineStateCache;

    SkSTArray<4, std::unique_ptr<GrVkDescriptorSetManager>> fDescriptorSetManagers;

    GrVkDescriptorSetManager::Handle fUniformDSHandle;

    std::recursive_mutex fBackgroundMutex;
};

#endif