/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkResourceProvider_DEFINED
#define GrVkResourceProvider_DEFINED

#include "GrResourceHandle.h"
#include "GrVkDescriptorPool.h"
#include "GrVkDescriptorSetManager.h"
#include "GrVkPipelineStateBuilder.h"
#include "GrVkRenderPass.h"
#include "GrVkResource.h"
#include "GrVkSampler.h"
#include "GrVkSamplerYcbcrConversion.h"
#include "GrVkUtil.h"
#include "SkLRUCache.h"
#include "SkTArray.h"
#include "SkTDynamicHash.h"
#include "SkTInternalLList.h"
#include "vk/GrVkTypes.h"

#include <mutex>
#include <thread>

class GrVkCommandPool;
class GrVkCopyPipeline;
class GrVkGpu;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
class GrVkUniformHandler;

class GrVkResourceProvider {
public:
    GrVkResourceProvider(GrVkGpu* gpu);
    ~GrVkResourceProvider();

    // Set up any initial vk objects
    void init();

    GrVkPipeline* createPipeline(int numColorSamples,
                                 const GrPrimitiveProcessor& primProc,
                                 const GrPipeline& pipeline,
                                 const GrStencilSettings& stencil,
                                 VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                 int shaderStageCount,
                                 GrPrimitiveType primitiveType,
                                 VkRenderPass compatibleRenderPass,
                                 VkPipelineLayout layout);

    GrVkCopyPipeline* findOrCreateCopyPipeline(const GrVkRenderTarget* dst,
                                               VkPipelineShaderStageCreateInfo*,
                                               VkPipelineLayout);

    GR_DEFINE_RESOURCE_HANDLE_CLASS(CompatibleRPHandle);

    // Finds or creates a simple render pass that matches the target, increments the refcount,
    // and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle. If this
    // is non-null, it will be set to a handle that can be used in the future to quickly return
    // a compatible GrVkRenderPass without needing to inspect a GrVkRenderTarget.
    const GrVkRenderPass* findCompatibleRenderPass(const GrVkRenderTarget& target,
                                                   CompatibleRPHandle* compatibleHandle = nullptr);
    // The CompatibleRPHandle must be a valid handle previously set by a call to
    // findCompatibleRenderPass(GrVkRenderTarget&, CompatibleRPHandle*).
    const GrVkRenderPass* findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle);

    const GrVkRenderPass* findCompatibleExternalRenderPass(VkRenderPass,
                                                           uint32_t colorAttachmentIndex);

    // Finds or creates a render pass that matches the target and LoadStoreOps, increments the
    // refcount, and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle.
    // If this is non-null, it will be set to a handle that can be used in the future to quickly
    // return a GrVkRenderPass without needing to inspect a GrVkRenderTarget.
    const GrVkRenderPass* findRenderPass(const GrVkRenderTarget& target,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps,
                                         CompatibleRPHandle* compatibleHandle = nullptr);
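
    // A rough usage sketch of the handle-based lookup (illustrative only; 'provider', 'target',
    // 'colorOps', and 'stencilOps' are assumed to exist in the caller's scope). The handle
    // captured by the first lookup lets later lookups skip inspecting the render target:
    //
    //     GrVkResourceProvider::CompatibleRPHandle handle;
    //     const GrVkRenderPass* rp =
    //             provider.findRenderPass(target, colorOps, stencilOps, &handle);
    //     // ... use rp; the caller holds a ref on the returned render pass ...
    //     const GrVkRenderPass* rp2 = provider.findRenderPass(handle, colorOps, stencilOps);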

    // The CompatibleRPHandle must be a valid handle previously set by a call to findRenderPass
    // or findCompatibleRenderPass.
    const GrVkRenderPass* findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps);

    GrVkCommandPool* findOrCreateCommandPool();

    void checkCommandBuffers();

    // We must add the finishedProc to all active command buffers since we may have flushed work
    // that the client cares about before they explicitly called flush, and the GPU may reorder
    // command execution. So we make sure all previously submitted work finishes before we call
    // the finishedProc.
    void addFinishedProcToActiveCommandBuffers(GrGpuFinishedProc finishedProc,
                                               GrGpuFinishedContext finishedContext);

    // Finds or creates a compatible GrVkDescriptorPool for the requested type and count.
    // The refcount is incremented and a pointer returned.
    // TODO: Currently this will just create a descriptor pool without holding onto a ref itself,
    //       so we do not currently reuse them. Requires knowing if another draw is currently
    //       using the GrVkDescriptorPool, the ability to reset pools, and the ability to purge
    //       pools out of our cache of GrVkDescriptorPools.
    GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(VkDescriptorType type, uint32_t count);

    // Finds or creates a compatible GrVkSampler based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSampler* findOrCreateCompatibleSampler(const GrSamplerState&,
                                               const GrVkYcbcrConversionInfo& ycbcrInfo);

    // Finds or creates a compatible GrVkSamplerYcbcrConversion based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSamplerYcbcrConversion* findOrCreateCompatibleSamplerYcbcrConversion(
            const GrVkYcbcrConversionInfo& ycbcrInfo);

    GrVkPipelineState* findOrCreateCompatiblePipelineState(
            GrRenderTarget*, GrSurfaceOrigin,
            const GrPipeline&,
            const GrPrimitiveProcessor&,
            const GrTextureProxy* const primProcProxies[],
            GrPrimitiveType,
            VkRenderPass compatibleRenderPass);

    void getSamplerDescriptorSetHandle(VkDescriptorType type,
                                       const GrVkUniformHandler&,
                                       GrVkDescriptorSetManager::Handle* handle);
    void getSamplerDescriptorSetHandle(VkDescriptorType type,
                                       const SkTArray<uint32_t>& visibilities,
                                       GrVkDescriptorSetManager::Handle* handle);

    // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does
    // not own the VkDescriptorSetLayout and thus should not delete it. This function should be
    // used when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getUniformDSLayout() const;

    // Returns the compatible VkDescriptorSetLayout to use for a specific sampler handle. The
    // caller does not own the VkDescriptorSetLayout and thus should not delete it. This function
    // should be used when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getSamplerDSLayout(const GrVkDescriptorSetManager::Handle&) const;

    // Returns a GrVkDescriptorSet that can be used for uniform buffers. The GrVkDescriptorSet
    // is already reffed for the caller.
    const GrVkDescriptorSet* getUniformDescriptorSet();

    // Returns a GrVkDescriptorSet that can be used for sampler descriptors that are compatible
    // with the GrVkDescriptorSetManager::Handle passed in. The GrVkDescriptorSet is already
    // reffed for the caller.
    const GrVkDescriptorSet* getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle&);

    // Signals that the descriptor set passed in, which is compatible with the passed in handle,
    // can be reused by the next allocation request.
    void recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                              const GrVkDescriptorSetManager::Handle&);
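
    // One possible sampler descriptor set flow, sketched for illustration only ('provider' and
    // 'uniformHandler' are assumed to exist in the caller's scope): the handle identifies a
    // compatible layout, the layout feeds VkPipelineLayout creation, and the set is handed back
    // via recycleDescriptorSet() once the caller is done with it:
    //
    //     GrVkDescriptorSetManager::Handle samplerHandle;
    //     provider.getSamplerDescriptorSetHandle(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
    //                                            uniformHandler, &samplerHandle);
    //     VkDescriptorSetLayout layout = provider.getSamplerDSLayout(samplerHandle); // not owned
    //     const GrVkDescriptorSet* ds = provider.getSamplerDescriptorSet(samplerHandle);
    //     // ... write and bind the descriptor set ...
    //     provider.recycleDescriptorSet(ds, samplerHandle);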

    // Creates or finds free uniform buffer resources of size GrVkUniformBuffer::kStandardSize.
    // Anything larger will need to be created and released by the client.
    const GrVkResource* findOrCreateStandardUniformBufferResource();

    // Signals that the resource passed to it (which should be a uniform buffer resource)
    // can be reused by the next uniform buffer resource request.
    void recycleStandardUniformBufferResource(const GrVkResource*);
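
    // A rough sketch of the recycling contract (illustrative only; 'provider' is assumed to
    // exist in the caller's scope): standard-size uniform buffers borrow their backing
    // GrVkResource from the provider and hand it back instead of destroying it, so the next
    // request can reuse it.
    //
    //     const GrVkResource* bufRes = provider.findOrCreateStandardUniformBufferResource();
    //     // ... upload uniform data and issue draws that reference the resource ...
    //     provider.recycleStandardUniformBufferResource(bufRes);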

    void storePipelineCacheData();

    // Destroy any cached resources. To be called before destroying the VkDevice.
    // The assumption is that all queues are idle and all command buffers are finished.
    // For resource tracing to work properly, this should be called after unrefing all other
    // resource usages.
    // If deviceLost is true, then resources will not be checked to see if they've finished
    // before deleting (see section 4.2.4 of the Vulkan spec).
    void destroyResources(bool deviceLost);

    // Abandon any cached resources. To be used when the context/VkDevice is lost.
    // For resource tracing to work properly, this should be called after unrefing all other
    // resource usages.
    void abandonResources();

    void backgroundReset(GrVkCommandPool* pool);

    void reset(GrVkCommandPool* pool);

private:

#ifdef SK_DEBUG
#define GR_PIPELINE_STATE_CACHE_STATS
#endif

    class PipelineStateCache : public ::SkNoncopyable {
    public:
        PipelineStateCache(GrVkGpu* gpu);
        ~PipelineStateCache();

        void abandon();
        void release();
        GrVkPipelineState* refPipelineState(GrRenderTarget*, GrSurfaceOrigin,
                                            const GrPrimitiveProcessor&,
                                            const GrTextureProxy* const primProcProxies[],
                                            const GrPipeline&,
                                            GrPrimitiveType,
                                            VkRenderPass compatibleRenderPass);

    private:
        enum {
            // We may actually have kMaxEntries+1 PipelineStates in context because we create a
            // new PipelineState before evicting from the cache.
            kMaxEntries = 128,
        };

        struct Entry;

        struct DescHash {
            uint32_t operator()(const GrProgramDesc& desc) const {
                return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
            }
        };

        SkLRUCache<const GrVkPipelineStateBuilder::Desc, std::unique_ptr<Entry>, DescHash> fMap;

        GrVkGpu* fGpu;

#ifdef GR_PIPELINE_STATE_CACHE_STATS
        int fTotalRequests;
        int fCacheMisses;
#endif
    };

    class CompatibleRenderPassSet {
    public:
        // This will always construct the basic load store render pass (all attachments load and
        // store their data) so that there is at least one compatible VkRenderPass that can be
        // used with this set.
        CompatibleRenderPassSet(const GrVkGpu* gpu, const GrVkRenderTarget& target);

        bool isCompatible(const GrVkRenderTarget& target) const;

        GrVkRenderPass* getCompatibleRenderPass() const {
            // The first GrVkRenderPass should always exist since we create the basic load store
            // render pass at creation time.
            SkASSERT(fRenderPasses[0]);
            return fRenderPasses[0];
        }

        GrVkRenderPass* getRenderPass(const GrVkGpu* gpu,
                                      const GrVkRenderPass::LoadStoreOps& colorOps,
                                      const GrVkRenderPass::LoadStoreOps& stencilOps);

        void releaseResources(GrVkGpu* gpu);
        void abandonResources();

    private:
        SkSTArray<4, GrVkRenderPass*> fRenderPasses;
        int fLastReturnedIndex;
    };

    VkPipelineCache pipelineCache();

    GrVkGpu* fGpu;

    // Central cache for creating pipelines
    VkPipelineCache fPipelineCache;

    // Cache of previously created copy pipelines
    SkTArray<GrVkCopyPipeline*> fCopyPipelines;

    SkSTArray<4, CompatibleRenderPassSet> fRenderPassArray;

    SkTArray<const GrVkRenderPass*> fExternalRenderPasses;

    // Array of command pools that we are waiting on
    SkSTArray<4, GrVkCommandPool*, true> fActiveCommandPools;

    // Array of available command pools that are not in flight
    SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools;

    // Array of available uniform buffer resources
    SkSTArray<16, const GrVkResource*, true> fAvailableUniformBufferResources;

    // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
    // GrVkPipelineStates
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key> fSamplers;

    // Stores GrVkSamplerYcbcrConversion objects that we've already created so we can reuse them.
    SkTDynamicHash<GrVkSamplerYcbcrConversion, GrVkSamplerYcbcrConversion::Key> fYcbcrConversions;

    // Cache of GrVkPipelineStates
    PipelineStateCache* fPipelineStateCache;

    SkSTArray<4, std::unique_ptr<GrVkDescriptorSetManager>> fDescriptorSetManagers;

    GrVkDescriptorSetManager::Handle fUniformDSHandle;

    std::recursive_mutex fBackgroundMutex;
};

#endif