/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkResourceProvider_DEFINED
#define GrVkResourceProvider_DEFINED

#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/base/SkMutex.h"
#include "include/private/base/SkTArray.h"
#include "src/base/SkTInternalLList.h"
#include "src/core/SkChecksum.h"
#include "src/core/SkLRUCache.h"
#include "src/core/SkTDynamicHash.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrManagedResource.h"
#include "src/gpu/ganesh/GrProgramDesc.h"
#include "src/gpu/ganesh/GrResourceHandle.h"
#include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSetManager.h"
#include "src/gpu/ganesh/vk/GrVkPipelineStateBuilder.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkSampler.h"
#include "src/gpu/ganesh/vk/GrVkSamplerYcbcrConversion.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

class GrVkCommandPool;
class GrVkGpu;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
class GrVkUniformHandler;

class GrVkResourceProvider {
public:
    GrVkResourceProvider(GrVkGpu* gpu);
    ~GrVkResourceProvider();

    GrThreadSafePipelineBuilder* pipelineStateCache() {
        return fPipelineStateCache.get();
    }

    sk_sp<GrThreadSafePipelineBuilder> refPipelineStateCache() {
        return fPipelineStateCache;
    }

    // Set up any initial vk objects
    void init();

    sk_sp<const GrVkPipeline> makePipeline(const GrProgramInfo&,
                                           VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                           int shaderStageCount,
                                           VkRenderPass compatibleRenderPass,
                                           VkPipelineLayout layout,
                                           uint32_t subpass);

    GR_DEFINE_RESOURCE_HANDLE_CLASS(CompatibleRPHandle)

    using SelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags;
    using LoadFromResolve = GrVkRenderPass::LoadFromResolve;

    // Finds or creates a simple render pass that matches the target, increments the refcount,
    // and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle. If this is
    // non null it will be set to a handle that can be used in the future to quickly return a
    // compatible GrVkRenderPass without the need to inspect a GrVkRenderTarget.
    const GrVkRenderPass* findCompatibleRenderPass(GrVkRenderTarget* target,
                                                   CompatibleRPHandle* compatibleHandle,
                                                   bool withResolve,
                                                   bool withStencil,
                                                   SelfDependencyFlags selfDepFlags,
                                                   LoadFromResolve);
    const GrVkRenderPass* findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor*,
                                                   GrVkRenderPass::AttachmentFlags,
                                                   SelfDependencyFlags selfDepFlags,
                                                   LoadFromResolve,
                                                   CompatibleRPHandle* compatibleHandle = nullptr);
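
    // A rough usage sketch of the CompatibleRPHandle flow described above. Purely illustrative:
    // 'provider', 'rt', and the enum values chosen here are assumptions, not part of this header.
    //     GrVkResourceProvider::CompatibleRPHandle handle;
    //     const GrVkRenderPass* rp = provider.findCompatibleRenderPass(
    //             rt, &handle, /*withResolve=*/false, /*withStencil=*/false,
    //             SelfDependencyFlags::kNone, LoadFromResolve::kNo);
    //     // 'handle' can later be handed to findRenderPass() to skip inspecting the target again.
    //     // The returned render pass was reffed for the caller, so that ref must be dropped later.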

    const GrVkRenderPass* findCompatibleExternalRenderPass(VkRenderPass,
                                                           uint32_t colorAttachmentIndex);


    // Finds or creates a render pass that matches the target and LoadStoreOps, increments the
    // refcount, and returns. The caller can optionally pass in a pointer to a CompatibleRPHandle.
    // If this is non null it will be set to a handle that can be used in the future to quickly
    // return a GrVkRenderPass without the need to inspect a GrVkRenderTarget.
    // TODO: sk_sp?
    const GrVkRenderPass* findRenderPass(GrVkRenderTarget* target,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& resolveOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps,
                                         CompatibleRPHandle* compatibleHandle,
                                         bool withResolve,
                                         bool withStencil,
                                         SelfDependencyFlags selfDepFlags,
                                         LoadFromResolve);

    // The CompatibleRPHandle must be a valid handle previously set by a call to findRenderPass or
    // findCompatibleRenderPass.
    const GrVkRenderPass* findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                         const GrVkRenderPass::LoadStoreOps& colorOps,
                                         const GrVkRenderPass::LoadStoreOps& resolveOps,
                                         const GrVkRenderPass::LoadStoreOps& stencilOps);
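
    // Illustrative sketch of reusing a previously obtained CompatibleRPHandle. Purely a sketch;
    // 'provider', 'handle', and the load/store values shown are assumptions:
    //     GrVkRenderPass::LoadStoreOps loadStore(VK_ATTACHMENT_LOAD_OP_LOAD,
    //                                            VK_ATTACHMENT_STORE_OP_STORE);
    //     const GrVkRenderPass* rp =
    //             provider.findRenderPass(handle, loadStore, loadStore, loadStore);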

    GrVkCommandPool* findOrCreateCommandPool();

    void checkCommandBuffers();

    void forceSyncAllCommandBuffers();

    // We must add the finishedProc to all active command buffers since we may have flushed work
    // that the client cares about before they explicitly called flush and the GPU may reorder
    // command execution. So we make sure all previously submitted work finishes before we call the
    // finishedProc.
    void addFinishedProcToActiveCommandBuffers(sk_sp<skgpu::RefCntedCallback> finishedCallback);
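
    // A minimal usage sketch, assuming a client-supplied 'finishedProc'/'finishedContext' pair;
    // wrapping them via skgpu::RefCntedCallback::Make is an assumption for illustration:
    //     provider.addFinishedProcToActiveCommandBuffers(
    //             skgpu::RefCntedCallback::Make(finishedProc, finishedContext));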

    // Finds or creates a compatible GrVkDescriptorPool for the requested type and count.
    // The refcount is incremented and a pointer returned.
    // TODO: Currently this will just create a descriptor pool without holding onto a ref itself,
    //       so we do not reuse them. Requires knowing if another draw is currently using
    //       the GrVkDescriptorPool, the ability to reset pools, and the ability to purge pools out
    //       of our cache of GrVkDescriptorPools.
    GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(VkDescriptorType type, uint32_t count);

    // Finds or creates a compatible GrVkSampler based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSampler* findOrCreateCompatibleSampler(GrSamplerState,
                                               const GrVkYcbcrConversionInfo& ycbcrInfo);
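
    // Illustrative sketch of fetching a reusable sampler. Assumes no Ycbcr conversion is needed;
    // the default-constructed GrVkYcbcrConversionInfo and the filter chosen are assumptions:
    //     GrVkSampler* sampler = provider.findOrCreateCompatibleSampler(
    //             GrSamplerState(GrSamplerState::Filter::kLinear), GrVkYcbcrConversionInfo());
    //     // ... use the sampler; it was reffed for the caller, so drop that ref when done.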

    // Finds or creates a compatible GrVkSamplerYcbcrConversion based on the GrSamplerState and
    // GrVkYcbcrConversionInfo. The refcount is incremented and a pointer returned.
    GrVkSamplerYcbcrConversion* findOrCreateCompatibleSamplerYcbcrConversion(
            const GrVkYcbcrConversionInfo& ycbcrInfo);

    GrVkPipelineState* findOrCreateCompatiblePipelineState(
            GrRenderTarget*,
            const GrProgramInfo&,
            VkRenderPass compatibleRenderPass,
            bool overrideSubpassForResolveLoad);

    GrVkPipelineState* findOrCreateCompatiblePipelineState(
            const GrProgramDesc&,
            const GrProgramInfo&,
            VkRenderPass compatibleRenderPass,
            GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat);

    sk_sp<const GrVkPipeline> findOrCreateMSAALoadPipeline(
            const GrVkRenderPass& renderPass,
            int numSamples,
            VkPipelineShaderStageCreateInfo*,
            VkPipelineLayout);

    void getSamplerDescriptorSetHandle(VkDescriptorType type,
                                       const GrVkUniformHandler&,
                                       GrVkDescriptorSetManager::Handle* handle);

    // This is a convenience function to return a descriptor set handle for zero samplers. When making a
    // VkPipelineLayout we must pass in an array of valid descriptor set handles. However, we have
    // set up our system to have the descriptor sets be in the order uniform, sampler, input. So
    // if we have a uniform and input we will need to have a valid handle for the sampler as well.
    // When using the GrVkMSAALoadManager this is the case, but we also don't have a
    // GrVkUniformHandler to pass into the more general function. Thus we use this call instead.
    void getZeroSamplerDescriptorSetHandle(GrVkDescriptorSetManager::Handle* handle);

    // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does not
    // own the VkDescriptorSetLayout and thus should not delete it. This function should be used
    // when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getUniformDSLayout() const;

    // Returns the compatible VkDescriptorSetLayout to use for input attachments. The caller does
    // not own the VkDescriptorSetLayout and thus should not delete it. This function should be used
    // when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getInputDSLayout() const;

    // Returns the compatible VkDescriptorSetLayout to use for a specific sampler handle. The caller
    // does not own the VkDescriptorSetLayout and thus should not delete it. This function should be
    // used when the caller needs the layout to create a VkPipelineLayout.
    VkDescriptorSetLayout getSamplerDSLayout(const GrVkDescriptorSetManager::Handle&) const;
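
    // Rough sketch of gathering the layouts in the expected set order (uniform, sampler, input)
    // when building a VkPipelineLayout. Purely illustrative; 'provider' and 'samplerHandle' are
    // assumptions:
    //     VkDescriptorSetLayout setLayouts[3] = {
    //             provider.getUniformDSLayout(),
    //             provider.getSamplerDSLayout(samplerHandle),
    //             provider.getInputDSLayout(),
    //     };
    //     // 'setLayouts' would then feed VkPipelineLayoutCreateInfo::pSetLayouts.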

    // Returns a GrVkDescriptorSet that can be used for uniform buffers. The GrVkDescriptorSet
    // is already reffed for the caller.
    const GrVkDescriptorSet* getUniformDescriptorSet();

    // Returns a GrVkDescriptorSet that can be used for sampler descriptors that are compatible with
    // the GrVkDescriptorSetManager::Handle passed in. The GrVkDescriptorSet is already reffed for
    // the caller.
    const GrVkDescriptorSet* getSamplerDescriptorSet(const GrVkDescriptorSetManager::Handle&);

    // Returns a GrVkDescriptorSet that can be used for input attachments. The GrVkDescriptorSet
    // is already reffed for the caller.
    const GrVkDescriptorSet* getInputDescriptorSet();

    // Signals that the passed in descriptor set, which is compatible with the passed in handle,
    // can be reused by the next allocation request.
    void recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                              const GrVkDescriptorSetManager::Handle&);
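
    // Illustrative descriptor set lifecycle sketch ('provider' and 'samplerHandle' are
    // assumptions):
    //     const GrVkDescriptorSet* ds = provider.getSamplerDescriptorSet(samplerHandle);
    //     // ... bind/update the set while recording ...
    //     provider.recycleDescriptorSet(ds, samplerHandle);  // make it available for reuse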

    void storePipelineCacheData();

    // Destroy any cached resources. To be called before destroying the VkDevice.
    // The assumption is that all queues are idle and all command buffers are finished.
    // For resource tracing to work properly, this should be called after unrefing all other
    // resource usages.
    void destroyResources();

    // Currently we just release available command pools (which also releases their buffers). The
    // command buffers and pools take up the most memory. Other objects (e.g. samplers,
    // ycbcr conversions, etc.) tend to be fairly lightweight and not worth the effort to remove
    // them and then possibly remake them. Additionally many of those objects have refs/handles that
    // are held by other objects that aren't deleted here. Thus the memory wins for removing these
    // objects from the cache are probably not worth the complexity of safely releasing them.
    void releaseUnlockedBackendObjects();

#if defined(GR_TEST_UTILS)
    void resetShaderCacheForTesting() const { fPipelineStateCache->release(); }
#endif

private:
    class PipelineStateCache : public GrThreadSafePipelineBuilder {
    public:
        PipelineStateCache(GrVkGpu* gpu);
        ~PipelineStateCache() override;

        void release();
        GrVkPipelineState* findOrCreatePipelineState(GrRenderTarget*,
                                                     const GrProgramInfo&,
                                                     VkRenderPass compatibleRenderPass,
                                                     bool overrideSubpassForResolveLoad);
        GrVkPipelineState* findOrCreatePipelineState(const GrProgramDesc& desc,
                                                     const GrProgramInfo& programInfo,
                                                     VkRenderPass compatibleRenderPass,
                                                     Stats::ProgramCacheResult* stat) {
            return this->findOrCreatePipelineStateImpl(desc, programInfo, compatibleRenderPass,
                                                       false, stat);
        }

    private:
        struct Entry;

        GrVkPipelineState* findOrCreatePipelineStateImpl(const GrProgramDesc&,
                                                         const GrProgramInfo&,
                                                         VkRenderPass compatibleRenderPass,
                                                         bool overrideSubpassForResolveLoad,
                                                         Stats::ProgramCacheResult*);

        struct DescHash {
            uint32_t operator()(const GrProgramDesc& desc) const {
                return SkChecksum::Hash32(desc.asKey(), desc.keyLength());
            }
        };

        SkLRUCache<const GrProgramDesc, std::unique_ptr<Entry>, DescHash> fMap;

        GrVkGpu*                    fGpu;
    };

    class CompatibleRenderPassSet {
    public:
        // This will always construct the basic load store render pass (all attachments load and
        // store their data) so that there is at least one compatible VkRenderPass that can be used
        // with this set.
        CompatibleRenderPassSet(GrVkRenderPass* renderPass);

        bool isCompatible(const GrVkRenderPass::AttachmentsDescriptor&,
                          GrVkRenderPass::AttachmentFlags,
                          SelfDependencyFlags selfDepFlags,
                          LoadFromResolve) const;

        const GrVkRenderPass* getCompatibleRenderPass() const {
            // The first GrVkRenderPass should always exist since we create the basic load store
            // render pass on creation
            SkASSERT(fRenderPasses[0]);
            return fRenderPasses[0];
        }

        GrVkRenderPass* getRenderPass(GrVkGpu* gpu,
                                      const GrVkRenderPass::LoadStoreOps& colorOps,
                                      const GrVkRenderPass::LoadStoreOps& resolveOps,
                                      const GrVkRenderPass::LoadStoreOps& stencilOps);

        void releaseResources();

    private:
        skia_private::STArray<4, GrVkRenderPass*> fRenderPasses;
        int                           fLastReturnedIndex;
    };

    VkPipelineCache pipelineCache();

    GrVkGpu* fGpu;

    // Central cache for creating pipelines
    VkPipelineCache fPipelineCache;

    struct MSAALoadPipeline {
        sk_sp<const GrVkPipeline> fPipeline;
        const GrVkRenderPass* fRenderPass;
    };

    // Cache of previously created msaa load pipelines
    skia_private::TArray<MSAALoadPipeline> fMSAALoadPipelines;

    skia_private::STArray<4, CompatibleRenderPassSet> fRenderPassArray;

    skia_private::TArray<const GrVkRenderPass*> fExternalRenderPasses;

    // Array of command pools that we are waiting on
    skia_private::STArray<4, GrVkCommandPool*, true> fActiveCommandPools;

    // Array of available command pools that are not in flight
    skia_private::STArray<4, GrVkCommandPool*, true> fAvailableCommandPools;

    // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
    // GrVkPipelineStates
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key> fSamplers;

    // Stores GrVkSamplerYcbcrConversion objects that we've already created so we can reuse them.
    SkTDynamicHash<GrVkSamplerYcbcrConversion, GrVkSamplerYcbcrConversion::Key> fYcbcrConversions;

    // Cache of GrVkPipelineStates
    sk_sp<PipelineStateCache> fPipelineStateCache;

    skia_private::STArray<4, std::unique_ptr<GrVkDescriptorSetManager>> fDescriptorSetManagers;

    GrVkDescriptorSetManager::Handle fUniformDSHandle;
    GrVkDescriptorSetManager::Handle fInputDSHandle;
};

#endif