/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE)
    , fPipelineCacheSize(0) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(0 == fMSAALoadPipelines.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

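// Lazily creates the VkPipelineCache on first use. If the client supplied a persistent cache,
// we try to seed the new VkPipelineCache with the previously stored blob, but only after
// validating the blob's header against this physical device.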
VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) of the Vulkan spec for
                // the breakdown of these bytes.
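                // Version-one header layout, read as 32-bit words:
                //   cacheHeader[0]  - length of the entire header (16 + VK_UUID_SIZE bytes)
                //   cacheHeader[1]  - VkPipelineCacheHeaderVersion
                //   cacheHeader[2]  - vendor ID
                //   cacheHeader[3]  - device ID
                //   cacheHeader[4+] - pipelineCacheUUID (VK_UUID_SIZE bytes)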
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

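// Creates the descriptor set managers every pipeline uses: the uniform buffer manager always
// lands at index 0 and the input attachment manager at index 1, as the asserts below verify.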
void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.count());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render, we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and each attachment's format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags, withResolve, withStencil);

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

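// Linearly searches the cached render pass sets for one compatible with the given attachment
// description. On a miss we create a new simple render pass to anchor a new compatible set, and
// hand back a handle to that set so later lookups can skip the search.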
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

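// Note: despite the "findOrCreate" name, this currently always creates a fresh descriptor pool
// of the requested type and size.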
GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
                                                            VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

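// Samplers are cached under a key derived from the sampler state and any Ycbcr conversion info.
// The returned sampler carries a ref that the caller is responsible for releasing.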
GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

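// Pipelines used to draw (load) the contents of a resolve attachment into an MSAA attachment.
// Render pass compatibility is the only thing that differentiates them, so we keep one pipeline
// per compatible render pass in a flat list.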
sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.count() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWAntialiasState=*/false,
                GrXferProcessor::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

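// The zero-sampler descriptor set manager is created lazily on the first request and found by
// linear search afterwards.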
void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

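// Pops a pool off the available list (guarded by fBackgroundMutex) or creates a new one, and
// moves it onto the active list. The returned pool carries a ref for the caller.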
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                // This passes ownership of the pool to the backgroundReset call. The pool should
                // not be used again from this function.
                // TODO: We should see if we can use sk_sps here to make this more explicit.
                this->backgroundReset(pool);
            }
        }
    }
#ifdef SKIA_OHOS
    if (fActiveCommandPools.count() >= 3000) {
        SK_LOGE("GrVkResourceProvider::checkCommandBuffers: fActiveCommandPools size [%{public}d]",
                fActiveCommandPools.count());
    }
#endif
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<GrRefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

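// Tears down every cached resource. We wait on the task group first since in-flight background
// pool resets push onto fAvailableCommandPools, which is emptied below.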
void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.reset();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.reset();

    {
        SkAutoMutexExclusive lock(fBackgroundMutex);
        for (GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool->unique());
            pool->unref();
        }
        fAvailableCommandPools.reset();
    }

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.reset();
}

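// Takes ownership of a retired pool, releases its resources, and then hands the actual
// VkCommandPool reset off to the context's task group (when one exists) so the potentially
// expensive reset can run off the main recording thread.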
void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->releaseResources();
    // After releasing resources we may have called a client callback proc which may have
    // disconnected the GrVkGpu. In that case we do not want to push the pool back onto the cache,
    // but instead just drop the pool.
    if (fGpu->disconnected()) {
        pool->unref();
        return;
    }
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    SkAutoMutexExclusive lock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

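// Serializes the VkPipelineCache into the client's persistent cache using the standard Vulkan
// two-call pattern: the first GetPipelineCacheData call queries the blob size, the second fills
// a buffer of that size. Skipping the store when the size is unchanged is a cheap (if imperfect)
// proxy for "nothing new was compiled since the last store".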
void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }
    // Only store the VkPipelineCache when its size has changed since the last store.
    if (dataSize == fPipelineCacheSize) {
        return;
    }
    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }
    fPipelineCacheSize = dataSize;
    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    SkDebugf("store vkPipelineCache, data size:%zu\n", fPipelineCacheSize);
    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass when the set is constructed.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

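// Searches this set for a pass with matching load/store ops, starting from the most recently
// returned index as a simple most-recently-used heuristic, and creates (and caches) a new pass
// on a miss.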
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}