/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

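// Note: the asserts in the destructor below rely on destroyResources() having
// run first; destroyResources() empties these arrays and resets
// fPipelineCache to VK_NULL_HANDLE.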
GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(0 == fMSAALoadPipelines.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec
                // for the breakdown of these bytes.
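                // Sketched here for reference from the spec (this is not a
                // Vulkan-provided struct, just the byte layout the checks
                // around this comment rely on):
                //   offset  0, uint32_t: total header length (16 + VK_UUID_SIZE)
                //   offset  4, uint32_t: VkPipelineCacheHeaderVersion
                //   offset  8, uint32_t: vendorID
                //   offset 12, uint32_t: deviceID
                //   offset 16, uint8_t[VK_UUID_SIZE]: pipelineCacheUUID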
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.count());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
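//
// A minimal sketch of the intended flow (caller code hypothetical; the kNone
// and kNo enumerator names are assumed from GrVkRenderPass): fetch a
// compatible render pass and keep its handle, then later resolve a full pass
// with concrete load/store ops:
//
//   GrVkResourceProvider::CompatibleRPHandle handle;
//   const GrVkRenderPass* compatRP = resourceProvider->findCompatibleRenderPass(
//           renderTarget, &handle, /*withResolve=*/false, /*withStencil=*/false,
//           SelfDependencyFlags::kNone, LoadFromResolve::kNo);
//   // ... create the framebuffer against compatRP ...
//   const GrVkRenderPass* fullRP =
//           resourceProvider->findRenderPass(handle, colorOps, resolveOps, stencilOps);
//   // Both returned passes are ref'ed for the caller and need matching unrefs.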
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and each attachment's format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags, withResolve, withStencil);

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
                                                            VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}
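
// A minimal usage sketch for the sampler cache above (caller code
// hypothetical; the sampler() accessor name is an assumption): the returned
// sampler carries a ref for the caller and must be balanced with unref():
//
//   GrVkSampler* sampler = resourceProvider->findOrCreateCompatibleSampler(
//           GrSamplerState(), GrVkYcbcrConversionInfo());
//   if (sampler) {
//       // ... write sampler->sampler() into a descriptor set ...
//       sampler->unref();
//   }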

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.count() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWAntialiasState=*/false,
                GrXferProcessor::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}
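
// Pool lifecycle, as implemented in this file: a pool returned by
// findOrCreateCommandPool() is tracked in fActiveCommandPools while its
// primary command buffer is in flight; checkCommandBuffers() below detects
// completion and hands the pool to backgroundReset(), which resets it (on a
// task group thread when one is available) and pushes it back onto
// fAvailableCommandPools for reuse.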

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                // This passes ownership of the pool to the backgroundReset call. The pool should
                // not be used again from this function.
                // TODO: We should see if we can use sk_sps here to make this more explicit.
                this->backgroundReset(pool);
            }
        }
    }
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<GrRefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.reset();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.reset();

    {
        SkAutoMutexExclusive lock(fBackgroundMutex);
        for (GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool->unique());
            pool->unref();
        }
        fAvailableCommandPools.reset();
    }

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.reset();
}

void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->releaseResources();
    // After releasing resources we may have called a client callback proc which may have
    // disconnected the GrVkGpu. In that case we do not want to push the pool back onto the cache,
    // but instead just drop the pool.
    if (fGpu->disconnected()) {
        pool->unref();
        return;
    }
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    SkAutoMutexExclusive lock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
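    // Standard Vulkan two-call idiom (noted here for clarity): the first
    // GetPipelineCacheData call below passes a null data pointer to query the
    // required size, and the second call fills a buffer of that size.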
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load-store
    // render pass at creation time.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

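// The lookup below starts at fLastReturnedIndex, which acts as a cheap
// most-recently-used heuristic: repeated requests for the same load/store
// ops typically hit on the first probe.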
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}