/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStencilSettings.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
        : fGpu(gpu)
        , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.size());
    SkASSERT(0 == fExternalRenderPasses.size());
    SkASSERT(0 == fMSAALoadPipelines.size());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
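        // Only hand the cached blob back to the driver if its header matches this physical
        // device. Vulkan only permits initial data that was previously retrieved from a
        // compatible pipeline cache, so a blob from another device or driver version is
        // silently dropped here.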
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
                // the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

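        // Creation failure is not fatal: a VK_NULL_HANDLE pipeline cache is legal everywhere
        // we use one, so on failure we simply run without driver-side pipeline caching.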
        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.size());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.size());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and the attachments' format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    if (!target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags,
                                          withResolve, withStencil)) {
        return nullptr;
    }

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.size() - 1);
    }
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
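    // The render pass is created with one ref, which fExternalRenderPasses now owns; take a
    // second ref for the caller.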
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.size());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
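    // Samplers are de-duplicated with a key derived from the sampler state plus any Ycbcr
    // conversion info, so identical requests share one VkSampler.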
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.size() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
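    // No cached pipeline is compatible with this render pass, so build one. The MSAA-load draw
    // copies the resolve attachment into the multisampled attachment with a simple two-stage
    // (vertex + fragment) triangle-strip pipeline, so it needs no vertex or instance attributes
    // (hence the empty attribute sets below).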
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWAntialiasState=*/false,
                skgpu::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.size());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    GrVkCommandPool* result;
    if (fAvailableCommandPools.size()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.size() - 1; fActiveCommandPools.size() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
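                // removeShuffle fills slot i with the last element. Because we iterate from the
                // back, any element moved into slot i has already been visited.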
                fActiveCommandPools.removeShuffle(i);
                SkASSERT(pool->unique());
                pool->reset(fGpu);
                // After resetting the pool (specifically releasing the pool's resources) we may
                // have called a client callback proc which may have disconnected the GrVkGpu. In
                // that case we do not want to push the pool back onto the cache, but instead just
                // drop the pool.
                if (fGpu->disconnected()) {
                    pool->unref();
                    return;
                }
                fAvailableCommandPools.push_back(pool);
            }
        }
    }
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.size() - 1; fActiveCommandPools.size() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.size(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.clear();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.clear();

    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.clear();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.clear();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.clear();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();
}

void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
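    // Standard Vulkan two-call pattern: the first GetPipelineCacheData call reports the blob
    // size, the second fills a buffer of that size.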
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store render
    // pass when the set is created.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
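    // Start the search at the most recently returned render pass; requests for the same
    // load/store ops tend to arrive back to back, so this usually hits on the first iteration.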
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.size();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.size() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}