/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"

#include "include/core/SkData.h"
#include "include/core/SkString.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/private/base/SkDebug.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/Blend.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStencilSettings.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

#include <cstring>

class GrProgramInfo;
class GrRenderTarget;
class GrVkDescriptorSet;

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
        : fGpu(gpu)
        , fPipelineCache(VK_NULL_HANDLE)
        , fPipelineCacheSize(0) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(fRenderPassArray.empty());
    SkASSERT(fExternalRenderPasses.empty());
    SkASSERT(fMSAALoadPipelines.empty());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

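// Lazily creates the VkPipelineCache on first use. If the client supplied a persistent cache
// and it holds a blob whose header matches this physical device (vendor ID, device ID, and
// pipeline cache UUID), that blob is used as the new cache's initial data.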
VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        TRACE_EVENT0("skia.shaders", "CreatePipelineCache-GrVkResourceProvider");
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            const uint32_t* cacheHeader = (const uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
                // the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.size());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.size());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and each attachment's format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    if (!target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags,
                                          withResolve, withStencil)) {
        return nullptr;
    }

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

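// Searches the cached compatible render pass sets for one matching the given attachment
// descriptor and flags. On a miss, a new simple render pass is created and cached. The returned
// render pass is ref'd for the caller.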
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.size() - 1);
    }
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

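// Finds the render pass matching the requested load/store ops. The target supplies the handle
// of its compatible render pass set; the handle-based overload below then searches that set.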
const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.size());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

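// Samplers are cached in a hash table keyed on the sampler state plus any Y'CbCr conversion
// info. The returned sampler carries a ref for the caller.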
GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const skgpu::VulkanYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

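// Both overloads are thin wrappers over the pipeline state cache. The GrProgramDesc-keyed
// overload is used on the precompilation path and records precompile stats on the cache.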
GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

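// The MSAA-load pipeline draws a resolve attachment's contents back into the MSAA color
// attachment. One pipeline is cached per compatible render pass.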
sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.size() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWAntialiasState=*/false,
                skgpu::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

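// Unlike the uniform and input managers (which get fixed handles in init()), the zero-sampler
// manager is created on demand and located by a linear search over fDescriptorSetManagers.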
void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.size() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.size());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

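// Command pools are recycled: reuse one from fAvailableCommandPools when possible, otherwise
// create a new one. The returned pool is tracked in fActiveCommandPools and ref'd for the caller.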
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    GrVkCommandPool* result;
    if (!fAvailableCommandPools.empty()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                SkASSERT(pool->unique());
                pool->reset(fGpu);
                // After resetting the pool (specifically releasing the pool's resources) we may
                // have called a client callback proc which may have disconnected the GrVkGpu. In
                // that case we do not want to push the pool back onto the cache, but instead just
                // drop the pool.
                if (fGpu->disconnected()) {
                    pool->unref();
                    return;
                }
                fAvailableCommandPools.push_back(pool);
            }
        }
    }
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.size() - 1; !fActiveCommandPools.empty() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.size(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

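// Tears down everything this provider owns. Ordering matters here: command buffers, pipeline
// states, and uniform buffers must be released before the GrVkDescriptorSetManagers they
// reference (see the comment near the end of this function).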
void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.clear();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses
    for (int i = 0; i < fRenderPassArray.size(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.clear();

    for (int i = 0; i < fExternalRenderPasses.size(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.clear();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.clear();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.size(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.clear();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.clear();
}

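// Serializes the VkPipelineCache into the client's persistent cache under the kPipelineCache
// key. The store is skipped when the serialized data size is unchanged since the last store.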
void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }

    // Only store the VkPipelineCache data when its size has changed.
    if (dataSize == fPipelineCacheSize) {
        return;
    }

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }

    fPipelineCacheSize = dataSize;
    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    SkDebugf("store vkPipelineCache, data size:%zu\n", fPipelineCacheSize);
    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass at creation.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

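// Searches this set for a render pass with matching load/store ops, starting at the most
// recently returned index. On a miss, a new pass is created from the set's compatible render
// pass and appended.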
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.size();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.size() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.size(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}