/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
        : fGpu(gpu)
        , fPipelineCache(VK_NULL_HANDLE)
        , fPipelineCacheSize(0) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(0 == fMSAALoadPipelines.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
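        // A version-one Vulkan pipeline cache blob begins with a fixed header:
        //   uint32_t headerLength;    // 16 + VK_UUID_SIZE
        //   uint32_t headerVersion;   // VK_PIPELINE_CACHE_HEADER_VERSION_ONE
        //   uint32_t vendorID;
        //   uint32_t deviceID;
        //   uint8_t  pipelineCacheUUID[VK_UUID_SIZE];
        // We only seed the new cache with the stored data when the vendor, device,
        // and UUID all match the current physical device, since cache blobs are not
        // portable across devices or driver versions.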
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec
                // for the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

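// Descriptor-set-manager handles are simply indices into fDescriptorSetManagers,
// so the uniform manager always occupies slot 0 and the input-attachment manager
// slot 1; the asserts below pin down that ordering.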
void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.count());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
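// "Compatible" follows the Vulkan render pass compatibility rules: passes whose
// attachments match in format and sample count are compatible regardless of their
// load/store ops, so a framebuffer built against the simple pass works with all of
// them.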
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and the attachments' formats and sample counts.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags, withResolve, withStencil);

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

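    // A newly constructed GrVkRenderPass starts with one ref, which the
    // fExternalRenderPasses array takes over; the extra ref() below is the one
    // handed back to the caller.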
    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
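    // Note: despite the "findOrCreate" name, descriptor pools are not currently
    // recycled here; every call creates a fresh pool.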
    return GrVkDescriptorPool::Create(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {

    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
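    // The cache is keyed on render pass compatibility rather than identity, so any
    // pass compatible with 'renderPass' reuses the same load pipeline.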
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.count() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWAntialiasState=*/false,
                GrXferProcessor::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}

void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

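// Command pools cycle between two lists: fActiveCommandPools holds pools whose
// buffers may still be pending on the GPU, while fAvailableCommandPools (guarded
// by fBackgroundMutex) holds fully reset pools ready for reuse.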
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                // This passes ownership of the pool to the backgroundReset call. The pool should
                // not be used again from this function.
                // TODO: We should see if we can use sk_sps here to make this more explicit.
                this->backgroundReset(pool);
            }
        }
    }
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
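    // Use the same defensive reverse iteration as checkCommandBuffers() in case a
    // sync triggers client procs that mutate fActiveCommandPools mid-loop.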
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<GrRefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}

void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all MSAA load pipelines
    fMSAALoadPipelines.reset();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.reset();

    {
        SkAutoMutexExclusive lock(fBackgroundMutex);
        for (GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool->unique());
            pool->unref();
        }
        fAvailableCommandPools.reset();
    }

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    SkAutoMutexExclusive lock(fBackgroundMutex);
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.reset();
}

void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->releaseResources();
    // After releasing resources we may have called a client callback proc which may have
    // disconnected the GrVkGpu. In that case we do not want to push the pool back onto the cache,
    // but instead just drop the pool.
    if (fGpu->disconnected()) {
        pool->unref();
        return;
    }
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    SkAutoMutexExclusive lock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
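    // vkGetPipelineCacheData uses the standard Vulkan two-call idiom: the first
    // call with pData == nullptr just reports the required size, and the second
    // call fills the buffer we allocate for it.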
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }
    // Only store the VkPipelineCache data when its size has changed since the last store.
    if (dataSize == fPipelineCacheSize) {
        return;
    }
    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }
    fPipelineCacheSize = dataSize;
    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    SkDebugf("store vkPipelineCache, data size: %zu\n", fPipelineCacheSize);
    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
        : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass at creation time.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}

GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
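    // Start the search at the most recently returned pass; consecutive lookups
    // typically request the same load/store ops, so this hits on the first probe.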
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}