/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkResourceProvider.h"

#include "src/core/SkTaskGroup.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkUniformBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"

#ifdef SK_TRACE_VK_RESOURCES
std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
#endif

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = new PipelineStateCache(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
    delete fPipelineStateCache;
}

VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        // Only trust the blob if it is at least large enough to hold a full version-one header.
        if (cached && cached->size() >= 16 + VK_UUID_SIZE) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See section 9.6 (Pipeline Cache) of the Vulkan spec for
                // the breakdown of these bytes.
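                // The layout being validated below (this struct is illustrative only, not a
                // type used here):
                //
                //   uint32_t headerSize;          // cacheHeader[0] == 16 + VK_UUID_SIZE
                //   uint32_t headerVersion;       // cacheHeader[1]
                //   uint32_t vendorID;            // cacheHeader[2]
                //   uint32_t deviceID;            // cacheHeader[3]
                //   uint8_t  uuid[VK_UUID_SIZE];  // bytes starting at &cacheHeader[4]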
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }
        VkResult result = GR_VK_CALL(fGpu->vkInterface(),
                                     CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                         &fPipelineCache));
        SkASSERT(VK_SUCCESS == result);
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init the uniform descriptor objects.
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
}

GrVkPipeline* GrVkResourceProvider::createPipeline(int numColorSamples,
                                                   const GrPrimitiveProcessor& primProc,
                                                   const GrPipeline& pipeline,
                                                   const GrStencilSettings& stencil,
                                                   GrSurfaceOrigin origin,
                                                   VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                                   int shaderStageCount,
                                                   GrPrimitiveType primitiveType,
                                                   VkRenderPass compatibleRenderPass,
                                                   VkPipelineLayout layout) {
    return GrVkPipeline::Create(
            fGpu, numColorSamples, primProc, pipeline, stencil, origin, shaderStageInfo,
            shaderStageCount, primitiveType, compatibleRenderPass, layout, this->pipelineCache());
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
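// "Compatible" here follows the Vulkan render pass compatibility rules: render passes whose
// attachments match in format and sample count can share a framebuffer, regardless of their
// load/store ops. Note that each of the find* functions below returns a ref'd render pass;
// the caller is responsible for unreffing it.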
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const GrVkRenderTarget& target,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(target)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    const GrVkRenderPass* renderPass =
            fRenderPassArray.emplace_back(fGpu, target).getCompatibleRenderPass();
    renderPass->ref();

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    int index = compatibleHandle.toIndex();
    const GrVkRenderPass* renderPass = fRenderPassArray[index].getCompatibleRenderPass();
    renderPass->ref();
    return renderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(renderPass, colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        const GrVkRenderTarget& target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target.compatibleRenderPassHandle();

    // This will get us the handle to (and possibly create) the compatible set for the specific
    // GrVkRenderPass we are looking for. It hands back a ref'd render pass that we do not keep,
    // so balance that ref here; we only need the handle.
    this->findCompatibleRenderPass(target, pRPHandle)->unref(fGpu);
    return this->findRenderPass(*pRPHandle, colorOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu, colorOps, stencilOps);
    renderPass->ref();
    return renderPass;
}

GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
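    // Despite the "findOrCreate" name, descriptor pools are not currently recycled here: each
    // request simply creates a fresh pool of the requested type and size.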
    return new GrVkDescriptorPool(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        const GrSamplerState& params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
        const GrPipeline& pipeline, const GrPrimitiveProcessor& proc,
        const GrTextureProxy* const primProcProxies[], GrPrimitiveType primitiveType,
        VkRenderPass compatibleRenderPass) {
    return fPipelineStateCache->refPipelineState(renderTarget, origin, proc, primProcProxies,
                                                 pipeline, primitiveType, compatibleRenderPass);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const SkTArray<uint32_t>& visibilities,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, visibilities)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   visibilities);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    std::unique_lock<std::recursive_mutex> lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}
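
// Command pool lifecycle: findOrCreateCommandPool() above hands out a pool from
// fAvailableCommandPools (or creates a new one) and tracks it in fActiveCommandPools. Once a
// pool has been closed and its primary command buffer has finished on the GPU,
// checkCommandBuffers() retires it through backgroundReset(), which resets the pool (on the
// task group thread when one is available) and returns it to fAvailableCommandPools under
// fBackgroundMutex.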

void GrVkResourceProvider::checkCommandBuffers() {
    for (int i = fActiveCommandPools.count() - 1; i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                this->backgroundReset(pool);
            }
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext) {
    sk_sp<GrRefCntedCallback> procRef(new GrRefCntedCallback(finishedProc, finishedContext));
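    // Each closed-but-still-pending command buffer below takes a ref on this one callback
    // object, so finishedProc should fire exactly once, after the last such buffer releases
    // its ref.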
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->addFinishedProc(procRef);
        }
    }
}

const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
    const GrVkResource* resource = nullptr;
    int count = fAvailableUniformBufferResources.count();
    if (count > 0) {
        resource = fAvailableUniformBufferResources[count - 1];
        fAvailableUniformBufferResources.removeShuffle(count - 1);
    } else {
        resource = GrVkUniformBuffer::CreateResource(fGpu, GrVkUniformBuffer::kStandardSize);
    }
    return resource;
}

void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
    fAvailableUniformBufferResources.push_back(resource);
}

void GrVkResourceProvider::destroyResources(bool deviceLost) {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources(fGpu);
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref(fGpu);
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash.
    for (decltype(fSamplers)::Iter iter(&fSamplers); !iter.done(); ++iter) {
        (*iter).unref(fGpu);
    }
    fSamplers.reset();

    for (decltype(fYcbcrConversions)::Iter iter(&fYcbcrConversions); !iter.done(); ++iter) {
        (*iter).unref(fGpu);
    }
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fActiveCommandPools.reset();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fAvailableCommandPools.reset();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();

    // Release our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unref(fGpu);
    }
    fAvailableUniformBufferResources.reset();
}

void GrVkResourceProvider::abandonResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Abandon all command pools.
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        SkASSERT(fActiveCommandPools[i]->unique());
        fActiveCommandPools[i]->unrefAndAbandon();
    }
    fActiveCommandPools.reset();
    for (int i = 0; i < fAvailableCommandPools.count(); ++i) {
        SkASSERT(fAvailableCommandPools[i]->unique());
        fAvailableCommandPools[i]->unrefAndAbandon();
    }
    fAvailableCommandPools.reset();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].abandonResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unrefAndAbandon();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unrefAndAbandon them before resetting the hash.
    for (decltype(fSamplers)::Iter iter(&fSamplers); !iter.done(); ++iter) {
        (*iter).unrefAndAbandon();
    }
    fSamplers.reset();

    for (decltype(fYcbcrConversions)::Iter iter(&fYcbcrConversions); !iter.done(); ++iter) {
        (*iter).unrefAndAbandon();
    }
    fYcbcrConversions.reset();

    fPipelineStateCache->abandon();

    fPipelineCache = VK_NULL_HANDLE;

    // We must abandon all command buffers and pipeline states before abandoning the
    // GrVkDescriptorSetManagers.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->abandon();
    }
    fDescriptorSetManagers.reset();

    // Release our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unrefAndAbandon();
    }
    fAvailableUniformBufferResources.reset();
}

void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->releaseResources(fGpu);
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    std::unique_lock<std::recursive_mutex> providerLock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

void GrVkResourceProvider::storePipelineCacheData() {
    size_t dataSize = 0;
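    // Standard Vulkan two-call pattern: query the required size with a null data pointer, then
    // fetch the actual cache blob into our own allocation.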
    VkResult result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                           this->pipelineCache(),
                                                                           &dataSize, nullptr));
    SkASSERT(result == VK_SUCCESS);

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                  this->pipelineCache(),
                                                                  &dataSize,
                                                                  (void*)data.get()));
    SkASSERT(result == VK_SUCCESS);

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(
        const GrVkGpu* gpu,
        const GrVkRenderTarget& target)
        : fLastReturnedIndex(0) {
    fRenderPasses.emplace_back(new GrVkRenderPass());
    fRenderPasses[0]->initSimple(gpu, target);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderTarget& target) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass on creation.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(target);
}

GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        const GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
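    // Start the search at the most recently returned pass; consecutive lookups tend to ask for
    // the same load/store ops, so this acts as a cheap MRU heuristic.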
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = fRenderPasses.emplace_back(new GrVkRenderPass());
    renderPass->init(gpu, *this->getCompatibleRenderPass(), colorOps, stencilOps);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref(gpu);
            fRenderPasses[i] = nullptr;
        }
    }
}

void GrVkResourceProvider::CompatibleRenderPassSet::abandonResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unrefAndAbandon();
            fRenderPasses[i] = nullptr;
        }
    }
}