/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkDescriptorSetManager.h"

#include "src/gpu/ganesh/vk/GrVkDescriptorPool.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkUniformHandler.h"

using namespace skia_private;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

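// Each Create*Manager() factory below builds a GrVkDescriptorSetManager for one
// descriptor category Ganesh uses: uniform buffers, combined image samplers,
// the zero-sampler layout, and input attachments.
//
// A minimal usage sketch (hypothetical caller; `handle` stands in for a
// GrVkDescriptorSetManager::Handle supplied by the owning resource provider):
//
//     GrVkDescriptorSetManager* mgr = GrVkDescriptorSetManager::CreateUniformManager(gpu);
//     const GrVkDescriptorSet* ds = mgr->getDescriptorSet(gpu, handle);
//     // ... write/bind the set while recording a command buffer ...
//     mgr->recycleDescriptorSet(ds);  // return the set to the free list
//     mgr->release(gpu);              // free the layout, pool, and cached sets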
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
    STArray<1, uint32_t> visibilities;
    uint32_t stages = kVertex_GrShaderFlag | kFragment_GrShaderFlag;
    visibilities.push_back(stages);
    TArray<const GrVkSampler*> samplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities, samplers);
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
        GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
    STArray<4, uint32_t> visibilities;
    STArray<4, const GrVkSampler*> immutableSamplers;
    SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    for (int i = 0; i < uniformHandler.numSamplers(); ++i) {
        visibilities.push_back(uniformHandler.samplerVisibility(i));
        immutableSamplers.push_back(uniformHandler.immutableSampler(i));
    }
    return Create(gpu, type, visibilities, immutableSamplers);
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateZeroSamplerManager(GrVkGpu* gpu) {
    TArray<uint32_t> visibilities;
    TArray<const GrVkSampler*> immutableSamplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, visibilities, immutableSamplers);
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateInputManager(GrVkGpu* gpu) {
    STArray<1, uint32_t> visibilities;
    visibilities.push_back(kFragment_GrShaderFlag);
    TArray<const GrVkSampler*> samplers;
    return Create(gpu, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, visibilities, samplers);
}

VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
    VkShaderStageFlags flags = 0;

    if (visibility & kVertex_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_VERTEX_BIT;
    }
    if (visibility & kFragment_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
    }
    return flags;
}

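// Builds the VkDescriptorSetLayout for the given descriptor type and reports
// how many descriptors a single set consumes. For combined image samplers, a
// binding with a ycbcr immutable sampler may count as more than one descriptor.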
static bool get_layout_and_desc_count(GrVkGpu* gpu,
                                      VkDescriptorType type,
                                      const TArray<uint32_t>& visibilities,
                                      const TArray<const GrVkSampler*>& immutableSamplers,
                                      VkDescriptorSetLayout* descSetLayout,
                                      uint32_t* descCountPerSet) {
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        uint32_t numBindings = visibilities.size();
        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
                new VkDescriptorSetLayoutBinding[numBindings]);
        *descCountPerSet = 0;
        for (uint32_t i = 0; i < numBindings; ++i) {
            uint32_t visibility = visibilities[i];
            dsSamplerBindings[i].binding = i;
            dsSamplerBindings[i].descriptorType = type;
            dsSamplerBindings[i].descriptorCount = 1;
            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
                if (immutableSamplers[i]) {
                    (*descCountPerSet) += gpu->vkCaps().ycbcrCombinedImageSamplerDescriptorCount();
                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
                } else {
                    (*descCountPerSet)++;
                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
                }
            }
        }

        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        dsSamplerLayoutCreateInfo.pNext = nullptr;
        dsSamplerLayoutCreateInfo.flags = 0;
        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
        // Setting to nullptr fixes an error in the param checker validation layer. Even though
        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
        // null.
        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result,
                          CreateDescriptorSetLayout(gpu->device(),
                                                    &dsSamplerLayoutCreateInfo,
                                                    nullptr,
                                                    descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }
    } else if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
        static constexpr int kUniformDescPerSet = 1;
        SkASSERT(kUniformDescPerSet == visibilities.size());
        // Create Uniform Buffer Descriptor
        VkDescriptorSetLayoutBinding dsUniBinding;
        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        dsUniBinding.descriptorCount = 1;
        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
        dsUniBinding.pImmutableSamplers = nullptr;

        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        uniformLayoutCreateInfo.pNext = nullptr;
        uniformLayoutCreateInfo.flags = 0;
        uniformLayoutCreateInfo.bindingCount = 1;
        uniformLayoutCreateInfo.pBindings = &dsUniBinding;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
                                                                 &uniformLayoutCreateInfo,
                                                                 nullptr,
                                                                 descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }

        *descCountPerSet = kUniformDescPerSet;
    } else {
        SkASSERT(type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
        static constexpr int kInputDescPerSet = 1;
        SkASSERT(kInputDescPerSet == visibilities.size());

        // Create input attachment descriptor
        VkDescriptorSetLayoutBinding dsInputBinding;
        dsInputBinding.binding = 0;
        dsInputBinding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        dsInputBinding.descriptorCount = 1;
        SkASSERT(visibilities[0] == kFragment_GrShaderFlag);
        dsInputBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
        dsInputBinding.pImmutableSamplers = nullptr;

        VkDescriptorSetLayoutCreateInfo inputLayoutCreateInfo;
        inputLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        inputLayoutCreateInfo.pNext = nullptr;
        inputLayoutCreateInfo.flags = 0;
        inputLayoutCreateInfo.bindingCount = 1;
        inputLayoutCreateInfo.pBindings = &dsInputBinding;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8713
        __lsan::ScopedDisabler lsanDisabler;
#endif
        VkResult result;
        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
                                                                 &inputLayoutCreateInfo,
                                                                 nullptr,
                                                                 descSetLayout));
        if (result != VK_SUCCESS) {
            return false;
        }

        *descCountPerSet = kInputDescPerSet;
    }
    return true;
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::Create(
        GrVkGpu* gpu, VkDescriptorType type,
        const TArray<uint32_t>& visibilities,
        const TArray<const GrVkSampler*>& immutableSamplers) {
#ifdef SK_DEBUG
    if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
        SkASSERT(visibilities.size() == immutableSamplers.size());
    } else {
        SkASSERT(immutableSamplers.empty());
    }
#endif

    VkDescriptorSetLayout descSetLayout;
    uint32_t descCountPerSet;
    if (!get_layout_and_desc_count(gpu, type, visibilities, immutableSamplers, &descSetLayout,
                                   &descCountPerSet)) {
        return nullptr;
    }
    return new GrVkDescriptorSetManager(gpu, type, descSetLayout, descCountPerSet, visibilities,
                                        immutableSamplers);
}

GrVkDescriptorSetManager::GrVkDescriptorSetManager(
        GrVkGpu* gpu, VkDescriptorType type, VkDescriptorSetLayout descSetLayout,
        uint32_t descCountPerSet, const TArray<uint32_t>& visibilities,
        const TArray<const GrVkSampler*>& immutableSamplers)
        : fPoolManager(descSetLayout, type, descCountPerSet) {
    for (int i = 0; i < visibilities.size(); ++i) {
        fBindingVisibilities.push_back(visibilities[i]);
    }
    for (int i = 0; i < immutableSamplers.size(); ++i) {
        const GrVkSampler* sampler = immutableSamplers[i];
        if (sampler) {
            sampler->ref();
        }
        fImmutableSamplers.push_back(sampler);
    }
}

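// Hands out a descriptor set, preferring a previously recycled set from
// fFreeSets over allocating a new one from the pool.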
const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
                                                                    const Handle& handle) {
    const GrVkDescriptorSet* ds = nullptr;
    int count = fFreeSets.size();
    if (count > 0) {
        ds = fFreeSets[count - 1];
        fFreeSets.removeShuffle(count - 1);
    } else {
        VkDescriptorSet vkDS;
        if (!fPoolManager.getNewDescriptorSet(gpu, &vkDS)) {
            return nullptr;
        }

        ds = new GrVkDescriptorSet(gpu, vkDS, fPoolManager.fPool, handle);
    }
    SkASSERT(ds);
    return ds;
}

void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
    SkASSERT(descSet);
    fFreeSets.push_back(descSet);
}

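// Drops all GPU-side resources: the pool manager's layout and pool, every
// cached free set, and the refs held on any immutable samplers.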
void GrVkDescriptorSetManager::release(GrVkGpu* gpu) {
    fPoolManager.freeGPUResources(gpu);

    for (int i = 0; i < fFreeSets.size(); ++i) {
        fFreeSets[i]->unref();
    }
    fFreeSets.clear();

    for (int i = 0; i < fImmutableSamplers.size(); ++i) {
        if (fImmutableSamplers[i]) {
            fImmutableSamplers[i]->unref();
        }
    }
    fImmutableSamplers.clear();
}

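// Reports whether this manager's layout matches the sampler bindings described
// by uniHandler, so an existing manager can be reused for a new pipeline.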
bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
                                            const GrVkUniformHandler* uniHandler) const {
    SkASSERT(uniHandler);
    if (type != fPoolManager.fDescType) {
        return false;
    }

    SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    if (fBindingVisibilities.size() != uniHandler->numSamplers()) {
        return false;
    }
    for (int i = 0; i < uniHandler->numSamplers(); ++i) {
        if (uniHandler->samplerVisibility(i) != fBindingVisibilities[i] ||
            uniHandler->immutableSampler(i) != fImmutableSamplers[i]) {
            return false;
        }
    }
    return true;
}

bool GrVkDescriptorSetManager::isZeroSampler() const {
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER != fPoolManager.fDescType) {
        return false;
    }
    if (!fBindingVisibilities.empty()) {
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
        VkDescriptorSetLayout layout,
        VkDescriptorType type,
        uint32_t descCountPerSet)
        : fDescLayout(layout)
        , fDescType(type)
        , fDescCountPerSet(descCountPerSet)
        , fMaxDescriptors(kStartNumDescriptors)
        , fCurrentDescriptorCount(0)
        , fPool(nullptr) {
}

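// Replaces the current pool with a larger one, growing the max descriptor
// count by roughly 1.5x until it reaches kMaxDescriptors. As a worked example,
// if kStartNumDescriptors were 16 (see the header for the actual value), the
// capacities would run 16, 24, 36, 54, 81, ... up to the kMaxDescriptors cap.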
bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
    if (fPool) {
        fPool->unref();
        uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
        if (newPoolSize < kMaxDescriptors) {
            fMaxDescriptors = newPoolSize;
        } else {
            fMaxDescriptors = kMaxDescriptors;
        }
    }
    fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                         fMaxDescriptors);
    return SkToBool(fPool);
}

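// Allocates a single set from the current pool. When the running descriptor
// count would exceed the pool's capacity, a fresh (usually larger) pool is
// fetched first and the count restarts at fDescCountPerSet.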
bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                          VkDescriptorSet* ds) {
    if (!fMaxDescriptors) {
        return false;
    }
    fCurrentDescriptorCount += fDescCountPerSet;
    if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
        if (!this->getNewPool(gpu)) {
            return false;
        }
        fCurrentDescriptorCount = fDescCountPerSet;
    }

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, AllocateDescriptorSets(gpu->device(),
                                                          &dsAllocateInfo,
                                                          ds));
    return result == VK_SUCCESS;
}

void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
    if (fDescLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
                                                                  nullptr));
        fDescLayout = VK_NULL_HANDLE;
    }

    if (fPool) {
        fPool->unref();
        fPool = nullptr;
    }
}