1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrVkDescriptorSetManager.h"
9
10 #include "GrVkDescriptorPool.h"
11 #include "GrVkDescriptorSet.h"
12 #include "GrVkGpu.h"
13 #include "GrVkUniformHandler.h"
14
CreateUniformManager(GrVkGpu * gpu)15 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
16 SkSTArray<2, uint32_t> visibilities;
17 // We set the visibility of the first binding to all supported geometry processing shader
18 // stages (vertex, tesselation, geometry, etc.) and the second binding to the fragment
19 // shader.
20 uint32_t geomStages = kVertex_GrShaderFlag;
21 if (gpu->vkCaps().shaderCaps()->geometryShaderSupport()) {
22 geomStages |= kGeometry_GrShaderFlag;
23 }
24 visibilities.push_back(geomStages);
25 visibilities.push_back(kFragment_GrShaderFlag);
26
27 SkTArray<const GrVkSampler*> samplers;
28 return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities,
29 samplers);
30 }
31
CreateSamplerManager(GrVkGpu * gpu,VkDescriptorType type,const GrVkUniformHandler & uniformHandler)32 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
33 GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
34 SkSTArray<4, uint32_t> visibilities;
35 SkSTArray<4, const GrVkSampler*> immutableSamplers;
36 SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
37 for (int i = 0 ; i < uniformHandler.numSamplers(); ++i) {
38 visibilities.push_back(uniformHandler.samplerVisibility(i));
39 immutableSamplers.push_back(uniformHandler.immutableSampler(i));
40 }
41 return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
42 }
43
CreateSamplerManager(GrVkGpu * gpu,VkDescriptorType type,const SkTArray<uint32_t> & visibilities)44 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
45 GrVkGpu* gpu, VkDescriptorType type, const SkTArray<uint32_t>& visibilities) {
46 SkSTArray<4, const GrVkSampler*> immutableSamplers;
47 SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
48 for (int i = 0 ; i < visibilities.count(); ++i) {
49 immutableSamplers.push_back(nullptr);
50 }
51 return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
52 }
53
GrVkDescriptorSetManager(GrVkGpu * gpu,VkDescriptorType type,const SkTArray<uint32_t> & visibilities,const SkTArray<const GrVkSampler * > & immutableSamplers)54 GrVkDescriptorSetManager::GrVkDescriptorSetManager(
55 GrVkGpu* gpu, VkDescriptorType type,
56 const SkTArray<uint32_t>& visibilities,
57 const SkTArray<const GrVkSampler*>& immutableSamplers)
58 : fPoolManager(type, gpu, visibilities, immutableSamplers) {
59 #ifdef SK_DEBUG
60 if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
61 SkASSERT(visibilities.count() == immutableSamplers.count());
62 } else {
63 SkASSERT(immutableSamplers.count() == 0);
64 }
65 #endif
66 for (int i = 0; i < visibilities.count(); ++i) {
67 fBindingVisibilities.push_back(visibilities[i]);
68 }
69 for (int i = 0; i < immutableSamplers.count(); ++i) {
70 const GrVkSampler* sampler = immutableSamplers[i];
71 if (sampler) {
72 sampler->ref();
73 }
74 fImmutableSamplers.push_back(sampler);
75 }
76 }
77
getDescriptorSet(GrVkGpu * gpu,const Handle & handle)78 const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
79 const Handle& handle) {
80 const GrVkDescriptorSet* ds = nullptr;
81 int count = fFreeSets.count();
82 if (count > 0) {
83 ds = fFreeSets[count - 1];
84 fFreeSets.removeShuffle(count - 1);
85 } else {
86 VkDescriptorSet vkDS;
87 fPoolManager.getNewDescriptorSet(gpu, &vkDS);
88
89 ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
90 }
91 SkASSERT(ds);
92 return ds;
93 }
94
// Caches a released descriptor set so a later getDescriptorSet() call can reuse it
// instead of allocating a new one. The set must be non-null.
void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
    SkASSERT(descSet);
    fFreeSets.push_back(descSet);
}
99
release(GrVkGpu * gpu)100 void GrVkDescriptorSetManager::release(GrVkGpu* gpu) {
101 fPoolManager.freeGPUResources(gpu);
102
103 for (int i = 0; i < fFreeSets.count(); ++i) {
104 fFreeSets[i]->unref(gpu);
105 }
106 fFreeSets.reset();
107
108 for (int i = 0; i < fImmutableSamplers.count(); ++i) {
109 if (fImmutableSamplers[i]) {
110 fImmutableSamplers[i]->unref(gpu);
111 }
112 }
113 fImmutableSamplers.reset();
114 }
115
abandon()116 void GrVkDescriptorSetManager::abandon() {
117 fPoolManager.abandonGPUResources();
118
119 for (int i = 0; i < fFreeSets.count(); ++i) {
120 fFreeSets[i]->unrefAndAbandon();
121 }
122 fFreeSets.reset();
123
124 for (int i = 0; i < fImmutableSamplers.count(); ++i) {
125 if (fImmutableSamplers[i]) {
126 fImmutableSamplers[i]->unrefAndAbandon();
127 }
128 }
129 fImmutableSamplers.reset();
130 }
131
isCompatible(VkDescriptorType type,const GrVkUniformHandler * uniHandler) const132 bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
133 const GrVkUniformHandler* uniHandler) const {
134 SkASSERT(uniHandler);
135 if (type != fPoolManager.fDescType) {
136 return false;
137 }
138
139 SkASSERT(type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
140 if (fBindingVisibilities.count() != uniHandler->numSamplers()) {
141 return false;
142 }
143 for (int i = 0; i < uniHandler->numSamplers(); ++i) {
144 if (uniHandler->samplerVisibility(i) != fBindingVisibilities[i] ||
145 uniHandler->immutableSampler(i) != fImmutableSamplers[i]) {
146 return false;
147 }
148 }
149 return true;
150 }
151
isCompatible(VkDescriptorType type,const SkTArray<uint32_t> & visibilities) const152 bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
153 const SkTArray<uint32_t>& visibilities) const {
154 if (type != fPoolManager.fDescType) {
155 return false;
156 }
157
158 if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
159 VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
160 if (fBindingVisibilities.count() != visibilities.count()) {
161 return false;
162 }
163 for (int i = 0; i < visibilities.count(); ++i) {
164 if (visibilities[i] != fBindingVisibilities[i] || fImmutableSamplers[i] != nullptr) {
165 return false;
166 }
167 }
168 }
169 return true;
170 }
171
172 ////////////////////////////////////////////////////////////////////////////////
173
visibility_to_vk_stage_flags(uint32_t visibility)174 VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
175 VkShaderStageFlags flags = 0;
176
177 if (visibility & kVertex_GrShaderFlag) {
178 flags |= VK_SHADER_STAGE_VERTEX_BIT;
179 }
180 if (visibility & kGeometry_GrShaderFlag) {
181 flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
182 }
183 if (visibility & kFragment_GrShaderFlag) {
184 flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
185 }
186 return flags;
187 }
188
// Builds the VkDescriptorSetLayout for this manager's descriptor type and allocates the
// first descriptor pool. Sampler-style types get one binding per visibility entry
// (with optional immutable samplers); the uniform-buffer type gets a fixed two-binding
// layout (geometry + fragment).
GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
        VkDescriptorType type,
        GrVkGpu* gpu,
        const SkTArray<uint32_t>& visibilities,
        const SkTArray<const GrVkSampler*>& immutableSamplers)
    : fDescType(type)
    , fCurrentDescriptorCount(0)
    , fPool(nullptr) {


    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        uint32_t numBindings = visibilities.count();
        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
                new VkDescriptorSetLayoutBinding[numBindings]);
        for (uint32_t i = 0; i < numBindings; ++i) {
            uint32_t visibility = visibilities[i];
            dsSamplerBindings[i].binding = i;
            dsSamplerBindings[i].descriptorType = type;
            dsSamplerBindings[i].descriptorCount = 1;
            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
            // Only combined image samplers may bake an immutable sampler into the layout.
            // NOTE(review): for texel buffers pImmutableSamplers is left unset here;
            // presumably Vulkan ignores it for non-sampler types — confirm.
            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
                if (immutableSamplers[i]) {
                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
                } else {
                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
                }
            }
        }

        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        dsSamplerLayoutCreateInfo.pNext = nullptr;
        dsSamplerLayoutCreateInfo.flags = 0;
        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
        // Setting to nullptr fixes an error in the param checker validation layer. Even though
        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
        // null.
        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
                            CreateDescriptorSetLayout(gpu->device(),
                                                      &dsSamplerLayoutCreateInfo,
                                                      nullptr,
                                                      &fDescLayout));
        fDescCountPerSet = visibilities.count();
    } else {
        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
        GR_STATIC_ASSERT(2 == kUniformDescPerSet);
        SkASSERT(kUniformDescPerSet == visibilities.count());
        // Create Uniform Buffer Descriptor
        // Binding indices are fixed by GrVkUniformHandler so shaders and layouts agree.
        static const uint32_t bindings[kUniformDescPerSet] =
                { GrVkUniformHandler::kGeometryBinding, GrVkUniformHandler::kFragBinding };
        VkDescriptorSetLayoutBinding dsUniBindings[kUniformDescPerSet];
        memset(&dsUniBindings, 0, kUniformDescPerSet * sizeof(VkDescriptorSetLayoutBinding));
        for (int i = 0; i < kUniformDescPerSet; ++i) {
            dsUniBindings[i].binding = bindings[i];
            dsUniBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
            dsUniBindings[i].descriptorCount = 1;
            dsUniBindings[i].stageFlags = visibility_to_vk_stage_flags(visibilities[i]);
            dsUniBindings[i].pImmutableSamplers = nullptr;
        }

        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        uniformLayoutCreateInfo.pNext = nullptr;
        uniformLayoutCreateInfo.flags = 0;
        uniformLayoutCreateInfo.bindingCount = 2;
        uniformLayoutCreateInfo.pBindings = dsUniBindings;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorSetLayout(gpu->device(),
                                                                          &uniformLayoutCreateInfo,
                                                                          nullptr,
                                                                          &fDescLayout));
        fDescCountPerSet = kUniformDescPerSet;
    }

    // Start with a pool sized for kStartNumDescriptors; getNewPool() grows it as needed.
    SkASSERT(fDescCountPerSet < kStartNumDescriptors);
    fMaxDescriptors = kStartNumDescriptors;
    SkASSERT(fMaxDescriptors > 0);
    this->getNewPool(gpu);
}
273
getNewPool(GrVkGpu * gpu)274 void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
275 if (fPool) {
276 fPool->unref(gpu);
277 uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
278 if (newPoolSize < kMaxDescriptors) {
279 fMaxDescriptors = newPoolSize;
280 } else {
281 fMaxDescriptors = kMaxDescriptors;
282 }
283
284 }
285 fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
286 fMaxDescriptors);
287 SkASSERT(fPool);
288 }
289
// Allocates one VkDescriptorSet from the current pool, rolling over to a new, larger
// pool when the current one would be exhausted.
void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                          VkDescriptorSet* ds) {
    // NOTE(review): this early return leaves *ds unwritten; callers presumably never hit
    // it since the constructor sets fMaxDescriptors to kStartNumDescriptors — confirm.
    if (!fMaxDescriptors) {
        return;
    }
    fCurrentDescriptorCount += fDescCountPerSet;
    if (fCurrentDescriptorCount > fMaxDescriptors) {
        // Current pool is full: swap in a bigger pool and restart the count with this
        // set's descriptors.
        this->getNewPool(gpu);
        fCurrentDescriptorCount = fDescCountPerSet;
    }

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
                                                                   &dsAllocateInfo,
                                                                   ds));
}
312
freeGPUResources(GrVkGpu * gpu)313 void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
314 if (fDescLayout) {
315 GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
316 nullptr));
317 fDescLayout = VK_NULL_HANDLE;
318 }
319
320 if (fPool) {
321 fPool->unref(gpu);
322 fPool = nullptr;
323 }
324 }
325
abandonGPUResources()326 void GrVkDescriptorSetManager::DescriptorPoolManager::abandonGPUResources() {
327 fDescLayout = VK_NULL_HANDLE;
328 if (fPool) {
329 fPool->unrefAndAbandon();
330 fPool = nullptr;
331 }
332 }
333