/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkDescriptorSetManager.h"

#include "GrVkDescriptorPool.h"
#include "GrVkDescriptorSet.h"
#include "GrVkGpu.h"
#include "GrVkUniformHandler.h"

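// Creates a manager for uniform buffer descriptor sets with two bindings: binding 0 is
// visible to all supported geometry processing stages and binding 1 to the fragment stage.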
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
    SkSTArray<2, uint32_t> visibilities;
    // We set the visibility of the first binding to all supported geometry processing shader
    // stages (vertex, tessellation, geometry, etc.) and the second binding to the fragment
    // shader.
    uint32_t geomStages = kVertex_GrShaderFlag;
    if (gpu->vkCaps().shaderCaps()->geometryShaderSupport()) {
        geomStages |= kGeometry_GrShaderFlag;
    }
    visibilities.push_back(geomStages);
    visibilities.push_back(kFragment_GrShaderFlag);
    return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities);
}

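// Creates a manager for sampler or texel buffer descriptor sets. The per-binding shader
// visibilities are pulled from the uniform handler, one binding per sampler/texel buffer.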
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
        GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
    SkSTArray<4, uint32_t> visibilities;
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
        for (int i = 0; i < uniformHandler.numSamplers(); ++i) {
            visibilities.push_back(uniformHandler.samplerVisibility(i));
        }
    } else {
        SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
        for (int i = 0; i < uniformHandler.numTexelBuffers(); ++i) {
            visibilities.push_back(uniformHandler.texelBufferVisibility(i));
        }
    }
    return CreateSamplerManager(gpu, type, visibilities);
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
        GrVkGpu* gpu, VkDescriptorType type, const SkTArray<uint32_t>& visibilities) {
    return new GrVkDescriptorSetManager(gpu, type, visibilities);
}

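// Typical lifecycle (a sketch only; it assumes the GrVkDescriptorSet wrapper exposes its
// VkDescriptorSet through a descriptorSet()-style accessor declared in its header):
//   GrVkDescriptorSetManager* manager = GrVkDescriptorSetManager::CreateUniformManager(gpu);
//   const GrVkDescriptorSet* ds = manager->getDescriptorSet(gpu, handle);
//   // ... record ds->descriptorSet() into a command buffer ...
//   manager->recycleDescriptorSet(ds);  // return the set to the free list for reuse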
GrVkDescriptorSetManager::GrVkDescriptorSetManager(GrVkGpu* gpu,
                                                   VkDescriptorType type,
                                                   const SkTArray<uint32_t>& visibilities)
    : fPoolManager(type, gpu, visibilities) {
    for (int i = 0; i < visibilities.count(); ++i) {
        fBindingVisibilities.push_back(visibilities[i]);
    }
}

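// Returns a descriptor set from the free list if one is available; otherwise allocates a
// fresh VkDescriptorSet from the pool manager and wraps it in a new GrVkDescriptorSet.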
const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu,
                                                                    const Handle& handle) {
    const GrVkDescriptorSet* ds = nullptr;
    int count = fFreeSets.count();
    if (count > 0) {
        ds = fFreeSets[count - 1];
        fFreeSets.removeShuffle(count - 1);
    } else {
        VkDescriptorSet vkDS;
        fPoolManager.getNewDescriptorSet(gpu, &vkDS);

        ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
    }
    SkASSERT(ds);
    return ds;
}

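// Recycled sets are never returned to the underlying VkDescriptorPool; they are simply kept
// on the free list so later getDescriptorSet() calls can reuse them without reallocating.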
void GrVkDescriptorSetManager::recycleDescriptorSet(const GrVkDescriptorSet* descSet) {
    SkASSERT(descSet);
    fFreeSets.push_back(descSet);
}

void GrVkDescriptorSetManager::release(const GrVkGpu* gpu) {
    fPoolManager.freeGPUResources(gpu);

    for (int i = 0; i < fFreeSets.count(); ++i) {
        fFreeSets[i]->unref(gpu);
    }
    fFreeSets.reset();
}

void GrVkDescriptorSetManager::abandon() {
    fPoolManager.abandonGPUResources();

    for (int i = 0; i < fFreeSets.count(); ++i) {
        fFreeSets[i]->unrefAndAbandon();
    }
    fFreeSets.reset();
}

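// A manager is compatible with a uniform handler (or visibility list) if the descriptor
// types match and the per-binding shader visibilities line up one-to-one.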
bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
                                            const GrVkUniformHandler* uniHandler) const {
    SkASSERT(uniHandler);
    if (type != fPoolManager.fDescType) {
        return false;
    }

    if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
        if (fBindingVisibilities.count() != uniHandler->numSamplers()) {
            return false;
        }
        for (int i = 0; i < uniHandler->numSamplers(); ++i) {
            if (uniHandler->samplerVisibility(i) != fBindingVisibilities[i]) {
                return false;
            }
        }
    } else if (VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        if (fBindingVisibilities.count() != uniHandler->numTexelBuffers()) {
            return false;
        }
        for (int i = 0; i < uniHandler->numTexelBuffers(); ++i) {
            if (uniHandler->texelBufferVisibility(i) != fBindingVisibilities[i]) {
                return false;
            }
        }
    }
    return true;
}

bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
                                            const SkTArray<uint32_t>& visibilities) const {
    if (type != fPoolManager.fDescType) {
        return false;
    }

    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        if (fBindingVisibilities.count() != visibilities.count()) {
            return false;
        }
        for (int i = 0; i < visibilities.count(); ++i) {
            if (visibilities[i] != fBindingVisibilities[i]) {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
    VkShaderStageFlags flags = 0;

    if (visibility & kVertex_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_VERTEX_BIT;
    }
    if (visibility & kGeometry_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
    }
    if (visibility & kFragment_GrShaderFlag) {
        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
    }
    return flags;
}

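// Builds the VkDescriptorSetLayout describing every binding up front, then grabs an initial
// pool sized for kStartNumDescriptors descriptors.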
GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
        VkDescriptorType type,
        GrVkGpu* gpu,
        const SkTArray<uint32_t>& visibilities)
    : fDescType(type)
    , fCurrentDescriptorCount(0)
    , fPool(nullptr) {
    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
        uint32_t numBindings = visibilities.count();
        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
                new VkDescriptorSetLayoutBinding[numBindings]);
        for (uint32_t i = 0; i < numBindings; ++i) {
            uint32_t visibility = visibilities[i];
            dsSamplerBindings[i].binding = i;
            dsSamplerBindings[i].descriptorType = type;
            dsSamplerBindings[i].descriptorCount = 1;
            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
            dsSamplerBindings[i].pImmutableSamplers = nullptr;
        }

        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        dsSamplerLayoutCreateInfo.pNext = nullptr;
        dsSamplerLayoutCreateInfo.flags = 0;
        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
        // Setting pBindings to nullptr fixes an error in the param checker validation layer.
        // Even though a bindingCount of 0 is valid, the layer still tries to validate
        // pBindings unless it is null.
        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
                            CreateDescriptorSetLayout(gpu->device(),
                                                      &dsSamplerLayoutCreateInfo,
                                                      nullptr,
                                                      &fDescLayout));
        fDescCountPerSet = visibilities.count();
    } else {
        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
        GR_STATIC_ASSERT(2 == kUniformDescPerSet);
        SkASSERT(kUniformDescPerSet == visibilities.count());
        // Create Uniform Buffer Descriptor
        static const uint32_t bindings[kUniformDescPerSet] =
                { GrVkUniformHandler::kGeometryBinding, GrVkUniformHandler::kFragBinding };
        VkDescriptorSetLayoutBinding dsUniBindings[kUniformDescPerSet];
        memset(&dsUniBindings, 0, kUniformDescPerSet * sizeof(VkDescriptorSetLayoutBinding));
        for (int i = 0; i < kUniformDescPerSet; ++i) {
            dsUniBindings[i].binding = bindings[i];
            dsUniBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
            dsUniBindings[i].descriptorCount = 1;
            dsUniBindings[i].stageFlags = visibility_to_vk_stage_flags(visibilities[i]);
            dsUniBindings[i].pImmutableSamplers = nullptr;
        }

        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        uniformLayoutCreateInfo.pNext = nullptr;
        uniformLayoutCreateInfo.flags = 0;
        uniformLayoutCreateInfo.bindingCount = kUniformDescPerSet;
        uniformLayoutCreateInfo.pBindings = dsUniBindings;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorSetLayout(gpu->device(),
                                                                          &uniformLayoutCreateInfo,
                                                                          nullptr,
                                                                          &fDescLayout));
        fDescCountPerSet = kUniformDescPerSet;
    }

    SkASSERT(fDescCountPerSet < kStartNumDescriptors);
    fMaxDescriptors = kStartNumDescriptors;
    SkASSERT(fMaxDescriptors > 0);
    this->getNewPool(gpu);
}

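// Each replacement pool grows by half again: newPoolSize = n + ((n + 1) >> 1), capped at
// kMaxDescriptors (both constants live in the header). For example, if kStartNumDescriptors
// were 16, the pool sizes would run 16, 24, 36, 54, ... until the cap is reached.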
void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
    if (fPool) {
        fPool->unref(gpu);
        uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
        if (newPoolSize < kMaxDescriptors) {
            fMaxDescriptors = newPoolSize;
        } else {
            fMaxDescriptors = kMaxDescriptors;
        }
    }
    fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                         fMaxDescriptors);
    SkASSERT(fPool);
}

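// Allocates a single VkDescriptorSet, consuming fDescCountPerSet descriptors from the
// current pool. If the pool would be exhausted, a larger pool is fetched first and the
// running count restarts at fDescCountPerSet.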
void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                          VkDescriptorSet* ds) {
    if (!fMaxDescriptors) {
        return;
    }
    fCurrentDescriptorCount += fDescCountPerSet;
    if (fCurrentDescriptorCount > fMaxDescriptors) {
        this->getNewPool(gpu);
        fCurrentDescriptorCount = fDescCountPerSet;
    }

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
                                                                   &dsAllocateInfo,
                                                                   ds));
}

void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) {
    if (fDescLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
                                                                  nullptr));
        fDescLayout = VK_NULL_HANDLE;
    }

    if (fPool) {
        fPool->unref(gpu);
        fPool = nullptr;
    }
}

void GrVkDescriptorSetManager::DescriptorPoolManager::abandonGPUResources() {
    fDescLayout = VK_NULL_HANDLE;
    if (fPool) {
        fPool->unrefAndAbandon();
        fPool = nullptr;
    }
}