/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
#include "vk_format.h"
#include "vk_util.h"

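/* Returns true when every immutable sampler in the binding has identical
 * hardware sampler state (the 16-byte / 4-dword sampler word block). When
 * they are all equal, the per-descriptor sampler words can be dropped from
 * the set layout further below.
 */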
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
	if (!samplers)
		return false;
	for(uint32_t i = 1; i < count; ++i) {
		if (memcmp(radv_sampler_from_handle(samplers[0])->state,
		           radv_sampler_from_handle(samplers[i])->state, 16)) {
			return false;
		}
	}
	return true;
}

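/* The API allows bindings to appear in pCreateInfo in any order; sort a
 * copy by binding index so the layout can be built in increasing order.
 */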
static int binding_compare(const void *av, const void *bv)
{
	const VkDescriptorSetLayoutBinding *a = (const VkDescriptorSetLayoutBinding*)av;
	const VkDescriptorSetLayoutBinding *b = (const VkDescriptorSetLayoutBinding*)bv;

	return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 1 : 0;
}

static VkDescriptorSetLayoutBinding *
create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count) {
	VkDescriptorSetLayoutBinding *sorted_bindings = malloc(MAX2(count * sizeof(VkDescriptorSetLayoutBinding), 1));
	if (!sorted_bindings)
		return NULL;

	if (count) {
		memcpy(sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding));
		qsort(sorted_bindings, count, sizeof(VkDescriptorSetLayoutBinding), binding_compare);
	}

	return sorted_bindings;
}

VkResult radv_CreateDescriptorSetLayout(
	VkDevice                                    _device,
	const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorSetLayout*                      pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
	const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);

	uint32_t num_bindings = 0;
	uint32_t immutable_sampler_count = 0;
	uint32_t ycbcr_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		num_bindings = MAX2(num_bindings, pCreateInfo->pBindings[j].binding + 1);
		if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
		     pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
		     pCreateInfo->pBindings[j].pImmutableSamplers) {
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;

			bool has_ycbcr_sampler = false;
			for (unsigned i = 0; i < pCreateInfo->pBindings[j].descriptorCount; ++i) {
				if (radv_sampler_from_handle(pCreateInfo->pBindings[j].pImmutableSamplers[i])->ycbcr_sampler)
					has_ycbcr_sampler = true;
			}

			if (has_ycbcr_sampler)
				ycbcr_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
		}
	}

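	/* Everything lives in one allocation: the layout struct with its
	 * per-binding array, then 4 dwords of state per immutable sampler,
	 * then (if any YCbCr conversions are present) a per-binding offset
	 * table followed by the radv_sampler_ycbcr_conversion structs.
	 */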
	uint32_t samplers_offset =
			offsetof(struct radv_descriptor_set_layout, binding[num_bindings]);
	size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
	if (ycbcr_sampler_count > 0) {
		/* Store block of offsets first, followed by the conversion descriptors (padded to the struct alignment) */
		size += num_bindings * sizeof(uint32_t);
		size = ALIGN(size, alignof(struct radv_sampler_ycbcr_conversion));
		size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion);
	}

	set_layout = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
	                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &set_layout->base,
			    VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);

	set_layout->flags = pCreateInfo->flags;
	set_layout->layout_size = size;

	/* We just allocate all the samplers at the end of the struct */
	uint32_t *samplers = (uint32_t*)&set_layout->binding[num_bindings];
	struct radv_sampler_ycbcr_conversion *ycbcr_samplers = NULL;
	uint32_t *ycbcr_sampler_offsets = NULL;

	if (ycbcr_sampler_count > 0) {
		ycbcr_sampler_offsets = samplers + 4 * immutable_sampler_count;
		set_layout->ycbcr_sampler_offsets_offset = (char*)ycbcr_sampler_offsets - (char*)set_layout;

		uintptr_t first_ycbcr_sampler_offset = (uintptr_t)ycbcr_sampler_offsets + sizeof(uint32_t) * num_bindings;
		first_ycbcr_sampler_offset = ALIGN(first_ycbcr_sampler_offset, alignof(struct radv_sampler_ycbcr_conversion));
		ycbcr_samplers = (struct radv_sampler_ycbcr_conversion *)first_ycbcr_sampler_offset;
	} else
		set_layout->ycbcr_sampler_offsets_offset = 0;

	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
	                                                                pCreateInfo->bindingCount);
	if (!bindings) {
		vk_object_base_finish(&set_layout->base);
		vk_free2(&device->vk.alloc, pAllocator, set_layout);
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	set_layout->binding_count = num_bindings;
	set_layout->shader_stages = 0;
	set_layout->dynamic_shader_stages = 0;
	set_layout->has_immutable_samplers = false;
	set_layout->size = 0;

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + j;
		uint32_t b = binding->binding;
		uint32_t alignment = 0;
		unsigned binding_buffer_count = 0;
		uint32_t descriptor_count = binding->descriptorCount;
		bool has_ycbcr_sampler = false;

		/* main image + fmask */
		uint32_t max_sampled_image_descriptors = 2;

		if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
		    binding->pImmutableSamplers) {
			for (unsigned i = 0; i < binding->descriptorCount; ++i) {
				struct radv_sampler_ycbcr_conversion *conversion =
					radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;

				if (conversion) {
					has_ycbcr_sampler = true;
					max_sampled_image_descriptors = MAX2(max_sampled_image_descriptors,
					                                     vk_format_get_plane_count(conversion->format));
				}
			}
		}

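		/* Per-descriptor sizes below are in bytes: buffer descriptors
		 * are a 16-byte V#, image descriptors are 32 bytes doubled to
		 * 64 to leave room for an FMASK descriptor, and a combined
		 * image+sampler adds a 32-byte slot holding the 16-byte
		 * sampler state.
		 */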
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			binding_buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			binding_buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			alignment = 16;
			set_layout->binding[b].size = descriptor_count;
			descriptor_count = 1;
			break;
		default:
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = descriptor_count;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		if (variable_flags && binding->binding < variable_flags->bindingCount &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			assert(!binding->pImmutableSamplers); /* It is ill-defined how many immutable samplers would be valid for a variable-count binding. */
			assert(binding->binding == num_bindings - 1);

			set_layout->has_variable_descriptors = true;
		}

		if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
		     binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
		    binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers_offset = samplers_offset;
			set_layout->binding[b].immutable_samplers_equal =
				has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
			set_layout->has_immutable_samplers = true;

			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

			/* Don't reserve space for the samplers if they're not accessed. */
			if (set_layout->binding[b].immutable_samplers_equal) {
				if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
				    max_sampled_image_descriptors <= 2)
					set_layout->binding[b].size -= 32;
				else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
					set_layout->binding[b].size -= 16;
			}
			samplers += 4 * binding->descriptorCount;
			samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;

			if (has_ycbcr_sampler) {
				ycbcr_sampler_offsets[b] = (const char*)ycbcr_samplers - (const char*)set_layout;
				for (uint32_t i = 0; i < binding->descriptorCount; i++) {
					if (radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler)
						ycbcr_samplers[i] = *radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;
					else
						ycbcr_samplers[i].format = VK_FORMAT_UNDEFINED;
				}
				ycbcr_samplers += binding->descriptorCount;
			}
		}

		set_layout->size += descriptor_count * set_layout->binding[b].size;
		buffer_count += descriptor_count * binding_buffer_count;
		dynamic_offset_count += descriptor_count *
			set_layout->binding[b].dynamic_offset_count;
		set_layout->shader_stages |= binding->stageFlags;
	}

	free(bindings);

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
	VkDevice                                    _device,
	VkDescriptorSetLayout                       _set_layout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_object_base_finish(&set_layout->base);
	vk_free2(&device->vk.alloc, pAllocator, set_layout);
}

void radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                        VkDescriptorSetLayoutSupport* pSupport)
{
	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
	                                                                pCreateInfo->bindingCount);
	if (!bindings) {
		pSupport->supported = false;
		return;
	}

	const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
	VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count =
		vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
	if (variable_count) {
		variable_count->maxVariableDescriptorCount = 0;
	}

	bool supported = true;
	uint64_t size = 0;
	for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + i;

		uint64_t descriptor_size = 0;
		uint64_t descriptor_alignment = 1;
		uint32_t descriptor_count = binding->descriptorCount;
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			descriptor_size = 16;
			descriptor_alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			descriptor_size = 64;
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
				descriptor_size = 96;
			} else {
				descriptor_size = 64;
			}
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
				descriptor_size = 16;
				descriptor_alignment = 16;
			}
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			descriptor_alignment = 16;
			descriptor_size = descriptor_count;
			descriptor_count = 1;
			break;
		default:
			break;
		}

		if (size && !align_u64(size, descriptor_alignment)) {
			supported = false;
		}
		size = align_u64(size, descriptor_alignment);
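		/* Determine the largest descriptor count that keeps the total
		 * set size within an INT32_MAX (2 GiB) bound; the binding is
		 * unsupported if even the requested count does not fit.
		 */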
		uint64_t max_count = INT32_MAX;
		if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			max_count = INT32_MAX - size;
		else if (descriptor_size)
			max_count = (INT32_MAX - size) / descriptor_size;

		if (max_count < descriptor_count) {
			supported = false;
		}
		if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
		}
		size += descriptor_count * descriptor_size;
	}

	free(bindings);

	pSupport->supported = supported;
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
	VkDevice                                    _device,
	const VkPipelineLayoutCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineLayout*                           pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &layout->base,
			    VK_OBJECT_TYPE_PIPELINE_LAYOUT);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;
	uint16_t dynamic_shader_stages = 0;

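	/* Hash the set layouts and the push constant size into a SHA-1 that
	 * identifies this layout; elsewhere in the driver it is used e.g. as
	 * part of shader/pipeline cache keys.
	 */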
	_mesa_sha1_init(&ctx);
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		layout->set[set].dynamic_offset_count = 0;
		layout->set[set].dynamic_offset_stages = 0;

		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			layout->set[set].dynamic_offset_count +=
				set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
			layout->set[set].dynamic_offset_stages |= set_layout->dynamic_shader_stages;
		}
		dynamic_offset_count += layout->set[set].dynamic_offset_count;
		dynamic_shader_stages |= layout->set[set].dynamic_offset_stages;
		_mesa_sha1_update(&ctx, set_layout, set_layout->layout_size);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->dynamic_shader_stages = dynamic_shader_stages;
	layout->push_constant_size = 0;

	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(&ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(&ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
	VkDevice                                    _device,
	VkPipelineLayout                            _pipelineLayout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;

	vk_object_base_finish(&pipeline_layout->base);
	vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   const struct radv_descriptor_set_layout *layout,
			   const uint32_t *variable_count,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	uint32_t buffer_count = layout->buffer_count;
	if (variable_count) {
		unsigned stride = 1;
		if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_SAMPLER ||
		    layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			stride = 0;
		buffer_count = layout->binding[layout->binding_count - 1].buffer_offset +
		               *variable_count * stride;
	}
	unsigned range_offset = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * buffer_count;
	unsigned mem_size = range_offset +
		sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

	if (pool->host_memory_base) {
		if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);

		set = (struct radv_descriptor_set*)pool->host_memory_ptr;
		pool->host_memory_ptr += mem_size;
	} else {
		set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8,
		                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set)
			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	memset(set, 0, mem_size);

	vk_object_base_init(&device->vk, &set->base,
			    VK_OBJECT_TYPE_DESCRIPTOR_SET);

	if (layout->dynamic_offset_count) {
		set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
	}

	set->layout = layout;
	set->buffer_count = buffer_count;
	uint32_t layout_size = layout->size;
	if (variable_count) {
		assert(layout->has_variable_descriptors);
		uint32_t stride = layout->binding[layout->binding_count - 1].size;
		if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			stride = 1;

		layout_size = layout->binding[layout->binding_count - 1].offset +
		              *variable_count * stride;
	}
	layout_size = align_u32(layout_size, 32);
	if (layout_size) {
		set->size = layout_size;

		if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
			vk_free2(&device->vk.alloc, NULL, set);
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
		}

		/* try to allocate linearly first, so that we don't spend
		 * time looking for gaps if the app only allocates &
		 * resets via the pool. */
		if (pool->current_offset + layout_size <= pool->size) {
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
			set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
			if (!pool->host_memory_base) {
				pool->entries[pool->entry_count].offset = pool->current_offset;
				pool->entries[pool->entry_count].size = layout_size;
				pool->entries[pool->entry_count].set = set;
				pool->entry_count++;
			}
			pool->current_offset += layout_size;
		} else if (!pool->host_memory_base) {
			uint64_t offset = 0;
			int index;

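			/* First-fit search: entries are kept sorted by offset,
			 * so walk them until the gap before an entry is large
			 * enough to hold this set.
			 */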
			for (index = 0; index < pool->entry_count; ++index) {
				if (pool->entries[index].offset - offset >= layout_size)
					break;
				offset = pool->entries[index].offset + pool->entries[index].size;
			}

			if (pool->size - offset < layout_size) {
				vk_free2(&device->vk.alloc, NULL, set);
				return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
			}
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
			set->va = radv_buffer_get_va(set->bo) + offset;
			memmove(&pool->entries[index + 1], &pool->entries[index],
				sizeof(pool->entries[0]) * (pool->entry_count - index));
			pool->entries[index].offset = offset;
			pool->entries[index].size = layout_size;
			pool->entries[index].set = set;
			pool->entry_count++;
		} else
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
	}

	if (layout->has_immutable_samplers) {
		for (unsigned i = 0; i < layout->binding_count; ++i) {
			if (!layout->binding[i].immutable_samplers_offset ||
			    layout->binding[i].immutable_samplers_equal)
				continue;

			unsigned offset = layout->binding[i].offset / 4;
			if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
				offset += radv_combined_image_descriptor_sampler_offset(layout->binding + i) / 4;

			const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
			for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
				memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
				offset += layout->binding[i].size / 4;
			}
		}
	}
	*out_set = set;
	return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	assert(!pool->host_memory_base);

	if (free_bo && set->size && !pool->host_memory_base) {
		uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
		for (int i = 0; i < pool->entry_count; ++i) {
			if (pool->entries[i].offset == offset) {
				memmove(&pool->entries[i], &pool->entries[i+1],
					sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
				--pool->entry_count;
				break;
			}
		}
	}
	vk_object_base_finish(&set->base);
	vk_free2(&device->vk.alloc, NULL, set);
}

static void radv_destroy_descriptor_pool(struct radv_device *device,
                                         const VkAllocationCallbacks *pAllocator,
                                         struct radv_descriptor_pool *pool)
{
	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);

	vk_object_base_finish(&pool->base);
	vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult radv_CreateDescriptorPool(
	VkDevice                                    _device,
	const VkDescriptorPoolCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorPool*                           pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	uint64_t size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0, bo_count = 0, range_count = 0;

	vk_foreach_struct(ext, pCreateInfo->pNext) {
		switch (ext->sType) {
		case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
			const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *info =
				(const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT*)ext;
			/* The sizes are 4-byte aligned, and we need to align
			 * to at most 32 bytes, which costs at most 28 extra
			 * bytes per binding. */
			bo_size += 28llu * info->maxInlineUniformBlockBindings;
			break;
		}
		default:
			break;
		}
	}

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
			bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			break;
		}
	}

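	/* Without FREE_DESCRIPTOR_SET_BIT the pool can only be reset as a
	 * whole, so all host-side set structs can be bump-allocated from one
	 * block appended to the pool; otherwise per-set entries are tracked
	 * so individual sets can be freed and their GPU ranges reused.
	 */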
	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
		host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
		host_size += sizeof(struct radv_descriptor_range) * range_count;
		size += host_size;
	} else {
		size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
	}

	pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
	                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	vk_object_base_init(&device->vk, &pool->base,
			    VK_OBJECT_TYPE_DESCRIPTOR_POOL);

	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
		pool->host_memory_ptr = pool->host_memory_base;
		pool->host_memory_end = (uint8_t*)pool + size;
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
						     RADEON_DOMAIN_VRAM,
						     RADEON_FLAG_NO_INTERPROCESS_SHARING |
						     RADEON_FLAG_READ_ONLY |
						     RADEON_FLAG_32BIT,
						     RADV_BO_PRIORITY_DESCRIPTOR);
		if (!pool->bo) {
			radv_destroy_descriptor_pool(device, pAllocator, pool);
			return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
		}
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
		if (!pool->mapped_ptr) {
			radv_destroy_descriptor_pool(device, pAllocator, pool);
			return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
		}
	}
	pool->size = bo_size;
	pool->max_entry_count = pCreateInfo->maxSets;

	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	radv_destroy_descriptor_pool(device, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
		pool->entry_count = 0;
	}

	pool->current_offset = 0;
	pool->host_memory_ptr = pool->host_memory_base;

	return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
	VkDevice                                    _device,
	const VkDescriptorSetAllocateInfo*          pAllocateInfo,
	VkDescriptorSet*                            pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set = NULL;

	const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_counts =
		vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
	const uint32_t zero = 0;

	/* allocate a set of buffers for each shader to contain descriptors */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		const uint32_t *variable_count = NULL;
		if (variable_counts) {
			if (i < variable_counts->descriptorSetCount)
				variable_count = variable_counts->pDescriptorCounts + i;
			else
				variable_count = &zero;
		}

		assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

		result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS) {
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);
		for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
			pDescriptorSets[i] = VK_NULL_HANDLE;
		}
	}
	return result;
}

VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set && !pool->host_memory_base)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
					  struct radv_cmd_buffer *cmd_buffer,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	if (!buffer_view) {
		memset(dst, 0, 4 * 4);
		return;
	}

	memcpy(dst, buffer_view->state, 4 * 4);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
	else
		*buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
                                    struct radv_cmd_buffer *cmd_buffer,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);

	if (!buffer) {
		memset(dst, 0, 4 * 4);
		return;
	}

	uint64_t va = radv_buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	/* robustBufferAccess is relaxed enough to allow this (in combination
	 * with the alignment/size we return from vkGetBufferMemoryRequirements)
	 * and this allows the shader compiler to create more efficient 8/16-bit
	 * buffer accesses. */
	range = align(range, 4);

	va += buffer_info->offset + buffer->offset;

	uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			      S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			      S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			      S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			      S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			      S_008F0C_RESOURCE_LEVEL(1);
	} else {
		rsrc_word3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			      S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	}

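	/* The four dwords form the hardware buffer resource descriptor (V#):
	 * base address low bits, base address high bits, size in bytes, and
	 * the swizzle/format word assembled above.
	 */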
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = rsrc_word3;

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
	else
		*buffer_list = buffer->bo;
}

static void write_block_descriptor(struct radv_device *device,
                                   struct radv_cmd_buffer *cmd_buffer,
                                   void *dst,
                                   const VkWriteDescriptorSet *writeset)
{
	const VkWriteDescriptorSetInlineUniformBlockEXT *inline_ub =
		vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);

	memcpy(dst, inline_ub->pData, inline_ub->dataSize);
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va;
	unsigned size;

	if (!buffer) {
		range->va = 0;
		return;
	}

	va = radv_buffer_get_va(buffer->bo);
	size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	/* robustBufferAccess is relaxed enough to allow this (in combination
	 * with the alignment/size we return from vkGetBufferMemoryRequirements)
	 * and this allows the shader compiler to create more efficient 8/16-bit
	 * buffer accesses. */
	size = align(size, 4);

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
		       struct radv_cmd_buffer *cmd_buffer,
		       unsigned size, unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       VkDescriptorType descriptor_type,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
	union radv_descriptor *descriptor;

	if (!iview) {
		memset(dst, 0, size);
		return;
	}

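	/* The image view keeps two prebuilt descriptors: the storage variant
	 * is built for shader write access, the regular one for sampled
	 * reads; pick the right one and copy it verbatim.
	 */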
1003 	if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
1004 		descriptor = &iview->storage_descriptor;
1005 	} else {
1006 		descriptor = &iview->descriptor;
1007 	}
1008 
1009 	memcpy(dst, descriptor, size);
1010 
1011 	if (cmd_buffer)
1012 		radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo);
1013 	else
1014 		*buffer_list = iview->bo;
1015 }
1016 
1017 static void
write_combined_image_sampler_descriptor(struct radv_device * device,struct radv_cmd_buffer * cmd_buffer,unsigned sampler_offset,unsigned * dst,struct radeon_winsys_bo ** buffer_list,VkDescriptorType descriptor_type,const VkDescriptorImageInfo * image_info,bool has_sampler)1018 write_combined_image_sampler_descriptor(struct radv_device *device,
1019 					struct radv_cmd_buffer *cmd_buffer,
1020 					unsigned sampler_offset,
1021 					unsigned *dst,
1022 					struct radeon_winsys_bo **buffer_list,
1023 					VkDescriptorType descriptor_type,
1024 					const VkDescriptorImageInfo *image_info,
1025 					bool has_sampler)
1026 {
1027 	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
1028 
1029 	write_image_descriptor(device, cmd_buffer, sampler_offset, dst, buffer_list,
1030 	                       descriptor_type, image_info);
1031 	/* copy over sampler state */
1032 	if (has_sampler) {
1033 		memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
1034 	}
1035 }
1036 
1037 static void
write_sampler_descriptor(struct radv_device * device,unsigned * dst,const VkDescriptorImageInfo * image_info)1038 write_sampler_descriptor(struct radv_device *device,
1039 					unsigned *dst,
1040 					const VkDescriptorImageInfo *image_info)
1041 {
1042 	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
1043 
1044 	memcpy(dst, sampler->state, 16);
1045 }
1046 
radv_update_descriptor_sets(struct radv_device * device,struct radv_cmd_buffer * cmd_buffer,VkDescriptorSet dstSetOverride,uint32_t descriptorWriteCount,const VkWriteDescriptorSet * pDescriptorWrites,uint32_t descriptorCopyCount,const VkCopyDescriptorSet * pDescriptorCopies)1047 void radv_update_descriptor_sets(
1048 	struct radv_device*                         device,
1049 	struct radv_cmd_buffer*                     cmd_buffer,
1050 	VkDescriptorSet                             dstSetOverride,
1051 	uint32_t                                    descriptorWriteCount,
1052 	const VkWriteDescriptorSet*                 pDescriptorWrites,
1053 	uint32_t                                    descriptorCopyCount,
1054 	const VkCopyDescriptorSet*                  pDescriptorCopies)
1055 {
1056 	uint32_t i, j;
1057 	for (i = 0; i < descriptorWriteCount; i++) {
1058 		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
1059 		RADV_FROM_HANDLE(radv_descriptor_set, set,
1060 		                 dstSetOverride ? dstSetOverride : writeset->dstSet);
1061 		const struct radv_descriptor_set_binding_layout *binding_layout =
1062 			set->layout->binding + writeset->dstBinding;
1063 		uint32_t *ptr = set->mapped_ptr;
1064 		struct radeon_winsys_bo **buffer_list =  set->descriptors;
1065 		/* Immutable samplers are not copied into push descriptors when they are
1066 		 * allocated, so if we are writing push descriptors we have to copy the
1067 		 * immutable samplers into them now.
1068 		 */
1069 		const bool copy_immutable_samplers = cmd_buffer &&
1070 			binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
1071 		const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);
1072 
1073 		ptr += binding_layout->offset / 4;
1074 
1075 		if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
1076 			write_block_descriptor(device, cmd_buffer, (uint8_t*)ptr + writeset->dstArrayElement, writeset);
1077 			continue;
1078 		}
1079 
1080 		ptr += binding_layout->size * writeset->dstArrayElement / 4;
1081 		buffer_list += binding_layout->buffer_offset;
1082 		buffer_list += writeset->dstArrayElement;
1083 		for (j = 0; j < writeset->descriptorCount; ++j) {
1084 			switch(writeset->descriptorType) {
1085 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1086 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1087 				unsigned idx = writeset->dstArrayElement + j;
1088 				idx += binding_layout->dynamic_offset_offset;
1089 				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
1090 				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
1091 								buffer_list, writeset->pBufferInfo + j);
1092 				break;
1093 			}
1094 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1095 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1096 				write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
1097 							writeset->pBufferInfo + j);
1098 				break;
1099 			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1100 			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1101 				write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
1102 							      writeset->pTexelBufferView[j]);
1103 				break;
1104 			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1105 			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1106 			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1107 				write_image_descriptor(device, cmd_buffer, 64, ptr, buffer_list,
1108 						       writeset->descriptorType,
1109 						       writeset->pImageInfo + j);
1110 				break;
1111 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
1112 				unsigned sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout);
1113 				write_combined_image_sampler_descriptor(device, cmd_buffer, sampler_offset,
1114 									ptr, buffer_list,
1115 									writeset->descriptorType,
1116 									writeset->pImageInfo + j,
1117 									!binding_layout->immutable_samplers_offset);
1118 				if (copy_immutable_samplers) {
1119 					const unsigned idx = writeset->dstArrayElement + j;
1120 					memcpy((char*)ptr + sampler_offset, samplers + 4 * idx, 16);
1121 				}
1122 				break;
1123 			}
1124 			case VK_DESCRIPTOR_TYPE_SAMPLER:
1125 				if (!binding_layout->immutable_samplers_offset) {
1126 					write_sampler_descriptor(device, ptr,
1127 					                         writeset->pImageInfo + j);
1128 				} else if (copy_immutable_samplers) {
1129 					unsigned idx = writeset->dstArrayElement + j;
1130 					memcpy(ptr, samplers + 4 * idx, 16);
1131 				}
1132 				break;
1133 			default:
1134 				break;
1135 			}
1136 			ptr += binding_layout->size / 4;
1137 			++buffer_list;
1138 		}
1139 
1140 	}
1141 
1142 	for (i = 0; i < descriptorCopyCount; i++) {
1143 		const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
1144 		RADV_FROM_HANDLE(radv_descriptor_set, src_set,
1145 		                 copyset->srcSet);
1146 		RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
1147 		                 copyset->dstSet);
1148 		const struct radv_descriptor_set_binding_layout *src_binding_layout =
1149 			src_set->layout->binding + copyset->srcBinding;
1150 		const struct radv_descriptor_set_binding_layout *dst_binding_layout =
1151 			dst_set->layout->binding + copyset->dstBinding;
1152 		uint32_t *src_ptr = src_set->mapped_ptr;
1153 		uint32_t *dst_ptr = dst_set->mapped_ptr;
1154 		struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
1155 		struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;
1156 
1157 		src_ptr += src_binding_layout->offset / 4;
1158 		dst_ptr += dst_binding_layout->offset / 4;
1159 
1160 		if (src_binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
1161 			src_ptr += copyset->srcArrayElement / 4;
1162 			dst_ptr += copyset->dstArrayElement / 4;
1163 
1164 			memcpy(dst_ptr, src_ptr, copyset->descriptorCount);
1165 			continue;
1166 		}
1167 
1168 		src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
1169 		dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;
1170 
1171 		src_buffer_list += src_binding_layout->buffer_offset;
1172 		src_buffer_list += copyset->srcArrayElement;
1173 
1174 		dst_buffer_list += dst_binding_layout->buffer_offset;
1175 		dst_buffer_list += copyset->dstArrayElement;
1176 
1177 		for (j = 0; j < copyset->descriptorCount; ++j) {
1178 			switch (src_binding_layout->type) {
1179 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1180 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1181 				unsigned src_idx = copyset->srcArrayElement + j;
1182 				unsigned dst_idx = copyset->dstArrayElement + j;
1183 				struct radv_descriptor_range *src_range, *dst_range;
1184 				src_idx += src_binding_layout->dynamic_offset_offset;
1185 				dst_idx += dst_binding_layout->dynamic_offset_offset;
1186 
1187 				src_range = src_set->dynamic_descriptors + src_idx;
1188 				dst_range = dst_set->dynamic_descriptors + dst_idx;
1189 				*dst_range = *src_range;
1190 				break;
1191 			}
1192 			default:
1193 				memcpy(dst_ptr, src_ptr, src_binding_layout->size);
1194 			}
1195 			src_ptr += src_binding_layout->size / 4;
1196 			dst_ptr += dst_binding_layout->size / 4;
1197 
1198 			if (src_binding_layout->type != VK_DESCRIPTOR_TYPE_SAMPLER) {
1199 				/* Sampler descriptors don't have a buffer list. */
1200 				dst_buffer_list[j] = src_buffer_list[j];
1201 			}
1202 		}
1203 	}
1204 }
1205 
radv_UpdateDescriptorSets(VkDevice _device,uint32_t descriptorWriteCount,const VkWriteDescriptorSet * pDescriptorWrites,uint32_t descriptorCopyCount,const VkCopyDescriptorSet * pDescriptorCopies)1206 void radv_UpdateDescriptorSets(
1207 	VkDevice                                    _device,
1208 	uint32_t                                    descriptorWriteCount,
1209 	const VkWriteDescriptorSet*                 pDescriptorWrites,
1210 	uint32_t                                    descriptorCopyCount,
1211 	const VkCopyDescriptorSet*                  pDescriptorCopies)
1212 {
1213 	RADV_FROM_HANDLE(radv_device, device, _device);
1214 
1215 	radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
1216 			            descriptorCopyCount, pDescriptorCopies);
1217 }
1218 
radv_CreateDescriptorUpdateTemplate(VkDevice _device,const VkDescriptorUpdateTemplateCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkDescriptorUpdateTemplate * pDescriptorUpdateTemplate)1219 VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
1220                                              const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
1221                                              const VkAllocationCallbacks *pAllocator,
1222                                              VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
1223 {
1224 	RADV_FROM_HANDLE(radv_device, device, _device);
1225 	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
1226 	const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
1227 	const size_t size = sizeof(struct radv_descriptor_update_template) +
1228 		sizeof(struct radv_descriptor_update_template_entry) * entry_count;
1229 	struct radv_descriptor_update_template *templ;
1230 	uint32_t i;
1231 
1232 	templ = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1233 	if (!templ)
1234 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1235 
1236 	vk_object_base_init(&device->vk, &templ->base,
1237 			    VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
1238 
1239 	templ->entry_count = entry_count;
1240 
1241 	if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
1242 		RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
1243 
1244 		/* descriptorSetLayout should be ignored for push descriptors
1245 		 * and instead it refers to pipelineLayout and set.
1246 		 */
1247 		assert(pCreateInfo->set < MAX_SETS);
1248 		set_layout = pipeline_layout->set[pCreateInfo->set].layout;
1249 
1250 		templ->bind_point = pCreateInfo->pipelineBindPoint;
1251 	}
1252 
1253 	for (i = 0; i < entry_count; i++) {
1254 		const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
1255 		const struct radv_descriptor_set_binding_layout *binding_layout =
1256 			set_layout->binding + entry->dstBinding;
1257 		const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
1258 		const uint32_t *immutable_samplers = NULL;
1259 		uint32_t dst_offset;
1260 		uint32_t dst_stride;
1261 
1262 		/* dst_offset is an offset into dynamic_descriptors when the descriptor
1263 		   is dynamic, and an offset into mapped_ptr otherwise */
1264 		switch (entry->descriptorType) {
1265 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1266 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1267 			assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
1268 			dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
1269 			dst_stride = 0; /* Not used */
1270 			break;
1271 		default:
1272 			switch (entry->descriptorType) {
1273 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1274 			case VK_DESCRIPTOR_TYPE_SAMPLER:
1275 				/* Immutable samplers are copied into push descriptors when they are pushed */
1276 				if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
1277 				    binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
1278 					immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
1279 				}
1280 				break;
1281 			default:
1282 				break;
1283 			}
1284 			dst_offset = binding_layout->offset / 4;
1285 			if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
1286 				dst_offset += entry->dstArrayElement / 4;
1287 			else
1288 				dst_offset += binding_layout->size * entry->dstArrayElement / 4;
1289 
1290 			dst_stride = binding_layout->size / 4;
1291 			break;
1292 		}

		templ->entry[i] = (struct radv_descriptor_update_template_entry) {
			.descriptor_type = entry->descriptorType,
			.descriptor_count = entry->descriptorCount,
			.src_offset = entry->offset,
			.src_stride = entry->stride,
			.dst_offset = dst_offset,
			.dst_stride = dst_stride,
			.buffer_offset = buffer_offset,
			.has_sampler = !binding_layout->immutable_samplers_offset,
			.sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout),
			.immutable_samplers = immutable_samplers
		};
	}

	*pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
	return VK_SUCCESS;
}
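
/* Illustrative sketch of the application-side input this function consumes;
 * not part of the driver. The app_* identifiers are hypothetical. One entry
 * describes a uniform-buffer binding whose source data is a tightly packed
 * array of VkDescriptorBufferInfo:
 *
 *	VkDescriptorUpdateTemplateEntry app_entry = {
 *		.dstBinding = 0,
 *		.dstArrayElement = 0,
 *		.descriptorCount = 2,
 *		.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *		.offset = 0,                              // becomes src_offset above
 *		.stride = sizeof(VkDescriptorBufferInfo), // becomes src_stride above
 *	};
 *	VkDescriptorUpdateTemplateCreateInfo app_info = {
 *		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
 *		.descriptorUpdateEntryCount = 1,
 *		.pDescriptorUpdateEntries = &app_entry,
 *		.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
 *		.descriptorSetLayout = app_set_layout,
 *	};
 *	vkCreateDescriptorUpdateTemplate(app_device, &app_info, NULL, &app_template);
 */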

void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

	if (!templ)
		return;

	vk_object_base_finish(&templ->base);
	vk_free2(&device->vk.alloc, pAllocator, templ);
}

void radv_update_descriptor_set_with_template(struct radv_device *device,
                                              struct radv_cmd_buffer *cmd_buffer,
                                              struct radv_descriptor_set *set,
                                              VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                              const void *pData)
{
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
	uint32_t i;

	for (i = 0; i < templ->entry_count; ++i) {
		struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
		uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
		const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
		uint32_t j;

		if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
			memcpy((uint8_t*)pDst, pSrc, templ->entry[i].descriptor_count);
			continue;
		}

		for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
			switch (templ->entry[i].descriptor_type) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				const unsigned idx = templ->entry[i].dst_offset + j;
				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
				                                buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
				                        (struct VkDescriptorBufferInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
				                              *(VkBufferView *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, cmd_buffer, 64, pDst, buffer_list,
				                       templ->entry[i].descriptor_type,
				                       (struct VkDescriptorImageInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, cmd_buffer, templ->entry[i].sampler_offset,
				                                        pDst, buffer_list, templ->entry[i].descriptor_type,
				                                        (struct VkDescriptorImageInfo *) pSrc,
				                                        templ->entry[i].has_sampler);
				if (templ->entry[i].immutable_samplers) {
					memcpy((char*)pDst + templ->entry[i].sampler_offset, templ->entry[i].immutable_samplers + 4 * j, 16);
				}
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				if (templ->entry[i].has_sampler)
					write_sampler_descriptor(device, pDst,
					                         (struct VkDescriptorImageInfo *) pSrc);
				else if (templ->entry[i].immutable_samplers)
					memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
				break;
			default:
				break;
			}
			pSrc += templ->entry[i].src_stride;
			pDst += templ->entry[i].dst_stride;
			++buffer_list;
		}
	}
}
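
/* Sketch of the pData blob the loop above walks (illustrative, assuming the
 * hypothetical app_entry from the earlier example: offset 0, stride
 * sizeof(VkDescriptorBufferInfo), descriptorCount 2). For array element j the
 * code reads pData + src_offset + j * src_stride, i.e. two packed structs:
 *
 *	VkDescriptorBufferInfo app_buffers[2] = {
 *		{ .buffer = app_buf0, .offset = 0, .range = VK_WHOLE_SIZE },
 *		{ .buffer = app_buf1, .offset = 0, .range = VK_WHOLE_SIZE },
 *	};
 */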

void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
                                          VkDescriptorSet descriptorSet,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const void *pData)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

	radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}
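
/* Application-side usage sketch (hypothetical handles); the loader dispatches
 * this call to the entry point above:
 *
 *	vkUpdateDescriptorSetWithTemplate(app_device, app_set, app_template,
 *	                                  app_buffers);
 */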


VkResult radv_CreateSamplerYcbcrConversion(VkDevice _device,
					   const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
					   const VkAllocationCallbacks* pAllocator,
					   VkSamplerYcbcrConversion* pYcbcrConversion)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_sampler_ycbcr_conversion *conversion = NULL;

	conversion = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*conversion), 8,
	                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (conversion == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &conversion->base,
			    VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);

	conversion->format = pCreateInfo->format;
	conversion->ycbcr_model = pCreateInfo->ycbcrModel;
	conversion->ycbcr_range = pCreateInfo->ycbcrRange;
	conversion->components = pCreateInfo->components;
	conversion->chroma_offsets[0] = pCreateInfo->xChromaOffset;
	conversion->chroma_offsets[1] = pCreateInfo->yChromaOffset;
	conversion->chroma_filter = pCreateInfo->chromaFilter;

	*pYcbcrConversion = radv_sampler_ycbcr_conversion_to_handle(conversion);
	return VK_SUCCESS;
}
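
/* Illustrative sketch (not driver code): a conversion created above is
 * consumed by chaining VkSamplerYcbcrConversionInfo into
 * VkSamplerCreateInfo::pNext. The app_* handles are hypothetical.
 *
 *	VkSamplerYcbcrConversionCreateInfo app_conv_info = {
 *		.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
 *		.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
 *		.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
 *		.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
 *		.chromaFilter = VK_FILTER_LINEAR,
 *	};
 *	vkCreateSamplerYcbcrConversion(app_device, &app_conv_info, NULL,
 *	                               &app_conversion);
 *
 *	VkSamplerYcbcrConversionInfo app_conv_ref = {
 *		.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO,
 *		.conversion = app_conversion,
 *	};
 *	// Chain app_conv_ref as the pNext of VkSamplerCreateInfo.
 */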


void radv_DestroySamplerYcbcrConversion(VkDevice _device,
					VkSamplerYcbcrConversion ycbcrConversion,
					const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);

	if (!ycbcr_conversion)
		return;

	vk_object_base_finish(&ycbcr_conversion->base);
	vk_free2(&device->vk.alloc, pAllocator, ycbcr_conversion);
}