1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2017 The Khronos Group Inc.
6  * Copyright (c) 2018 NVIDIA Corporation
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *	  http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan descriptor set tests
23  *//*--------------------------------------------------------------------*/
24 
25 // These tests generate random descriptor set layouts, where each descriptor
26 // set has a random number of bindings, each binding has a random array size
27 // and random descriptor type. The descriptor types are all backed by buffers
28 // or buffer views, and each buffer is filled with a unique integer starting
29 // from zero. The shader fetches from each descriptor (possibly using dynamic
30 // indexing of the descriptor array) and compares against the expected value.
31 //
32 // The different test cases vary the maximum number of descriptors used of
33 // each type. "Low" limit tests use the spec minimum maximum limit, "high"
34 // limit tests use up to 4k descriptors of the corresponding type. Test cases
35 // also vary the type indexing used, and shader stage.
36 
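// As a rough illustration (hypothetical names, not itself part of the test),
// the checking code generated into the shaders by initPrograms() below looks
// like:
//
//   int accum = 0, temp;
//   temp = ubo1_2[3].val;  // descriptor number N is backed by a buffer holding N
//   accum |= temp - N;     // any mismatch leaves a nonzero bit in accum
//   ...
//   ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);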
37 #include "vktBindingDescriptorSetRandomTests.hpp"
38 
39 #include "vkBufferWithMemory.hpp"
40 #include "vkImageWithMemory.hpp"
41 #include "vkQueryUtil.hpp"
42 #include "vkBuilderUtil.hpp"
43 #include "vkCmdUtil.hpp"
44 #include "vkTypeUtil.hpp"
45 #include "vkObjUtil.hpp"
46 #include "vkRayTracingUtil.hpp"
47 
48 #include "vktTestGroupUtil.hpp"
49 #include "vktTestCase.hpp"
50 
51 #include "deDefs.h"
52 #include "deMath.h"
53 #include "deRandom.h"
54 #include "deSharedPtr.hpp"
55 #include "deString.h"
56 
57 #include "tcuTestCase.hpp"
58 #include "tcuTestLog.hpp"
59 
60 #include <string>
61 #include <sstream>
62 #include <algorithm>
63 #include <map>
64 #include <utility>
65 #include <memory>
66 
67 namespace vkt
68 {
69 namespace BindingModel
70 {
71 namespace
72 {
73 using namespace vk;
74 using namespace std;
75 
76 static const deUint32 DIM = 8;
77 
78 #ifndef CTS_USES_VULKANSC
79 static const VkFlags	ALL_RAY_TRACING_STAGES	= VK_SHADER_STAGE_RAYGEN_BIT_KHR
80 												| VK_SHADER_STAGE_ANY_HIT_BIT_KHR
81 												| VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
82 												| VK_SHADER_STAGE_MISS_BIT_KHR
83 												| VK_SHADER_STAGE_INTERSECTION_BIT_KHR
84 												| VK_SHADER_STAGE_CALLABLE_BIT_KHR;
85 #endif
86 
87 typedef enum
88 {
89 	INDEX_TYPE_NONE = 0,
90 	INDEX_TYPE_CONSTANT,
91 	INDEX_TYPE_PUSHCONSTANT,
92 	INDEX_TYPE_DEPENDENT,
93 	INDEX_TYPE_RUNTIME_SIZE,
94 } IndexType;
95 
96 typedef enum
97 {
98 	STAGE_COMPUTE = 0,
99 	STAGE_VERTEX,
100 	STAGE_FRAGMENT,
101 	STAGE_RAYGEN_NV,
102 	STAGE_RAYGEN,
103 	STAGE_INTERSECT,
104 	STAGE_ANY_HIT,
105 	STAGE_CLOSEST_HIT,
106 	STAGE_MISS,
107 	STAGE_CALLABLE,
108 	STAGE_TASK,
109 	STAGE_MESH,
110 } Stage;
111 
112 typedef enum
113 {
114 	UPDATE_AFTER_BIND_DISABLED = 0,
115 	UPDATE_AFTER_BIND_ENABLED,
116 } UpdateAfterBind;
117 
118 struct DescriptorId
119 {
120 	DescriptorId (deUint32 set_, deUint32 binding_, deUint32 number_)
121 		: set(set_), binding(binding_), number(number_)
122 		{}
123 
124 	bool operator< (const DescriptorId& other) const
125 	{
126 		return (set < other.set || (set == other.set && (binding < other.binding || (binding == other.binding && number < other.number))));
127 	}
128 
129 	deUint32 set;
130 	deUint32 binding;
131 	deUint32 number;
132 };
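// DescriptorId compares lexicographically on (set, binding, number); for
// example, DescriptorId(0,1,2) < DescriptorId(0,2,0) < DescriptorId(1,0,0).
// This strict weak ordering is what allows DescriptorId to serve as the key
// of the descriptorWrites map below.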
133 
134 struct WriteInfo
135 {
136 	WriteInfo () : ptr(nullptr), expected(0u), writeGenerated(false) {}
137 
138 	deInt32*	ptr;
139 	deInt32		expected;
140 	bool		writeGenerated;
141 };
142 
143 bool isRayTracingStageKHR (const Stage stage)
144 {
145 	switch (stage)
146 	{
147 		case STAGE_COMPUTE:
148 		case STAGE_VERTEX:
149 		case STAGE_FRAGMENT:
150 		case STAGE_RAYGEN_NV:
151 		case STAGE_TASK:
152 		case STAGE_MESH:
153 			return false;
154 
155 		case STAGE_RAYGEN:
156 		case STAGE_INTERSECT:
157 		case STAGE_ANY_HIT:
158 		case STAGE_CLOSEST_HIT:
159 		case STAGE_MISS:
160 		case STAGE_CALLABLE:
161 			return true;
162 
163 		default: TCU_THROW(InternalError, "Unknown stage specified");
164 	}
165 }
166 
167 bool isMeshStage (Stage stage)
168 {
169 	return (stage == STAGE_TASK || stage == STAGE_MESH);
170 }
171 
172 bool isVertexPipelineStage (Stage stage)
173 {
174 	return (isMeshStage(stage) || stage == STAGE_VERTEX);
175 }
176 
177 #ifndef CTS_USES_VULKANSC
178 VkShaderStageFlagBits getShaderStageFlag (const Stage stage)
179 {
180 	switch (stage)
181 	{
182 		case STAGE_RAYGEN:		return VK_SHADER_STAGE_RAYGEN_BIT_KHR;
183 		case STAGE_ANY_HIT:		return VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
184 		case STAGE_CLOSEST_HIT:	return VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
185 		case STAGE_MISS:		return VK_SHADER_STAGE_MISS_BIT_KHR;
186 		case STAGE_INTERSECT:	return VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
187 		case STAGE_CALLABLE:	return VK_SHADER_STAGE_CALLABLE_BIT_KHR;
188 		default: TCU_THROW(InternalError, "Unknown stage specified");
189 	}
190 }
191 #endif
192 
193 VkShaderStageFlags getAllShaderStagesFor(Stage stage)
194 {
195 #ifndef CTS_USES_VULKANSC
196 	if (stage == STAGE_RAYGEN_NV)
197 		return VK_SHADER_STAGE_RAYGEN_BIT_NV;
198 
199 	if (isRayTracingStageKHR(stage))
200 		return ALL_RAY_TRACING_STAGES;
201 
202 	if (isMeshStage(stage))
203 		return (VK_SHADER_STAGE_MESH_BIT_EXT | ((stage == STAGE_TASK) ? VK_SHADER_STAGE_TASK_BIT_EXT : 0));
204 #else
205 	DE_UNREF(stage);
206 #endif // CTS_USES_VULKANSC
207 
208 	return (VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
209 }
210 
211 VkPipelineStageFlags getAllPipelineStagesFor(Stage stage)
212 {
213 #ifndef CTS_USES_VULKANSC
214 	if (stage == STAGE_RAYGEN_NV)
215 		return VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
216 
217 	if (isRayTracingStageKHR(stage))
218 		return VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
219 
220 	if (isMeshStage(stage))
221 		return (VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT | ((stage == STAGE_TASK) ? VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT : 0));
222 #else
223 	DE_UNREF(stage);
224 #endif // CTS_USES_VULKANSC
225 
226 	return (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
227 }
228 
229 bool usesAccelerationStructure (const Stage stage)
230 {
231 	return (isRayTracingStageKHR(stage) && stage != STAGE_RAYGEN && stage != STAGE_CALLABLE);
232 }
233 
234 class RandomLayout
235 {
236 public:
237 	RandomLayout(deUint32 numSets) :
238 		layoutBindings(numSets),
239 		layoutBindingFlags(numSets),
240 		arraySizes(numSets),
241 		variableDescriptorSizes(numSets)
242 		{
243 		}
244 
245 	// These three are indexed by [set][binding]
246 	vector<vector<VkDescriptorSetLayoutBinding> > layoutBindings;
247 	vector<vector<VkDescriptorBindingFlags> > layoutBindingFlags;
248 	vector<vector<deUint32> > arraySizes;
249 	// size of the variable descriptor (last) binding in each set
250 	vector<deUint32> variableDescriptorSizes;
251 
252 	// List of descriptors that will write the descriptor value instead of reading it.
253 	map<DescriptorId, WriteInfo> descriptorWrites;
254 
255 };
256 
257 struct CaseDef
258 {
259 	IndexType						indexType;
260 	deUint32						numDescriptorSets;
261 	deUint32						maxPerStageUniformBuffers;
262 	deUint32						maxUniformBuffersDynamic;
263 	deUint32						maxPerStageStorageBuffers;
264 	deUint32						maxStorageBuffersDynamic;
265 	deUint32						maxPerStageSampledImages;
266 	deUint32						maxPerStageStorageImages;
267 	deUint32						maxPerStageStorageTexelBuffers;
268 	deUint32						maxInlineUniformBlocks;
269 	deUint32						maxInlineUniformBlockSize;
270 	deUint32						maxPerStageInputAttachments;
271 	Stage							stage;
272 	UpdateAfterBind					uab;
273 	deUint32						seed;
274 	VkFlags							allShaderStages;
275 	VkFlags							allPipelineStages;
276 	// Shared by the test case and the test instance.
277 	std::shared_ptr<RandomLayout>	randomLayout;
278 };
279 
280 
281 class DescriptorSetRandomTestInstance : public TestInstance
282 {
283 public:
284 								DescriptorSetRandomTestInstance		(Context& context, const std::shared_ptr<CaseDef>& data);
285 								~DescriptorSetRandomTestInstance	(void);
286 	tcu::TestStatus				iterate								(void);
287 private:
288 	// Shared pointer because the test case and the test instance need to share the random layout information. Specifically, the
289 	// descriptorWrites map, which is filled from the test case and used by the test instance.
290 	std::shared_ptr<CaseDef>	m_data_ptr;
291 	CaseDef&					m_data;
292 };
293 
294 DescriptorSetRandomTestInstance::DescriptorSetRandomTestInstance (Context& context, const std::shared_ptr<CaseDef>& data)
295 	: vkt::TestInstance		(context)
296 	, m_data_ptr			(data)
297 	, m_data				(*m_data_ptr.get())
298 {
299 }
300 
301 DescriptorSetRandomTestInstance::~DescriptorSetRandomTestInstance (void)
302 {
303 }
304 
305 class DescriptorSetRandomTestCase : public TestCase
306 {
307 	public:
308 								DescriptorSetRandomTestCase		(tcu::TestContext& context, const char* name, const char* desc, const CaseDef& data);
309 								~DescriptorSetRandomTestCase	(void);
310 	virtual	void				initPrograms					(SourceCollections& programCollection) const;
311 	virtual TestInstance*		createInstance					(Context& context) const;
312 	virtual void				checkSupport					(Context& context) const;
313 
314 private:
315 	// See DescriptorSetRandomTestInstance about the need for a shared pointer here.
316 	std::shared_ptr<CaseDef>	m_data_ptr;
317 	CaseDef&					m_data;
318 };
319 
320 DescriptorSetRandomTestCase::DescriptorSetRandomTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef& data)
321 	: vkt::TestCase	(context, name, desc)
322 	, m_data_ptr	(std::make_shared<CaseDef>(data))
323 	, m_data		(*m_data_ptr.get())
324 {
325 }
326 
327 DescriptorSetRandomTestCase::~DescriptorSetRandomTestCase	(void)
328 {
329 }
330 
331 void DescriptorSetRandomTestCase::checkSupport(Context& context) const
332 {
333 	VkPhysicalDeviceProperties2 properties;
334 	deMemset(&properties, 0, sizeof(properties));
335 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
336 
337 #ifndef CTS_USES_VULKANSC
338 	void** pNextTail = &properties.pNext;
339 	// Get needed properties.
340 	VkPhysicalDeviceInlineUniformBlockPropertiesEXT inlineUniformProperties;
341 	deMemset(&inlineUniformProperties, 0, sizeof(inlineUniformProperties));
342 	inlineUniformProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT;
343 
344 	if (context.isDeviceFunctionalitySupported("VK_EXT_inline_uniform_block"))
345 	{
346 		*pNextTail = &inlineUniformProperties;
347 		pNextTail = &inlineUniformProperties.pNext;
348 	}
349 	*pNextTail = NULL;
350 #endif
351 
352 	context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
353 
354 	// Get needed features.
355 	auto features				= context.getDeviceFeatures2();
356 	auto indexingFeatures		= context.getDescriptorIndexingFeatures();
357 #ifndef CTS_USES_VULKANSC
358 	auto inlineUniformFeatures	= context.getInlineUniformBlockFeatures();
359 #endif
360 
361 	// Check needed properties and features
362 	if (isVertexPipelineStage(m_data.stage) && !features.features.vertexPipelineStoresAndAtomics)
363 	{
364 		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
365 	}
366 #ifndef CTS_USES_VULKANSC
367 	else if (m_data.stage == STAGE_RAYGEN_NV)
368 	{
369 		context.requireDeviceFunctionality("VK_NV_ray_tracing");
370 	}
371 	else if (isRayTracingStageKHR(m_data.stage))
372 	{
373 		context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
374 		context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
375 
376 		const VkPhysicalDeviceRayTracingPipelineFeaturesKHR&	rayTracingPipelineFeaturesKHR = context.getRayTracingPipelineFeatures();
377 		if (rayTracingPipelineFeaturesKHR.rayTracingPipeline == DE_FALSE)
378 			TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipeline");
379 
380 		const VkPhysicalDeviceAccelerationStructureFeaturesKHR&	accelerationStructureFeaturesKHR = context.getAccelerationStructureFeatures();
381 		if (accelerationStructureFeaturesKHR.accelerationStructure == DE_FALSE)
382 			TCU_THROW(TestError, "VK_KHR_ray_tracing_pipeline requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure");
383 	}
384 
385 	if (isMeshStage(m_data.stage))
386 	{
387 		const auto& meshFeatures = context.getMeshShaderFeaturesEXT();
388 
389 		if (!meshFeatures.meshShader)
390 			TCU_THROW(NotSupportedError, "Mesh shaders not supported");
391 
392 		if (m_data.stage == STAGE_TASK && !meshFeatures.taskShader)
393 			TCU_THROW(NotSupportedError, "Task shaders not supported");
394 	}
395 #endif
396 
397 	// Note binding 0 in set 0 is the output storage image, always present and not subject to dynamic indexing.
398 	if ((m_data.indexType == INDEX_TYPE_PUSHCONSTANT ||
399 		 m_data.indexType == INDEX_TYPE_DEPENDENT ||
400 		 m_data.indexType == INDEX_TYPE_RUNTIME_SIZE) &&
401 		((m_data.maxPerStageUniformBuffers > 0u && !features.features.shaderUniformBufferArrayDynamicIndexing) ||
402 		 (m_data.maxPerStageStorageBuffers > 0u && !features.features.shaderStorageBufferArrayDynamicIndexing) ||
403 		 (m_data.maxPerStageStorageImages > 1u && !features.features.shaderStorageImageArrayDynamicIndexing) ||
404 		 (m_data.stage == STAGE_FRAGMENT && m_data.maxPerStageInputAttachments > 0u && (!indexingFeatures.shaderInputAttachmentArrayDynamicIndexing)) ||
405 		 (m_data.maxPerStageSampledImages > 0u && !indexingFeatures.shaderUniformTexelBufferArrayDynamicIndexing) ||
406 		 (m_data.maxPerStageStorageTexelBuffers > 0u && !indexingFeatures.shaderStorageTexelBufferArrayDynamicIndexing)))
407 	{
408 		TCU_THROW(NotSupportedError, "Dynamic indexing not supported");
409 	}
410 
411 	if (m_data.numDescriptorSets > properties.properties.limits.maxBoundDescriptorSets)
412 	{
413 		TCU_THROW(NotSupportedError, "Number of descriptor sets not supported");
414 	}
415 
416 	if ((m_data.maxPerStageUniformBuffers + m_data.maxPerStageStorageBuffers +
417 		m_data.maxPerStageSampledImages + m_data.maxPerStageStorageImages +
418 		m_data.maxPerStageStorageTexelBuffers + m_data.maxPerStageInputAttachments) >
419 		properties.properties.limits.maxPerStageResources)
420 	{
421 		TCU_THROW(NotSupportedError, "Number of descriptors not supported");
422 	}
423 
424 	if (m_data.maxPerStageUniformBuffers		> properties.properties.limits.maxPerStageDescriptorUniformBuffers ||
425 		m_data.maxPerStageStorageBuffers		> properties.properties.limits.maxPerStageDescriptorStorageBuffers ||
426 		m_data.maxUniformBuffersDynamic			> properties.properties.limits.maxDescriptorSetUniformBuffersDynamic ||
427 		m_data.maxStorageBuffersDynamic			> properties.properties.limits.maxDescriptorSetStorageBuffersDynamic ||
428 		m_data.maxPerStageSampledImages			> properties.properties.limits.maxPerStageDescriptorSampledImages ||
429 		(m_data.maxPerStageStorageImages +
430 		 m_data.maxPerStageStorageTexelBuffers)	> properties.properties.limits.maxPerStageDescriptorStorageImages ||
431 		m_data.maxPerStageInputAttachments		> properties.properties.limits.maxPerStageDescriptorInputAttachments)
432 	{
433 		TCU_THROW(NotSupportedError, "Number of descriptors not supported");
434 	}
435 
436 #ifndef CTS_USES_VULKANSC
437 	if (m_data.maxInlineUniformBlocks != 0 &&
438 		!inlineUniformFeatures.inlineUniformBlock)
439 	{
440 		TCU_THROW(NotSupportedError, "Inline uniform blocks not supported");
441 	}
442 
443 	if (m_data.maxInlineUniformBlocks > inlineUniformProperties.maxPerStageDescriptorInlineUniformBlocks)
444 	{
445 		TCU_THROW(NotSupportedError, "Number of inline uniform blocks not supported");
446 	}
447 
448 	if (m_data.maxInlineUniformBlocks != 0 &&
449 		m_data.maxInlineUniformBlockSize > inlineUniformProperties.maxInlineUniformBlockSize)
450 	{
451 		TCU_THROW(NotSupportedError, "Inline uniform block size not supported");
452 	}
453 #endif
454 
455 	if (m_data.indexType == INDEX_TYPE_RUNTIME_SIZE &&
456 		!indexingFeatures.runtimeDescriptorArray)
457 	{
458 		TCU_THROW(NotSupportedError, "runtimeDescriptorArray not supported");
459 	}
460 }
461 
462 // Return a random value in the range [min, max]
463 deInt32 randRange(deRandom *rnd, deInt32 min, deInt32 max)
464 {
465 	if (max < 0)
466 		return 0;
467 
468 	return (deRandom_getUint32(rnd) % (max - min + 1)) + min;
469 }
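// Both endpoints are inclusive: e.g. randRange(&rnd, 1, 2) yields 1 or 2 with
// equal probability, since the result is min plus a value modulo (max - min + 1).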
470 
471 void chooseWritesRandomly(vk::VkDescriptorType type, RandomLayout& randomLayout, deRandom& rnd, deUint32 set, deUint32 binding, deUint32 count)
472 {
473 	// Make sure the type supports writes.
474 	switch (type)
475 	{
476 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
477 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
478 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
479 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
480 		break;
481 	default:
482 		DE_ASSERT(false);
483 		break;
484 	}
485 
486 	for (deUint32 i = 0u; i < count; ++i)
487 	{
488 		// 1/2 chance of being a write.
489 		if (randRange(&rnd, 1, 2) == 1)
490 			randomLayout.descriptorWrites[DescriptorId(set, binding, i)] = {};
491 	}
492 }
493 
494 void generateRandomLayout(RandomLayout& randomLayout, const CaseDef &caseDef, deRandom& rnd)
495 {
496 	// Count the number of each resource type, to avoid overflowing the limits.
497 	deUint32 numUBO = 0;
498 	deUint32 numUBODyn = 0;
499 	deUint32 numSSBO = 0;
500 	deUint32 numSSBODyn = 0;
501 	deUint32 numImage = 0;
502 	deUint32 numStorageTex = 0;
503 	deUint32 numTexBuffer = 0;
504 #ifndef CTS_USES_VULKANSC
505 	deUint32 numInlineUniformBlocks = 0;
506 #endif
507 	deUint32 numInputAttachments = 0;
508 
509 	// TODO: Consider varying these
510 	deUint32 minBindings = 0;
511 	// Try to keep the workload roughly constant while exercising higher numbered sets.
512 	deUint32 maxBindings = 128u / caseDef.numDescriptorSets;
513 	// No larger than 32 elements for dynamic indexing tests, due to 128B limit
514 	// for push constants (used for the indices)
515 	deUint32 maxArray = caseDef.indexType == INDEX_TYPE_NONE ? 0 : 32;
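	// (32 indices * 4 bytes each == 128 bytes, the minimum guaranteed
	// maxPushConstantsSize; the indices live in the "int identity[32]" push
	// constant block declared in initPrograms().)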
516 
517 	// Each set has a random number of bindings, each binding has a random
518 	// array size and a random descriptor type.
519 	for (deUint32 s = 0; s < caseDef.numDescriptorSets; ++s)
520 	{
521 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
522 		vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
523 		vector<deUint32> &arraySizes = randomLayout.arraySizes[s];
524 		int numBindings = randRange(&rnd, minBindings, maxBindings);
525 
526 		// Guarantee room for the output image
527 		if (s == 0 && numBindings == 0)
528 		{
529 			numBindings = 1;
530 		}
531 		// Guarantee room for the raytracing acceleration structure
532 		if (s == 0 && numBindings < 2 && usesAccelerationStructure(caseDef.stage))
533 		{
534 			numBindings = 2;
535 		}
536 
537 		bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
538 		bindingsFlags = vector<VkDescriptorBindingFlags>(numBindings);
539 		arraySizes = vector<deUint32>(numBindings);
540 	}
541 
542 	// BUFFER_DYNAMIC descriptor types cannot be used with VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT bindings in one set
543 	bool allowDynamicBuffers = caseDef.uab != UPDATE_AFTER_BIND_ENABLED;
544 
545 	// Iterate over bindings first, then over sets. This prevents the low-limit bindings
546 	// from getting clustered in low-numbered sets.
547 	for (deUint32 b = 0; b <= maxBindings; ++b)
548 	{
549 		for (deUint32 s = 0; s < caseDef.numDescriptorSets; ++s)
550 		{
551 			vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
552 			vector<deUint32> &arraySizes = randomLayout.arraySizes[s];
553 
554 			if (b >= bindings.size())
555 			{
556 				continue;
557 			}
558 
559 			VkDescriptorSetLayoutBinding &binding = bindings[b];
560 			binding.binding = b;
561 			binding.pImmutableSamplers = NULL;
562 			binding.stageFlags = caseDef.allShaderStages;
563 
564 			// Output image
565 			if (s == 0 && b == 0)
566 			{
567 				binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
568 				binding.descriptorCount = 1;
569 				binding.stageFlags = caseDef.allShaderStages;
570 				numImage++;
571 				arraySizes[b] = 0;
572 				continue;
573 			}
574 
575 #ifndef CTS_USES_VULKANSC
576 			// Raytracing acceleration structure
577 			if (s == 0 && b == 1 && usesAccelerationStructure(caseDef.stage))
578 			{
579 				binding.descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
580 				binding.descriptorCount = 1;
581 				binding.stageFlags = caseDef.allShaderStages;
582 				arraySizes[b] = 0;
583 				continue;
584 			}
585 #endif
586 
587 			binding.descriptorCount = 0;
588 
589 			// Select a random type of descriptor.
590 			std::map<int, vk::VkDescriptorType> intToType;
591 			{
592 				int index = 0;
593 				intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
594 				intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
595 				intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
596 				intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
597 				intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
598 #ifndef CTS_USES_VULKANSC
599 				intToType[index++] = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
600 #endif
601 				if (caseDef.stage == STAGE_FRAGMENT)
602 				{
603 					intToType[index++] = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
604 				}
605 				if (allowDynamicBuffers)
606 				{
607 					intToType[index++] = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
608 					intToType[index++] = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
609 				}
610 			}
611 
612 			int r = randRange(&rnd, 0, static_cast<int>(intToType.size() - 1));
613 			DE_ASSERT(r >= 0 && static_cast<size_t>(r) < intToType.size());
614 
615 			// Add a binding for that descriptor type if possible.
616 			binding.descriptorType = intToType[r];
617 			switch (binding.descriptorType)
618 			{
619 			default: DE_ASSERT(0); // Fallthrough
620 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
621 				if (numUBO < caseDef.maxPerStageUniformBuffers)
622 				{
623 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageUniformBuffers - numUBO));
624 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
625 					numUBO += binding.descriptorCount;
626 				}
627 				break;
628 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
629 				if (numSSBO < caseDef.maxPerStageStorageBuffers)
630 				{
631 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageBuffers - numSSBO));
632 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
633 					numSSBO += binding.descriptorCount;
634 
635 					chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
636 				}
637 				break;
638 			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
639 				if (numStorageTex < caseDef.maxPerStageStorageTexelBuffers)
640 				{
641 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageTexelBuffers - numStorageTex));
642 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
643 					numStorageTex += binding.descriptorCount;
644 
645 					chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
646 				}
647 				break;
648 			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
649 				if (numImage < caseDef.maxPerStageStorageImages)
650 				{
651 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageStorageImages - numImage));
652 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
653 					numImage += binding.descriptorCount;
654 
655 					chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
656 				}
657 				break;
658 			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
659 				if (numTexBuffer < caseDef.maxPerStageSampledImages)
660 				{
661 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageSampledImages - numTexBuffer));
662 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
663 					numTexBuffer += binding.descriptorCount;
664 				}
665 				break;
666 #ifndef CTS_USES_VULKANSC
667 			case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
668 				if (caseDef.maxInlineUniformBlocks > 0)
669 				{
670 					if (numInlineUniformBlocks < caseDef.maxInlineUniformBlocks)
671 					{
672 						arraySizes[b] = randRange(&rnd, 1, (caseDef.maxInlineUniformBlockSize - 16) / 16); // subtract 16 for "ivec4 unused"
673 						arraySizes[b] = de::min(maxArray, arraySizes[b]);
674 						binding.descriptorCount = (arraySizes[b] ? arraySizes[b] : 1) * 16 + 16; // add 16 for "ivec4 unused"
675 						numInlineUniformBlocks++;
676 					}
677 					else
678 					{
679 						// The meaning of descriptorCount for inline uniform blocks is different from usual:
680 						// (descriptorCount == 0) does not mean the binding will be discarded.
681 						// So we use a trick similar to the one below and replace it with a different descriptor type.
682 						binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
683 					}
684 				}
685 				else
686 				{
687 					// Plug in an unused descriptor type, so validation layers that don't
688 					// support inline_uniform_block don't crash.
689 					binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
690 				}
691 				break;
692 #endif
693 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
694 				if (numUBODyn < caseDef.maxUniformBuffersDynamic &&
695 					numUBO < caseDef.maxPerStageUniformBuffers)
696 				{
697 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, de::min(caseDef.maxUniformBuffersDynamic - numUBODyn,
698 																				 caseDef.maxPerStageUniformBuffers - numUBO)));
699 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
700 					numUBO += binding.descriptorCount;
701 					numUBODyn += binding.descriptorCount;
702 				}
703 				break;
704 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
705 				if (numSSBODyn < caseDef.maxStorageBuffersDynamic &&
706 					numSSBO < caseDef.maxPerStageStorageBuffers)
707 				{
708 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, de::min(caseDef.maxStorageBuffersDynamic - numSSBODyn,
709 																				 caseDef.maxPerStageStorageBuffers - numSSBO)));
710 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
711 					numSSBO += binding.descriptorCount;
712 					numSSBODyn += binding.descriptorCount;
713 
714 					chooseWritesRandomly(binding.descriptorType, randomLayout, rnd, s, b, binding.descriptorCount);
715 				}
716 				break;
717 			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
718 				if (numInputAttachments < caseDef.maxPerStageInputAttachments)
719 				{
720 					arraySizes[b] = randRange(&rnd, 0, de::min(maxArray, caseDef.maxPerStageInputAttachments - numInputAttachments));
721 					binding.descriptorCount = arraySizes[b] ? arraySizes[b] : 1;
722 					numInputAttachments += binding.descriptorCount;
723 				}
724 				break;
725 			}
726 
727 			binding.stageFlags = ((binding.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) ? (VkFlags)(VK_SHADER_STAGE_FRAGMENT_BIT) : caseDef.allShaderStages);
728 		}
729 	}
730 
731 	for (deUint32 s = 0; s < caseDef.numDescriptorSets; ++s)
732 	{
733 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
734 		vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
735 		vector<deUint32> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
736 
737 		// Choose a variable descriptor count size. If the feature is not supported, we'll just
738 		// allocate the whole thing later on.
739 		if (bindings.size() > 0 &&
740 			bindings[bindings.size()-1].descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC &&
741 			bindings[bindings.size()-1].descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC &&
742 			bindings[bindings.size()-1].descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT &&
743 #ifndef CTS_USES_VULKANSC
744 			bindings[bindings.size()-1].descriptorType != VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR &&
745 #endif
746 			bindings[bindings.size()-1].descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
747 			!(s == 0 && bindings.size() == 1) && // Don't cut out the output image binding
748 			randRange(&rnd, 1,4) == 1) // 1 in 4 chance
749 		{
750 
751 			bindingsFlags[bindings.size()-1] |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
752 			variableDescriptorSizes[s] = randRange(&rnd, 0,bindings[bindings.size()-1].descriptorCount);
753 #ifndef CTS_USES_VULKANSC
754 			if (bindings[bindings.size()-1].descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
755 			{
756 				// keep a multiple of 16B
757 				variableDescriptorSizes[s] &= ~0xF;
758 			}
759 #endif
760 		}
761 	}
762 }
763 
764 class CheckDecider
765 {
766 public:
767 	CheckDecider (deRandom& rnd, deUint32 descriptorCount)
768 		: m_rnd(rnd)
769 		, m_count(descriptorCount)
770 		, m_remainder(0u)
771 		, m_have_remainder(false)
772 	{
773 	}
774 
775 	bool shouldCheck (deUint32 arrayIndex)
776 	{
777 		// Always check the first 3 and the last one, at least.
778 		if (arrayIndex <= 2u || arrayIndex == m_count - 1u)
779 			return true;
780 
781 		if (!m_have_remainder)
782 		{
783 			// Find a random remainder for this set and binding.
784 			DE_ASSERT(m_count >= kRandomChecksPerBinding);
785 
786 			// Because the divisor will be m_count/kRandomChecksPerBinding and the remainder will be chosen randomly for the
787 			// divisor, we expect to check around kRandomChecksPerBinding descriptors per binding randomly, no matter the amount of
788 			// descriptors in the binding.
789 			m_remainder = static_cast<deUint32>(randRange(&m_rnd, 0, static_cast<deInt32>((m_count / kRandomChecksPerBinding) - 1)));
790 			m_have_remainder = true;
791 		}
792 
793 		return (arrayIndex % (m_count / kRandomChecksPerBinding) == m_remainder);
794 	}
795 
796 private:
797 	static constexpr deUint32 kRandomChecksPerBinding = 4u;
798 
799 	deRandom&	m_rnd;
800 	deUint32	m_count;
801 	deUint32	m_remainder;
802 	bool		m_have_remainder;
803 };
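// Worked example with hypothetical numbers: for m_count == 32 and
// kRandomChecksPerBinding == 4 the divisor is 8, and m_remainder is drawn from
// [0, 7], so shouldCheck() returns true for roughly every 8th array index
// (about 4 random hits per binding) on top of indices 0..2 and the last index,
// which are always checked.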
804 
805 void DescriptorSetRandomTestCase::initPrograms (SourceCollections& programCollection) const
806 {
807 	const vk::ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);
808 
809 	deRandom rnd;
810 	deRandom_init(&rnd, m_data.seed);
811 
812 	m_data.randomLayout.reset(new RandomLayout(m_data.numDescriptorSets));
813 	RandomLayout& randomLayout = *m_data.randomLayout.get();
814 	generateRandomLayout(randomLayout, m_data, rnd);
815 
816 	std::stringstream decls, checks;
817 
818 	deUint32 inputAttachments	= 0;
819 	deUint32 descriptor			= 0;
820 
821 	for (deUint32 s = 0; s < m_data.numDescriptorSets; ++s)
822 	{
823 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
824 		vector<VkDescriptorBindingFlags> bindingsFlags = randomLayout.layoutBindingFlags[s];
825 		vector<deUint32> &arraySizes = randomLayout.arraySizes[s];
826 		vector<deUint32> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
827 
828 		for (size_t b = 0; b < bindings.size(); ++b)
829 		{
830 			VkDescriptorSetLayoutBinding &binding = bindings[b];
831 			deUint32 descriptorIncrement = 1;
832 #ifndef CTS_USES_VULKANSC
833 			if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
834 				descriptorIncrement = 16;
835 #endif
836 
837 			// Construct the declaration for the binding
838 			if (binding.descriptorCount > 0)
839 			{
840 				std::stringstream array;
841 				if (m_data.indexType == INDEX_TYPE_RUNTIME_SIZE
842 #ifndef CTS_USES_VULKANSC
843 					&& binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
844 #endif
845 					)
846 				{
847 					if (arraySizes[b])
848 					{
849 						array << "[]";
850 					}
851 				}
852 				else
853 				{
854 					if (arraySizes[b])
855 					{
856 						array << "[" << arraySizes[b] << "]";
857 					}
858 				}
859 
860 				switch (binding.descriptorType)
861 				{
862 #ifndef CTS_USES_VULKANSC
863 				case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
864 					decls << "layout(set = " << s << ", binding = " << b << ") uniform inlineubodef" << s << "_" << b << " { ivec4 unused; int val" << array.str() << "; } inlineubo" << s << "_" << b << ";\n";
865 					break;
866 #endif
867 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
868 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
869 					decls << "layout(set = " << s << ", binding = " << b << ") uniform ubodef" << s << "_" << b << " { int val; } ubo" << s << "_" << b << array.str()  << ";\n";
870 					break;
871 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
872 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
873 					decls << "layout(set = " << s << ", binding = " << b << ") buffer sbodef" << s << "_" << b << " { int val; } ssbo" << s << "_" << b << array.str()  << ";\n";
874 					break;
875 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
876 					decls << "layout(set = " << s << ", binding = " << b << ") uniform itextureBuffer texbo" << s << "_" << b << array.str()  << ";\n";
877 					break;
878 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
879 					decls << "layout(r32i, set = " << s << ", binding = " << b << ") uniform iimageBuffer image" << s << "_" << b << array.str()  << ";\n";
880 					break;
881 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
882 					decls << "layout(r32i, set = " << s << ", binding = " << b << ") uniform iimage2D simage" << s << "_" << b << array.str()  << ";\n";
883 					break;
884 				case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
885 					decls << "layout(input_attachment_index = " << inputAttachments << ", set = " << s << ", binding = " << b << ") uniform isubpassInput attachment" << s << "_" << b << array.str()  << ";\n";
886 					inputAttachments += binding.descriptorCount;
887 					break;
888 #ifndef CTS_USES_VULKANSC
889 				case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
890 					DE_ASSERT(s == 0 && b == 1);
891 					DE_ASSERT(bindings.size() >= 2);
892 					decls << "layout(set = " << s << ", binding = " << b << ") uniform accelerationStructureEXT as" << s << "_" << b << ";\n";
893 					break;
894 #endif
895 				default: DE_ASSERT(0);
896 				}
897 
898 				const deUint32	arraySize		= de::max(1u, arraySizes[b]);
899 				CheckDecider	checkDecider	(rnd, arraySize);
900 
901 				for (deUint32 ai = 0; ai < arraySize; ++ai, descriptor += descriptorIncrement)
902 				{
903 					// Don't access descriptors past the end of the allocated range for
904 					// variable descriptor count
905 					if (b == bindings.size() - 1 &&
906 						(bindingsFlags[b] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT))
907 					{
908 #ifndef CTS_USES_VULKANSC
909 						if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
910 						{
911 							// Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
912 							const deUint32 uboRange = ai*16 + 16;
913 							if (uboRange >= variableDescriptorSizes[s])
914 								continue;
915 						}
916 						else
917 #endif
918 						{
919 							if (ai >= variableDescriptorSizes[s])
920 								continue;
921 						}
922 					}
923 
924 					if (s == 0 && b == 0)
925 					{
926 						// This is the output image, skip.
927 						continue;
928 					}
929 
930 					if (s == 0 && b == 1 && usesAccelerationStructure(m_data.stage))
931 					{
932 						// This is the raytracing acceleration structure, skip.
933 						continue;
934 					}
935 
936 					if (checkDecider.shouldCheck(ai))
937 					{
938 						// Check that the value in the descriptor equals its descriptor number.
939 						// i.e. check "ubo[c].val == descriptor" or "ubo[pushconst[c]].val == descriptor"
940 						// When doing a write check, write the descriptor number in the value.
941 
942 						// First, construct the index. This can be a constant literal, a value
943 						// from a push constant, or a function of the previous descriptor value.
944 						std::stringstream ind;
945 						switch (m_data.indexType)
946 						{
947 						case INDEX_TYPE_NONE:
948 						case INDEX_TYPE_CONSTANT:
949 							// The index is just the constant literal
950 							if (arraySizes[b])
951 							{
952 								ind << "[" << ai << "]";
953 							}
954 							break;
955 						case INDEX_TYPE_PUSHCONSTANT:
956 							// identity is an int[], directly index it
957 							if (arraySizes[b])
958 							{
959 								ind << "[pc.identity[" << ai << "]]";
960 							}
961 							break;
962 						case INDEX_TYPE_RUNTIME_SIZE:
963 						case INDEX_TYPE_DEPENDENT:
964 							// Index is a function of the previous return value (which is reset to zero)
965 							if (arraySizes[b])
966 							{
967 								ind << "[accum + " << ai << "]";
968 							}
969 							break;
970 						default: DE_ASSERT(0);
971 						}
972 
973 						const DescriptorId	descriptorId	(s, static_cast<deUint32>(b), ai);
974 						auto				writesItr		= randomLayout.descriptorWrites.find(descriptorId);
975 
976 						if (writesItr == randomLayout.descriptorWrites.end())
977 						{
978 							// Fetch from the descriptor.
979 							switch (binding.descriptorType)
980 							{
981 #ifndef CTS_USES_VULKANSC
982 							case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
983 								checks << "  temp = inlineubo" << s << "_" << b << ".val" << ind.str() << ";\n";
984 								break;
985 #endif
986 							case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
987 							case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
988 								checks << "  temp = ubo" << s << "_" << b << ind.str() << ".val;\n";
989 								break;
990 							case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
991 							case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
992 								checks << "  temp = ssbo" << s << "_" << b << ind.str() << ".val;\n";
993 								break;
994 							case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
995 								checks << "  temp = texelFetch(texbo" << s << "_" << b << ind.str() << ", 0).x;\n";
996 								break;
997 							case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
998 								checks << "  temp = imageLoad(image" << s << "_" << b << ind.str() << ", 0).x;\n";
999 								break;
1000 							case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1001 								checks << "  temp = imageLoad(simage" << s << "_" << b << ind.str() << ", ivec2(0, 0)).x;\n";
1002 								break;
1003 							case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1004 								checks << "  temp = subpassLoad(attachment" << s << "_" << b << ind.str() << ").r;\n";
1005 								break;
1006 							default: DE_ASSERT(0);
1007 							}
1008 
1009 							// Accumulate any incorrect values.
1010 							checks << "  accum |= temp - " << descriptor << ";\n";
1011 						}
1012 						else
1013 						{
1014 							// Check descriptor write. We need to confirm we are actually generating write code for this descriptor.
1015 							writesItr->second.writeGenerated = true;
1016 
1017 							// Assign each write operation to a single invocation to avoid race conditions.
1018 							const auto			expectedInvocationID	= descriptor % (DIM*DIM);
1019 							const std::string	writeCond				= "if (" + de::toString(expectedInvocationID) + " == invocationID)";
1020 
1021 							switch (binding.descriptorType)
1022 							{
1023 							case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1024 							case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1025 								checks << "  " << writeCond << " ssbo" << s << "_" << b << ind.str() << ".val = " << descriptor << ";\n";
1026 								break;
1027 							case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1028 								checks << "  " << writeCond << " imageStore(image" << s << "_" << b << ind.str() << ", 0, ivec4(" << descriptor << ", 0, 0, 0));\n";
1029 								break;
1030 							case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1031 								checks << "  " << writeCond << " imageStore(simage" << s << "_" << b << ind.str() << ", ivec2(0, 0), ivec4(" << descriptor << ", 0, 0, 0));\n";
1032 								break;
1033 							default: DE_ASSERT(0);
1034 							}
1035 						}
1036 					}
1037 				}
1038 			}
1039 		}
1040 	}
1041 
1042 	std::stringstream pushdecl;
1043 	switch (m_data.indexType)
1044 	{
1045 	case INDEX_TYPE_PUSHCONSTANT:
1046 		pushdecl << "layout (push_constant, std430) uniform Block { int identity[32]; } pc;\n";
1047 		break;
1048 	default: DE_ASSERT(0);
1049 	case INDEX_TYPE_NONE:
1050 	case INDEX_TYPE_CONSTANT:
1051 	case INDEX_TYPE_DEPENDENT:
1052 	case INDEX_TYPE_RUNTIME_SIZE:
1053 		break;
1054 	}
1055 
1056 
1057 	switch (m_data.stage)
1058 	{
1059 	default: DE_ASSERT(0); // Fallthrough
1060 	case STAGE_COMPUTE:
1061 		{
1062 			std::stringstream css;
1063 			css <<
1064 				"#version 450 core\n"
1065 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1066 				<< pushdecl.str()
1067 				<< decls.str() <<
1068 				"layout(local_size_x = 1, local_size_y = 1) in;\n"
1069 				"void main()\n"
1070 				"{\n"
1071 				"  const int invocationID = int(gl_GlobalInvocationID.y) * " << DIM << " + int(gl_GlobalInvocationID.x);\n"
1072 				"  int accum = 0, temp;\n"
1073 				<< checks.str() <<
1074 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1075 				"  imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1076 				"}\n";
1077 
1078 			programCollection.glslSources.add("test") << glu::ComputeSource(css.str());
1079 			break;
1080 		}
1081 #ifndef CTS_USES_VULKANSC
1082 	case STAGE_RAYGEN_NV:
1083 	{
1084 		std::stringstream css;
1085 		css <<
1086 			"#version 460 core\n"
1087 			"#extension GL_EXT_nonuniform_qualifier : enable\n"
1088 			"#extension GL_NV_ray_tracing : require\n"
1089 			<< pushdecl.str()
1090 			<< decls.str() <<
1091 			"void main()\n"
1092 			"{\n"
1093 			"  const int invocationID = int(gl_LaunchIDNV.y) * " << DIM << " + int(gl_LaunchIDNV.x);\n"
1094 			"  int accum = 0, temp;\n"
1095 			<< checks.str() <<
1096 			"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1097 			"  imageStore(simage0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1098 			"}\n";
1099 
1100 		programCollection.glslSources.add("test") << glu::RaygenSource(css.str());
1101 		break;
1102 	}
1103 	case STAGE_RAYGEN:
1104 	{
1105 		std::stringstream css;
1106 		css <<
1107 			"#version 460 core\n"
1108 			"#extension GL_EXT_nonuniform_qualifier : enable\n"
1109 			"#extension GL_EXT_ray_tracing : require\n"
1110 			<< pushdecl.str()
1111 			<< decls.str() <<
1112 			"void main()\n"
1113 			"{\n"
1114 			"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1115 			"  int accum = 0, temp;\n"
1116 			<< checks.str() <<
1117 			"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1118 			"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1119 			"}\n";
1120 
1121 		programCollection.glslSources.add("test") << glu::RaygenSource(updateRayTracingGLSL(css.str())) << buildOptions;
1122 		break;
1123 	}
1124 	case STAGE_INTERSECT:
1125 	{
1126 		{
1127 			programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
1128 		}
1129 
1130 		{
1131 			std::stringstream css;
1132 			css <<
1133 				"#version 460 core\n"
1134 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1135 				"#extension GL_EXT_ray_tracing : require\n"
1136 				"hitAttributeEXT vec3 hitAttribute;\n"
1137 				<< pushdecl.str()
1138 				<< decls.str() <<
1139 				"void main()\n"
1140 				"{\n"
1141 				"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1142 				"  int accum = 0, temp;\n"
1143 				<< checks.str() <<
1144 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1145 				"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1146 				"  hitAttribute = vec3(0.0f, 0.0f, 0.0f);\n"
1147 				"  reportIntersectionEXT(1.0f, 0);\n"
1148 				"}\n";
1149 
1150 			programCollection.glslSources.add("test") << glu::IntersectionSource(updateRayTracingGLSL(css.str())) << buildOptions;
1151 		}
1152 
1153 		break;
1154 	}
1155 	case STAGE_ANY_HIT:
1156 	{
1157 		{
1158 			programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
1159 		}
1160 
1161 		{
1162 			std::stringstream css;
1163 			css <<
1164 				"#version 460 core\n"
1165 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1166 				"#extension GL_EXT_ray_tracing : require\n"
1167 				"layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
1168 				"hitAttributeEXT vec3 attribs;\n"
1169 				<< pushdecl.str()
1170 				<< decls.str() <<
1171 				"void main()\n"
1172 				"{\n"
1173 				"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1174 				"  int accum = 0, temp;\n"
1175 				<< checks.str() <<
1176 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1177 				"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1178 				"}\n";
1179 
1180 			programCollection.glslSources.add("test") << glu::AnyHitSource(updateRayTracingGLSL(css.str())) << buildOptions;
1181 		}
1182 
1183 		break;
1184 	}
1185 	case STAGE_CLOSEST_HIT:
1186 	{
1187 		{
1188 			programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
1189 		}
1190 
1191 		{
1192 			std::stringstream css;
1193 			css <<
1194 				"#version 460 core\n"
1195 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1196 				"#extension GL_EXT_ray_tracing : require\n"
1197 				"layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
1198 				"hitAttributeEXT vec3 attribs;\n"
1199 				<< pushdecl.str()
1200 				<< decls.str() <<
1201 				"void main()\n"
1202 				"{\n"
1203 				"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1204 				"  int accum = 0, temp;\n"
1205 				<< checks.str() <<
1206 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1207 				"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1208 				"}\n";
1209 
1210 			programCollection.glslSources.add("test") << glu::ClosestHitSource(updateRayTracingGLSL(css.str())) << buildOptions;
1211 		}
1212 
1213 		break;
1214 	}
1215 	case STAGE_MISS:
1216 	{
1217 		{
1218 			programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(getCommonRayGenerationShader())) << buildOptions;
1219 		}
1220 
1221 		{
1222 			std::stringstream css;
1223 			css <<
1224 				"#version 460 core\n"
1225 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1226 				"#extension GL_EXT_ray_tracing : require\n"
1227 				"layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
1228 				<< pushdecl.str()
1229 				<< decls.str() <<
1230 				"void main()\n"
1231 				"{\n"
1232 				"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1233 				"  int accum = 0, temp;\n"
1234 				<< checks.str() <<
1235 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1236 				"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1237 				"}\n";
1238 
1239 			programCollection.glslSources.add("test") << glu::MissSource(updateRayTracingGLSL(css.str())) << buildOptions;
1240 		}
1241 
1242 		break;
1243 	}
1244 	case STAGE_CALLABLE:
1245 	{
1246 		{
1247 			std::stringstream css;
1248 			css <<
1249 				"#version 460 core\n"
1250 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1251 				"#extension GL_EXT_ray_tracing : require\n"
1252 				"layout(location = 0) callableDataEXT float dummy;"
1253 				"layout(set = 0, binding = 1) uniform accelerationStructureEXT topLevelAS;\n"
1254 				"\n"
1255 				"void main()\n"
1256 				"{\n"
1257 				"  executeCallableEXT(0, 0);\n"
1258 				"}\n";
1259 
1260 			programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(css.str())) << buildOptions;
1261 		}
1262 
1263 		{
1264 			std::stringstream css;
1265 			css <<
1266 				"#version 460 core\n"
1267 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1268 				"#extension GL_EXT_ray_tracing : require\n"
1269 				"layout(location = 0) callableDataInEXT float dummy;"
1270 				<< pushdecl.str()
1271 				<< decls.str() <<
1272 				"void main()\n"
1273 				"{\n"
1274 				"  const int invocationID = int(gl_LaunchIDEXT.y) * " << DIM << " + int(gl_LaunchIDEXT.x);\n"
1275 				"  int accum = 0, temp;\n"
1276 				<< checks.str() <<
1277 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1278 				"  imageStore(simage0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1279 				"}\n";
1280 
1281 			programCollection.glslSources.add("test") << glu::CallableSource(updateRayTracingGLSL(css.str())) << buildOptions;
1282 		}
1283 		break;
1284 	}
1285 #endif
1286 	case STAGE_VERTEX:
1287 		{
1288 			std::stringstream vss;
1289 			vss <<
1290 				"#version 450 core\n"
1291 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1292 				<< pushdecl.str()
1293 				<< decls.str()  <<
1294 				"void main()\n"
1295 				"{\n"
1296 				"  const int invocationID = gl_VertexIndex;\n"
1297 				"  int accum = 0, temp;\n"
1298 				<< checks.str() <<
1299 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1300 				"  imageStore(simage0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1301 				"  gl_PointSize = 1.0f;\n"
1302 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1303 				"}\n";
1304 
1305 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str());
1306 			break;
1307 		}
1308 	case STAGE_TASK:
1309 		{
1310 			std::stringstream task;
1311 			task
1312 				<< "#version 450\n"
1313 				<< "#extension GL_EXT_mesh_shader : enable\n"
1314 				<< "#extension GL_EXT_nonuniform_qualifier : enable\n"
1315 				<< pushdecl.str()
1316 				<< decls.str()
1317 				<< "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
1318 				<< "void main()\n"
1319 				<< "{\n"
1320 				<< "  const int invocationID = int(gl_GlobalInvocationID.y) * " << DIM << " + int(gl_GlobalInvocationID.x);\n"
1321 				<< "  int accum = 0, temp;\n"
1322 				<< checks.str()
1323 				<< "  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1324 				<< "  imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1325 				<< "  EmitMeshTasksEXT(0, 0, 0);\n"
1326 				<< "}\n"
1327 				;
1328 			programCollection.glslSources.add("test") << glu::TaskSource(task.str()) << buildOptions;
1329 
1330 			std::stringstream mesh;
1331 			mesh
1332 				<< "#version 450\n"
1333 				<< "#extension GL_EXT_mesh_shader : enable\n"
1334 				<< "#extension GL_EXT_nonuniform_qualifier : enable\n"
1335 				<< "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
1336 				<< "layout(triangles) out;\n"
1337 				<< "layout(max_vertices=3, max_primitives=1) out;\n"
1338 				<< "void main()\n"
1339 				<< "{\n"
1340 				<< "  SetMeshOutputsEXT(0, 0);\n"
1341 				<< "}\n"
1342 				;
1343 			programCollection.glslSources.add("mesh") << glu::MeshSource(mesh.str()) << buildOptions;
1344 
1345 			break;
1346 		}
1347 	case STAGE_MESH:
1348 		{
1349 			std::stringstream mesh;
1350 			mesh
1351 				<< "#version 450\n"
1352 				<< "#extension GL_EXT_mesh_shader : enable\n"
1353 				<< "#extension GL_EXT_nonuniform_qualifier : enable\n"
1354 				<< pushdecl.str()
1355 				<< decls.str()
1356 				<< "layout(local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
1357 				<< "layout(triangles) out;\n"
1358 				<< "layout(max_vertices=3, max_primitives=1) out;\n"
1359 				<< "void main()\n"
1360 				<< "{\n"
1361 				<< "  const int invocationID = int(gl_GlobalInvocationID.y) * " << DIM << " + int(gl_GlobalInvocationID.x);\n"
1362 				<< "  int accum = 0, temp;\n"
1363 				<< checks.str()
1364 				<< "  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1365 				<< "  imageStore(simage0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1366 				<< "}\n"
1367 				;
1368 			programCollection.glslSources.add("test") << glu::MeshSource(mesh.str()) << buildOptions;
1369 
1370 			break;
1371 		}
1372 	case STAGE_FRAGMENT:
1373 		{
1374 			std::stringstream vss;
1375 			vss <<
1376 				"#version 450 core\n"
1377 				"void main()\n"
1378 				"{\n"
1379 				// full-viewport quad
1380 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1381 				"}\n";
1382 
1383 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str());
1384 
1385 			std::stringstream fss;
1386 			fss <<
1387 				"#version 450 core\n"
1388 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1389 				<< pushdecl.str()
1390 				<< decls.str() <<
1391 				"void main()\n"
1392 				"{\n"
1393 				"  const int invocationID = int(gl_FragCoord.y) * " << DIM << " + int(gl_FragCoord.x);\n"
1394 				"  int accum = 0, temp;\n"
1395 				<< checks.str() <<
1396 				"  ivec4 color = (accum != 0) ? ivec4(0,0,0,0) : ivec4(1,0,0,1);\n"
1397 				"  imageStore(simage0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1398 				"}\n";
1399 
1400 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str());
1401 			break;
1402 		}
1403 	}
1404 
1405 }
1406 
1407 TestInstance* DescriptorSetRandomTestCase::createInstance (Context& context) const
1408 {
1409 	return new DescriptorSetRandomTestInstance(context, m_data_ptr);
1410 }
1411 
1412 void appendShaderStageCreateInfo (std::vector<VkPipelineShaderStageCreateInfo>& vec, VkShaderModule module, VkShaderStageFlagBits stage)
1413 {
1414 	const VkPipelineShaderStageCreateInfo info =
1415 	{
1416 		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	//	VkStructureType						sType;
1417 		nullptr,												//	const void*							pNext;
1418 		0u,														//	VkPipelineShaderStageCreateFlags	flags;
1419 		stage,													//	VkShaderStageFlagBits				stage;
1420 		module,													//	VkShaderModule						module;
1421 		"main",													//	const char*							pName;
1422 		nullptr,												//	const VkSpecializationInfo*			pSpecializationInfo;
1423 	};
1424 
1425 	vec.push_back(info);
1426 }
1427 
1428 tcu::TestStatus DescriptorSetRandomTestInstance::iterate (void)
1429 {
1430 	const InstanceInterface&	vki							= m_context.getInstanceInterface();
1431 	const DeviceInterface&		vk							= m_context.getDeviceInterface();
1432 	const VkDevice				device						= m_context.getDevice();
1433 	const VkPhysicalDevice		physicalDevice				= m_context.getPhysicalDevice();
1434 	Allocator&					allocator					= m_context.getDefaultAllocator();
1435 	const deUint32				queueFamilyIndex			= m_context.getUniversalQueueFamilyIndex();
1436 
1437 	deRandom					rnd;
1438 	VkPhysicalDeviceProperties2	properties					= getPhysicalDeviceExtensionProperties(vki, physicalDevice);
1439 #ifndef CTS_USES_VULKANSC
1440 	deUint32					shaderGroupHandleSize		= 0;
1441 	deUint32					shaderGroupBaseAlignment	= 1;
1442 #endif
1443 
1444 	deRandom_init(&rnd, m_data.seed);
1445 	RandomLayout& randomLayout = *m_data.randomLayout.get();
1446 
1447 #ifndef CTS_USES_VULKANSC
1448 	if (m_data.stage == STAGE_RAYGEN_NV)
1449 	{
1450 		const VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties = getPhysicalDeviceExtensionProperties(vki, physicalDevice);
1451 
1452 		shaderGroupHandleSize = rayTracingProperties.shaderGroupHandleSize;
1453 	}
1454 
1455 	if (isRayTracingStageKHR(m_data.stage))
1456 	{
1457 		de::MovePtr<RayTracingProperties>	rayTracingPropertiesKHR;
1458 
1459 		rayTracingPropertiesKHR		= makeRayTracingProperties(vki, physicalDevice);
1460 		shaderGroupHandleSize		= rayTracingPropertiesKHR->getShaderGroupHandleSize();
1461 		shaderGroupBaseAlignment	= rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
1462 	}
1463 #endif
1464 
1465 	// Get needed features.
1466 	auto descriptorIndexingSupported	= m_context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing");
1467 	auto indexingFeatures				= m_context.getDescriptorIndexingFeatures();
1468 #ifndef CTS_USES_VULKANSC
1469 	auto inlineUniformFeatures			= m_context.getInlineUniformBlockFeatures();
1470 #endif
1471 
1472 	VkPipelineBindPoint bindPoint;
1473 
1474 	switch (m_data.stage)
1475 	{
1476 	case STAGE_COMPUTE:
1477 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1478 		break;
1479 #ifndef CTS_USES_VULKANSC
1480 	case STAGE_RAYGEN_NV:
1481 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1482 		break;
1483 #endif
1484 	default:
1485 		bindPoint =
1486 #ifndef CTS_USES_VULKANSC
1487 			isRayTracingStageKHR(m_data.stage) ? VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR :
1488 #endif
1489 			VK_PIPELINE_BIND_POINT_GRAPHICS;
1490 		break;
1491 	}
1492 
1493 	DE_ASSERT(m_data.numDescriptorSets <= 32);
1494 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayouts[32];
1495 	Move<vk::VkDescriptorPool>		descriptorPools[32];
1496 	Move<vk::VkDescriptorSet>		descriptorSets[32];
1497 
1498 	deUint32 numDescriptors = 0;
1499 	for (deUint32 s = 0; s < m_data.numDescriptorSets; ++s)
1500 	{
1501 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
1502 		vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
1503 		vector<deUint32> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
1504 
1505 		VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1506 		VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1507 
1508 		for (size_t b = 0; b < bindings.size(); ++b)
1509 		{
1510 			VkDescriptorSetLayoutBinding &binding = bindings[b];
1511 			numDescriptors += binding.descriptorCount;
1512 
1513 			// Randomly choose some bindings to use update-after-bind, if it is supported
1514 			if (descriptorIndexingSupported &&
1515 				m_data.uab == UPDATE_AFTER_BIND_ENABLED &&
1516 				randRange(&rnd, 1, 8) == 1 && // 1 in 8 chance
1517 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER			|| indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind) &&
1518 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE				|| indexingFeatures.descriptorBindingStorageImageUpdateAfterBind) &&
1519 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER			|| indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind) &&
1520 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER		|| indexingFeatures.descriptorBindingUniformTexelBufferUpdateAfterBind) &&
1521 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER		|| indexingFeatures.descriptorBindingStorageTexelBufferUpdateAfterBind) &&
1522 #ifndef CTS_USES_VULKANSC
1523 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT	|| inlineUniformFeatures.descriptorBindingInlineUniformBlockUpdateAfterBind) &&
1524 #endif
1525 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) &&
1526 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) &&
1527 				(binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1528 #ifndef CTS_USES_VULKANSC
1529 				&& (binding.descriptorType != VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
1530 #endif
1531 				)
1532 			{
1533 				bindingsFlags[b] |= VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT;
1534 				layoutCreateFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
1535 				poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
1536 			}
1537 
1538 			if (!indexingFeatures.descriptorBindingVariableDescriptorCount)
1539 			{
1540 				bindingsFlags[b] &= ~VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
1541 			}
1542 		}
1543 
1544 		// Create a layout and allocate a descriptor set for it.
1545 
1546 		const VkDescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsInfo =
1547 		{
1548 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,		// VkStructureType					sType;
1549 			DE_NULL,																// const void*						pNext;
1550 			(deUint32)bindings.size(),												// uint32_t							bindingCount;
1551 			bindings.empty() ? DE_NULL : bindingsFlags.data(),						// const VkDescriptorBindingFlags*	pBindingFlags;
1552 		};
1553 
1554 		const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1555 		{
1556 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,	//  VkStructureType						sType;
1557 			(descriptorIndexingSupported ? &bindingFlagsInfo : DE_NULL),//  const void*							pNext;
1558 			layoutCreateFlags,											//  VkDescriptorSetLayoutCreateFlags	flags;
1559 			(deUint32)bindings.size(),									//  deUint32							bindingCount;
1560 			bindings.empty() ? DE_NULL : bindings.data()				//  const VkDescriptorSetLayoutBinding*	pBindings;
1561 		};
1562 
1563 		descriptorSetLayouts[s] = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1564 
1565 		vk::DescriptorPoolBuilder poolBuilder;
1566 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, m_data.maxPerStageUniformBuffers);
1567 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, m_data.maxUniformBuffersDynamic);
1568 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_data.maxPerStageStorageBuffers);
1569 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, m_data.maxStorageBuffersDynamic);
1570 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, m_data.maxPerStageSampledImages);
1571 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, m_data.maxPerStageStorageTexelBuffers);
1572 		poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_data.maxPerStageStorageImages);
1573 		if (m_data.maxPerStageInputAttachments > 0u)
1574 		{
1575 			poolBuilder.addType(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, m_data.maxPerStageInputAttachments);
1576 		}
1577 #ifndef CTS_USES_VULKANSC
1578 		if (m_data.maxInlineUniformBlocks > 0u)
1579 		{
1580 			poolBuilder.addType(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, m_data.maxInlineUniformBlocks * m_data.maxInlineUniformBlockSize);
1581 		}
1582 		if (usesAccelerationStructure(m_data.stage))
1583 		{
1584 			poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, 1u);
1585 		}
1586 
1587 		VkDescriptorPoolInlineUniformBlockCreateInfoEXT inlineUniformBlockPoolCreateInfo =
1588 		{
1589 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT,	// VkStructureType	sType;
1590 			DE_NULL,																// const void*		pNext;
1591 			m_data.maxInlineUniformBlocks,											// uint32_t			maxInlineUniformBlockBindings;
1592 		};
1593 #endif
1594 		descriptorPools[s] = poolBuilder.build(vk, device, poolCreateFlags, 1u,
1595 #ifndef CTS_USES_VULKANSC
1596 											   m_data.maxInlineUniformBlocks ? &inlineUniformBlockPoolCreateInfo :
1597 #endif
1598 											   DE_NULL);
1599 
1600 		VkDescriptorSetVariableDescriptorCountAllocateInfo variableCountInfo =
1601 		{
1602 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,		// VkStructureType	sType;
1603 			DE_NULL,																		// const void*		pNext;
1604 			0,																				// uint32_t			descriptorSetCount;
1605 			DE_NULL,																		// const uint32_t*	pDescriptorCounts;
1606 		};
1607 
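		// If the last binding uses a variable descriptor count, chain the size that was
		// randomly chosen for this set into the allocation via variableCountInfo.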
1608 		const void *pNext = DE_NULL;
1609 		if (bindings.size() > 0 &&
1610 			bindingsFlags[bindings.size()-1] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)
1611 		{
1612 			variableCountInfo.descriptorSetCount = 1;
1613 			variableCountInfo.pDescriptorCounts = &variableDescriptorSizes[s];
1614 			pNext = &variableCountInfo;
1615 		}
1616 
1617 		descriptorSets[s] = makeDescriptorSet(vk, device, *descriptorPools[s], *descriptorSetLayouts[s], pNext);
1618 	}
1619 
1620 	// Create a buffer to hold data for all descriptors.
1621 	VkDeviceSize	align = std::max({
1622 		properties.properties.limits.minTexelBufferOffsetAlignment,
1623 		properties.properties.limits.minUniformBufferOffsetAlignment,
1624 		properties.properties.limits.minStorageBufferOffsetAlignment,
1625 		(VkDeviceSize)sizeof(deUint32)});
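	// Using the strictest of the buffer-offset alignments lets a single buffer back
	// uniform, storage and texel-buffer descriptors at one common per-descriptor stride.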
1626 
1627 	de::MovePtr<BufferWithMemory> buffer;
1628 
1629 	buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1630 		vk, device, allocator, makeBufferCreateInfo(align*numDescriptors,
1631 													VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
1632 													VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1633 													VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
1634 													VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1635 													MemoryRequirement::HostVisible));
1636 	deUint8 *bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1637 
1638 	// Create storage images separately.
1639 	deUint32				storageImageCount		= 0u;
1640 	vector<Move<VkImage>>	storageImages;
1641 
1642 	const VkImageCreateInfo	storageImgCreateInfo	=
1643 	{
1644 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
1645 		DE_NULL,								// const void*				pNext;
1646 		0u,										// VkImageCreateFlags		flags;
1647 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
1648 		VK_FORMAT_R32_SINT,						// VkFormat					format;
1649 		{ 1u, 1u, 1u },							// VkExtent3D				extent;
1650 		1u,										// deUint32					mipLevels;
1651 		1u,										// deUint32					arrayLayers;
1652 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
1653 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
1654 		VK_IMAGE_USAGE_STORAGE_BIT
1655 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1656 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
1657 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
1658 		1u,										// deUint32					queueFamilyIndexCount;
1659 		&queueFamilyIndex,						// const deUint32*			pQueueFamilyIndices;
1660 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
1661 	};
1662 
1663 	// Create storage images.
1664 	for (const auto& bindings	: randomLayout.layoutBindings)
1665 	for (const auto& binding	: bindings)
1666 	{
1667 		if (binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1668 		{
1669 			storageImageCount += binding.descriptorCount;
1670 			for (deUint32 d = 0; d < binding.descriptorCount; ++d)
1671 			{
1672 				storageImages.push_back(createImage(vk, device, &storageImgCreateInfo));
1673 			}
1674 		}
1675 	}
1676 
1677 	// Allocate memory for them.
1678 	vk::VkMemoryRequirements storageImageMemReqs;
1679 	vk.getImageMemoryRequirements(device, *storageImages.front(), &storageImageMemReqs);
1680 
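	// Round each image's size up to its alignment so all storage images can be
	// suballocated from a single allocation at a fixed stride.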
1681 	de::MovePtr<Allocation> storageImageAlloc;
1682 	VkDeviceSize			storageImageBlockSize = 0u;
1683 	{
1684 		VkDeviceSize mod = (storageImageMemReqs.size % storageImageMemReqs.alignment);
1685 		storageImageBlockSize = storageImageMemReqs.size + ((mod == 0u) ? 0u : storageImageMemReqs.alignment - mod);
1686 	}
1687 	storageImageMemReqs.size = storageImageBlockSize * storageImageCount;
1688 	storageImageAlloc = allocator.allocate(storageImageMemReqs, MemoryRequirement::Any);
1689 
1690 	// Allocate buffer to copy storage images to.
1691 	auto		storageImgBuffer	= de::MovePtr<BufferWithMemory>(new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(storageImageCount * sizeof(deInt32), VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
1692 	deInt32*	storageImgBufferPtr	= reinterpret_cast<deInt32*>(storageImgBuffer->getAllocation().getHostPtr());
1693 
1694 	// Create image views.
1695 	vector<Move<VkImageView>>	storageImageViews;
1696 	{
1697 		VkImageViewCreateInfo		storageImageViewCreateInfo =
1698 		{
1699 			VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// VkStructureType			sType;
1700 			DE_NULL,										// const void*				pNext;
1701 			0u,												// VkImageViewCreateFlags	flags;
1702 			DE_NULL,										// VkImage					image;
1703 			VK_IMAGE_VIEW_TYPE_2D,							// VkImageViewType			viewType;
1704 			VK_FORMAT_R32_SINT,								// VkFormat					format;
1705 			{												// VkComponentMapping		channels;
1706 				VK_COMPONENT_SWIZZLE_IDENTITY,
1707 				VK_COMPONENT_SWIZZLE_IDENTITY,
1708 				VK_COMPONENT_SWIZZLE_IDENTITY,
1709 				VK_COMPONENT_SWIZZLE_IDENTITY
1710 			},
1711 			{ VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }	// VkImageSubresourceRange	subresourceRange;
1712 		};
1713 
1714 		for (deUint32 i = 0; i < static_cast<deUint32>(storageImages.size()); ++i)
1715 		{
1716 			// Bind image memory.
1717 			vk::VkImage img = *storageImages[i];
1718 			VK_CHECK(vk.bindImageMemory(device, img, storageImageAlloc->getMemory(), storageImageAlloc->getOffset() + i * storageImageBlockSize));
1719 
1720 			// Create view.
1721 			storageImageViewCreateInfo.image = img;
1722 			storageImageViews.push_back(createImageView(vk, device, &storageImageViewCreateInfo));
1723 		}
1724 	}
1725 
1726 	// Create input attachment images.
1727 	vector<Move<VkImage>>	inputAttachments;
1728 	const VkImageCreateInfo imgCreateInfo =
1729 	{
1730 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,										// VkStructureType			sType;
1731 		DE_NULL,																	// const void*				pNext;
1732 		0u,																			// VkImageCreateFlags		flags;
1733 		VK_IMAGE_TYPE_2D,															// VkImageType				imageType;
1734 		VK_FORMAT_R32_SINT,															// VkFormat					format;
1735 		{ DIM, DIM, 1u },															// VkExtent3D				extent;
1736 		1u,																			// deUint32					mipLevels;
1737 		1u,																			// deUint32					arrayLayers;
1738 		VK_SAMPLE_COUNT_1_BIT,														// VkSampleCountFlagBits	samples;
1739 		VK_IMAGE_TILING_OPTIMAL,													// VkImageTiling			tiling;
1740 		(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT),	// VkImageUsageFlags		usage;
1741 		VK_SHARING_MODE_EXCLUSIVE,													// VkSharingMode			sharingMode;
1742 		1u,																			// deUint32					queueFamilyIndexCount;
1743 		&queueFamilyIndex,															// const deUint32*			pQueueFamilyIndices;
1744 		VK_IMAGE_LAYOUT_UNDEFINED													// VkImageLayout			initialLayout;
1745 
1746 	};
1747 
1748 	deUint32 inputAttachmentCount = 0u;
1749 	for (const auto& bindings	: randomLayout.layoutBindings)
1750 	for (const auto& binding	: bindings)
1751 	{
1752 		if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
1753 		{
1754 			inputAttachmentCount += binding.descriptorCount;
1755 			for (deUint32 d = 0; d < binding.descriptorCount; ++d)
1756 			{
1757 				inputAttachments.push_back(createImage(vk, device, &imgCreateInfo));
1758 			}
1759 		}
1760 	}
1761 
1762 	de::MovePtr<Allocation> inputAttachmentAlloc;
1763 	VkDeviceSize			imageBlockSize = 0u;
1764 
1765 	if (inputAttachmentCount > 0u)
1766 	{
1767 		VkMemoryRequirements	imageReqs		= getImageMemoryRequirements(vk, device, inputAttachments.back().get());
1768 		VkDeviceSize			mod				= imageReqs.size % imageReqs.alignment;
1769 
1770 		// Create memory for every input attachment image.
1771 		imageBlockSize	= imageReqs.size + ((mod == 0u) ? 0u : (imageReqs.alignment - mod));
1772 		imageReqs.size	= imageBlockSize * inputAttachmentCount;
1773 		inputAttachmentAlloc = allocator.allocate(imageReqs, MemoryRequirement::Any);
1774 	}
1775 
1776 	// Bind memory to each input attachment and create an image view.
1777 	VkImageViewCreateInfo		inputAttachmentViewParams =
1778 	{
1779 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// VkStructureType			sType;
1780 		DE_NULL,										// const void*				pNext;
1781 		0u,												// VkImageViewCreateFlags	flags;
1782 		DE_NULL,										// VkImage					image;
1783 		VK_IMAGE_VIEW_TYPE_2D,							// VkImageViewType			viewType;
1784 		VK_FORMAT_R32_SINT,								// VkFormat					format;
1785 		{												// VkComponentMapping		channels;
1786 			VK_COMPONENT_SWIZZLE_IDENTITY,
1787 			VK_COMPONENT_SWIZZLE_IDENTITY,
1788 			VK_COMPONENT_SWIZZLE_IDENTITY,
1789 			VK_COMPONENT_SWIZZLE_IDENTITY
1790 		},
1791 		{ VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }	// VkImageSubresourceRange	subresourceRange;
1792 	};
1793 	vector<Move<VkImageView>>	inputAttachmentViews;
1794 
1795 	for (deUint32 i = 0; i < static_cast<deUint32>(inputAttachments.size()); ++i)
1796 	{
1797 		vk::VkImage img = *inputAttachments[i];
1798 		VK_CHECK(vk.bindImageMemory(device, img, inputAttachmentAlloc->getMemory(), inputAttachmentAlloc->getOffset() + i * imageBlockSize));
1799 
1800 		inputAttachmentViewParams.image = img;
1801 		inputAttachmentViews.push_back(createImageView(vk, device, &inputAttachmentViewParams));
1802 	}
1803 
1804 	// Create a view for each descriptor. Fill descriptor 'd' with an integer value equal to 'd'. In case the descriptor would be
1805 	// written to from the shader, store a -1 in it instead. Skip inline uniform blocks and use images for input attachments and
1806 	// storage images.
1807 
1808 	Move<VkCommandPool>				cmdPool						= createCommandPool(vk, device, 0, queueFamilyIndex);
1809 	const VkQueue					queue						= m_context.getUniversalQueue();
1810 	Move<VkCommandBuffer>			cmdBuffer					= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1811 
1812 	const VkImageSubresourceRange	clearRange					=
1813 	{
1814 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1815 		0u,							// deUint32				baseMipLevel;
1816 		1u,							// deUint32				levelCount;
1817 		0u,							// deUint32				baseArrayLayer;
1818 		1u							// deUint32				layerCount;
1819 	};
1820 
1821 	VkImageMemoryBarrier			preInputAttachmentBarrier	=
1822 	{
1823 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
1824 		DE_NULL,											// const void*			pNext
1825 		0u,													// VkAccessFlags		srcAccessMask
1826 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
1827 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
1828 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
1829 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
1830 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
1831 		DE_NULL,											// VkImage				image
1832 		{
1833 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
1834 			0u,										// uint32_t				baseMipLevel
1835 			1u,										// uint32_t				mipLevels,
1836 			0u,										// uint32_t				baseArray
1837 			1u,										// uint32_t				arraySize
1838 		}
1839 	};
1840 
1841 	VkImageMemoryBarrier			postInputAttachmentBarrier	=
1842 	{
1843 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1844 		DE_NULL,									// const void*				pNext;
1845 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
1846 		VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,		// VkAccessFlags			dstAccessMask;
1847 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
1848 		VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,	// VkImageLayout			newLayout;
1849 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
1850 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
1851 		DE_NULL,									// VkImage					image;
1852 		clearRange,									// VkImageSubresourceRange	subresourceRange;
1853 	};
1854 
1855 	VkImageMemoryBarrier			preStorageImageBarrier		=
1856 	{
1857 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType		sType
1858 		DE_NULL,									// const void*			pNext
1859 		0u,											// VkAccessFlags		srcAccessMask
1860 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags		dstAccessMask
1861 		VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout		oldLayout
1862 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout		newLayout
1863 		VK_QUEUE_FAMILY_IGNORED,					// uint32_t				srcQueueFamilyIndex
1864 		VK_QUEUE_FAMILY_IGNORED,					// uint32_t				dstQueueFamilyIndex
1865 		DE_NULL,									// VkImage				image
1866 		{
1867 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
1868 			0u,										// uint32_t				baseMipLevel
1869 			1u,										// uint32_t				mipLevels,
1870 			0u,										// uint32_t				baseArray
1871 			1u,										// uint32_t				arraySize
1872 		}
1873 	};
1874 
1875 	VkImageMemoryBarrier			postStorageImageBarrier		=
1876 	{
1877 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,						// VkStructureType			sType;
1878 		DE_NULL,													// const void*				pNext;
1879 		VK_ACCESS_TRANSFER_WRITE_BIT,								// VkAccessFlags			srcAccessMask;
1880 		(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT),	// VkAccessFlags			dstAccessMask;
1881 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,						// VkImageLayout			oldLayout;
1882 		VK_IMAGE_LAYOUT_GENERAL,									// VkImageLayout			newLayout;
1883 		VK_QUEUE_FAMILY_IGNORED,									// deUint32					srcQueueFamilyIndex;
1884 		VK_QUEUE_FAMILY_IGNORED,									// deUint32					dstQueueFamilyIndex;
1885 		DE_NULL,													// VkImage					image;
1886 		clearRange,													// VkImageSubresourceRange	subresourceRange;
1887 	};
1888 
1889 	vk::VkClearColorValue			clearValue;
1890 	clearValue.uint32[0] = 0u;
1891 	clearValue.uint32[1] = 0u;
1892 	clearValue.uint32[2] = 0u;
1893 	clearValue.uint32[3] = 0u;
1894 
1895 	beginCommandBuffer(vk, *cmdBuffer, 0u);
1896 
1897 	int			descriptor		= 0;
1898 	deUint32	attachmentIndex	= 0;
1899 	deUint32	storageImgIndex	= 0;
1900 
1901 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
1902 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
1903 
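	// Keep one texel buffer view per buffer-backed descriptor alive for the test's
	// lifetime; sized at least 1 so the vector is never empty.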
1904 	vector<BufferViewHandleSp>					bufferViews(de::max(1u,numDescriptors));
1905 
1906 	for (deUint32 s = 0; s < m_data.numDescriptorSets; ++s)
1907 	{
1908 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
1909 		for (size_t b = 0; b < bindings.size(); ++b)
1910 		{
1911 			VkDescriptorSetLayoutBinding &binding = bindings[b];
1912 
1913 			if (binding.descriptorCount == 0)
1914 			{
1915 				continue;
1916 			}
1917 #ifndef CTS_USES_VULKANSC
1918 			if (binding.descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
1919 			{
1920 				descriptor++;
1921 			}
1922 #endif
1923 			else if (binding.descriptorType != VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT &&
1924 #ifndef CTS_USES_VULKANSC
1925 					 binding.descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
1926 #endif
1927 					 binding.descriptorType != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1928 			{
1929 				for (deUint32 d = descriptor; d < descriptor + binding.descriptorCount; ++d)
1930 				{
1931 					DescriptorId	descriptorId	(s, static_cast<deUint32>(b), d - descriptor);
1932 					auto			writeInfoItr	= randomLayout.descriptorWrites.find(descriptorId);
1933 					deInt32*		ptr				= (deInt32 *)(bufferPtr + align*d);
1934 
1935 					if (writeInfoItr == randomLayout.descriptorWrites.end())
1936 					{
1937 						*ptr = static_cast<deInt32>(d);
1938 					}
1939 					else
1940 					{
1941 						*ptr = -1;
1942 						writeInfoItr->second.ptr = ptr;
1943 						writeInfoItr->second.expected = d;
1944 					}
1945 
1946 					const vk::VkBufferViewCreateInfo viewCreateInfo =
1947 					{
1948 						vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
1949 						DE_NULL,
1950 						(vk::VkBufferViewCreateFlags)0,
1951 						**buffer,								// buffer
1952 						VK_FORMAT_R32_SINT,						// format
1953 						(vk::VkDeviceSize)align*d,				// offset
1954 						(vk::VkDeviceSize)sizeof(deUint32)		// range
1955 					};
1956 					vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
1957 					bufferViews[d] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
1958 				}
1959 				descriptor += binding.descriptorCount;
1960 			}
1961 #ifndef CTS_USES_VULKANSC
1962 			else if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
1963 			{
1964 				// subtract 16 for "ivec4 unused"
1965 				DE_ASSERT(binding.descriptorCount >= 16);
1966 				descriptor += binding.descriptorCount - 16;
1967 			}
1968 #endif
1969 			else if (binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1970 			{
1971 				// Storage image.
1972 				for (deUint32 d = descriptor; d < descriptor + binding.descriptorCount; ++d)
1973 				{
1974 					VkImage			img				= *storageImages[storageImgIndex];
1975 					DescriptorId	descriptorId	(s, static_cast<deUint32>(b), d - descriptor);
1976 					deInt32*		ptr				= storageImgBufferPtr + storageImgIndex;
1977 
1978 					auto			writeInfoItr	= randomLayout.descriptorWrites.find(descriptorId);
1979 					const bool		isWrite			= (writeInfoItr != randomLayout.descriptorWrites.end());
1980 
1981 					if (isWrite)
1982 					{
1983 						writeInfoItr->second.ptr		= ptr;
1984 						writeInfoItr->second.expected	= static_cast<deInt32>(d);
1985 					}
1986 
1987 					preStorageImageBarrier.image	= img;
1988 					clearValue.int32[0]				= (isWrite ? -1 : static_cast<deInt32>(d));
1989 					postStorageImageBarrier.image	= img;
1990 
1991 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preStorageImageBarrier);
1992 					vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
1993 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postStorageImageBarrier);
1994 
1995 					++storageImgIndex;
1996 				}
1997 				descriptor += binding.descriptorCount;
1998 			}
1999 			else
2000 			{
2001 				// Input attachment.
2002 				for (deUint32 d = descriptor; d < descriptor + binding.descriptorCount; ++d)
2003 				{
2004 					VkImage img = *inputAttachments[attachmentIndex];
2005 
2006 					preInputAttachmentBarrier.image		= img;
2007 					clearValue.int32[0]					= static_cast<deInt32>(d);
2008 					postInputAttachmentBarrier.image	= img;
2009 
2010 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preInputAttachmentBarrier);
2011 					vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2012 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postInputAttachmentBarrier);
2013 
2014 					++attachmentIndex;
2015 				}
2016 				descriptor += binding.descriptorCount;
2017 			}
2018 		}
2019 	}
2020 
2021 	// Flush modified memory.
2022 	flushAlloc(vk, device, buffer->getAllocation());
2023 
2024 	// Push constants are used for dynamic indexing. PushConstant[i] = i.
2025 	const VkPushConstantRange			pushConstRange			=
2026 	{
2027 		m_data.allShaderStages,	// VkShaderStageFlags	stageFlags
2028 		0,						// deUint32				offset
2029 		128						// deUint32				size
2030 	};
2031 
2032 	vector<vk::VkDescriptorSetLayout>	descriptorSetLayoutsRaw	(m_data.numDescriptorSets);
2033 	for (size_t i = 0; i < m_data.numDescriptorSets; ++i)
2034 	{
2035 		descriptorSetLayoutsRaw[i] = descriptorSetLayouts[i].get();
2036 	}
2037 
2038 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2039 	{
2040 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				//  VkStructureType					sType;
2041 		DE_NULL,													//  const void*						pNext;
2042 		(VkPipelineLayoutCreateFlags)0,								//  VkPipelineLayoutCreateFlags		flags;
2043 		m_data.numDescriptorSets,									//  deUint32						setLayoutCount;
2044 		&descriptorSetLayoutsRaw[0],								//  const VkDescriptorSetLayout*	pSetLayouts;
2045 		m_data.indexType == INDEX_TYPE_PUSHCONSTANT ? 1u : 0u,		//  deUint32						pushConstantRangeCount;
2046 		&pushConstRange,											//  const VkPushConstantRange*		pPushConstantRanges;
2047 	};
2048 
2049 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2050 
2051 	if (m_data.indexType == INDEX_TYPE_PUSHCONSTANT)
2052 	{
2053 		// PushConstant[i] = i
2054 		for (deUint32 i = 0; i < (deUint32)(128 / sizeof(deUint32)); ++i)
2055 		{
2056 			vk.cmdPushConstants(*cmdBuffer, *pipelineLayout, m_data.allShaderStages,
2057 								(deUint32)(i * sizeof(deUint32)), (deUint32)sizeof(deUint32), &i);
2058 		}
2059 	}
2060 
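	// Host-visible buffer used to read back the DIM x DIM result image.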
2061 	de::MovePtr<BufferWithMemory> copyBuffer;
2062 	copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2063 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*sizeof(deUint32), VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2064 
2065 	// Special case for the output storage image.
2066 	const VkImageCreateInfo			imageCreateInfo			=
2067 	{
2068 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2069 		DE_NULL,								// const void*				pNext;
2070 		(VkImageCreateFlags)0u,					// VkImageCreateFlags		flags;
2071 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
2072 		VK_FORMAT_R32_SINT,						// VkFormat					format;
2073 		{
2074 			DIM,								// deUint32	width;
2075 			DIM,								// deUint32	height;
2076 			1u									// deUint32	depth;
2077 		},										// VkExtent3D				extent;
2078 		1u,										// deUint32					mipLevels;
2079 		1u,										// deUint32					arrayLayers;
2080 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
2081 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2082 		VK_IMAGE_USAGE_STORAGE_BIT
2083 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2084 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2085 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2086 		0u,										// deUint32					queueFamilyIndexCount;
2087 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2088 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2089 	};
2090 
2091 	VkImageViewCreateInfo		imageViewCreateInfo		=
2092 	{
2093 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
2094 		DE_NULL,									// const void*				pNext;
2095 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
2096 		DE_NULL,									// VkImage					image;
2097 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
2098 		VK_FORMAT_R32_SINT,							// VkFormat					format;
2099 		{
2100 			VK_COMPONENT_SWIZZLE_IDENTITY,
2101 			VK_COMPONENT_SWIZZLE_IDENTITY,
2102 			VK_COMPONENT_SWIZZLE_IDENTITY,
2103 			VK_COMPONENT_SWIZZLE_IDENTITY
2104 		},											// VkComponentMapping		 components;
2105 		{
2106 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
2107 			0u,										// deUint32				baseMipLevel;
2108 			1u,										// deUint32				levelCount;
2109 			0u,										// deUint32				baseArrayLayer;
2110 			1u										// deUint32				layerCount;
2111 		}											// VkImageSubresourceRange	subresourceRange;
2112 	};
2113 
2114 	de::MovePtr<ImageWithMemory> image;
2115 	Move<VkImageView> imageView;
2116 
2117 	image = de::MovePtr<ImageWithMemory>(new ImageWithMemory(
2118 		vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2119 	imageViewCreateInfo.image = **image;
2120 	imageView = createImageView(vk, device, &imageViewCreateInfo, NULL);
2121 
2122 #ifndef CTS_USES_VULKANSC
2123 	// Create ray tracing structures
2124 	de::MovePtr<vk::BottomLevelAccelerationStructure>	bottomLevelAccelerationStructure;
2125 	de::MovePtr<vk::TopLevelAccelerationStructure>		topLevelAccelerationStructure;
2126 	VkStridedDeviceAddressRegionKHR						raygenShaderBindingTableRegion		= makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2127 	VkStridedDeviceAddressRegionKHR						missShaderBindingTableRegion		= makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2128 	VkStridedDeviceAddressRegionKHR						hitShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2129 	VkStridedDeviceAddressRegionKHR						callableShaderBindingTableRegion	= makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
2130 
2131 	if (usesAccelerationStructure(m_data.stage))
2132 	{
2133 		// Create bottom level acceleration structure
2134 		{
2135 			bottomLevelAccelerationStructure = makeBottomLevelAccelerationStructure();
2136 
2137 			bottomLevelAccelerationStructure->setDefaultGeometryData(getShaderStageFlag(m_data.stage));
2138 
2139 			bottomLevelAccelerationStructure->createAndBuild(vk, device, *cmdBuffer, allocator);
2140 		}
2141 
2142 		// Create top level acceleration structure
2143 		{
2144 			topLevelAccelerationStructure = makeTopLevelAccelerationStructure();
2145 
2146 			topLevelAccelerationStructure->setInstanceCount(1);
2147 			topLevelAccelerationStructure->addInstance(de::SharedPtr<BottomLevelAccelerationStructure>(bottomLevelAccelerationStructure.release()));
2148 
2149 			topLevelAccelerationStructure->createAndBuild(vk, device, *cmdBuffer, allocator);
2150 		}
2151 	}
2152 #endif
2153 
2154 	descriptor		= 0;
2155 	attachmentIndex	= 0;
2156 	storageImgIndex = 0;
2157 
2158 	for (deUint32 s = 0; s < m_data.numDescriptorSets; ++s)
2159 	{
2160 		vector<VkDescriptorSetLayoutBinding> &bindings = randomLayout.layoutBindings[s];
2161 		vector<VkDescriptorBindingFlags> &bindingsFlags = randomLayout.layoutBindingFlags[s];
2162 		vector<deUint32> &arraySizes = randomLayout.arraySizes[s];
2163 		vector<deUint32> &variableDescriptorSizes = randomLayout.variableDescriptorSizes;
2164 
2165 		vector<VkDescriptorBufferInfo> bufferInfoVec(numDescriptors);
2166 		vector<VkDescriptorImageInfo> imageInfoVec(numDescriptors);
2167 		vector<VkBufferView> bufferViewVec(numDescriptors);
2168 #ifndef CTS_USES_VULKANSC
2169 		vector<VkWriteDescriptorSetInlineUniformBlockEXT> inlineInfoVec(numDescriptors);
2170 		vector<VkWriteDescriptorSetAccelerationStructureKHR> accelerationInfoVec(numDescriptors);
2171 #endif
2172 		vector<deUint32> descriptorNumber(numDescriptors);
2173 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2174 		vector<VkWriteDescriptorSet> writesAfterBindVec(0);
2175 		int vecIndex = 0;
2176 		int numDynamic = 0;
2177 
2178 #ifndef CTS_USES_VULKANSC
2179 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,		imgTemplateEntriesAfter,
2180 												bufTemplateEntriesBefore,		bufTemplateEntriesAfter,
2181 												texelBufTemplateEntriesBefore,	texelBufTemplateEntriesAfter,
2182 												inlineTemplateEntriesBefore,	inlineTemplateEntriesAfter;
2183 #endif
2184 		for (size_t b = 0; b < bindings.size(); ++b)
2185 		{
2186 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2187 			deUint32 descriptorIncrement = 1;
2188 #ifndef CTS_USES_VULKANSC
2189 			if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2190 				descriptorIncrement = 16;
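				// Inline uniform blocks are addressed in bytes rather than array
				// elements; each logical element of the block is an ivec4, hence 16.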
2191 #endif
2192 
2193 			// Construct the descriptor writes (and update-template entries) for the binding
2194 			if (binding.descriptorCount > 0)
2195 			{
2196 				bool updateAfterBind = !!(bindingsFlags[b] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT);
2197 				for (deUint32 ai = 0; ai < de::max(1u, arraySizes[b]); ++ai, descriptor += descriptorIncrement)
2198 				{
2199 					// Don't access descriptors past the end of the allocated range for
2200 					// variable descriptor count
2201 					if (b == bindings.size() - 1 &&
2202 						(bindingsFlags[b] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT))
2203 					{
2204 #ifndef CTS_USES_VULKANSC
2205 						if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2206 						{
2207 							// Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
2208 							const deUint32 uboRange = ai*16 + 16;
2209 							if (uboRange >= variableDescriptorSizes[s])
2210 								continue;
2211 						}
2212 						else
2213 #endif
2214 						{
2215 							if (ai >= variableDescriptorSizes[s])
2216 								continue;
2217 						}
2218 					}
2219 
2220 					// output image
2221 					switch (binding.descriptorType)
2222 					{
2223 					case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2224 						// Output image. Special case.
2225 						if (s == 0 && b == 0)
2226 						{
2227 							imageInfoVec[vecIndex] = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
2228 						}
2229 						else
2230 						{
2231 							imageInfoVec[vecIndex] = makeDescriptorImageInfo(DE_NULL, storageImageViews[storageImgIndex].get(), VK_IMAGE_LAYOUT_GENERAL);
2232 						}
2233 						++storageImgIndex;
2234 						break;
2235 					case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2236 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(DE_NULL, inputAttachmentViews[attachmentIndex].get(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
2237 						++attachmentIndex;
2238 						break;
2239 #ifndef CTS_USES_VULKANSC
2240 					case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2241 						// Handled below.
2242 						break;
2243 					case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2244 						// Handled below.
2245 						break;
2246 #endif
2247 					default:
2248 						// Other descriptor types.
2249 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, descriptor*align, sizeof(deUint32));
2250 						bufferViewVec[vecIndex] = **bufferViews[descriptor];
2251 						break;
2252 					}
2253 
2254 					descriptorNumber[descriptor] = descriptor;
2255 
2256 					VkWriteDescriptorSet w =
2257 					{
2258 						VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,		//  VkStructureType					sType;
2259 						DE_NULL,									//  const void*						pNext;
2260 						*descriptorSets[s],							//  VkDescriptorSet					dstSet;
2261 						(deUint32)b,								//  deUint32						dstBinding;
2262 						ai,											//  deUint32						dstArrayElement;
2263 						1u,											//  deUint32						descriptorCount;
2264 						binding.descriptorType,						//  VkDescriptorType				descriptorType;
2265 						&imageInfoVec[vecIndex],					//  const VkDescriptorImageInfo*	pImageInfo;
2266 						&bufferInfoVec[vecIndex],					//  const VkDescriptorBufferInfo*	pBufferInfo;
2267 						&bufferViewVec[vecIndex],					//  const VkBufferView*				pTexelBufferView;
2268 					};
2269 
2270 #ifndef CTS_USES_VULKANSC
2271 					if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
2272 					{
2273 						VkWriteDescriptorSetInlineUniformBlockEXT iuBlock =
2274 						{
2275 							VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT,	// VkStructureType	sType;
2276 							DE_NULL,															// const void*		pNext;
2277 							sizeof(deUint32),													// uint32_t			dataSize;
2278 							&descriptorNumber[descriptor],										// const void*		pData;
2279 						};
2280 
2281 						inlineInfoVec[vecIndex] = iuBlock;
2282 						w.dstArrayElement = ai*16 + 16; // add 16 to skip "ivec4 unused"
2283 						w.pNext = &inlineInfoVec[vecIndex];
2284 						w.descriptorCount = sizeof(deUint32);
2285 					}
2286 
2287 					if (binding.descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
2288 					{
2289 						const TopLevelAccelerationStructure*			topLevelAccelerationStructurePtr		= topLevelAccelerationStructure.get();
2290 						VkWriteDescriptorSetAccelerationStructureKHR	accelerationStructureWriteDescriptorSet	=
2291 						{
2292 							VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,	//  VkStructureType						sType;
2293 							DE_NULL,															//  const void*							pNext;
2294 							w.descriptorCount,													//  deUint32							accelerationStructureCount;
2295 							topLevelAccelerationStructurePtr->getPtr(),							//  const VkAccelerationStructureKHR*	pAccelerationStructures;
2296 						};
2297 
2298 						accelerationInfoVec[vecIndex] = accelerationStructureWriteDescriptorSet;
2299 						w.dstArrayElement = 0;
2300 						w.pNext = &accelerationInfoVec[vecIndex];
2301 					}
2302 
2303 					VkDescriptorUpdateTemplateEntry templateEntry =
2304 					{
2305 						(deUint32)b,				// uint32_t				dstBinding;
2306 						ai,							// uint32_t				dstArrayElement;
2307 						1u,							// uint32_t				descriptorCount;
2308 						binding.descriptorType,		// VkDescriptorType		descriptorType;
2309 						0,							// size_t				offset;
2310 						0,							// size_t				stride;
2311 					};
2312 
2313 					switch (binding.descriptorType)
2314 					{
2315 						default:
2316 							TCU_THROW(InternalError, "Unknown descriptor type");
2317 
2318 						case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2319 						case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2320 							templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2321 							(updateAfterBind ? imgTemplateEntriesAfter : imgTemplateEntriesBefore).push_back(templateEntry);
2322 							break;
2323 						case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2324 						case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2325 							templateEntry.offset = vecIndex * sizeof(VkBufferView);
2326 							(updateAfterBind ? texelBufTemplateEntriesAfter : texelBufTemplateEntriesBefore).push_back(templateEntry);
2327 							break;
2328 						case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2329 						case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2330 						case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2331 						case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2332 							templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2333 							(updateAfterBind ? bufTemplateEntriesAfter : bufTemplateEntriesBefore).push_back(templateEntry);
2334 							break;
2335 						case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2336 							templateEntry.offset = descriptor * sizeof(deUint32);
2337 							templateEntry.dstArrayElement = ai*16 + 16; // add 16 to skip "ivec4 unused"
2338 							templateEntry.descriptorCount = sizeof(deUint32);
2339 							(updateAfterBind ? inlineTemplateEntriesAfter : inlineTemplateEntriesBefore).push_back(templateEntry);
2340 							break;
2341 						case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2342 							DE_ASSERT(!updateAfterBind);
2343 							DE_ASSERT(usesAccelerationStructure(m_data.stage));
2344 							break;
2345 					}
2346 #endif
2347 
2348 					vecIndex++;
2349 
2350 					(updateAfterBind ? writesAfterBindVec : writesBeforeBindVec).push_back(w);
2351 
2352 					// Count the number of dynamic descriptors in this set.
2353 					if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2354 						binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2355 					{
2356 						numDynamic++;
2357 					}
2358 				}
2359 			}
2360 		}
2361 
2362 		// Make zeros have at least one element so &zeros[0] works
2363 		vector<deUint32> zeros(de::max(1,numDynamic));
2364 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2365 
2366 #ifndef CTS_USES_VULKANSC
2367 		// Randomly select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate
2368 		if (randRange(&rnd, 1, 2) == 1 &&
2369 			m_context.contextSupports(vk::ApiVersion(0, 1, 1, 0)) &&
2370 			!usesAccelerationStructure(m_data.stage))
2371 		{
2372 			DE_ASSERT(!usesAccelerationStructure(m_data.stage));
2373 
2374 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2375 			{
2376 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2377 				NULL,														// const void*								pNext;
2378 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2379 				0,															// uint32_t									descriptorUpdateEntryCount;
2380 				DE_NULL,													// const VkDescriptorUpdateTemplateEntry*	pDescriptorUpdateEntries;
2381 				VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,			// VkDescriptorUpdateTemplateType			templateType;
2382 				descriptorSetLayouts[s].get(),								// VkDescriptorSetLayout					descriptorSetLayout;
2383 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2384 				0,															// VkPipelineLayout							pipelineLayout;
2385 				0,															// uint32_t									set;
2386 			};
2387 
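			// Parallel arrays: entry list i in templateVectorsBefore/After holds the
			// template entries whose offsets index into templateVectorData[i].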
2388 			void *templateVectorData[] =
2389 			{
2390 				imageInfoVec.data(),
2391 				bufferInfoVec.data(),
2392 				bufferViewVec.data(),
2393 				descriptorNumber.data(),
2394 			};
2395 
2396 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2397 			{
2398 				&imgTemplateEntriesBefore,
2399 				&bufTemplateEntriesBefore,
2400 				&texelBufTemplateEntriesBefore,
2401 				&inlineTemplateEntriesBefore,
2402 			};
2403 
2404 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsAfter[] =
2405 			{
2406 				&imgTemplateEntriesAfter,
2407 				&bufTemplateEntriesAfter,
2408 				&texelBufTemplateEntriesAfter,
2409 				&inlineTemplateEntriesAfter,
2410 			};
2411 
2412 			for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2413 			{
2414 				if (templateVectorsBefore[i]->size())
2415 				{
2416 					templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2417 					templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2418 					Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2419 					vk.updateDescriptorSetWithTemplate(device, descriptorSets[s].get(), *descriptorUpdateTemplate, templateVectorData[i]);
2420 				}
2421 			}
2422 
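			// Descriptors marked update-after-bind may legally be written after the set
			// is bound, as long as the writes happen before command buffer submission.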
2423 			vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, s, 1, &descriptorSets[s].get(), numDynamic, &zeros[0]);
2424 
2425 			for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsAfter); ++i)
2426 			{
2427 				if (templateVectorsAfter[i]->size())
2428 				{
2429 					templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsAfter[i]->size();
2430 					templateCreateInfo.pDescriptorUpdateEntries = templateVectorsAfter[i]->data();
2431 					Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2432 					vk.updateDescriptorSetWithTemplate(device, descriptorSets[s].get(), *descriptorUpdateTemplate, templateVectorData[i]);
2433 				}
2434 			}
2435 
2436 		}
2437 		else
2438 #endif
2439 		{
2440 			if (writesBeforeBindVec.size())
2441 			{
2442 				vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2443 			}
2444 
2445 			vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, s, 1, &descriptorSets[s].get(), numDynamic, &zeros[0]);
2446 
2447 			if (writesAfterBindVec.size())
2448 			{
2449 				vk.updateDescriptorSets(device, (deUint32)writesAfterBindVec.size(), &writesAfterBindVec[0], 0, NULL);
2450 			}
2451 		}
2452 	}
2453 
2454 	Move<VkPipeline> pipeline;
2455 	Move<VkRenderPass> renderPass;
2456 	Move<VkFramebuffer> framebuffer;
2457 
2458 #ifndef CTS_USES_VULKANSC
2459 	de::MovePtr<BufferWithMemory>	sbtBuffer;
2460 	de::MovePtr<BufferWithMemory>	raygenShaderBindingTable;
2461 	de::MovePtr<BufferWithMemory>	missShaderBindingTable;
2462 	de::MovePtr<BufferWithMemory>	hitShaderBindingTable;
2463 	de::MovePtr<BufferWithMemory>	callableShaderBindingTable;
2464 	de::MovePtr<RayTracingPipeline>	rayTracingPipeline;
2465 #endif
2466 
2467 	// Disable interval watchdog timer for long shader compilations that can
2468 	// happen when the number of descriptor sets gets to 32 and above.
2469 	if (m_data.numDescriptorSets >= 32)
2470 	{
2471 		m_context.getTestContext().touchWatchdogAndDisableIntervalTimeLimit();
2472 	}
2473 
2474 	if (m_data.stage == STAGE_COMPUTE)
2475 	{
2476 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2477 
2478 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2479 		{
2480 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2481 			DE_NULL,
2482 			(VkPipelineShaderStageCreateFlags)0,
2483 			VK_SHADER_STAGE_COMPUTE_BIT,								// stage
2484 			*shader,													// shader
2485 			"main",
2486 			DE_NULL,													// pSpecializationInfo
2487 		};
2488 
2489 		const VkComputePipelineCreateInfo		pipelineCreateInfo =
2490 		{
2491 			VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
2492 			DE_NULL,
2493 			0u,															// flags
2494 			shaderCreateInfo,											// cs
2495 			*pipelineLayout,											// layout
2496 			(vk::VkPipeline)0,											// basePipelineHandle
2497 			0u,															// basePipelineIndex
2498 		};
2499 		pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2500 	}
2501 #ifndef CTS_USES_VULKANSC
2502 	else if (m_data.stage == STAGE_RAYGEN_NV)
2503 	{
2504 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2505 
2506 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo	=
2507 		{
2508 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,		//  VkStructureType						sType;
2509 			DE_NULL,													//  const void*							pNext;
2510 			(VkPipelineShaderStageCreateFlags)0,						//  VkPipelineShaderStageCreateFlags	flags;
2511 			VK_SHADER_STAGE_RAYGEN_BIT_NV,								//  VkShaderStageFlagBits				stage;
2512 			*shader,													//  VkShaderModule						module;
2513 			"main",														//  const char*							pName;
2514 			DE_NULL,													//  const VkSpecializationInfo*			pSpecializationInfo;
2515 		};
2516 
2517 		VkRayTracingShaderGroupCreateInfoNV		group				=
2518 		{
2519 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,	//  VkStructureType					sType;
2520 			DE_NULL,													//  const void*						pNext;
2521 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,				//  VkRayTracingShaderGroupTypeNV	type;
2522 			0,															//  deUint32						generalShader;
2523 			VK_SHADER_UNUSED_KHR,										//  deUint32						closestHitShader;
2524 			VK_SHADER_UNUSED_KHR,										//  deUint32						anyHitShader;
2525 			VK_SHADER_UNUSED_KHR,										//  deUint32						intersectionShader;
2526 		};
2527 
2528 		VkRayTracingPipelineCreateInfoNV		pipelineCreateInfo	=
2529 		{
2530 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV,	//  VkStructureType								sType;
2531 			DE_NULL,												//  const void*									pNext;
2532 			0,														//  VkPipelineCreateFlags						flags;
2533 			1,														//  deUint32									stageCount;
2534 			&shaderCreateInfo,										//  const VkPipelineShaderStageCreateInfo*		pStages;
2535 			1,														//  deUint32									groupCount;
2536 			&group,													//  const VkRayTracingShaderGroupCreateInfoNV*	pGroups;
2537 			0,														//  deUint32									maxRecursionDepth;
2538 			*pipelineLayout,										//  VkPipelineLayout							layout;
2539 			(vk::VkPipeline)0,										//  VkPipeline									basePipelineHandle;
2540 			0u,														//  deInt32										basePipelineIndex;
2541 		};
2542 
2543 		pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2544 
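		// Round the SBT allocation up to nonCoherentAtomSize so the whole region can be
		// invalidated and written on the host.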
2545 		const auto allocSize = de::roundUp(static_cast<VkDeviceSize>(shaderGroupHandleSize), properties.properties.limits.nonCoherentAtomSize);
2546 
2547 		sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vk, device, allocator,
2548 			makeBufferCreateInfo(allocSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2549 
2550 		const auto&	alloc	= sbtBuffer->getAllocation();
2551 		const auto	ptr		= reinterpret_cast<deUint32*>(alloc.getHostPtr());
2552 
2553 		invalidateAlloc(vk, device, alloc);
2554 		vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, static_cast<deUintptr>(allocSize), ptr);
2555 	}
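	// Each KHR ray tracing stage below pairs the stage under test with a trivial
	// "rgen" launcher (except STAGE_RAYGEN, where the test shader is the raygen shader
	// itself) and creates a one-entry shader binding table per shader group.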
2556 	else if (m_data.stage == STAGE_RAYGEN)
2557 	{
2558 		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2559 
2560 		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 0);
2561 
2562 		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2563 
2564 		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
2565 		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
2566 	}
2567 	else if (m_data.stage == STAGE_INTERSECT)
2568 	{
2569 		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
2570 
2571 		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,		createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0), 0);
2572 		rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR,	createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 1);
2573 
2574 		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);
2575 
		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);

		hitShaderBindingTable					= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
		hitShaderBindingTableRegion				= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
	}
	else if (m_data.stage == STAGE_ANY_HIT)
	{
		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();

		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,		createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0), 0);
		rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR,		createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 1);

		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);

		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);

		hitShaderBindingTable					= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
		hitShaderBindingTableRegion				= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
	}
	else if (m_data.stage == STAGE_CLOSEST_HIT)
	{
		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();

		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,		createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0), 0);
		rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,	createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 1);

		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);

		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);

		hitShaderBindingTable					= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
		hitShaderBindingTableRegion				= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
	}
	else if (m_data.stage == STAGE_MISS)
	{
		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();

		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,	createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0), 0);
		rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR,		createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 1);

		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);

		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);

		missShaderBindingTable					= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
		missShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, missShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
	}
	else if (m_data.stage == STAGE_CALLABLE)
	{
		rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();

		rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR,	createShaderModule(vk, device, m_context.getBinaryCollection().get("rgen"), 0), 0);
		rayTracingPipeline->addShader(VK_SHADER_STAGE_CALLABLE_BIT_KHR,	createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0), 1);

		pipeline = rayTracingPipeline->createPipeline(vk, device, *pipelineLayout);

		raygenShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
		raygenShaderBindingTableRegion			= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);

		callableShaderBindingTable				= rayTracingPipeline->createShaderBindingTable(vk, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1);
		callableShaderBindingTableRegion		= makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vk, device, callableShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
	}
#endif
	else
	{
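		// Graphics path: the render pass uses one attachment per input attachment under test; their contents are loaded (VK_ATTACHMENT_LOAD_OP_LOAD) and stay in SHADER_READ_ONLY_OPTIMAL.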
		const VkAttachmentDescription	attachmentDescription	=
		{
			// Input attachment
			(VkAttachmentDescriptionFlags)0,			// VkAttachmentDescriptionFlags	flags
			VK_FORMAT_R32_SINT,							// VkFormat						format
			VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits		samples
			VK_ATTACHMENT_LOAD_OP_LOAD,					// VkAttachmentLoadOp			loadOp
			VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp			storeOp
			VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp			stencilLoadOp
			VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp			stencilStoreOp
			VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,	// VkImageLayout				initialLayout
			VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL	// VkImageLayout				finalLayout
		};

		vector<VkAttachmentDescription> attachmentDescriptions	(inputAttachments.size(), attachmentDescription);
		vector<VkAttachmentReference>	attachmentReferences;

		attachmentReferences.reserve(inputAttachments.size());
		VkAttachmentReference attachmentReference =
		{
			0u,
			VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
		};
		for (size_t i = 0; i < inputAttachments.size(); ++i)
		{
			attachmentReference.attachment = static_cast<deUint32>(i);
			attachmentReferences.push_back(attachmentReference);
		}

		const VkSubpassDescription		subpassDesc				=
		{
			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
			static_cast<deUint32>(attachmentReferences.size()),						// deUint32						inputAttachmentCount
			de::dataOrNull(attachmentReferences),									// const VkAttachmentReference*	pInputAttachments
			0u,																		// deUint32						colorAttachmentCount
			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
			0u,																		// deUint32						preserveAttachmentCount
			DE_NULL																	// const deUint32*				pPreserveAttachments
		};

		const VkSubpassDependency		subpassDependency		=
		{
			VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
			0,												// deUint32				dstSubpass
			VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
			VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
			VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,	// dstAccessMask
			VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
		};
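		// This external dependency makes the transfer writes that filled the input attachments visible to fragment-stage reads inside the subpass.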

		const VkRenderPassCreateInfo	renderPassParams		=
		{
			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureType					sType
			DE_NULL,												// const void*						pNext
			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
			static_cast<deUint32>(attachmentDescriptions.size()),	// deUint32							attachmentCount
			de::dataOrNull(attachmentDescriptions),					// const VkAttachmentDescription*	pAttachments
			1u,														// deUint32							subpassCount
			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
			1u,														// deUint32							dependencyCount
			&subpassDependency										// const VkSubpassDependency*		pDependencies
		};

		renderPass = createRenderPass(vk, device, &renderPassParams);

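		// Gather the raw VkImageView handles; the framebuffer stores non-owning copies of them.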
		vector<VkImageView> rawInputAttachmentViews;
		rawInputAttachmentViews.reserve(inputAttachmentViews.size());
		transform(begin(inputAttachmentViews), end(inputAttachmentViews), back_inserter(rawInputAttachmentViews),
				  [](const Move<VkImageView>& ptr) { return ptr.get(); });

		const vk::VkFramebufferCreateInfo	framebufferParams	=
		{
			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
			DE_NULL,												// pNext
			(vk::VkFramebufferCreateFlags)0,						// flags
			*renderPass,											// renderPass
			static_cast<deUint32>(rawInputAttachmentViews.size()),	// attachmentCount
			de::dataOrNull(rawInputAttachmentViews),				// pAttachments
			DIM,													// width
			DIM,													// height
			1u,														// layers
		};

		framebuffer = createFramebuffer(vk, device, &framebufferParams);

		// Note: vertex input state and input assembly state will not be used for mesh pipelines.

		const VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
		{
			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
			DE_NULL,													// const void*								pNext;
			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
			0u,															// deUint32									vertexBindingDescriptionCount;
			DE_NULL,													// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
			0u,															// deUint32									vertexAttributeDescriptionCount;
			DE_NULL														// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
		};

		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
		{
			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
			DE_NULL,														// const void*								pNext;
			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
			VK_FALSE														// VkBool32									primitiveRestartEnable;
		};
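		// The vertex-stage variant draws a point list (one point per pixel); the fragment variant draws a four-vertex triangle strip quad instead.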

		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
		{
			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
			DE_NULL,														// const void*								pNext;
			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
			VK_FALSE,														// VkBool32									depthClampEnable;
			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
			VK_FALSE,														// VkBool32									depthBiasEnable;
			0.0f,															// float									depthBiasConstantFactor;
			0.0f,															// float									depthBiasClamp;
			0.0f,															// float									depthBiasSlopeFactor;
			1.0f															// float									lineWidth;
		};
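		// The vertex-stage variant discards rasterization and relies purely on vertex shader side effects.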

		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
		{
			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
			DE_NULL,													// const void*								pNext
			0u,															// VkPipelineMultisampleStateCreateFlags	flags
			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
			VK_FALSE,													// VkBool32									sampleShadingEnable
			1.0f,														// float									minSampleShading
			DE_NULL,													// const VkSampleMask*						pSampleMask
			VK_FALSE,													// VkBool32									alphaToCoverageEnable
			VK_FALSE													// VkBool32									alphaToOneEnable
		};

		VkViewport viewport = makeViewport(DIM, DIM);
		VkRect2D scissor = makeRect2D(DIM, DIM);

		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
		{
			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
			DE_NULL,												// const void*								pNext
			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
			1u,														// deUint32									viewportCount
			&viewport,												// const VkViewport*						pViewports
			1u,														// deUint32									scissorCount
			&scissor												// const VkRect2D*							pScissors
		};

		Move<VkShaderModule> fs;
		Move<VkShaderModule> vs;
#ifndef CTS_USES_VULKANSC
		Move<VkShaderModule> ms;
		Move<VkShaderModule> ts;
#endif // CTS_USES_VULKANSC

		const auto& binaries = m_context.getBinaryCollection();

		std::vector<VkPipelineShaderStageCreateInfo> stageCreateInfos;

		if (m_data.stage == STAGE_VERTEX)
		{
			vs = createShaderModule(vk, device, binaries.get("test"));
			appendShaderStageCreateInfo(stageCreateInfos, vs.get(), VK_SHADER_STAGE_VERTEX_BIT);
		}
		else if (m_data.stage == STAGE_FRAGMENT)
		{
			vs = createShaderModule(vk, device, binaries.get("vert"));
			fs = createShaderModule(vk, device, binaries.get("test"));
			appendShaderStageCreateInfo(stageCreateInfos, vs.get(), VK_SHADER_STAGE_VERTEX_BIT);
			appendShaderStageCreateInfo(stageCreateInfos, fs.get(), VK_SHADER_STAGE_FRAGMENT_BIT);
		}
#ifndef CTS_USES_VULKANSC
		else if (m_data.stage == STAGE_TASK)
		{
			ts = createShaderModule(vk, device, binaries.get("test"));
			ms = createShaderModule(vk, device, binaries.get("mesh"));
			appendShaderStageCreateInfo(stageCreateInfos, ts.get(), vk::VK_SHADER_STAGE_TASK_BIT_EXT);
			appendShaderStageCreateInfo(stageCreateInfos, ms.get(), VK_SHADER_STAGE_MESH_BIT_EXT);
		}
		else if (m_data.stage == STAGE_MESH)
		{
			ms = createShaderModule(vk, device, binaries.get("test"));
			appendShaderStageCreateInfo(stageCreateInfos, ms.get(), VK_SHADER_STAGE_MESH_BIT_EXT);
		}
#endif // CTS_USES_VULKANSC

		const VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
		{
			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
			DE_NULL,											// const void*										pNext;
			(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
			static_cast<uint32_t>(stageCreateInfos.size()),		// deUint32											stageCount;
			de::dataOrNull(stageCreateInfos),					// const VkPipelineShaderStageCreateInfo*			pStages;
			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
			DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
			DE_NULL,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
			pipelineLayout.get(),								// VkPipelineLayout									layout;
			renderPass.get(),									// VkRenderPass										renderPass;
			0u,													// deUint32											subpass;
			DE_NULL,											// VkPipeline										basePipelineHandle;
			0													// int												basePipelineIndex;
		};

		pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
	}

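	// Transition the output image from UNDEFINED to GENERAL so it can be cleared and then written by the test shader.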
	const VkImageMemoryBarrier imageBarrier =
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
		DE_NULL,											// const void*			pNext
		0u,													// VkAccessFlags		srcAccessMask
		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
		**image,											// VkImage				image
		{
			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
			0u,										// uint32_t				baseMipLevel
			1u,										// uint32_t				levelCount
			0u,										// uint32_t				baseArrayLayer
			1u,										// uint32_t				layerCount
		}
	};

	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
							(VkDependencyFlags)0,
							0, (const VkMemoryBarrier*)DE_NULL,
							0, (const VkBufferMemoryBarrier*)DE_NULL,
							1, &imageBarrier);

	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);

	VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
	VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);

	VkMemoryBarrier					memBarrier =
	{
		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
		DE_NULL,							// pNext
		0u,									// srcAccessMask
		0u,									// dstAccessMask
	};

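	// Clear the output image, then make the clear visible to every shader stage used by the test.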
	vk.cmdClearColorImage(*cmdBuffer, **image, VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);

	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

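	// Execute the test shader over the DIM x DIM output.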
	if (m_data.stage == STAGE_COMPUTE)
	{
		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
	}
#ifndef CTS_USES_VULKANSC
	else if (m_data.stage == STAGE_RAYGEN_NV)
	{
		vk.cmdTraceRaysNV(*cmdBuffer,
			**sbtBuffer, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DIM, DIM, 1);
	}
	else if (isRayTracingStageKHR(m_data.stage))
	{
		cmdTraceRays(vk,
			*cmdBuffer,
			&raygenShaderBindingTableRegion,
			&missShaderBindingTableRegion,
			&hitShaderBindingTableRegion,
			&callableShaderBindingTableRegion,
			DIM, DIM, 1);
	}
#endif
	else
	{
		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
						makeRect2D(DIM, DIM),
						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
		if (m_data.stage == STAGE_VERTEX)
		{
			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
		}
		else if (m_data.stage == STAGE_FRAGMENT)
		{
			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
		}
#ifndef CTS_USES_VULKANSC
		else if (isMeshStage(m_data.stage))
		{
			vk.cmdDrawMeshTasksEXT(*cmdBuffer, DIM, DIM, 1u);
		}
#endif // CTS_USES_VULKANSC
		endRenderPass(vk, *cmdBuffer);
	}

	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

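	// Read the output image back into a host-visible buffer.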
	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
	vk.cmdCopyImageToBuffer(*cmdBuffer, **image, VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);

	const VkBufferMemoryBarrier copyBufferBarrier =
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
		DE_NULL,									// const void*		pNext;
		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags	srcAccessMask;
		VK_ACCESS_HOST_READ_BIT,					// VkAccessFlags	dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			dstQueueFamilyIndex;
		**copyBuffer,								// VkBuffer			buffer;
		0u,											// VkDeviceSize		offset;
		VK_WHOLE_SIZE,								// VkDeviceSize		size;
	};

	// Add a barrier to read the copy buffer after copying.
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0u, DE_NULL, 1u, &copyBufferBarrier, 0u, DE_NULL);

	// Copy all storage images to the storage image buffer.
	VkBufferImageCopy storageImgCopyRegion =
	{
		0u,																	// VkDeviceSize					bufferOffset;
		0u,																	// uint32_t						bufferRowLength;
		0u,																	// uint32_t						bufferImageHeight;
		makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u),	// VkImageSubresourceLayers		imageSubresource;
		makeOffset3D(0, 0, 0),												// VkOffset3D					imageOffset;
		makeExtent3D(1u, 1u, 1u),											// VkExtent3D					imageExtent;
	};

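	// Each storage image is a single texel; pack their values into consecutive 32-bit slots of the readback buffer.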
	for (deUint32 i = 0; i < storageImageCount; ++i)
	{
		storageImgCopyRegion.bufferOffset = sizeof(deInt32) * i;
		vk.cmdCopyImageToBuffer(*cmdBuffer, storageImages[i].get(), VK_IMAGE_LAYOUT_GENERAL, **storageImgBuffer, 1u, &storageImgCopyRegion);
	}

	const VkBufferMemoryBarrier storageImgBufferBarrier =
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
		DE_NULL,									// const void*		pNext;
		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags	srcAccessMask;
		VK_ACCESS_HOST_READ_BIT,					// VkAccessFlags	dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			dstQueueFamilyIndex;
		**storageImgBuffer,							// VkBuffer			buffer;
		0u,											// VkDeviceSize		offset;
		VK_WHOLE_SIZE,								// VkDeviceSize		size;
	};

	// Add a barrier to read the storage image buffer after copying.
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0u, DE_NULL, 1u, &storageImgBufferBarrier, 0u, DE_NULL);

	const VkBufferMemoryBarrier descriptorBufferBarrier =
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,					// VkStructureType	sType;
		DE_NULL,													// const void*		pNext;
		(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT),	// VkAccessFlags	srcAccessMask;
		VK_ACCESS_HOST_READ_BIT,									// VkAccessFlags	dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,									// deUint32			srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,									// deUint32			dstQueueFamilyIndex;
		**buffer,													// VkBuffer			buffer;
		0u,															// VkDeviceSize		offset;
		VK_WHOLE_SIZE,												// VkDeviceSize		size;
	};

	// Add a barrier to read stored data from shader writes in descriptor memory for other types of descriptors.
	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, nullptr, 1u, &descriptorBufferBarrier, 0u, nullptr);

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

	// Re-enable the watchdog interval timer here to accommodate virtualized Vulkan
	// implementations that create the pipeline asynchronously on the host.
	if (m_data.numDescriptorSets >= 32)
	{
		m_context.getTestContext().touchWatchdogAndEnableIntervalTimeLimit();
	}

	// Verify output image.
	deUint32 *ptr = (deUint32 *)copyBuffer->getAllocation().getHostPtr();
	invalidateAlloc(vk, device, copyBuffer->getAllocation());

	deUint32	failures = 0;
	auto&		log = m_context.getTestContext().getLog();

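	// Each pixel is expected to contain 1, which the test shader writes only when all of its descriptor checks pass.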
	for (deUint32 i = 0; i < DIM*DIM; ++i)
	{
		if (ptr[i] != 1)
		{
			failures++;
			log << tcu::TestLog::Message << "Failure in copy buffer, ptr[" << i << "] = " << ptr[i] << tcu::TestLog::EndMessage;
		}
	}

	// Verify descriptors with writes.
	invalidateMappedMemoryRange(vk, device, buffer->getAllocation().getMemory(), buffer->getAllocation().getOffset(), VK_WHOLE_SIZE);
	invalidateMappedMemoryRange(vk, device, storageImgBuffer->getAllocation().getMemory(), storageImgBuffer->getAllocation().getOffset(), VK_WHOLE_SIZE);

	for (const auto& descIdWriteInfo : randomLayout.descriptorWrites)
	{
		const auto& writeInfo = descIdWriteInfo.second;
		if (writeInfo.writeGenerated && *writeInfo.ptr != writeInfo.expected)
		{
			failures++;
			log << tcu::TestLog::Message << "Failure in write operation; expected " << writeInfo.expected << " and found " << *writeInfo.ptr << tcu::TestLog::EndMessage;
		}
	}

	if (failures == 0)
		return tcu::TestStatus::pass("Pass");
	else
		return tcu::TestStatus::fail("Fail (failures=" + de::toString(failures) + ")");
}

}	// anonymous

tcu::TestCaseGroup*	createDescriptorSetRandomTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "descriptorset_random", "Randomly-generated descriptor set layouts"));

	deUint32 seed = 0;

	typedef struct
	{
		deUint32				count;
		const char*				name;
		const char*				description;
	} TestGroupCase;

	TestGroupCase setsCases[] =
	{
		{ 4,	"sets4",	"4 descriptor sets"		},
		{ 8,	"sets8",	"8 descriptor sets"		},
		{ 16,	"sets16",	"16 descriptor sets"	},
		{ 32,	"sets32",	"32 descriptor sets"	},
	};

	TestGroupCase indexCases[] =
	{
		{ INDEX_TYPE_NONE,			"noarray",		"no descriptor arrays"								},
		{ INDEX_TYPE_CONSTANT,		"constant",		"constant indexing of descriptor arrays"			},
		{ INDEX_TYPE_PUSHCONSTANT,	"unifindexed",	"indexing descriptor arrays with push constants"	},
		{ INDEX_TYPE_DEPENDENT,		"dynindexed",	"dynamically uniform indexing of descriptor arrays"	},
		{ INDEX_TYPE_RUNTIME_SIZE,	"runtimesize",	"runtime-sized declarations of descriptor arrays"	},
	};

	TestGroupCase uboCases[] =
	{
		{ 0,			"noubo",			"no UBOs"							},
		{ 12,			"ubolimitlow",		"spec minimum maximum UBO limit"	},
		{ 4096,			"ubolimithigh",		"high UBO limit"					},
	};

	TestGroupCase sboCases[] =
	{
		{ 0,			"nosbo",			"no SSBOs"							},
		{ 4,			"sbolimitlow",		"spec minimum maximum SSBO limit"	},
		{ 4096,			"sbolimithigh",		"high SSBO limit"					},
	};

	TestGroupCase iaCases[] =
	{
		{ 0,			"noia",				"no input attachments"							},
		{ 4,			"ialimitlow",		"spec minimum maximum input attachment limit"	},
		{ 64,			"ialimithigh",		"high input attachment limit"					},
	};

	TestGroupCase sampledImgCases[] =
	{
		{ 0,			"nosampledimg",		"no sampled images"					},
		{ 16,			"sampledimglow",	"spec minimum maximum image limit"	},
		{ 4096,			"sampledimghigh",	"high image limit"					},
	};

	const struct
	{
		deUint32	sImgCount;
		deUint32	sTexCount;
		const char* name;
		const char* description;
	} sImgTexCases[] =
	{
		{ 1,		0,		"outimgonly",		"output storage image only"									},
		{ 1,		3,		"outimgtexlow",		"output image plus low storage texel buffer limit"			},
		{ 4,		0,		"lowimgnotex",		"minimum maximum storage images, no storage texel buffers"	},
		{ 3,		1,		"lowimgsingletex",	"low storage image count, single storage texel buffer"		},
		{ 2048,		2048,	"storageimghigh",	"high limit of storage images and texel buffers"			},
	};

	const struct
	{
		deUint32				iubCount;
		deUint32				iubSize;
		const char*				name;
		const char*				description;
	} iubCases[] =
	{
		{ 0, 0,		"noiub",			"no inline uniform blocks"			},
		{ 4, 256,	"iublimitlow",		"inline uniform blocks low limit"	},
		{ 8, 4096,	"iublimithigh",		"inline uniform blocks high limit"	},
	};

	TestGroupCase stageCases[] =
	{
		{ STAGE_COMPUTE,	"comp",		"compute"		},
		{ STAGE_FRAGMENT,	"frag",		"fragment"		},
		{ STAGE_VERTEX,		"vert",		"vertex"		},
#ifndef CTS_USES_VULKANSC
		{ STAGE_RAYGEN_NV,	"rgnv",		"raygen_nv"		},
		{ STAGE_RAYGEN,		"rgen",		"raygen"		},
		{ STAGE_INTERSECT,	"sect",		"intersect"		},
		{ STAGE_ANY_HIT,	"ahit",		"any_hit"		},
		{ STAGE_CLOSEST_HIT,"chit",		"closest_hit"	},
		{ STAGE_MISS,		"miss",		"miss"			},
		{ STAGE_CALLABLE,	"call",		"callable"		},
		{ STAGE_TASK,		"task",		"task"			},
		{ STAGE_MESH,		"mesh",		"mesh"			},
#endif
	};

	TestGroupCase uabCases[] =
	{
		{ UPDATE_AFTER_BIND_DISABLED,	"nouab",	"no update after bind"		},
		{ UPDATE_AFTER_BIND_ENABLED,	"uab",		"enable update after bind"	},
	};

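	// Build the test hierarchy, one group level per dimension: sets, indexing type, UBOs, SSBOs, sampled images, storage images/texel buffers, inline uniform blocks, update-after-bind, stage, and input attachments.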
	for (int setsNdx = 0; setsNdx < DE_LENGTH_OF_ARRAY(setsCases); setsNdx++)
	{
		de::MovePtr<tcu::TestCaseGroup> setsGroup(new tcu::TestCaseGroup(testCtx, setsCases[setsNdx].name, setsCases[setsNdx].description));
		for (int indexNdx = 0; indexNdx < DE_LENGTH_OF_ARRAY(indexCases); indexNdx++)
		{
			de::MovePtr<tcu::TestCaseGroup> indexGroup(new tcu::TestCaseGroup(testCtx, indexCases[indexNdx].name, indexCases[indexNdx].description));
			for (int uboNdx = 0; uboNdx < DE_LENGTH_OF_ARRAY(uboCases); uboNdx++)
			{
				de::MovePtr<tcu::TestCaseGroup> uboGroup(new tcu::TestCaseGroup(testCtx, uboCases[uboNdx].name, uboCases[uboNdx].description));
				for (int sboNdx = 0; sboNdx < DE_LENGTH_OF_ARRAY(sboCases); sboNdx++)
				{
					de::MovePtr<tcu::TestCaseGroup> sboGroup(new tcu::TestCaseGroup(testCtx, sboCases[sboNdx].name, sboCases[sboNdx].description));
					for (int sampledImgNdx = 0; sampledImgNdx < DE_LENGTH_OF_ARRAY(sampledImgCases); sampledImgNdx++)
					{
						de::MovePtr<tcu::TestCaseGroup> sampledImgGroup(new tcu::TestCaseGroup(testCtx, sampledImgCases[sampledImgNdx].name, sampledImgCases[sampledImgNdx].description));
						for (int storageImgNdx = 0; storageImgNdx < DE_LENGTH_OF_ARRAY(sImgTexCases); ++storageImgNdx)
						{
							de::MovePtr<tcu::TestCaseGroup> storageImgGroup(new tcu::TestCaseGroup(testCtx, sImgTexCases[storageImgNdx].name, sImgTexCases[storageImgNdx].description));
							for (int iubNdx = 0; iubNdx < DE_LENGTH_OF_ARRAY(iubCases); iubNdx++)
							{
								de::MovePtr<tcu::TestCaseGroup> iubGroup(new tcu::TestCaseGroup(testCtx, iubCases[iubNdx].name, iubCases[iubNdx].description));
								for (int uabNdx = 0; uabNdx < DE_LENGTH_OF_ARRAY(uabCases); uabNdx++)
								{
									de::MovePtr<tcu::TestCaseGroup> uabGroup(new tcu::TestCaseGroup(testCtx, uabCases[uabNdx].name, uabCases[uabNdx].description));
									for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
									{
										const Stage		currentStage			= static_cast<Stage>(stageCases[stageNdx].count);
										const auto		shaderStages			= getAllShaderStagesFor(currentStage);
										const auto		pipelineStages			= getAllPipelineStagesFor(currentStage);

										de::MovePtr<tcu::TestCaseGroup> stageGroup(new tcu::TestCaseGroup(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].description));
										for (int iaNdx = 0; iaNdx < DE_LENGTH_OF_ARRAY(iaCases); ++iaNdx)
										{
											// Input attachments can only be used in the fragment stage.
											if (currentStage != STAGE_FRAGMENT && iaCases[iaNdx].count > 0u)
												continue;

											// Allow only one high limit or all of them.
											deUint32 highLimitCount = 0u;
											if (uboNdx == DE_LENGTH_OF_ARRAY(uboCases) - 1)					++highLimitCount;
											if (sboNdx == DE_LENGTH_OF_ARRAY(sboCases) - 1)					++highLimitCount;
											if (sampledImgNdx == DE_LENGTH_OF_ARRAY(sampledImgCases) - 1)	++highLimitCount;
											if (storageImgNdx == DE_LENGTH_OF_ARRAY(sImgTexCases) - 1)		++highLimitCount;
											if (iaNdx == DE_LENGTH_OF_ARRAY(iaCases) - 1)					++highLimitCount;

											if (highLimitCount > 1 && highLimitCount < 5)
												continue;

											// Allow only all, all-but-one, none or one "zero limits" at the same time, except for inline uniform blocks.
											deUint32 zeroLimitCount = 0u;
											if (uboNdx == 0)			++zeroLimitCount;
											if (sboNdx == 0)			++zeroLimitCount;
											if (sampledImgNdx == 0)		++zeroLimitCount;
											if (storageImgNdx == 0)		++zeroLimitCount;
											if (iaNdx == 0)				++zeroLimitCount;

											if (zeroLimitCount > 1 && zeroLimitCount < 4)
												continue;

											// Avoid using multiple storage images if no dynamic indexing is being used.
											if (storageImgNdx >= 2 && indexNdx < 2)
												continue;

											// Skip the case of no UBOs, SSBOs or sampled images when no dynamic indexing is being used.
											if ((uboNdx == 0 || sboNdx == 0 || sampledImgNdx == 0) && indexNdx < 2)
												continue;

											de::MovePtr<tcu::TestCaseGroup> iaGroup(new tcu::TestCaseGroup(testCtx, iaCases[iaNdx].name, iaCases[iaNdx].description));

											// Generate 10 random cases when working with only 4 sets and the number of descriptors is low. Otherwise just one case.
											// Exception: the case of no descriptors of any kind only needs one case.
											const deUint32 numSeeds = (setsCases[setsNdx].count == 4 && uboNdx < 2 && sboNdx < 2 && sampledImgNdx < 2 && storageImgNdx < 4 && iubNdx == 0 && iaNdx < 2 &&
																	(uboNdx != 0 || sboNdx != 0 || sampledImgNdx != 0 || storageImgNdx != 0 || iaNdx != 0)) ? 10 : 1;

											for (deUint32 rnd = 0; rnd < numSeeds; ++rnd)
											{
												CaseDef c =
												{
													(IndexType)indexCases[indexNdx].count,						// IndexType indexType;
													setsCases[setsNdx].count,									// deUint32 numDescriptorSets;
													uboCases[uboNdx].count,										// deUint32 maxPerStageUniformBuffers;
													8,															// deUint32 maxUniformBuffersDynamic;
													sboCases[sboNdx].count,										// deUint32 maxPerStageStorageBuffers;
													4,															// deUint32 maxStorageBuffersDynamic;
													sampledImgCases[sampledImgNdx].count,						// deUint32 maxPerStageSampledImages;
													sImgTexCases[storageImgNdx].sImgCount,						// deUint32 maxPerStageStorageImages;
													sImgTexCases[storageImgNdx].sTexCount,						// deUint32 maxPerStageStorageTexelBuffers;
													iubCases[iubNdx].iubCount,									// deUint32 maxInlineUniformBlocks;
													iubCases[iubNdx].iubSize,									// deUint32 maxInlineUniformBlockSize;
													iaCases[iaNdx].count,										// deUint32 maxPerStageInputAttachments;
													currentStage,												// Stage stage;
													(UpdateAfterBind)uabCases[uabNdx].count,					// UpdateAfterBind uab;
													seed++,														// deUint32 seed;
													shaderStages,												// VkFlags allShaderStages;
													pipelineStages,												// VkFlags allPipelineStages;
													nullptr,													// std::shared_ptr<RandomLayout> randomLayout;
												};

												string name = de::toString(rnd);
												iaGroup->addChild(new DescriptorSetRandomTestCase(testCtx, name.c_str(), "test", c));
											}
											stageGroup->addChild(iaGroup.release());
										}
										uabGroup->addChild(stageGroup.release());
									}
									iubGroup->addChild(uabGroup.release());
								}
								storageImgGroup->addChild(iubGroup.release());
							}
							sampledImgGroup->addChild(storageImgGroup.release());
						}
						sboGroup->addChild(sampledImgGroup.release());
					}
					uboGroup->addChild(sboGroup.release());
				}
				indexGroup->addChild(uboGroup.release());
			}
			setsGroup->addChild(indexGroup.release());
		}
		group->addChild(setsGroup.release());
	}
	return group.release();
}

}	// BindingModel
}	// vkt