/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/

#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <limits>

namespace vkt
{
namespace robustness
{
namespace
{
using namespace vk;
using namespace std;
using de::SharedPtr;

enum RobustnessFeatureBits
{
	RF_IMG_ROBUSTNESS	= (1 << 0),
	RF_ROBUSTNESS2		= (1 << 1),
	SIF_INT64ATOMICS	= (1 << 2),
};

using RobustnessFeatures = deUint32;

// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
class SingletonDevice
{
	SingletonDevice (Context& context)
		: m_logicalDevice ()
	{
		// Note we are already checking the needed features are available in checkSupport().
		VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
		VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
		VkPhysicalDeviceScalarBlockLayoutFeatures			scalarBlockLayoutFeatures		= initVulkanStructure();
		VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT	shaderImageAtomicInt64Features	= initVulkanStructure();
		VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();

		features2.pNext = &scalarBlockLayoutFeatures;

		if (FEATURES & RF_IMG_ROBUSTNESS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
			imageRobustnessFeatures.pNext = features2.pNext;
			features2.pNext = &imageRobustnessFeatures;
		}

		if (FEATURES & RF_ROBUSTNESS2)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
			robustness2Features.pNext = features2.pNext;
			features2.pNext = &robustness2Features;
		}

		if (FEATURES & SIF_INT64ATOMICS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
			shaderImageAtomicInt64Features.pNext = features2.pNext;
			features2.pNext = &shaderImageAtomicInt64Features;
		}

		context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
		m_logicalDevice = createRobustBufferAccessDevice(context, &features2);
	}

public:
	static VkDevice getDevice(Context& context)
	{
		if (!m_singletonDevice)
			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
		DE_ASSERT(m_singletonDevice);
		return m_singletonDevice->m_logicalDevice.get();
	}

	static void destroy()
	{
		m_singletonDevice.clear();
	}

private:
	Move<vk::VkDevice>							m_logicalDevice;
	static SharedPtr<SingletonDevice<FEATURES>>	m_singletonDevice;
};

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;

constexpr RobustnessFeatures kImageRobustness			= RF_IMG_ROBUSTNESS;
constexpr RobustnessFeatures kRobustness2				= RF_ROBUSTNESS2;
constexpr RobustnessFeatures kShaderImageInt64Atomics	= SIF_INT64ATOMICS;

using ImageRobustnessSingleton	= SingletonDevice<kImageRobustness>;
using Robustness2Singleton		= SingletonDevice<kRobustness2>;

using ImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
using Robustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;
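
// For illustration, a minimal usage sketch (hypothetical call site): each
// instantiation lazily creates one shared device with the requested feature
// set on first use, e.g.
//   VkDevice device = Robustness2Int64AtomicsSingleton::getDevice(context);
// Subsequent calls with the same FEATURES value return the cached device.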

// Render target / compute grid dimensions
static const deUint32 DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999

typedef enum
{
	STAGE_COMPUTE = 0,
	STAGE_VERTEX,
	STAGE_FRAGMENT,
	STAGE_RAYGEN
} Stage;

struct CaseDef
{
	VkFormat format;
	Stage stage;
	VkFlags allShaderStages;
	VkFlags allPipelineStages;
	int/*VkDescriptorType*/ descriptorType;
	VkImageViewType viewType;
	VkSampleCountFlagBits samples;
	int bufferLen;
	bool unroll;
	bool vol;
	bool nullDescriptor;
	bool useTemplate;
	bool formatQualifier;
	bool pushDescriptor;
	bool testRobustness2;
	deUint32 imageDim[3]; // width, height, depth or layers
	bool readOnly;
};

static bool formatIsR64(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R64_UINT:
		return true;
	default:
		return false;
	}
}

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
{
	if (formatIsR64(caseDef.format))
	{
		if (caseDef.testRobustness2)
			return Robustness2Int64AtomicsSingleton::getDevice(ctx);
		return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
	}

	if (caseDef.testRobustness2)
		return Robustness2Singleton::getDevice(ctx);
	return ImageRobustnessSingleton::getDevice(ctx);
}

class Layout
{
public:
	vector<VkDescriptorSetLayoutBinding> layoutBindings;
	vector<deUint8> refData;
};


class RobustnessExtsTestInstance : public TestInstance
{
public:
						RobustnessExtsTestInstance	(Context& context, const CaseDef& data);
						~RobustnessExtsTestInstance	(void);
	tcu::TestStatus		iterate						(void);
private:
	CaseDef				m_data;
};

RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
	: vkt::TestInstance		(context)
	, m_data				(data)
{
}

RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
{
}

class RobustnessExtsTestCase : public TestCase
{
public:
								RobustnessExtsTestCase	(tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
								~RobustnessExtsTestCase	(void);
	virtual	void				initPrograms			(SourceCollections& programCollection) const;
	virtual TestInstance*		createInstance			(Context& context) const;
	virtual void				checkSupport			(Context& context) const;

private:
	CaseDef						m_data;
};

RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
	: vkt::TestCase	(context, name, desc)
	, m_data		(data)
{
}

RobustnessExtsTestCase::~RobustnessExtsTestCase (void)
{
}

static bool formatIsFloat(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SFLOAT:
	case VK_FORMAT_R32G32_SFLOAT:
	case VK_FORMAT_R32G32B32A32_SFLOAT:
		return true;
	default:
		return false;
	}
}

static bool formatIsSignedInt(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SINT:
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R32G32_SINT:
	case VK_FORMAT_R32G32B32A32_SINT:
		return true;
	default:
		return false;
	}
}

static bool supportsStores(int descriptorType)
{
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		return true;
	default:
		return false;
	}
}

Move<VkPipeline> makeComputePipeline (const DeviceInterface&	vk,
									  const VkDevice			device,
									  const VkPipelineLayout	pipelineLayout,
									  const VkShaderModule		shaderModule)
{
	const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
	{
		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType						sType;
		DE_NULL,												// const void*							pNext;
		(VkPipelineShaderStageCreateFlags)0,					// VkPipelineShaderStageCreateFlags		flags;
		VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits				stage;
		shaderModule,											// VkShaderModule						module;
		"main",													// const char*							pName;
		DE_NULL,												// const VkSpecializationInfo*			pSpecializationInfo;
	};

	const VkComputePipelineCreateInfo pipelineCreateInfo =
	{
		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,		// VkStructureType					sType;
		DE_NULL,											// const void*						pNext;
		0u,													// VkPipelineCreateFlags			flags;
		pipelineShaderStageParams,							// VkPipelineShaderStageCreateInfo	stage;
		pipelineLayout,										// VkPipelineLayout					layout;
		(vk::VkPipeline)0,									// VkPipeline						basePipelineHandle;
		0,													// deInt32							basePipelineIndex;
	};

	return createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
}
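
// For illustration, a hypothetical call site: build the pipeline from an
// already-created layout and compute shader module, e.g.
//   Move<VkPipeline> pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shaderModule);
// The returned Move<> handle destroys the pipeline when it goes out of scope.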

void RobustnessExtsTestCase::checkSupport(Context& context) const
{
	const auto&	vki				= context.getInstanceInterface();
	const auto	physicalDevice	= context.getPhysicalDevice();

	// We need to query feature support using the physical device instead of the reported context features, because
	// robustness2 and image robustness are always disabled in the default device even when the implementation supports them.
	VkPhysicalDeviceRobustness2FeaturesEXT		robustness2Features		= initVulkanStructure();
	VkPhysicalDeviceImageRobustnessFeaturesEXT	imageRobustnessFeatures	= initVulkanStructure();
	VkPhysicalDeviceScalarBlockLayoutFeatures	scalarLayoutFeatures	= initVulkanStructure();
	VkPhysicalDeviceFeatures2KHR				features2				= initVulkanStructure();

	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");

	context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
	features2.pNext = &scalarLayoutFeatures;

	if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
	{
		imageRobustnessFeatures.pNext = features2.pNext;
		features2.pNext = &imageRobustnessFeatures;
	}

	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
	{
		robustness2Features.pNext = features2.pNext;
		features2.pNext = &robustness2Features;
	}

	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

	if (formatIsR64(m_data.format))
	{
		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

		VkFormatProperties formatProperties;
		vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);

		switch (m_data.descriptorType)
		{
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
			break;
		case VERTEX_ATTRIBUTE_FETCH:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
			break;
		default: DE_ASSERT(true);
		}

		if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
		{
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
		}
	}

	// Check needed properties and features
	if (!scalarLayoutFeatures.scalarBlockLayout)
		TCU_THROW(NotSupportedError, "Scalar block layout not supported");

	if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

	if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
		TCU_THROW(NotSupportedError, "Fragment shader stores not supported");

	if (m_data.stage == STAGE_RAYGEN)
		context.requireDeviceFunctionality("VK_NV_ray_tracing");

	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustBufferAccess2)
				TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
		}
		else
		{
			// This case is not tested here.
			DE_ASSERT(false);
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustImageAccess2)
				TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
		}
		else
		{
			if (!imageRobustnessFeatures.robustImageAccess)
				TCU_THROW(NotSupportedError, "robustImageAccess not supported");
		}
		break;
	}

	if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
		TCU_THROW(NotSupportedError, "nullDescriptor not supported");

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
		!features2.features.shaderStorageImageMultisample)
		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

	if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(1, 1, 0)))
		TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		!m_data.formatQualifier)
	{
		const VkFormatProperties3KHR formatProperties = context.getFormatProperties(m_data.format);
		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
			TCU_THROW(NotSupportedError, "Format does not support reading without format");
		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
			TCU_THROW(NotSupportedError, "Format does not support writing without format");
	}

	if (m_data.pushDescriptor)
		context.requireDeviceFunctionality("VK_KHR_push_descriptor");

	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
		TCU_THROW(NotSupportedError, "Cube array image view type not supported");

	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
}

void generateLayout(Layout &layout, const CaseDef &caseDef)
{
	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
	int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
	bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

	for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
	{
		VkDescriptorSetLayoutBinding &binding = bindings[b];
		binding.binding = b;
		binding.pImmutableSamplers = NULL;
		binding.stageFlags = caseDef.allShaderStages;
		binding.descriptorCount = 1;

		// Output image
		if (b == 0)
			binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
		else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
	}

	if (caseDef.nullDescriptor)
		return;

	if (caseDef.bufferLen == 0)
	{
		// Clear color values for image tests
		static deUint32 urefData[4]		= { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
		static deUint64 urefData64[4]	= { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
		static float frefData[4]		= { 123.f, 234.f, 345.f, 456.f };

		if (formatIsR64(caseDef.format))
		{
			layout.refData.resize(32);
			deUint64 *ptr = (deUint64 *)layout.refData.data();

			for (unsigned int i = 0; i < 4; ++i)
			{
				ptr[i] = urefData64[i];
			}
		}
		else
		{
			layout.refData.resize(16);
			deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
		}
	}
	else
	{
		layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7 : ~3));
		for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
		{
			if (formatIsFloat(caseDef.format))
			{
				float *f = (float *)layout.refData.data() + i;
				*f = 2.0f*(float)i + 3.0f;
			}
			else if (formatIsR64(caseDef.format))
			{
				deUint64 *u = (deUint64 *)layout.refData.data() + i;
				*u = 2*i + 3;
			}
			else
			{
				int *u = (int *)layout.refData.data() + i;
				*u = 2*i + 3;
			}
		}
	}
}
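
// For illustration, assuming a hypothetical case with caseDef.bufferLen == 12
// and format VK_FORMAT_R32_SINT: refData is resized to 12 & ~3 == 12 bytes and
// filled with 2*i + 3, i.e. the three ints { 3, 5, 7 }. The layout gets two
// bindings: binding 0 is the STORAGE_IMAGE output image, binding 1 holds the
// descriptor type under test.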

static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
{
	std::stringstream s;
	// Fetch from the descriptor.
	switch (caseDef.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		s << vecType << "(ubo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType << "(ssbo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		s << "texelFetch(texbo0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
			s << "texelFetch(texture0_1, " << coord << ")";
		else
			s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		s << "attr";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
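
// For illustration, a hypothetical call genFetch(caseDef, 2, "ivec4", "idx", "0")
// with descriptorType VK_DESCRIPTOR_TYPE_STORAGE_BUFFER yields the GLSL snippet
//   ivec4(ssbo0_1.val[idx], 0, 0)
// i.e. missing components are padded with zeros up to a 4-component vector.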

static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
{
	std::stringstream s;
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType << "(" << bufType << "(" << storeValue << ")";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << vecType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << vecType << "(" << storeValue << ")";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
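
// For illustration, a hypothetical call getStoreValue(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
// 2, "ivec4", "ivec2") yields
//   ivec4(ivec2(123), 0, 0)
// matching the value genStore() writes for the same descriptor.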

static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Store to the descriptor.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
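
// For illustration, a hypothetical call genStore(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
// "uvec4", "uint", "ivec2(c, 0)") yields the GLSL statement
//   imageStore(image0_1, ivec2(c, 0), uvec4(123))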

static string genAtomic(int descriptorType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Atomic add on the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
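
// For illustration, a hypothetical call genAtomic(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
// "int", "idx") yields the GLSL expression
//   atomicAdd(ssbo0_1.val[idx], int(10))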

static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
{
	const char* orderPart;
	const char* typePart;

	switch (format.order)
	{
		case tcu::TextureFormat::R:		orderPart = "r";	break;
		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;

		default:
			DE_FATAL("Impossible");
			orderPart = DE_NULL;
	}

	switch (format.type)
	{
		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;

		case tcu::TextureFormat::UNSIGNED_INT64:	typePart = "64ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;

		case tcu::TextureFormat::SIGNED_INT64:		typePart = "64i";		break;
		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;

		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;

		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;

		default:
			DE_FATAL("Impossible");
			typePart = DE_NULL;
	}

	return std::string() + orderPart + typePart;
}
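
// For illustration: (RGBA, UNSIGNED_INT8) maps to "rgba8ui" and
// (R, SIGNED_INT64) maps to "r64i", matching the GLSL image format qualifiers.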

string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
{
	if (numCoords == 1)
		return c;

	if (samples != VK_SAMPLE_COUNT_1_BIT)
		numCoords--;

	string coord = "ivec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";

	// Append sample coordinate
	if (samples != VK_SAMPLE_COUNT_1_BIT)
	{
		coord += ", ";
		if (dim == numCoords)
			coord += c;
		else
			coord += "0";
	}
	return coord;
}
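
// For illustration: genCoord("c", 3, VK_SAMPLE_COUNT_1_BIT, 1) yields
// "ivec3(0, c, 0)", while genCoord("c", 3, VK_SAMPLE_COUNT_4_BIT, 2) yields
// "ivec2(0, 0), c" (the last coordinate becomes the sample index argument).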

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
{
	if (numCoords == 1)
		return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

	string coord = "vec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0.25";
		if (i < numNormalizedCoords)
			coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";
	return coord;
}
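
// For illustration, assuming caseDef.imageDim[0] == 8: genCoordNorm(caseDef,
// "(c+0.25)", 2, 2, 0) yields "vec2((c+0.25) / float(8), 0.25 / float(8))".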

void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
{
	VkFormat format = m_data.format;

	Layout layout;
	generateLayout(layout, m_data);

	if (layout.layoutBindings.size() > 1 &&
		layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
	{
		if (format == VK_FORMAT_R64_SINT)
			format = VK_FORMAT_R32G32_SINT;

		if (format == VK_FORMAT_R64_UINT)
			format = VK_FORMAT_R32G32_UINT;
	}

	std::stringstream decls, checks;

	const string	r64			= formatIsR64(format) ? "64" : "";
	const string	i64Type		= formatIsR64(format) ? "64_t" : "";
	const string	vecType		= formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
	const string	qLevelType	= vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);

	decls << "uvec4 abs(uvec4 x) { return x; }\n";
	if (formatIsR64(format))
		decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
	decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";

	const int	componentsSize		= (formatIsR64(format) ? 8 : 4);
	int			refDataNumElements	= deIntRoundToPow2(((int)layout.refData.size() / componentsSize), 4);
	// Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
	// robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
	{
		refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
	}
	if (m_data.nullDescriptor)
		refDataNumElements = 4;
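
	// For illustration, assuming a hypothetical 16-byte refData with an R32
	// format: 16 / 4 = 4 elements, already a multiple of 4; for a uniform
	// buffer this is then padded up to 256 / 4 = 64 elements, so that reads
	// within the robustUniformBufferAccessSizeAlignment range compare against
	// zeros in the reference array.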

	if (formatIsFloat(format))
	{
		decls << "float refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const float *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}
	else if (formatIsR64(format))
	{
		decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 8; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const deUint64 *)layout.refData.data())[i] << "l";
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0l";
			i++;
		}
	}
	else
	{
		decls << "int" << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const int *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}

	decls << "};\n";
	decls << vecType << " zzzz = " << vecType << "(0);\n";
	decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
	decls << vecType << " expectedIB;\n";

	string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
	string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
	string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));

	string imageDim = "";
	int numCoords, numNormalizedCoords;
	bool layered = false;
	switch (m_data.viewType)
	{
		default: DE_ASSERT(0); // Fallthrough
		case VK_IMAGE_VIEW_TYPE_1D:			imageDim = "1D";		numCoords = 1;	numNormalizedCoords = 1;	break;
		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	imageDim = "1DArray";	numCoords = 2;	numNormalizedCoords = 1;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2D";		numCoords = 2;	numNormalizedCoords = 2;	break;
		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DArray";	numCoords = 3;	numNormalizedCoords = 2;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_3D:			imageDim = "3D";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE:		imageDim = "Cube";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	imageDim = "CubeArray";	numCoords = 4;	numNormalizedCoords = 3;	layered = true;	break;
	}
	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
	{
		switch (m_data.viewType)
		{
			default: DE_ASSERT(0); // Fallthrough
			case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2DMS";		break;
			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DMSArray";	break;
		}
		numCoords++;
	}
	bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;

	// Special case imageLoad(imageCubeArray, ...) which uses ivec3
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
		m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
	{
		numCoords = 3;
	}

	int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
	string bufType;
	if (numComponents == 1)
		bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
	else
		bufType = imgprefix + "vec" + std::to_string(numComponents);

	// For UBOs, which have a declared size in the shader, don't access outside that size.
	bool declaredSize = false;
	switch (m_data.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		declaredSize = true;
		break;
	default:
		break;
	}

	checks << "  int inboundcoords, clampedLayer;\n";
	checks << "  " << vecType << " expectedIB2;\n";
	if (m_data.unroll)
	{
		if (declaredSize)
			checks << "  [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
		else
			checks << "  [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
	}
	else
	{
		if (declaredSize)
			checks << "  [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
		else
			checks << "  [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
	}

	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		checks << "    int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
	else
		checks << "    int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";

	decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";

	const char *vol = m_data.vol ? "volatile " : "";
	const char *ro = m_data.readOnly ? "readonly " : "";

	// Construct the declaration for the binding
	switch (m_data.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		switch (format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		switch (format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1;\n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1;\n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
			break;
		}
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		if (formatIsR64(format))
		{
			decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
		}
		else
		{
			decls << "layout(location = 0) in " << vecType << " attr;\n";
		}
		break;
	default: DE_ASSERT(0);
	}

	string expectedOOB;
	string defaultw;

	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		expectedOOB = "zzzz";
		defaultw = "0";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (numComponents == 1 || numComponents == 2)
		{
			expectedOOB = "zzzo";
		}
		else
		{
			expectedOOB = "zzzz";
		}
		defaultw = "1";
		break;
	}
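
	// For illustration: for a one- or two-component texel-buffer or image
	// format the test accepts (0,0,0,1) ("zzzo") as the out-of-bounds value,
	// since format conversion fills the missing alpha component with 1, while
	// four-component formats are expected to read back (0,0,0,0) ("zzzz").
	// defaultw tracks the w value expected for in-bounds reads of formats
	// with fewer than three components.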

	string idx;
	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		idx = "idx";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		idx = "0";
		break;
	}

	if (m_data.nullDescriptor)
	{
		checks << "    expectedIB = zzzz;\n";
		checks << "    inboundcoords = 0;\n";
		checks << "    int paddedinboundcoords = 0;\n";
		// Vertex attribute fetch still gets format conversion applied
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			expectedOOB = "zzzz";
	}
	else
	{
		checks << "    expectedIB.x = refData[" << idx << "];\n";
		if (numComponents > 1)
		{
			checks << "    expectedIB.y = refData[" << idx << "+1];\n";
		}
		else
		{
			checks << "    expectedIB.y = 0;\n";
		}
		if (numComponents > 2)
		{
			checks << "    expectedIB.z = refData[" << idx << "+2];\n";
			checks << "    expectedIB.w = refData[" << idx << "+3];\n";
		}
		else
		{
			checks << "    expectedIB.z = 0;\n";
			checks << "    expectedIB.w = " << defaultw << ";\n";
		}

		switch (m_data.descriptorType)
		{
		default: DE_ASSERT(0); // Fallthrough
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			// UBOs can either strictly bounds check against inboundcoords, or can
			// return the contents from memory for the range padded up to paddedinboundcoords.
			checks << "    int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
			// fallthrough
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VERTEX_ATTRIBUTE_FETCH:
			checks << "    inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			// set per-component below
			break;
		}
	}

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
		 !m_data.readOnly)
	{
		for (int i = 0; i < numCoords; ++i)
		{
			// Treat i==3 coord (cube array layer) like i == 2
			deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
			if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
				checks << "    inboundcoords = " << coordDim << ";\n";

			string coord = genCoord("c", numCoords, m_data.samples, i);
			string inboundcoords =
				m_data.nullDescriptor ? "0" :
				(m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";

			checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
			if (m_data.formatQualifier &&
				(format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
			{
				checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
			}
		}
	}

	for (int i = 0; i < numCoords; ++i)
	{
		// Treat i==3 coord (cube array layer) like i == 2
		deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
		if (!m_data.nullDescriptor)
		{
			switch (m_data.descriptorType)
			{
			default:
				break;
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				checks << "    inboundcoords = " << coordDim << ";\n";
				break;
			}
		}

		string coord = genCoord("c", numCoords, m_data.samples, i);

		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		{
			if (formatIsR64(format))
			{
				checks << "    temp.x = attr;\n";
				checks << "    temp.y = 0l;\n";
				checks << "    temp.z = 0l;\n";
				checks << "    temp.w = 0l;\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
			}
			else
			{
				checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
			}
			// Accumulate any incorrect values.
			checks << "    accum += abs(temp);\n";
		}
		// Skip texelFetch testing for cube(array) - texelFetch doesn't support it
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
			!(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
			  (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
		{
			checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";

			checks << "    expectedIB2 = expectedIB;\n";

			// Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
			if (dataDependsOnLayer && i == numNormalizedCoords)
				checks << "    if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";

			if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
			{
				if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
				{
					checks << "    if (temp == zzzz) temp = " << vecType << "(0);\n";
					if (m_data.formatQualifier && numComponents < 4)
						checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
					checks << "    else temp = " << vecType << "(1);\n";
				}
				else
					// multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
					checks << "    if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
			}
			else
			{
				// Storage buffers may be split into per-component loads. Generate a second
				// expected out of bounds value where some subset of the components are
				// actually in-bounds. If both loads and stores are split into per-component
				// accesses, then the result value can be a mix of storeValue and zero.
				string expectedOOB2 = expectedOOB;
				string expectedOOB3 = expectedOOB;
				if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
					 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
					 !m_data.nullDescriptor)
				{
					int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
					int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
					string sstoreValue = de::toString(storeValue);
					switch (mod)
					{
					case 0:
						break;
					case 1:
						expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
						break;
					case 2:
						expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
						break;
					case 3:
						expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
						break;
					}
				}

				// Entirely in-bounds.
				checks << "    if (c >= 0 && c < inboundcoords) {\n"
						  "       if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
						  "    }\n";

				// normal out-of-bounds value
				if (m_data.testRobustness2)
					checks << "    else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
				else
					// image_robustness relaxes alpha which is allowed to be zero or one
					checks << "    else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";

				if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
					m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
				{
					checks << "    else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
				}

				// null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
				if (m_data.nullDescriptor && m_data.formatQualifier &&
					(m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
					numComponents < 4)
					checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";

				// non-volatile value replaced with stored value
				if (supportsStores(m_data.descriptorType) && !m_data.vol)
					checks << "    else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";

				// value straddling the boundary, returning a partial vector
				if (expectedOOB2 != expectedOOB)
					checks << "    else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
				if (expectedOOB3 != expectedOOB)
					checks << "    else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";

				// failure
				checks << "    else temp = " << vecType << "(1);\n";
			}
			// Accumulate any incorrect values.
			checks << "    accum += abs(temp);\n";

			// Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
			if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
			{
				// Fetch from an out of bounds mip level. Expect this to always return the OOB value.
				string coord0 = genCoord("0", numCoords, m_data.samples, i);
				checks << "    if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
				checks << "    if (c != 0) temp -= " << expectedOOB << ";\n";
				checks << "    accum += abs(temp);\n";
			}
		}
		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
			m_data.samples == VK_SAMPLE_COUNT_1_BIT)
		{
			string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);

			checks << "    expectedIB2 = expectedIB;\n";

			// Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
			if (dataDependsOnLayer && i == numNormalizedCoords)
			{
				checks << "    clampedLayer = clamp(c, 0, " << coordDim - 1 << ");\n";
				checks << "    expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
			}

			stringstream normexpected;
			// Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
			if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
				m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
				(layered && i == numCoords - 1))
				normexpected << "    temp -= expectedIB2;\n";
			else
			{
				normexpected << "    if (c >= 0 && c < inboundcoords)\n";
				normexpected << "        temp -= expectedIB2;\n";
				normexpected << "    else\n";
				if (m_data.testRobustness2)
					normexpected << "        temp -= " << expectedOOB << ";\n";
				else
					// image_robustness relaxes alpha which is allowed to be zero or one
					normexpected << "        temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
			}

			checks << "    temp = texture(texture0_1, " << coordNorm << ");\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
			checks << "    temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
			checks << "    temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
		}
		if (m_data.nullDescriptor)
		{
			const char *sizeswiz;
			switch (m_data.viewType)
			{
				default: DE_ASSERT(0); // Fallthrough
				case VK_IMAGE_VIEW_TYPE_1D:			sizeswiz = ".xxxx";	break;
				case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_2D:			sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	sizeswiz = ".xyzx";	break;
				case VK_IMAGE_VIEW_TYPE_3D:			sizeswiz = ".xyzx";	break;
				case VK_IMAGE_VIEW_TYPE_CUBE:		sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	sizeswiz = ".xyzx";	break;
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			{
				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
				{
					checks << "    temp = textureSize(texture0_1, 0)" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";

					// checking textureSize with clearly out of range LOD values
					checks << "    temp = textureSize(texture0_1, " << -i << ")" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";
				}
				else
				{
					checks << "    temp = textureSize(texture0_1)" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = textureSamples(texture0_1).xxxx;\n";
					checks << "    accum += abs(temp);\n";
				}
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
			{
				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
				{
					checks << "    temp = imageSize(image0_1)" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";
				}
				else
				{
					checks << "    temp = imageSize(image0_1)" << sizeswiz << ";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = imageSamples(image0_1).xxxx;\n";
					checks << "    accum += abs(temp);\n";
				}
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
				m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
			{
				// expect zero for runtime-sized array .length()
				checks << "    temp = " << vecType << "(ssbo0_1.val.length());\n";
				checks << "    accum += abs(temp);\n";
				checks << "    temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
				checks << "    accum += abs(temp);\n";
			}
		}
	}
	checks << "  }\n";

	// outside the coordinates loop because we only need to call it once
	if (m_data.nullDescriptor &&
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
		m_data.samples == VK_SAMPLE_COUNT_1_BIT)
	{
		checks << "  temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
		checks << "  temp = " << vecType << "(temp_ql);\n";
		checks << "  accum += abs(temp);\n";
	}
1397 	const bool is64BitFormat = formatIsR64(m_data.format);
1398 	std::string SupportR64 = (is64BitFormat ?
1399 							std::string("#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1400 							"#extension GL_EXT_shader_image_int64 : require\n") :
1401 							std::string());
1402 
1403 	switch (m_data.stage)
1404 	{
1405 	default: DE_ASSERT(0); // Fallthrough
1406 	case STAGE_COMPUTE:
1407 		{
1408 			std::stringstream css;
1409 			css <<
1410 				"#version 450 core\n"
1411 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1412 				"#extension GL_EXT_scalar_block_layout : enable\n"
1413 				"#extension GL_EXT_samplerless_texture_functions : enable\n"
1414 				"#extension GL_EXT_control_flow_attributes : enable\n"
1415 				"#extension GL_EXT_shader_image_load_formatted : enable\n"
1416 				<< SupportR64
1417 				<< decls.str() <<
1418 				"layout(local_size_x = 1, local_size_y = 1) in;\n"
1419 				"void main()\n"
1420 				"{\n"
1421 				"  " << vecType << " accum = " << vecType << "(0);\n"
1422 				"  " << vecType << " temp;\n"
1423 				"  " << qLevelType << " temp_ql;\n"
1424 				<< checks.str() <<
1425 				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1426 				"  imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1427 				"}\n";
1428 
1429 			programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1430 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1431 			break;
1432 		}
1433 	case STAGE_RAYGEN:
1434 	{
1435 		std::stringstream css;
1436 		css <<
1437 			"#version 460 core\n"
1438 			"#extension GL_EXT_samplerless_texture_functions : enable\n"
1439 			"#extension GL_EXT_scalar_block_layout : enable\n"
1440 			"#extension GL_EXT_nonuniform_qualifier : enable\n"
1441 			"#extension GL_EXT_control_flow_attributes : enable\n"
1442 			"#extension GL_NV_ray_tracing : require\n"
1443 			"#extension GL_EXT_shader_image_load_formatted : enable\n"
1444 			<< SupportR64
1445 			<< decls.str() <<
1446 			"void main()\n"
1447 			"{\n"
1448 			"  " << vecType << " accum = " << vecType << "(0);\n"
1449 			"  " << vecType << " temp;\n"
1450 			"  " << qLevelType << " temp_ql;\n"
1451 			<< checks.str() <<
1452 			"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1453 			"  imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1454 			"}\n";
1455 
1456 		programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1457 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1458 		break;
1459 	}
1460 	case STAGE_VERTEX:
1461 		{
1462 			std::stringstream vss;
1463 			vss <<
1464 				"#version 450 core\n"
1465 				"#extension GL_EXT_samplerless_texture_functions : enable\n"
1466 				"#extension GL_EXT_scalar_block_layout : enable\n"
1467 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1468 				"#extension GL_EXT_control_flow_attributes : enable\n"
1469 				"#extension GL_EXT_shader_image_load_formatted : enable\n"
1470 				<< SupportR64
1471 				<< decls.str() <<
1472 				"void main()\n"
1473 				"{\n"
1474 				"  " << vecType << " accum = " << vecType << "(0);\n"
1475 				"  " << vecType << " temp;\n"
1476 				"  " << qLevelType << " temp_ql;\n"
1477 				<< checks.str() <<
1478 				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1479 				"  imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1480 				"  gl_PointSize = 1.0f;\n"
1481 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1482 				"}\n";
1483 
1484 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1485 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1486 			break;
1487 		}
1488 	case STAGE_FRAGMENT:
1489 		{
1490 			if (m_data.nullDescriptor &&
1491 				m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1492 				m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1493 			{
1494 				// Since we only want to check that textureQueryLod returns 0 when
1495 				// texture0_1 is null, there is no need to use the actual texture coordinates
1496 				// (which would require modifying the vertex shader below). Any coordinates
1497 				// are fine; gl_FragCoord was chosen arbitrarily rather than, say, 0.
1498 				std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1499 				checks << "  vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1500 				checks << "  temp_ql = " << qLevelType <<
1501 					"(ceil(abs(lod.x) + abs(lod.y)));\n";
1502 				checks << "  temp = " << vecType << "(temp_ql);\n";
1503 				checks << "  accum += abs(temp);\n";
1504 			}
1505 
1506 			std::stringstream vss;
1507 			vss <<
1508 				"#version 450 core\n"
1509 				"void main()\n"
1510 				"{\n"
1511 				// full-viewport quad
1512 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1513 				"}\n";
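			// For gl_VertexIndex 0..3 this yields clip-space positions (-1,-1), (-1,3),
			// (3,-1) and (3,3); drawn as a triangle strip, the quad covers the whole viewport.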
1514 
1515 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1516 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1517 
1518 			std::stringstream fss;
1519 			fss <<
1520 				"#version 450 core\n"
1521 				"#extension GL_EXT_samplerless_texture_functions : enable\n"
1522 				"#extension GL_EXT_scalar_block_layout : enable\n"
1523 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1524 				"#extension GL_EXT_control_flow_attributes : enable\n"
1525 				"#extension GL_EXT_shader_image_load_formatted : enable\n"
1526 				<< SupportR64
1527 				<< decls.str() <<
1528 				"void main()\n"
1529 				"{\n"
1530 				"  " << vecType << " accum = " << vecType << "(0);\n"
1531 				"  " << vecType << " temp;\n"
1532 				"  " << qLevelType << " temp_ql;\n"
1533 				<< checks.str() <<
1534 				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1535 				"  imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1536 				"}\n";
1537 
1538 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1539 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1540 			break;
1541 		}
1542 	}
1543 
1544 	// Note: the is64BitFormat checks inside this block are redundant, since the condition below already requires it. Could this fill shader also support non-64-bit formats?
1545 	if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1546 	{
1547 		const std::string	ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1548 		std::stringstream	fillShader;
1549 
1550 		fillShader <<
1551 			"#version 450\n"
1552 			<< SupportR64
1553 			<< "\n"
1554 			"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1555 			"layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1556 			<< string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << " u_resultImage;\n"
1557 			"\n"
1558 			"layout(std430, binding = 1) buffer inputBuffer\n"
1559 			"{\n"
1560 			"  int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1561 			"} inBuffer;\n"
1562 			"\n"
1563 			"void main(void)\n"
1564 			"{\n"
1565 			"  int gx = int(gl_GlobalInvocationID.x);\n"
1566 			"  int gy = int(gl_GlobalInvocationID.y);\n"
1567 			"  int gz = int(gl_GlobalInvocationID.z);\n"
1568 			"  uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1569 
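		// Emit one imageStore per sample, writing the same 64-bit value to every sample of the texel.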
1570 		for (int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1571 		{
1572 			fillShader << "  imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1573 		}
1574 
1575 		fillShader << "}\n";
1576 
1577 		programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1578 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1579 	}
1580 
1581 }
1582 
1583 VkImageType imageViewTypeToImageType (VkImageViewType type)
1584 {
1585 	switch (type)
1586 	{
1587 		case VK_IMAGE_VIEW_TYPE_1D:
1588 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:		return VK_IMAGE_TYPE_1D;
1589 		case VK_IMAGE_VIEW_TYPE_2D:
1590 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1591 		case VK_IMAGE_VIEW_TYPE_CUBE:
1592 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:		return VK_IMAGE_TYPE_2D;
1593 		case VK_IMAGE_VIEW_TYPE_3D:				return VK_IMAGE_TYPE_3D;
1594 		default:
1595 			DE_ASSERT(false);
1596 	}
1597 
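	// Unreachable fallback for the assert above; keeps compilers happy about a missing return.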
1598 	return VK_IMAGE_TYPE_2D;
1599 }
1600 
1601 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1602 {
1603 	return new RobustnessExtsTestInstance(context, m_data);
1604 }
1605 
1606 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1607 {
1608 	const InstanceInterface&	vki					= m_context.getInstanceInterface();
1609 	const VkDevice				device				= getLogicalDevice(m_context, m_data);
1610 	const DeviceDriver			vk					(m_context.getPlatformInterface(), m_context.getInstance(), device);
1611 	const VkPhysicalDevice		physicalDevice		= m_context.getPhysicalDevice();
1612 	SimpleAllocator				allocator			(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1613 
1614 	Layout layout;
1615 	generateLayout(layout, m_data);
1616 
1617 	// Get needed properties.
1618 	VkPhysicalDeviceProperties2 properties;
1619 	deMemset(&properties, 0, sizeof(properties));
1620 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1621 	void** pNextTail = &properties.pNext;
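	// Chain the optional property structs onto properties.pNext, with pNextTail always
	// pointing at the terminating pNext pointer of the chain.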
1622 
1623 	VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1624 	deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1625 	rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1626 
1627 	VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1628 	deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1629 	robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1630 
1631 	if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1632 	{
1633 		*pNextTail = &rayTracingProperties;
1634 		pNextTail = &rayTracingProperties.pNext;
1635 	}
1636 
1637 	if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1638 	{
1639 		*pNextTail = &robustness2Properties;
1640 		pNextTail = &robustness2Properties.pNext;
1641 	}
1642 
1643 	vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1644 
1645 	if (m_data.testRobustness2)
1646 	{
1647 		if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1648 			robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1649 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1650 
1651 		if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1652 			robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1653 			!deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1654 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1655 	}
1656 
1657 	VkPipelineBindPoint bindPoint;
1658 
1659 	switch (m_data.stage)
1660 	{
1661 	case STAGE_COMPUTE:
1662 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1663 		break;
1664 	case STAGE_RAYGEN:
1665 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1666 		break;
1667 	default:
1668 		bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1669 		break;
1670 	}
1671 
1672 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
1673 	Move<vk::VkDescriptorPool>		descriptorPool;
1674 	Move<vk::VkDescriptorSet>		descriptorSet;
1675 
1676 	int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1677 	int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1678 
1679 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1680 
1681 	VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1682 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1683 
1684 	// Create a layout and allocate a descriptor set for it.
1685 
1686 	const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1687 	{
1688 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1689 		DE_NULL,
1690 
1691 		layoutCreateFlags,
1692 		(deUint32)bindings.size(),
1693 		bindings.empty() ? DE_NULL : bindings.data()
1694 	};
1695 
1696 	descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1697 
1698 	vk::DescriptorPoolBuilder poolBuilder;
1699 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1700 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1701 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1702 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1703 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1704 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1705 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1706 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1707 
1708 	descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1709 
1710 	const void *pNext = DE_NULL;
1711 
1712 	if (!m_data.pushDescriptor)
1713 		descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1714 
1715 	de::MovePtr<BufferWithMemory> buffer;
1716 
1717 	deUint8 *bufferPtr = DE_NULL;
1718 	if (!m_data.nullDescriptor)
1719 	{
1720 		// Create a buffer to hold data for all descriptors.
1721 		VkDeviceSize	size = de::max(
1722 			(VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1723 			(VkDeviceSize)256);
1724 
1725 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1726 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1727 		{
1728 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1729 		}
1730 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1731 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1732 		{
1733 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1734 		}
1735 		else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1736 		{
1737 			size = m_data.bufferLen;
1738 		}
1739 
1740 		buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1741 			vk, device, allocator, makeBufferCreateInfo(size,
1742 														VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
1743 														VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1744 														VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
1745 														VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
1746 														VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
1747 														MemoryRequirement::HostVisible));
1748 		bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1749 
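		// Fill the whole buffer with a nonzero pattern, then zero the in-bounds region
		// (rounded up to the implementation's access-size alignment below) so that
		// out-of-bounds reads are distinguishable from in-bounds data.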
1750 		deMemset(bufferPtr, 0x3f, (size_t)size);
1751 
1752 		deMemset(bufferPtr, 0, m_data.bufferLen);
1753 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1754 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1755 		{
1756 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1757 		}
1758 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1759 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1760 		{
1761 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1762 		}
1763 	}
1764 
1765 	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1766 
1767 	Move<VkDescriptorSetLayout>		descriptorSetLayoutR64;
1768 	Move<VkDescriptorPool>			descriptorPoolR64;
1769 	Move<VkDescriptorSet>			descriptorSetFillImage;
1770 	Move<VkShaderModule>			shaderModuleFillImage;
1771 	Move<VkPipelineLayout>			pipelineLayoutFillImage;
1772 	Move<VkPipeline>				pipelineFillImage;
1773 
1774 	Move<VkCommandPool>				cmdPool		= createCommandPool(vk, device, 0, queueFamilyIndex);
1775 	Move<VkCommandBuffer>			cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1776 	VkQueue							queue;
1777 
1778 	vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1779 
1780 	const VkImageSubresourceRange	barrierRange				=
1781 	{
1782 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1783 		0u,							// deUint32				baseMipLevel;
1784 		VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
1785 		0u,							// deUint32				baseArrayLayer;
1786 		VK_REMAINING_ARRAY_LAYERS	// deUint32				layerCount;
1787 	};
1788 
1789 	VkImageMemoryBarrier			preImageBarrier				=
1790 	{
1791 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
1792 		DE_NULL,											// const void*			pNext
1793 		0u,													// VkAccessFlags		srcAccessMask
1794 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
1795 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
1796 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
1797 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
1798 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
1799 		DE_NULL,											// VkImage				image
1800 		barrierRange,										// VkImageSubresourceRange	subresourceRange;
1801 	};
1802 
1803 	VkImageMemoryBarrier			postImageBarrier			=
1804 	{
1805 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1806 		DE_NULL,									// const void*				pNext;
1807 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
1808 		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
1809 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
1810 		VK_IMAGE_LAYOUT_GENERAL,					// VkImageLayout			newLayout;
1811 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
1812 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
1813 		DE_NULL,									// VkImage					image;
1814 		barrierRange,								// VkImageSubresourceRange	subresourceRange;
1815 	};
1816 
1817 	vk::VkClearColorValue			clearValue;
1818 	clearValue.uint32[0] = 0u;
1819 	clearValue.uint32[1] = 0u;
1820 	clearValue.uint32[2] = 0u;
1821 	clearValue.uint32[3] = 0u;
1822 
1823 	beginCommandBuffer(vk, *cmdBuffer, 0u);
1824 
1825 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
1826 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
1827 	typedef de::SharedPtr<ImageWithMemory>		ImageWithMemorySp;
1828 	typedef de::SharedPtr<Unique<VkImageView> >	VkImageViewSp;
1829 	typedef de::MovePtr<BufferWithMemory>		BufferWithMemoryMp;
1830 
1831 	vector<BufferViewHandleSp>					bufferViews(1);
1832 
1833 	VkImageCreateFlags imageCreateFlags = 0;
1834 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1835 		imageCreateFlags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
1836 
1837 	const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(m_context.getInstanceInterface(),
1838 										m_context.getPhysicalDevice(),
1839 										m_data.format).optimalTilingFeatures &
1840 										VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
1841 
1842 	const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
1843 
1844 	const VkImageCreateInfo			outputImageCreateInfo			=
1845 	{
1846 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
1847 		DE_NULL,								// const void*				pNext;
1848 		(VkImageCreateFlags)0u,					// VkImageCreateFlags		flags;
1849 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
1850 		m_data.format,							// VkFormat					format;
1851 		{
1852 			DIM,								// deUint32	width;
1853 			DIM,								// deUint32	height;
1854 			1u									// deUint32	depth;
1855 		},										// VkExtent3D				extent;
1856 		1u,										// deUint32					mipLevels;
1857 		1u,										// deUint32					arrayLayers;
1858 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
1859 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
1860 		VK_IMAGE_USAGE_STORAGE_BIT
1861 		| usageSampledImage
1862 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1863 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
1864 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
1865 		0u,										// deUint32					queueFamilyIndexCount;
1866 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
1867 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
1868 	};
1869 
1870 	deUint32 width = m_data.imageDim[0];
1871 	deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
1872 	deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1873 	deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
1874 						m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
1875 						m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
1876 						m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1877 
1878 	const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
1879 
1880 	const VkImageCreateInfo			imageCreateInfo			=
1881 	{
1882 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
1883 		DE_NULL,								// const void*				pNext;
1884 		imageCreateFlags,						// VkImageCreateFlags		flags;
1885 		imageViewTypeToImageType(m_data.viewType),	// VkImageType				imageType;
1886 		m_data.format,							// VkFormat					format;
1887 		{
1888 			width,								// deUint32	width;
1889 			height,								// deUint32	height;
1890 			depth								// deUint32	depth;
1891 		},										// VkExtent3D				extent;
1892 		1u,										// deUint32					mipLevels;
1893 		layers,									// deUint32					arrayLayers;
1894 		m_data.samples,							// VkSampleCountFlagBits	samples;
1895 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
1896 		usageImage
1897 		| usageSampledImage
1898 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1899 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
1900 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
1901 		0u,										// deUint32					queueFamilyIndexCount;
1902 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
1903 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
1904 	};
1905 
1906 	VkImageViewCreateInfo		imageViewCreateInfo		=
1907 	{
1908 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
1909 		DE_NULL,									// const void*				pNext;
1910 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
1911 		DE_NULL,									// VkImage					image;
1912 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
1913 		m_data.format,								// VkFormat					format;
1914 		{
1915 			VK_COMPONENT_SWIZZLE_IDENTITY,
1916 			VK_COMPONENT_SWIZZLE_IDENTITY,
1917 			VK_COMPONENT_SWIZZLE_IDENTITY,
1918 			VK_COMPONENT_SWIZZLE_IDENTITY
1919 		},											// VkComponentMapping		 components;
1920 		{
1921 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
1922 			0u,										// deUint32				baseMipLevel;
1923 			VK_REMAINING_MIP_LEVELS,				// deUint32				levelCount;
1924 			0u,										// deUint32				baseArrayLayer;
1925 			VK_REMAINING_ARRAY_LAYERS				// deUint32				layerCount;
1926 		}											// VkImageSubresourceRange	subresourceRange;
1927 	};
1928 
1929 	vector<ImageWithMemorySp> images(2);
1930 	vector<VkImageViewSp> imageViews(2);
1931 
1932 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1933 	{
1934 		deUint32 *ptr = (deUint32 *)bufferPtr;
1935 		deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1936 	}
1937 
1938 	BufferWithMemoryMp				bufferImageR64;
1939 	BufferWithMemoryMp				bufferOutputImageR64;
1940 	const VkDeviceSize				sizeOutputR64	= 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
1941 	const VkDeviceSize				sizeOneLayers	= 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
1942 	const VkDeviceSize				sizeImageR64	= sizeOneLayers * layers;
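	// R64 images are initialized through staging buffers at 8 bytes per texel; all sizes are in bytes.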
1943 
1944 	if (formatIsR64(m_data.format))
1945 	{
1946 		bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1947 			vk, device, allocator,
1948 			makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1949 			MemoryRequirement::HostVisible));
1950 
1951 		deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
1952 
1953 		for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
1954 		{
1955 			bufferUint64Ptr[ndx] = 0;
1956 		}
1957 		flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
1958 
1959 		bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1960 			vk, device, allocator,
1961 			makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
1962 			MemoryRequirement::HostVisible));
1963 
1964 		for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
1965 		{
1966 			bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
1967 			bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
1968 
1969 			for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
1970 			{
1971 				bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
1972 			}
1973 		}
1974 		flushAlloc(vk, device, bufferImageR64->getAllocation());
1975 	}
1976 
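	// Create and initialize the resources backing each binding: buffers receive the
	// reference data, and images are created with a matching view, then cleared or filled below.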
1977 	for (size_t b = 0; b < bindings.size(); ++b)
1978 	{
1979 		VkDescriptorSetLayoutBinding &binding = bindings[b];
1980 
1981 		if (binding.descriptorCount == 0)
1982 			continue;
1983 		if (b == 1 && m_data.nullDescriptor)
1984 			continue;
1985 
1986 		DE_ASSERT(binding.descriptorCount == 1);
1987 		switch (binding.descriptorType)
1988 		{
1989 		default: DE_ASSERT(0); // Fallthrough
1990 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1991 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1992 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1993 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1994 			{
1995 				deUint32 *ptr = (deUint32 *)bufferPtr;
1996 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1997 			}
1998 			break;
1999 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2000 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2001 			{
2002 				deUint32 *ptr = (deUint32 *)bufferPtr;
2003 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2004 
2005 				const vk::VkBufferViewCreateInfo viewCreateInfo =
2006 				{
2007 					vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2008 					DE_NULL,
2009 					(vk::VkBufferViewCreateFlags)0,
2010 					**buffer,								// buffer
2011 					m_data.format,							// format
2012 					(vk::VkDeviceSize)0,					// offset
2013 					(vk::VkDeviceSize)m_data.bufferLen		// range
2014 				};
2015 				vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2016 				bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2017 			}
2018 			break;
2019 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2020 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2021 			{
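				// When the tested binding is a combined image sampler, view R64 formats
				// through the equivalent two-component 32-bit view format instead.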
2022 				if (bindings.size() > 1 &&
2023 					bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2024 				{
2025 					if (m_data.format == VK_FORMAT_R64_SINT)
2026 						imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2027 
2028 					if (m_data.format == VK_FORMAT_R64_UINT)
2029 						imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2030 				}
2031 
2032 				if (b == 0)
2033 				{
2034 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2035 					imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2036 				}
2037 				else
2038 				{
2039 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2040 					imageViewCreateInfo.viewType = m_data.viewType;
2041 				}
2042 				imageViewCreateInfo.image = **images[b];
2043 				imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2044 
2045 				VkImage						img			= **images[b];
2046 				const VkBuffer&				bufferR64	= ((b == 0) ? *(*bufferOutputImageR64) : *(*bufferImageR64));
2047 				const VkImageCreateInfo&	imageInfo	= ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2048 				const deUint32				clearLayers	= b == 0 ? 1 : layers;
2049 
2050 				if (!formatIsR64(m_data.format))
2051 				{
2052 					preImageBarrier.image	= img;
2053 					if (b == 1)
2054 					{
2055 						if (formatIsFloat(m_data.format))
2056 						{
2057 							deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2058 						}
2059 						else if (formatIsSignedInt(m_data.format))
2060 						{
2061 							deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2062 						}
2063 						else
2064 						{
2065 							deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2066 						}
2067 					}
2068 					postImageBarrier.image	= img;
2069 
2070 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2071 
2072 					for (unsigned int i = 0; i < clearLayers; ++i)
2073 					{
2074 						const VkImageSubresourceRange	clearRange				=
2075 						{
2076 							VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2077 							0u,							// deUint32				baseMipLevel;
2078 							VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2079 							i,							// deUint32				baseArrayLayer;
2080 							1							// deUint32				layerCount;
2081 						};
2082 
2083 						vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2084 
2085 						// Use same data for all faces for cube(array), otherwise make value a function of the layer
2086 						if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2087 						{
2088 							if (formatIsFloat(m_data.format))
2089 								clearValue.float32[0] += 1;
2090 							else if (formatIsSignedInt(m_data.format))
2091 								clearValue.int32[0] += 1;
2092 							else
2093 								clearValue.uint32[0] += 1;
2094 						}
2095 					}
2096 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2097 				}
2098 				else
2099 				{
2100 					if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2101 					{
2102 						const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2103 						const VkImageMemoryBarrier		imageBarrierPre		= makeImageMemoryBarrier(0,
2104 																				VK_ACCESS_SHADER_WRITE_BIT,
2105 																				VK_IMAGE_LAYOUT_UNDEFINED,
2106 																				VK_IMAGE_LAYOUT_GENERAL,
2107 																				img,
2108 																				subresourceRange);
2109 						const VkImageMemoryBarrier		imageBarrierPost	= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2110 																				VK_ACCESS_SHADER_READ_BIT,
2111 																				VK_IMAGE_LAYOUT_GENERAL,
2112 																				VK_IMAGE_LAYOUT_GENERAL,
2113 																				img,
2114 																				subresourceRange);
2115 
2116 						descriptorSetLayoutR64 =
2117 							DescriptorSetLayoutBuilder()
2118 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2119 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2120 							.build(vk, device);
2121 
2122 						descriptorPoolR64 =
2123 							DescriptorPoolBuilder()
2124 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2125 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,1)
2126 							.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2127 
2128 						descriptorSetFillImage = makeDescriptorSet(vk,
2129 							device,
2130 							*descriptorPoolR64,
2131 							*descriptorSetLayoutR64);
2132 
2133 						shaderModuleFillImage	= createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2134 						pipelineLayoutFillImage	= makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2135 						pipelineFillImage		= makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2136 
2137 						const VkDescriptorImageInfo		descResultImageInfo		= makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2138 						const VkDescriptorBufferInfo	descResultBufferInfo	= makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2139 
2140 						DescriptorSetUpdateBuilder()
2141 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2142 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2143 							.update(vk, device);
2144 
2145 						vk.cmdPipelineBarrier(*cmdBuffer,
2146 							VK_PIPELINE_STAGE_HOST_BIT,
2147 							VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2148 							(VkDependencyFlags)0,
2149 							0, (const VkMemoryBarrier*)DE_NULL,
2150 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2151 							1, &imageBarrierPre);
2152 
2153 						vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2154 						vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2155 
2156 						vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2157 
2158 						vk.cmdPipelineBarrier(*cmdBuffer,
2159 									VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2160 									VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2161 									(VkDependencyFlags)0,
2162 									0, (const VkMemoryBarrier*)DE_NULL,
2163 									0, (const VkBufferMemoryBarrier*)DE_NULL,
2164 									1, &imageBarrierPost);
2165 					}
2166 					else
2167 					{
2168 						VkDeviceSize					size			= ((b == 0) ? sizeOutputR64 : sizeImageR64);
2169 						const vector<VkBufferImageCopy>	bufferImageCopy	(1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2170 
2171 						copyBufferToImage(vk,
2172 							*cmdBuffer,
2173 							bufferR64,
2174 							size,
2175 							bufferImageCopy,
2176 							VK_IMAGE_ASPECT_COLOR_BIT,
2177 							1,
2178 							clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2179 					}
2180 				}
2181 			}
2182 			break;
2183 		}
2184 	}
2185 
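	// Nearest filtering with clamp-to-border addressing and a transparent-black border,
	// so sampling outside the image is expected to return zeros.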
2186 	const VkSamplerCreateInfo	samplerParams	=
2187 	{
2188 		VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType			sType;
2189 		DE_NULL,									// const void*				pNext;
2190 		0,											// VkSamplerCreateFlags		flags;
2191 		VK_FILTER_NEAREST,							// VkFilter					magFilter;
2192 		VK_FILTER_NEAREST,							// VkFilter					minFilter;
2193 		VK_SAMPLER_MIPMAP_MODE_NEAREST,				// VkSamplerMipmapMode		mipmapMode;
2194 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeU;
2195 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeV;
2196 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeW;
2197 		0.0f,										// float					mipLodBias;
2198 		VK_FALSE,									// VkBool32					anisotropyEnable;
2199 		1.0f,										// float					maxAnisotropy;
2200 		VK_FALSE,									// VkBool32					compareEnable;
2201 		VK_COMPARE_OP_ALWAYS,						// VkCompareOp				compareOp;
2202 		0.0f,										// float					minLod;
2203 		0.0f,										// float					maxLod;
2204 		formatIsFloat(m_data.format) ?
2205 			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2206 			VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,	// VkBorderColor			borderColor;
2207 		VK_FALSE									// VkBool32					unnormalizedCoordinates;
2208 	};
2209 
2210 	Move<VkSampler>				sampler			(createSampler(vk, device, &samplerParams));
2211 
2212 	// Flush modified memory.
2213 	if (!m_data.nullDescriptor)
2214 		flushAlloc(vk, device, buffer->getAllocation());
2215 
2216 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2217 	{
2218 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// sType
2219 		DE_NULL,													// pNext
2220 		(VkPipelineLayoutCreateFlags)0,
2221 		1u,															// setLayoutCount
2222 		&descriptorSetLayout.get(),									// pSetLayouts
2223 		0u,															// pushConstantRangeCount
2224 		DE_NULL,													// pPushConstantRanges
2225 	};
2226 
2227 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2228 
2229 	de::MovePtr<BufferWithMemory> copyBuffer;
2230 	copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2231 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2232 
2233 	{
2234 		vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2235 		vector<VkDescriptorImageInfo> imageInfoVec(2);
2236 		vector<VkBufferView> bufferViewVec(2);
2237 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2238 		int vecIndex = 0;
2239 		int numDynamic = 0;
2240 
2241 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2242 												bufTemplateEntriesBefore,
2243 												texelBufTemplateEntriesBefore;
2244 
2245 		for (size_t b = 0; b < bindings.size(); ++b)
2246 		{
2247 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2248 			// Construct the descriptor write for this binding.
2249 			if (binding.descriptorCount > 0)
2250 			{
2251 				// Fill in the image info, buffer view, or buffer info, depending on the descriptor type.
2252 				switch (binding.descriptorType)
2253 				{
2254 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2255 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2256 					// Image descriptors: binding 0 is the output image; binding 1 is the resource under test (null if nullDescriptor is set).
2257 					if (b == 1 && m_data.nullDescriptor)
2258 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2259 					else
2260 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2261 					break;
2262 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2263 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2264 					if (b == 1 && m_data.nullDescriptor)
2265 						bufferViewVec[vecIndex] = DE_NULL;
2266 					else
2267 						bufferViewVec[vecIndex] = **bufferViews[0];
2268 					break;
2269 				default:
2270 					// Other descriptor types.
2271 					if (b == 1 && m_data.nullDescriptor)
2272 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2273 					else
2274 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2275 					break;
2276 				}
2277 
2278 				VkWriteDescriptorSet w =
2279 				{
2280 					VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,				// sType
2281 					DE_NULL,											// pNext
2282 					m_data.pushDescriptor ? DE_NULL : *descriptorSet,	// dstSet
2283 					(deUint32)b,										// binding
2284 					0,													// dstArrayElement
2285 					1u,													// descriptorCount
2286 					binding.descriptorType,								// descriptorType
2287 					&imageInfoVec[vecIndex],							// pImageInfo
2288 					&bufferInfoVec[vecIndex],							// pBufferInfo
2289 					&bufferViewVec[vecIndex],							// pTexelBufferView
2290 				};
2291 
2292 				VkDescriptorUpdateTemplateEntry templateEntry =
2293 				{
2294 					(deUint32)b,				// uint32_t				dstBinding;
2295 					0,							// uint32_t				dstArrayElement;
2296 					1u,							// uint32_t				descriptorCount;
2297 					binding.descriptorType,		// VkDescriptorType		descriptorType;
2298 					0,							// size_t				offset;
2299 					0,							// size_t				stride;
2300 				};
2301 
2302 				switch (binding.descriptorType)
2303 				{
2304 				default: DE_ASSERT(0); // Fallthrough
2305 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2306 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2307 					templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2308 					imgTemplateEntriesBefore.push_back(templateEntry);
2309 					break;
2310 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2311 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2312 					templateEntry.offset = vecIndex * sizeof(VkBufferView);
2313 					texelBufTemplateEntriesBefore.push_back(templateEntry);
2314 					break;
2315 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2316 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2317 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2318 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2319 					templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2320 					bufTemplateEntriesBefore.push_back(templateEntry);
2321 					break;
2322 				}
2323 
2324 				vecIndex++;
2325 
2326 				writesBeforeBindVec.push_back(w);
2327 
2328 				// Count the number of dynamic descriptors in this set.
2329 				if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2330 					binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2331 				{
2332 					numDynamic++;
2333 				}
2334 			}
2335 		}
2336 
2337 		// Make zeros have at least one element so &zeros[0] works
2338 		vector<deUint32> zeros(de::max(1,numDynamic));
2339 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2340 
2341 		// Select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate, depending on the test's useTemplate parameter.
2342 		if (m_data.useTemplate)
2343 		{
2344 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2345 			{
2346 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2347 				NULL,														// const void*								pNext;
2348 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2349 				0,															// uint32_t									descriptorUpdateEntryCount;
2350 				DE_NULL,													// const VkDescriptorUpdateTemplateEntry*	pDescriptorUpdateEntries;
2351 				m_data.pushDescriptor ?
2352 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2353 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,		// VkDescriptorUpdateTemplateType			templateType;
2354 				descriptorSetLayout.get(),									// VkDescriptorSetLayout					descriptorSetLayout;
2355 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2356 				*pipelineLayout,											// VkPipelineLayout							pipelineLayout;
2357 				0,															// uint32_t									set;
2358 			};
2359 
2360 			void *templateVectorData[] =
2361 			{
2362 				imageInfoVec.data(),
2363 				bufferInfoVec.data(),
2364 				bufferViewVec.data(),
2365 			};
2366 
2367 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2368 			{
2369 				&imgTemplateEntriesBefore,
2370 				&bufTemplateEntriesBefore,
2371 				&texelBufTemplateEntriesBefore,
2372 			};
2373 
2374 			if (m_data.pushDescriptor)
2375 			{
2376 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2377 				{
2378 					if (templateVectorsBefore[i]->size())
2379 					{
2380 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2381 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2382 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2383 						vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2384 					}
2385 				}
2386 			}
2387 			else
2388 			{
2389 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2390 				{
2391 					if (templateVectorsBefore[i]->size())
2392 					{
2393 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2394 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2395 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2396 						vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2397 					}
2398 				}
2399 
2400 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2401 			}
2402 		}
2403 		else
2404 		{
2405 			if (m_data.pushDescriptor)
2406 			{
2407 				if (writesBeforeBindVec.size())
2408 				{
2409 					vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2410 				}
2411 			}
2412 			else
2413 			{
2414 				if (writesBeforeBindVec.size())
2415 				{
2416 					vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2417 				}
2418 
2419 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2420 			}
2421 		}
2422 	}
2423 
2424 	Move<VkPipeline> pipeline;
2425 	Move<VkRenderPass> renderPass;
2426 	Move<VkFramebuffer> framebuffer;
2427 
2428 	de::MovePtr<BufferWithMemory> sbtBuffer;
2429 
2430 	if (m_data.stage == STAGE_COMPUTE)
2431 	{
2432 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2433 
2434 		pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2435 
2436 	}
2437 	else if (m_data.stage == STAGE_RAYGEN)
2438 	{
2439 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2440 
2441 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2442 		{
2443 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2444 			DE_NULL,
2445 			(VkPipelineShaderStageCreateFlags)0,
2446 			VK_SHADER_STAGE_RAYGEN_BIT_NV,								// stage
2447 			*shader,													// shader
2448 			"main",
2449 			DE_NULL,													// pSpecializationInfo
2450 		};
2451 
2452 		VkRayTracingShaderGroupCreateInfoNV group =
2453 		{
2454 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2455 			DE_NULL,
2456 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,			// type
2457 			0,														// generalShader
2458 			VK_SHADER_UNUSED_NV,									// closestHitShader
2459 			VK_SHADER_UNUSED_NV,									// anyHitShader
2460 			VK_SHADER_UNUSED_NV,									// intersectionShader
2461 		};
2462 
2463 		VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2464 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV,	// sType
2465 			DE_NULL,												// pNext
2466 			0,														// flags
2467 			1,														// stageCount
2468 			&shaderCreateInfo,										// pStages
2469 			1,														// groupCount
2470 			&group,													// pGroups
2471 			0,														// maxRecursionDepth
2472 			*pipelineLayout,										// layout
2473 			(vk::VkPipeline)0,										// basePipelineHandle
2474 			0u,														// basePipelineIndex
2475 		};
2476 
2477 		pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2478 
2479 		sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2480 			vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2481 
2482 		deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2483 		invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2484 
2485 		vk.getRayTracingShaderGroupHandlesNV(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
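		// The shader binding table consists of just the single raygen group handle queried above.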
2486 	}
2487 	else
2488 	{
2489 		const VkSubpassDescription		subpassDesc				=
2490 		{
2491 			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
2492 			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
2493 			0u,																		// deUint32						inputAttachmentCount
2494 			DE_NULL,																// const VkAttachmentReference*	pInputAttachments
2495 			0u,																		// deUint32						colorAttachmentCount
2496 			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
2497 			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
2498 			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
2499 			0u,																		// deUint32						preserveAttachmentCount
2500 			DE_NULL																	// const deUint32*				pPreserveAttachments
2501 		};
2502 
2503 		const VkSubpassDependency		subpassDependency		=
2504 		{
2505 			VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
2506 			0,												// deUint32				dstSubpass
2507 			VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
2508 			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,			// VkPipelineStageFlags	dstStageMask
2509 			VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
2510 			VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT,	//	dstAccessMask
2511 			VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
2512 		};
2513 
2514 		const VkRenderPassCreateInfo	renderPassParams		=
2515 		{
2516 			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureType					sType
2517 			DE_NULL,												// const void*						pNext
2518 			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
2519 			0u,														// deUint32							attachmentCount
2520 			DE_NULL,												// const VkAttachmentDescription*	pAttachments
2521 			1u,														// deUint32							subpassCount
2522 			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
2523 			1u,														// deUint32							dependencyCount
2524 			&subpassDependency										// const VkSubpassDependency*		pDependencies
2525 		};
2526 
2527 		renderPass = createRenderPass(vk, device, &renderPassParams);
2528 
2529 		const vk::VkFramebufferCreateInfo	framebufferParams	=
2530 		{
2531 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
2532 			DE_NULL,												// pNext
2533 			(vk::VkFramebufferCreateFlags)0,
2534 			*renderPass,											// renderPass
2535 			0u,														// attachmentCount
2536 			DE_NULL,												// pAttachments
2537 			DIM,													// width
2538 			DIM,													// height
2539 			1u,														// layers
2540 		};
2541 
2542 		framebuffer = createFramebuffer(vk, device, &framebufferParams);
2543 
2544 		const VkVertexInputBindingDescription			vertexInputBindingDescription		=
2545 		{
2546 			0u,								// deUint32			 binding
2547 			(deUint32)formatBytes,			// deUint32			 stride
2548 			VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate
2549 		};
2550 
2551 		const VkVertexInputAttributeDescription			vertexInputAttributeDescription		=
2552 		{
2553 			0u,								// deUint32	location
2554 			0u,								// deUint32	binding
2555 			m_data.format,					// VkFormat	format
2556 			0u								// deUint32	offset
2557 		};
2558 
2559 		deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2560 
2561 		const VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
2562 		{
2563 			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2564 			DE_NULL,													// const void*								pNext;
2565 			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
2566 			numAttribs,													// deUint32									vertexBindingDescriptionCount;
2567 			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2568 			numAttribs,													// deUint32									vertexAttributeDescriptionCount;
2569 			&vertexInputAttributeDescription							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2570 		};
2571 
2572 		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
2573 		{
2574 			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
2575 			DE_NULL,														// const void*								pNext;
2576 			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
2577 			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
2578 			VK_FALSE														// VkBool32									primitiveRestartEnable;
2579 		};
2580 
2581 		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
2582 		{
2583 			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
2584 			DE_NULL,														// const void*								pNext;
2585 			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
2586 			VK_FALSE,														// VkBool32									depthClampEnable;
2587 			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
2588 			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
2589 			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
2590 			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
2591 			VK_FALSE,														// VkBool32									depthBiasEnable;
2592 			0.0f,															// float									depthBiasConstantFactor;
2593 			0.0f,															// float									depthBiasClamp;
2594 			0.0f,															// float									depthBiasSlopeFactor;
2595 			1.0f															// float									lineWidth;
2596 		};
2597 
2598 		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
2599 		{
2600 			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
2601 			DE_NULL,													// const void*								pNext
2602 			0u,															// VkPipelineMultisampleStateCreateFlags	flags
2603 			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
2604 			VK_FALSE,													// VkBool32									sampleShadingEnable
2605 			1.0f,														// float									minSampleShading
2606 			DE_NULL,													// const VkSampleMask*						pSampleMask
2607 			VK_FALSE,													// VkBool32									alphaToCoverageEnable
2608 			VK_FALSE													// VkBool32									alphaToOneEnable
2609 		};
2610 
2611 		VkViewport viewport = makeViewport(DIM, DIM);
2612 		VkRect2D scissor = makeRect2D(DIM, DIM);
2613 
2614 		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
2615 		{
2616 			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
2617 			DE_NULL,												// const void*								pNext
2618 			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
2619 			1u,														// deUint32									viewportCount
2620 			&viewport,												// const VkViewport*						pViewports
2621 			1u,														// deUint32									scissorCount
2622 			&scissor												// const VkRect2D*							pScissors
2623 		};
2624 
2625 		Move<VkShaderModule> fs;
2626 		Move<VkShaderModule> vs;
2627 
2628 		deUint32 numStages;
2629 		if (m_data.stage == STAGE_VERTEX)
2630 		{
2631 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2632 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // placeholder; unused, since only the vertex stage is passed to the pipeline below
2633 			numStages = 1u;
2634 		}
2635 		else
2636 		{
2637 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2638 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2639 			numStages = 2u;
2640 		}
2641 
2642 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo[2] =
2643 		{
2644 			{
2645 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2646 				DE_NULL,
2647 				(VkPipelineShaderStageCreateFlags)0,
2648 				VK_SHADER_STAGE_VERTEX_BIT,									// stage
2649 				*vs,														// shader
2650 				"main",
2651 				DE_NULL,													// pSpecializationInfo
2652 			},
2653 			{
2654 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2655 				DE_NULL,
2656 				(VkPipelineShaderStageCreateFlags)0,
2657 				VK_SHADER_STAGE_FRAGMENT_BIT,								// stage
2658 				*fs,														// shader
2659 				"main",
2660 				DE_NULL,													// pSpecializationInfo
2661 			}
2662 		};
2663 
2664 		const VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
2665 		{
2666 			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
2667 			DE_NULL,											// const void*										pNext;
2668 			(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
2669 			numStages,											// deUint32											stageCount;
2670 			&shaderCreateInfo[0],								// const VkPipelineShaderStageCreateInfo*			pStages;
2671 			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
2672 			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
2673 			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
2674 			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
2675 			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
2676 			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
2677 			DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
2678 			DE_NULL,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
2679 			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
2680 			pipelineLayout.get(),								// VkPipelineLayout									layout;
2681 			renderPass.get(),									// VkRenderPass										renderPass;
2682 			0u,													// deUint32											subpass;
2683 			DE_NULL,											// VkPipeline										basePipelineHandle;
2684 			0													// int												basePipelineIndex;
2685 		};
2686 
2687 		pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
2688 	}
2689 
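	// Transition the output image to the GENERAL layout so the upcoming transfer (clear or fill copy) can write it.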
	const VkImageMemoryBarrier imageBarrier =
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
		DE_NULL,											// const void*			pNext
		0u,													// VkAccessFlags		srcAccessMask
		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
		**images[0],										// VkImage				image
		{
			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
			0u,										// uint32_t				baseMipLevel
			1u,										// uint32_t				levelCount
			0u,										// uint32_t				baseArrayLayer
			1u,										// uint32_t				layerCount
		}
	};

	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
							(VkDependencyFlags)0,
							0, (const VkMemoryBarrier*)DE_NULL,
							0, (const VkBufferMemoryBarrier*)DE_NULL,
							1, &imageBarrier);

	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);

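	// Initialize the output image. VkClearColorValue has no 64-bit channels, so R64 formats
	// are filled with a buffer-to-image copy instead of vkCmdClearColorImage.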
	if (!formatIsR64(m_data.format))
	{
		VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
		VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);

		vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
	}
	else
	{
		const vector<VkBufferImageCopy>	bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
		copyBufferToImage(vk,
			*cmdBuffer,
			*(*bufferOutputImageR64),
			sizeOutputR64,
			bufferImageCopy,
			VK_IMAGE_ASPECT_COLOR_BIT,
			1,
			1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
	}

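	// Make the transfer writes visible to shader reads and writes in all stages used by the test.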
	VkMemoryBarrier					memBarrier =
	{
		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
		DE_NULL,							// pNext
		0u,									// srcAccessMask
		0u,									// dstAccessMask
	};

	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

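	// Run the test workload: a DIM x DIM dispatch, ray trace, or draw, depending on the stage under test.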
	if (m_data.stage == STAGE_COMPUTE)
	{
		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
	}
	else if (m_data.stage == STAGE_RAYGEN)
	{
		vk.cmdTraceRaysNV(*cmdBuffer,
			**sbtBuffer, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DIM, DIM, 1);
	}
	else
	{
		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
						makeRect2D(DIM, DIM),
						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		{
			VkDeviceSize zeroOffset = 0;
			VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
			vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
		}
		if (m_data.stage == STAGE_VERTEX)
		{
			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
		}
		else
		{
			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
		}
		endRenderPass(vk, *cmdBuffer);
	}

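	// Wait for shader access to finish, then read the output image back into a host-visible buffer.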
	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
	vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

	void *ptr = copyBuffer->getAllocation().getHostPtr();

	invalidateAlloc(vk, device, copyBuffer->getAllocation());

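	// Each texel's first component is expected to be 1; anything else means the shader
	// observed incorrect out-of-bounds or null-descriptor behavior.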
	qpTestResult res = QP_TEST_RESULT_PASS;

	for (deUint32 i = 0; i < DIM*DIM; ++i)
	{
		if (formatIsFloat(m_data.format))
		{
			if (((float *)ptr)[i * numComponents] != 1.0f)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
		else if (formatIsR64(m_data.format))
		{
			if (((deUint64 *)ptr)[i * numComponents] != 1)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
		else
		{
			if (((deUint32 *)ptr)[i * numComponents] != 1)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
	}

	return tcu::TestStatus(res, qpGetTestResultName(res));
}

}	// anonymous

static void createTests (tcu::TestCaseGroup* group, bool robustness2)
{
	tcu::TestContext& testCtx = group->getTestContext();

	typedef struct
	{
		deUint32				count;
		const char*				name;
		const char*				description;
	} TestGroupCase;

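	// Each table below defines one axis of the test matrix; the nested loops at the end of
	// this function take the cross product of all axes, skipping invalid combinations.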
	TestGroupCase fmtCases[] =
	{
		{ VK_FORMAT_R32_SINT,				"r32i",		""		},
		{ VK_FORMAT_R32_UINT,				"r32ui",	""		},
		{ VK_FORMAT_R32_SFLOAT,				"r32f",		""		},
		{ VK_FORMAT_R32G32_SINT,			"rg32i",	""		},
		{ VK_FORMAT_R32G32_UINT,			"rg32ui",	""		},
		{ VK_FORMAT_R32G32_SFLOAT,			"rg32f",	""		},
		{ VK_FORMAT_R32G32B32A32_SINT,		"rgba32i",	""		},
		{ VK_FORMAT_R32G32B32A32_UINT,		"rgba32ui",	""		},
		{ VK_FORMAT_R32G32B32A32_SFLOAT,	"rgba32f",	""		},
		{ VK_FORMAT_R64_SINT,				"r64i",		""		},
		{ VK_FORMAT_R64_UINT,				"r64ui",	""		},
	};

	TestGroupCase fullDescCases[] =
	{
		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,				"uniform_buffer",			""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,				"storage_buffer",			""		},
		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,		"uniform_buffer_dynamic",	""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,		"storage_buffer_dynamic",	""		},
		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,			"uniform_texel_buffer",		""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,			"storage_texel_buffer",		""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
		{ VERTEX_ATTRIBUTE_FETCH,							"vertex_attribute_fetch",	""		},
	};

	TestGroupCase imgDescCases[] =
	{
		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
	};

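	// Buffer lengths sit on and just off 4-byte component and 16-byte vec4 boundaries, so
	// accesses can be fully in-bounds, partially out-of-bounds, or fully out-of-bounds.
	// ~0U requests a null descriptor; 0 marks the image-only case.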
	TestGroupCase fullLenCases32Bit[] =
	{
		{ ~0U,			"null_descriptor",	""		},
		{ 0,			"img",				""		},
		{ 4,			"len_4",			""		},
		{ 8,			"len_8",			""		},
		{ 12,			"len_12",			""		},
		{ 16,			"len_16",			""		},
		{ 20,			"len_20",			""		},
		{ 31,			"len_31",			""		},
		{ 32,			"len_32",			""		},
		{ 33,			"len_33",			""		},
		{ 35,			"len_35",			""		},
		{ 36,			"len_36",			""		},
		{ 39,			"len_39",			""		},
		{ 40,			"len_40",			""		},
		{ 252,			"len_252",			""		},
		{ 256,			"len_256",			""		},
		{ 260,			"len_260",			""		},
	};

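	// R64 formats use 8-byte texels, so the equivalent boundary sizes are doubled here.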
	TestGroupCase fullLenCases64Bit[] =
	{
		{ ~0U,			"null_descriptor",	""		},
		{ 0,			"img",				""		},
		{ 8,			"len_8",			""		},
		{ 16,			"len_16",			""		},
		{ 24,			"len_24",			""		},
		{ 32,			"len_32",			""		},
		{ 40,			"len_40",			""		},
		{ 62,			"len_62",			""		},
		{ 64,			"len_64",			""		},
		{ 66,			"len_66",			""		},
		{ 70,			"len_70",			""		},
		{ 72,			"len_72",			""		},
		{ 78,			"len_78",			""		},
		{ 80,			"len_80",			""		},
		{ 504,			"len_504",			""		},
		{ 512,			"len_512",			""		},
		{ 520,			"len_520",			""		},
	};

	TestGroupCase imgLenCases[] =
	{
		{ 0,	"img",	""		},
	};

	TestGroupCase viewCases[] =
	{
		{ VK_IMAGE_VIEW_TYPE_1D,			"1d",			""		},
		{ VK_IMAGE_VIEW_TYPE_2D,			"2d",			""		},
		{ VK_IMAGE_VIEW_TYPE_3D,			"3d",			""		},
		{ VK_IMAGE_VIEW_TYPE_CUBE,			"cube",			""		},
		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		"1d_array",		""		},
		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		"2d_array",		""		},
		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	"cube_array",	""		},
	};

	TestGroupCase sampCases[] =
	{
		{ VK_SAMPLE_COUNT_1_BIT,			"samples_1",	""		},
		{ VK_SAMPLE_COUNT_4_BIT,			"samples_4",	""		},
	};

	TestGroupCase stageCases[] =
	{
		{ STAGE_COMPUTE,	"comp",		"compute"	},
		{ STAGE_FRAGMENT,	"frag",		"fragment"	},
		{ STAGE_VERTEX,		"vert",		"vertex"	},
		{ STAGE_RAYGEN,		"rgen",		"raygen"	},
	};

	TestGroupCase volCases[] =
	{
		{ 0,			"nonvolatile",	""		},
		{ 1,			"volatile",		""		},
	};

	TestGroupCase unrollCases[] =
	{
		{ 0,			"dontunroll",	""		},
		{ 1,			"unroll",		""		},
	};

	TestGroupCase tempCases[] =
	{
		{ 0,			"notemplate",	""		},
		{ 1,			"template",		""		},
	};

	TestGroupCase pushCases[] =
	{
		{ 0,			"bind",			""		},
		{ 1,			"push",			""		},
	};

	TestGroupCase fmtQualCases[] =
	{
		{ 0,			"no_fmt_qual",	""		},
		{ 1,			"fmt_qual",		""		},
	};

	TestGroupCase readOnlyCases[] =
	{
		{ 0,			"readwrite",	""		},
		{ 1,			"readonly",		""		},
	};

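	// Group hierarchy: push/bind -> template -> format -> unroll -> volatile -> descriptor type
	// -> (readonly/readwrite) -> format qualifier -> length -> sample count -> view type -> stage.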
	for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
	{
		de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
		for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
		{
			de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
			for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
			{
				de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));

				int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));

				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
				{
					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
					{
						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));

						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;

						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
						{
							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));

							for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
							{
								de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));

								// readonly cases are just for storage_buffer
								if (readOnlyCases[roNdx].count != 0 &&
									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
									continue;

								for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
								{
									de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));

									// format qualifier is only used for storage image and storage texel buffers
									if (fmtQualCases[fmtQualNdx].count &&
										!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
										continue;

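									// push descriptors cannot contain dynamic buffers, and vertex
									// attribute fetch does not go through a descriptor at all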
									if (pushCases[pushNdx].count &&
										(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
										continue;

									const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
									int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
									TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;

									for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
									{
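										// nonzero lengths only make sense for buffer descriptors,
										// and the zero-length "img" case only for image descriptors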
										if (lenCases[lenNdx].count != ~0U)
										{
											bool bufferLen = lenCases[lenNdx].count != 0;
											bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
											if (bufferLen != bufferDesc)
												continue;

											// Add template test cases only for null_descriptor cases
											if (tempCases[tempNdx].count)
												continue;
										}

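										// a texel buffer view's range must be a multiple of the texel size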
										if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
											((lenCases[lenNdx].count % fmtSize) != 0) &&
											lenCases[lenNdx].count != ~0U)
										{
											continue;
										}

										// "volatile" only applies to storage images/buffers
										if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
											continue;

										de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
										for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
										{
											de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
											for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
											{
												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
												{
													// buffer descriptors don't have different dimensionalities. Only test "1D"
													continue;
												}

												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
													sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
												{
													continue;
												}

												de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
												for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
												{
													Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
													VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
													VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
													if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
													{
														allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
														allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
													}

													if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
														currentStage != STAGE_VERTEX)
														continue;

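													// default image dimensions; cube and cube-array views
													// need square faces, so height is forced to match width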
													deUint32 imageDim[3] = {5, 11, 6};
													if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
														viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
														imageDim[1] = imageDim[0];

													CaseDef c =
													{
														(VkFormat)fmtCases[fmtNdx].count,								// VkFormat format;
														currentStage,													// Stage stage;
														allShaderStages,												// VkFlags allShaderStages;
														allPipelineStages,												// VkFlags allPipelineStages;
														(int)descCases[descNdx].count,									// VkDescriptorType descriptorType;
														(VkImageViewType)viewCases[viewNdx].count,						// VkImageViewType viewType;
														(VkSampleCountFlagBits)sampCases[sampNdx].count,				// VkSampleCountFlagBits samples;
														(int)lenCases[lenNdx].count,									// int bufferLen;
														(bool)unrollCases[unrollNdx].count,								// bool unroll;
														(bool)volCases[volNdx].count,									// bool vol;
														(bool)(lenCases[lenNdx].count == ~0U),							// bool nullDescriptor
														(bool)tempCases[tempNdx].count,									// bool useTemplate
														(bool)fmtQualCases[fmtQualNdx].count,							// bool formatQualifier
														(bool)pushCases[pushNdx].count,									// bool pushDescriptor;
														(bool)robustness2,												// bool testRobustness2;
														{ imageDim[0], imageDim[1], imageDim[2] },						// deUint32 imageDim[3];
														(bool)(readOnlyCases[roNdx].count == 1),						// bool readOnly;
													};

													viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
												}
												sampGroup->addChild(viewGroup.release());
											}
											lenGroup->addChild(sampGroup.release());
										}
										fmtQualGroup->addChild(lenGroup.release());
									}
									// Put storage_buffer tests in separate readonly vs readwrite groups. Other types
									// go directly into descGroup
									if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
										descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
										rwGroup->addChild(fmtQualGroup.release());
									} else {
										descGroup->addChild(fmtQualGroup.release());
									}
								}
								if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
									descGroup->addChild(rwGroup.release());
								}
							}
							volGroup->addChild(descGroup.release());
						}
						unrollGroup->addChild(volGroup.release());
					}
					fmtGroup->addChild(unrollGroup.release());
				}
				tempGroup->addChild(fmtGroup.release());
			}
			pushGroup->addChild(tempGroup.release());
		}
		group->addChild(pushGroup.release());
	}
}

static void createRobustness2Tests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/true);
}

static void createImageRobustnessTests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/false);
}

static void cleanupGroup (tcu::TestCaseGroup* group)
{
	DE_UNREF(group);
	// Destroy singleton objects.
	Robustness2Int64AtomicsSingleton::destroy();
	ImageRobustnessInt64AtomicsSingleton::destroy();
	ImageRobustnessSingleton::destroy();
	Robustness2Singleton::destroy();
}

tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
							createRobustness2Tests, cleanupGroup);
}

tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
							createImageRobustnessTests, cleanupGroup);
}

}	// robustness
}	// vkt