• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2017-2019 The Khronos Group Inc.
6  * Copyright (c) 2018-2020 NVIDIA Corporation
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *	  http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan robustness2 tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktRobustnessExtsTests.hpp"
26 
27 #include "vkBufferWithMemory.hpp"
28 #include "vkImageWithMemory.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkQueryUtil.hpp"
31 #include "vkDeviceUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkObjUtil.hpp"
36 #include "vkBarrierUtil.hpp"
37 #include "vktRobustnessUtil.hpp"
38 
39 #include "vktTestGroupUtil.hpp"
40 #include "vktTestCase.hpp"
41 
42 #include "deDefs.h"
43 #include "deMath.h"
44 #include "deRandom.h"
45 #include "deSharedPtr.hpp"
46 #include "deString.h"
47 
48 #include "tcuVectorType.hpp"
49 #include "tcuTestCase.hpp"
50 #include "tcuTestLog.hpp"
51 
52 #include <string>
53 #include <sstream>
54 #include <algorithm>
55 #include <limits>
56 
57 namespace vkt
58 {
59 namespace robustness
60 {
61 namespace
62 {
63 using namespace vk;
64 using namespace std;
65 using de::SharedPtr;
66 
67 enum RobustnessFeatureBits
68 {
69 	RF_IMG_ROBUSTNESS		= (1		),
70 	RF_ROBUSTNESS2			= (1 << 1	),
71 	SIF_INT64ATOMICS		= (1 << 2	),
72 	RF_PIPELINE_ROBUSTNESS	= (1 << 3	),
73 	SBL_SCALAR_BLOCK_LAYOUT	= (1 << 4	),
74 };
75 
76 using RobustnessFeatures = deUint32;
77 
78 // Class to wrap a singleton device with the indicated robustness features.
79 template <RobustnessFeatures FEATURES>
80 class SingletonDevice
81 {
SingletonDevice(Context & context)82 	SingletonDevice	(Context& context)
83 		: m_context(context)
84 		, m_logicalDevice()
85 	{
86 		// Note we are already checking the needed features are available in checkSupport().
87 		VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
88 		VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
89 		VkPhysicalDeviceScalarBlockLayoutFeatures			scalarBlockLayoutFeatures		= initVulkanStructure();
90 		VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT	shaderImageAtomicInt64Features	= initVulkanStructure();
91 		VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();
92 
93 		if (FEATURES & SBL_SCALAR_BLOCK_LAYOUT)
94 		{
95 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"));
96 			scalarBlockLayoutFeatures.pNext = features2.pNext;
97 			features2.pNext = &scalarBlockLayoutFeatures;
98 		}
99 
100 		if (FEATURES & RF_IMG_ROBUSTNESS)
101 		{
102 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
103 
104 			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
105 			{
106 				imageRobustnessFeatures.pNext = features2.pNext;
107 				features2.pNext = &imageRobustnessFeatures;
108 			}
109 		}
110 
111 		if (FEATURES & RF_ROBUSTNESS2)
112 		{
113 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
114 
115 			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
116 			{
117 				robustness2Features.pNext = features2.pNext;
118 				features2.pNext = &robustness2Features;
119 			}
120 		}
121 
122 #ifndef CTS_USES_VULKANSC
123 		VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures = initVulkanStructure();
124 		if (FEATURES & RF_PIPELINE_ROBUSTNESS)
125 		{
126 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"));
127 			pipelineRobustnessFeatures.pNext = features2.pNext;
128 			features2.pNext = &pipelineRobustnessFeatures;
129 		}
130 #endif
131 
132 		if (FEATURES & SIF_INT64ATOMICS)
133 		{
134 			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
135 			shaderImageAtomicInt64Features.pNext = features2.pNext;
136 			features2.pNext = &shaderImageAtomicInt64Features;
137 		}
138 
139 		const auto&	vki				= m_context.getInstanceInterface();
140 		const auto	instance		= m_context.getInstance();
141 		const auto	physicalDevice	= chooseDevice(vki, instance, context.getTestContext().getCommandLine());
142 
143 		vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
144 		m_logicalDevice = createRobustBufferAccessDevice(context, &features2);
145 
146 #ifndef CTS_USES_VULKANSC
147 		m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance, *m_logicalDevice));
148 #else
149 		m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
150 #endif // CTS_USES_VULKANSC
151 	}
152 
153 public:
~SingletonDevice()154 	~SingletonDevice()
155 	{
156 	}
157 
getDevice(Context & context)158 	static VkDevice getDevice(Context& context)
159 	{
160 		if (!m_singletonDevice)
161 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
162 		DE_ASSERT(m_singletonDevice);
163 		return m_singletonDevice->m_logicalDevice.get();
164 	}
getDeviceInterface(Context & context)165 	static const DeviceInterface& getDeviceInterface(Context& context)
166 	{
167 		if (!m_singletonDevice)
168 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
169 		DE_ASSERT(m_singletonDevice);
170 		return *(m_singletonDevice->m_deviceDriver.get());
171 	}
172 
destroy()173 	static void destroy()
174 	{
175 		m_singletonDevice.clear();
176 	}
177 
178 private:
179 	const Context&								m_context;
180 	Move<vk::VkDevice>							m_logicalDevice;
181 #ifndef CTS_USES_VULKANSC
182 	de::MovePtr<vk::DeviceDriver>				m_deviceDriver;
183 #else
184 	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	m_deviceDriver;
185 #endif // CTS_USES_VULKANSC
186 
187 	static SharedPtr<SingletonDevice<FEATURES>>	m_singletonDevice;
188 };
189 
190 template <RobustnessFeatures FEATURES>
191 SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
192 
193 constexpr RobustnessFeatures kImageRobustness			= RF_IMG_ROBUSTNESS;
194 constexpr RobustnessFeatures kRobustness2				= RF_ROBUSTNESS2;
195 constexpr RobustnessFeatures kPipelineRobustness		= RF_PIPELINE_ROBUSTNESS;
196 constexpr RobustnessFeatures kShaderImageInt64Atomics	= SIF_INT64ATOMICS;
197 constexpr RobustnessFeatures kScalarBlockLayout			= SBL_SCALAR_BLOCK_LAYOUT;
198 
199 using ImageRobustnessSingleton	= SingletonDevice<kImageRobustness>;
200 using Robustness2Singleton		= SingletonDevice<kRobustness2>;
201 
202 using ImageRobustnessScalarSingleton	= SingletonDevice<kImageRobustness | kScalarBlockLayout>;
203 using Robustness2ScalarSingleton		= SingletonDevice<kRobustness2 | kScalarBlockLayout>;
204 
205 using PipelineRobustnessImageRobustnessSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness>;
206 using PipelineRobustnessRobustness2Singleton		= SingletonDevice<kRobustness2 | kPipelineRobustness>;
207 
208 using PipelineRobustnessImageRobustnessScalarSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kScalarBlockLayout>;
209 using PipelineRobustnessRobustness2ScalarSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kScalarBlockLayout>;
210 
211 using ImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
212 using Robustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;
213 
214 using ImageRobustnessInt64AtomicsScalarSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
215 using Robustness2Int64AtomicsScalarSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics | kScalarBlockLayout>;
216 
217 using PipelineRobustnessImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics>;
218 using PipelineRobustnessRobustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics>;
219 
220 using PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
221 using PipelineRobustnessRobustness2Int64AtomicsScalarSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
222 
223 // Render target / compute grid dimensions
224 static const deUint32 DIM = 8;
225 
226 // treated as a phony VkDescriptorType value
227 #define VERTEX_ATTRIBUTE_FETCH 999
228 
229 typedef enum
230 {
231 	STAGE_COMPUTE = 0,
232 	STAGE_VERTEX,
233 	STAGE_FRAGMENT,
234 	STAGE_RAYGEN
235 } Stage;
236 
237 struct CaseDef
238 {
239 	VkFormat format;
240 	Stage stage;
241 	VkFlags allShaderStages;
242 	VkFlags allPipelineStages;
243 	int/*VkDescriptorType*/ descriptorType;
244 	VkImageViewType viewType;
245 	VkSampleCountFlagBits samples;
246 	int bufferLen;
247 	bool unroll;
248 	bool vol;
249 	bool nullDescriptor;
250 	bool useTemplate;
251 	bool formatQualifier;
252 	bool pushDescriptor;
253 	bool testRobustness2;
254 	bool testPipelineRobustness;
255 	deUint32 imageDim[3]; // width, height, depth or layers
256 	bool readOnly;
257 
needsScalarBlockLayoutvkt::robustness::__anon79cf55c80111::CaseDef258 	bool needsScalarBlockLayout() const
259 	{
260 		bool scalarNeeded = false;
261 
262 		switch (descriptorType)
263 		{
264 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
265 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
266 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
267 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
268 			scalarNeeded = true;
269 			break;
270 		default:
271 			scalarNeeded = false;
272 			break;
273 		}
274 
275 		return scalarNeeded;
276 	}
277 };
278 
formatIsR64(const VkFormat & f)279 static bool formatIsR64(const VkFormat& f)
280 {
281 	switch (f)
282 	{
283 	case VK_FORMAT_R64_SINT:
284 	case VK_FORMAT_R64_UINT:
285 		return true;
286 	default:
287 		return false;
288 	}
289 }
290 
291 // Returns the appropriate singleton device for the given case.
getLogicalDevice(Context & ctx,const CaseDef & caseDef)292 VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
293 {
294 	if (caseDef.needsScalarBlockLayout())
295 	{
296 		if (caseDef.testPipelineRobustness)
297 		{
298 			if (formatIsR64(caseDef.format))
299 			{
300 				if (caseDef.testRobustness2)
301 					return PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::getDevice(ctx);
302 				return PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);
303 			}
304 
305 			if (caseDef.testRobustness2)
306 				return PipelineRobustnessRobustness2ScalarSingleton::getDevice(ctx);
307 			return PipelineRobustnessImageRobustnessScalarSingleton::getDevice(ctx);
308 		}
309 
310 		if (formatIsR64(caseDef.format))
311 		{
312 			if (caseDef.testRobustness2)
313 				return Robustness2Int64AtomicsScalarSingleton::getDevice(ctx);
314 			return ImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);
315 		}
316 
317 		if (caseDef.testRobustness2)
318 			return Robustness2ScalarSingleton::getDevice(ctx);
319 		return ImageRobustnessScalarSingleton::getDevice(ctx);
320 	}
321 
322 	if (caseDef.testPipelineRobustness)
323 	{
324 		if (formatIsR64(caseDef.format))
325 		{
326 			if (caseDef.testRobustness2)
327 				return PipelineRobustnessRobustness2Int64AtomicsSingleton::getDevice(ctx);
328 			return PipelineRobustnessImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
329 		}
330 
331 		if (caseDef.testRobustness2)
332 			return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
333 		return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);
334 	}
335 
336 	if (formatIsR64(caseDef.format))
337 	{
338 		if (caseDef.testRobustness2)
339 			return Robustness2Int64AtomicsSingleton::getDevice(ctx);
340 		return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
341 	}
342 
343 	if (caseDef.testRobustness2)
344 		return Robustness2Singleton::getDevice(ctx);
345 	return ImageRobustnessSingleton::getDevice(ctx);
346 }
347 
348 // Returns the appropriate singleton device driver for the given case.
getDeviceInterface(Context & ctx,const CaseDef & caseDef)349 const DeviceInterface& getDeviceInterface(Context& ctx, const CaseDef& caseDef)
350 {
351 	if (caseDef.needsScalarBlockLayout())
352 	{
353 		if (formatIsR64(caseDef.format))
354 		{
355 			if (caseDef.testRobustness2)
356 				return Robustness2Int64AtomicsScalarSingleton::getDeviceInterface(ctx);
357 			return ImageRobustnessInt64AtomicsScalarSingleton::getDeviceInterface(ctx);
358 		}
359 
360 		if (caseDef.testRobustness2)
361 			return Robustness2ScalarSingleton::getDeviceInterface(ctx);
362 		return ImageRobustnessScalarSingleton::getDeviceInterface(ctx);
363 	}
364 
365 	if (formatIsR64(caseDef.format))
366 	{
367 		if (caseDef.testRobustness2)
368 			return Robustness2Int64AtomicsSingleton::getDeviceInterface(ctx);
369 		return ImageRobustnessInt64AtomicsSingleton::getDeviceInterface(ctx);
370 	}
371 
372 	if (caseDef.testRobustness2)
373 		return Robustness2Singleton::getDeviceInterface(ctx);
374 	return ImageRobustnessSingleton::getDeviceInterface(ctx);
375 }
376 
377 
378 class Layout
379 {
380 public:
381 	vector<VkDescriptorSetLayoutBinding> layoutBindings;
382 	vector<deUint8> refData;
383 };
384 
385 
386 class RobustnessExtsTestInstance : public TestInstance
387 {
388 public:
389 						RobustnessExtsTestInstance		(Context& context, const CaseDef& data);
390 						~RobustnessExtsTestInstance	(void);
391 	tcu::TestStatus		iterate								(void);
392 private:
393 	CaseDef				m_data;
394 };
395 
RobustnessExtsTestInstance(Context & context,const CaseDef & data)396 RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
397 	: vkt::TestInstance		(context)
398 	, m_data				(data)
399 {
400 }
401 
~RobustnessExtsTestInstance(void)402 RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
403 {
404 }
405 
406 class RobustnessExtsTestCase : public TestCase
407 {
408 	public:
409 								RobustnessExtsTestCase		(tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
410 								~RobustnessExtsTestCase	(void);
411 	virtual	void				initPrograms					(SourceCollections& programCollection) const;
412 	virtual TestInstance*		createInstance					(Context& context) const;
413 	virtual void				checkSupport					(Context& context) const;
414 
415 private:
416 	CaseDef					m_data;
417 };
418 
RobustnessExtsTestCase(tcu::TestContext & context,const char * name,const char * desc,const CaseDef data)419 RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
420 	: vkt::TestCase	(context, name, desc)
421 	, m_data		(data)
422 {
423 }
424 
~RobustnessExtsTestCase(void)425 RobustnessExtsTestCase::~RobustnessExtsTestCase	(void)
426 {
427 }
428 
formatIsFloat(const VkFormat & f)429 static bool formatIsFloat(const VkFormat& f)
430 {
431 	switch (f)
432 	{
433 	case VK_FORMAT_R32_SFLOAT:
434 	case VK_FORMAT_R32G32_SFLOAT:
435 	case VK_FORMAT_R32G32B32A32_SFLOAT:
436 		return true;
437 	default:
438 		return false;
439 	}
440 }
441 
formatIsSignedInt(const VkFormat & f)442 static bool formatIsSignedInt(const VkFormat& f)
443 {
444 	switch (f)
445 	{
446 	case VK_FORMAT_R32_SINT:
447 	case VK_FORMAT_R64_SINT:
448 	case VK_FORMAT_R32G32_SINT:
449 	case VK_FORMAT_R32G32B32A32_SINT:
450 		return true;
451 	default:
452 		return false;
453 	}
454 }
455 
supportsStores(int descriptorType)456 static bool supportsStores(int descriptorType)
457 {
458 	switch (descriptorType)
459 	{
460 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
461 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
462 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
463 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
464 		return true;
465 	default:
466 		return false;
467 	}
468 }
469 
470 #ifndef CTS_USES_VULKANSC
getPipelineRobustnessInfo(bool robustness2,int descriptorType)471 static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
472 {
473 	VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();
474 
475 	switch (descriptorType)
476 	{
477 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
478 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
479 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
480 			robustnessCreateInfo.storageBuffers	= (robustness2
481 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
482 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
483 			break;
484 
485 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
486 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
487 			robustnessCreateInfo.images	= (robustness2
488 										? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT
489 										: VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);
490 			break;
491 
492 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
493 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
494 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
495 			robustnessCreateInfo.uniformBuffers	= (robustness2
496 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
497 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
498 			break;
499 
500 		case VERTEX_ATTRIBUTE_FETCH:
501 			robustnessCreateInfo.vertexInputs	= (robustness2
502 												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
503 												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
504 			break;
505 
506 		default:
507 			DE_ASSERT(0);
508 	}
509 
510 	return robustnessCreateInfo;
511 }
512 #endif
513 
checkSupport(Context & context) const514 void RobustnessExtsTestCase::checkSupport(Context& context) const
515 {
516 	const auto&	vki				= context.getInstanceInterface();
517 	const auto	physicalDevice	= context.getPhysicalDevice();
518 
519 	// We need to query feature support using the physical device instead of using the reported context features because robustness2
520 	// and image robustness are always disabled in the default device but they may be available.
521 	VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
522 	VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
523 	VkPhysicalDeviceScalarBlockLayoutFeatures			scalarLayoutFeatures			= initVulkanStructure();
524 	VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();
525 
526 	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
527 
528 	if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
529 	{
530 		scalarLayoutFeatures.pNext = features2.pNext;
531 		features2.pNext = &scalarLayoutFeatures;
532 	}
533 
534 	if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
535 	{
536 		imageRobustnessFeatures.pNext = features2.pNext;
537 		features2.pNext = &imageRobustnessFeatures;
538 	}
539 
540 	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
541 	{
542 		robustness2Features.pNext = features2.pNext;
543 		features2.pNext = &robustness2Features;
544 	}
545 
546 #ifndef CTS_USES_VULKANSC
547 	VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures = initVulkanStructure();
548 	if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
549 	{
550 		pipelineRobustnessFeatures.pNext = features2.pNext;
551 		features2.pNext = &pipelineRobustnessFeatures;
552 	}
553 #endif
554 
555 	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
556 
557 	if (formatIsR64(m_data.format))
558 	{
559 		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
560 
561 		VkFormatProperties formatProperties;
562 		vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);
563 
564 #ifndef CTS_USES_VULKANSC
565 		const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
566 #endif // CTS_USES_VULKANSC
567 
568 		switch (m_data.descriptorType)
569 		{
570 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
571 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
572 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
573 			break;
574 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
575 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
576 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
577 #ifndef CTS_USES_VULKANSC
578 			if ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) != VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
579 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
580 #endif // CTS_USES_VULKANSC
581 			break;
582 		case VERTEX_ATTRIBUTE_FETCH:
583 			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
584 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
585 			break;
586 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
587 			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
588 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
589 			break;
590 		default: DE_ASSERT(true);
591 		}
592 
593 		if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
594 		{
595 			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
596 				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
597 		}
598 	}
599 
600 	// Check needed properties and features
601 	if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
602 		TCU_THROW(NotSupportedError, "Scalar block layout not supported");
603 
604 	if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
605 		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
606 
607 	if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
608 		TCU_THROW(NotSupportedError, "Fragment shader stores not supported");
609 
610 	if (m_data.stage == STAGE_RAYGEN)
611 		context.requireDeviceFunctionality("VK_NV_ray_tracing");
612 
613 	switch (m_data.descriptorType)
614 	{
615 	default: DE_ASSERT(0); // Fallthrough
616 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
617 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
618 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
619 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
620 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
621 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
622 	case VERTEX_ATTRIBUTE_FETCH:
623 		if (m_data.testRobustness2)
624 		{
625 			if (!robustness2Features.robustBufferAccess2)
626 				TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
627 		}
628 		else
629 		{
630 			// This case is not tested here.
631 			DE_ASSERT(false);
632 		}
633 		break;
634 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
635 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
636 		if (m_data.testRobustness2)
637 		{
638 			if (!robustness2Features.robustImageAccess2)
639 				TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
640 		}
641 		else
642 		{
643 			if (!imageRobustnessFeatures.robustImageAccess)
644 				TCU_THROW(NotSupportedError, "robustImageAccess not supported");
645 		}
646 		break;
647 	}
648 
649 	if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
650 		TCU_THROW(NotSupportedError, "nullDescriptor not supported");
651 
652 	// The fill shader for 64-bit multisample image tests uses a storage image.
653 	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
654 		!features2.features.shaderStorageImageMultisample)
655 		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
656 
657 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
658 		m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
659 		!features2.features.shaderStorageImageMultisample)
660 		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
661 
662 	if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
663 		TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");
664 
665 #ifndef CTS_USES_VULKANSC
666 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
667 		!m_data.formatQualifier)
668 	{
669 		const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
670 		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
671 			TCU_THROW(NotSupportedError, "Format does not support reading without format");
672 		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
673 			TCU_THROW(NotSupportedError, "Format does not support writing without format");
674 	}
675 #else
676 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
677 		!m_data.formatQualifier &&
678 		(!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
679 		TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
680 #endif // CTS_USES_VULKANSC
681 
682 	if (m_data.pushDescriptor)
683 		context.requireDeviceFunctionality("VK_KHR_push_descriptor");
684 
685 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
686 		TCU_THROW(NotSupportedError, "Cube array image view type not supported");
687 
688 	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
689 		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
690 
691 #ifndef CTS_USES_VULKANSC
692 	if (m_data.testPipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
693 		TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
694 #endif
695 }
696 
generateLayout(Layout & layout,const CaseDef & caseDef)697 void generateLayout(Layout &layout, const CaseDef &caseDef)
698 {
699 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
700 	int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
701 	bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
702 
703 	for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
704 	{
705 		VkDescriptorSetLayoutBinding &binding = bindings[b];
706 		binding.binding = b;
707 		binding.pImmutableSamplers = NULL;
708 		binding.stageFlags = caseDef.allShaderStages;
709 		binding.descriptorCount = 1;
710 
711 		// Output image
712 		if (b == 0)
713 			binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
714 		else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
715 			binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
716 	}
717 
718 	if (caseDef.nullDescriptor)
719 		return;
720 
721 	if (caseDef.bufferLen == 0)
722 	{
723 		// Clear color values for image tests
724 		static deUint32 urefData[4]		= { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
725 		static deUint64 urefData64[4]	= { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
726 		static float frefData[4]		= { 123.f, 234.f, 345.f, 456.f };
727 
728 		if (formatIsR64(caseDef.format))
729 		{
730 			layout.refData.resize(32);
731 			deUint64 *ptr = (deUint64 *)layout.refData.data();
732 
733 			for (unsigned int i = 0; i < 4; ++i)
734 			{
735 				ptr[i] = urefData64[i];
736 			}
737 		}
738 		else
739 		{
740 			layout.refData.resize(16);
741 			deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
742 		}
743 	}
744 	else
745 	{
746 		layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7: ~3));
747 		for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
748 		{
749 			if (formatIsFloat(caseDef.format))
750 			{
751 				float *f = (float *)layout.refData.data() + i;
752 				*f = 2.0f*(float)i + 3.0f;
753 			}
754 			if (formatIsR64(caseDef.format))
755 			{
756 				deUint64 *u = (deUint64 *)layout.refData.data() + i;
757 				*u = 2 * i + 3;
758 			}
759 			else
760 			{
761 				int *u = (int *)layout.refData.data() + i;
762 				*u = 2*i + 3;
763 			}
764 		}
765 	}
766 }
767 
genFetch(const CaseDef & caseDef,int numComponents,const string & vecType,const string & coord,const string & lod)768 static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
769 {
770 	std::stringstream s;
771 	// Fetch from the descriptor.
772 	switch (caseDef.descriptorType)
773 	{
774 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
775 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
776 		s << vecType << "(ubo0_1.val[" << coord << "]";
777 		for (int i = numComponents; i < 4; ++i) s << ", 0";
778 		s << ")";
779 		break;
780 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
781 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
782 		s << vecType << "(ssbo0_1.val[" << coord << "]";
783 		for (int i = numComponents; i < 4; ++i) s << ", 0";
784 		s << ")";
785 		break;
786 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
787 		s << "texelFetch(texbo0_1, " << coord << ")";
788 		break;
789 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
790 		s << "imageLoad(image0_1, " << coord << ")";
791 		break;
792 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
793 		if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
794 			s << "texelFetch(texture0_1, " << coord << ")";
795 		else
796 			s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
797 		break;
798 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
799 		s << "imageLoad(image0_1, " << coord << ")";
800 		break;
801 	case VERTEX_ATTRIBUTE_FETCH:
802 		s << "attr";
803 		break;
804 	default: DE_ASSERT(0);
805 	}
806 	return s.str();
807 }
808 
809 static const int storeValue = 123;
810 
811 // Get the value stored by genStore.
getStoreValue(int descriptorType,int numComponents,const string & vecType,const string & bufType)812 static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
813 {
814 	std::stringstream s;
815 	switch (descriptorType)
816 	{
817 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
818 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
819 		s << vecType  << "(" << bufType << "(" << storeValue << ")";
820 		for (int i = numComponents; i < 4; ++i) s << ", 0";
821 		s << ")";
822 		break;
823 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
824 		s << vecType << "(" << storeValue << ")";
825 		break;
826 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
827 		s << vecType << "(" << storeValue << ")";
828 		break;
829 	default: DE_ASSERT(0);
830 	}
831 	return s.str();
832 }
833 
genStore(int descriptorType,const string & vecType,const string & bufType,const string & coord)834 static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
835 {
836 	std::stringstream s;
837 	// Store to the descriptor.
838 	switch (descriptorType)
839 	{
840 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
841 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
842 		s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
843 		break;
844 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
845 		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
846 		break;
847 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
848 		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
849 		break;
850 	default: DE_ASSERT(0);
851 	}
852 	return s.str();
853 }
854 
genAtomic(int descriptorType,const string & bufType,const string & coord)855 static string genAtomic(int descriptorType, const string& bufType, const string& coord)
856 {
857 	std::stringstream s;
858 	// Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
859 	switch (descriptorType)
860 	{
861 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
862 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
863 		s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
864 		break;
865 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
866 		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
867 		break;
868 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
869 		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
870 		break;
871 	default: DE_ASSERT(0);
872 	}
873 	return s.str();
874 }
875 
getShaderImageFormatQualifier(const tcu::TextureFormat & format)876 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
877 {
878 	const char* orderPart;
879 	const char* typePart;
880 
881 	switch (format.order)
882 	{
883 		case tcu::TextureFormat::R:		orderPart = "r";	break;
884 		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
885 		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
886 		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;
887 
888 		default:
889 			DE_FATAL("Impossible");
890 			orderPart = DE_NULL;
891 	}
892 
893 	switch (format.type)
894 	{
895 		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
896 		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;
897 
898 		case tcu::TextureFormat::UNSIGNED_INT64:	typePart = "64ui";		break;
899 		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
900 		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
901 		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;
902 
903 		case tcu::TextureFormat::SIGNED_INT64:		typePart = "64i";		break;
904 		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
905 		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
906 		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;
907 
908 		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
909 		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;
910 
911 		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
912 		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;
913 
914 		default:
915 			DE_FATAL("Impossible");
916 			typePart = DE_NULL;
917 	}
918 
919 	return std::string() + orderPart + typePart;
920 }
921 
genCoord(string c,int numCoords,VkSampleCountFlagBits samples,int dim)922 string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
923 {
924 	if (numCoords == 1)
925 		return c;
926 
927 	if (samples != VK_SAMPLE_COUNT_1_BIT)
928 		numCoords--;
929 
930 	string coord = "ivec" + to_string(numCoords) + "(";
931 
932 	for (int i = 0; i < numCoords; ++i)
933 	{
934 		if (i == dim)
935 			coord += c;
936 		else
937 			coord += "0";
938 		if (i < numCoords - 1)
939 			coord += ", ";
940 	}
941 	coord += ")";
942 
943 	// Append sample coordinate
944 	if (samples != VK_SAMPLE_COUNT_1_BIT)
945 	{
946 		coord += ", ";
947 		if (dim == numCoords)
948 			coord += c;
949 		else
950 			coord += "0";
951 	}
952 	return coord;
953 }
954 
955 // Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
genCoordNorm(const CaseDef & caseDef,string c,int numCoords,int numNormalizedCoords,int dim)956 string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
957 {
958 	// dim can be 3 for cube_array. Reuse the number of layers in that case.
959 	dim = std::min(dim, 2);
960 
961 	if (numCoords == 1)
962 		return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";
963 
964 	string coord = "vec" + to_string(numCoords) + "(";
965 
966 	for (int i = 0; i < numCoords; ++i)
967 	{
968 		if (i == dim)
969 			coord += c;
970 		else
971 			coord += "0.25";
972 		if (i < numNormalizedCoords)
973 			coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
974 		if (i < numCoords - 1)
975 			coord += ", ";
976 	}
977 	coord += ")";
978 	return coord;
979 }
980 
initPrograms(SourceCollections & programCollection) const981 void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
982 {
983 	VkFormat format = m_data.format;
984 
985 	Layout layout;
986 	generateLayout(layout, m_data);
987 
988 	if (layout.layoutBindings.size() > 1 &&
989 		layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
990 	{
991 		if (format == VK_FORMAT_R64_SINT)
992 			format = VK_FORMAT_R32G32_SINT;
993 
994 		if (format == VK_FORMAT_R64_UINT)
995 			format = VK_FORMAT_R32G32_UINT;
996 	}
997 
998 	std::stringstream decls, checks;
999 
1000 	const string	r64			= formatIsR64(format) ? "64" : "";
1001 	const string	i64Type		= formatIsR64(format) ? "64_t" : "";
1002 	const string	vecType		= formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
1003 	const string	qLevelType	= vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);
1004 
1005 	decls << "uvec4 abs(uvec4 x) { return x; }\n";
1006 	if (formatIsR64(format))
1007 		decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
1008 	decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
1009 
1010 
1011 	const int	componetsSize = (formatIsR64(format) ? 8 : 4);
1012 	int			refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componetsSize), 4);
1013 	// Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
1014 	// robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
1015 	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1016 		m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1017 	{
1018 		refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
1019 	}
1020 	if (m_data.nullDescriptor)
1021 		refDataNumElements = 4;
1022 
1023 	if (formatIsFloat(format))
1024 	{
1025 		decls << "float refData[" << refDataNumElements << "] = {";
1026 		int i;
1027 		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
1028 		{
1029 			if (i != 0)
1030 				decls << ", ";
1031 			decls << ((const float *)layout.refData.data())[i];
1032 		}
1033 		while (i < refDataNumElements)
1034 		{
1035 			if (i != 0)
1036 				decls << ", ";
1037 			decls << "0";
1038 			i++;
1039 		}
1040 	}
1041 	else if (formatIsR64(format))
1042 	{
1043 		decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
1044 		int i;
1045 		for (i = 0; i < (int)layout.refData.size() / 8; ++i)
1046 		{
1047 			if (i != 0)
1048 				decls << ", ";
1049 			decls << ((const deUint64 *)layout.refData.data())[i] << "l";
1050 		}
1051 		while (i < refDataNumElements)
1052 		{
1053 			if (i != 0)
1054 				decls << ", ";
1055 			decls << "0l";
1056 			i++;
1057 		}
1058 	}
1059 	else
1060 	{
1061 		decls << "int" << " refData[" << refDataNumElements << "] = {";
1062 		int i;
1063 		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
1064 		{
1065 			if (i != 0)
1066 				decls << ", ";
1067 			decls << ((const int *)layout.refData.data())[i];
1068 		}
1069 		while (i < refDataNumElements)
1070 		{
1071 			if (i != 0)
1072 				decls << ", ";
1073 			decls << "0";
1074 			i++;
1075 		}
1076 	}
1077 
1078 	decls << "};\n";
1079 	decls << vecType << " zzzz = " << vecType << "(0);\n";
1080 	decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
1081 	decls << vecType << " expectedIB;\n";
1082 
1083 	string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
1084 	string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
1085 	string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
1086 
1087 	string imageDim = "";
1088 	int numCoords, numNormalizedCoords;
1089 	bool layered = false;
1090 	switch (m_data.viewType)
1091 	{
1092 		default: DE_ASSERT(0); // Fallthrough
1093 		case VK_IMAGE_VIEW_TYPE_1D:			imageDim = "1D";		numCoords = 1;	numNormalizedCoords = 1;	break;
1094 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	imageDim = "1DArray";	numCoords = 2;	numNormalizedCoords = 1;	layered = true;	break;
1095 		case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2D";		numCoords = 2;	numNormalizedCoords = 2;	break;
1096 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DArray";	numCoords = 3;	numNormalizedCoords = 2;	layered = true;	break;
1097 		case VK_IMAGE_VIEW_TYPE_3D:			imageDim = "3D";		numCoords = 3;	numNormalizedCoords = 3;	break;
1098 		case VK_IMAGE_VIEW_TYPE_CUBE:		imageDim = "Cube";		numCoords = 3;	numNormalizedCoords = 3;	break;
1099 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	imageDim = "CubeArray";	numCoords = 4;	numNormalizedCoords = 3;	layered = true;	break;
1100 	}
1101 	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
1102 	{
1103 		switch (m_data.viewType)
1104 		{
1105 			default: DE_ASSERT(0); // Fallthrough
1106 			case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2DMS";		break;
1107 			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DMSArray";	break;
1108 		}
1109 		numCoords++;
1110 	}
1111 	bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;
1112 
1113 	// Special case imageLoad(imageCubeArray, ...) which uses ivec3
1114 	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
1115 		m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1116 	{
1117 		numCoords = 3;
1118 	}
1119 
1120 	int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
1121 	string bufType;
1122 	if (numComponents == 1)
1123 		bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
1124 	else
1125 		bufType = imgprefix + "vec" + std::to_string(numComponents);
1126 
1127 	// For UBO's, which have a declared size in the shader, don't access outside that size.
1128 	bool declaredSize = false;
1129 	switch (m_data.descriptorType) {
1130 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1131 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1132 		declaredSize = true;
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	checks << "  int inboundcoords, clampedLayer;\n";
1139 	checks << "  " << vecType << " expectedIB2;\n";
1140 	if (m_data.unroll)
1141 	{
1142 		if (declaredSize)
1143 			checks << "  [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
1144 		else
1145 			checks << "  [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
1146 	}
1147 	else
1148 	{
1149 		if (declaredSize)
1150 			checks << "  [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
1151 		else
1152 			checks << "  [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
1153 	}
1154 
1155 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1156 		checks << "    int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
1157 	else
1158 		checks << "    int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
1159 
1160 	decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
1161 
1162 	const char *vol = m_data.vol ? "volatile " : "";
1163 	const char *ro = m_data.readOnly ? "readonly " : "";
1164 
1165 	// Construct the declaration for the binding
1166 	switch (m_data.descriptorType)
1167 	{
1168 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1169 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1170 		decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
1171 		break;
1172 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1173 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1174 		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
1175 		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
1176 		break;
1177 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1178 		switch(format)
1179 		{
1180 		case VK_FORMAT_R64_SINT:
1181 			decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
1182 			break;
1183 		case VK_FORMAT_R64_UINT:
1184 			decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
1185 			break;
1186 		default:
1187 			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
1188 		}
1189 		break;
1190 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1191 		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
1192 		break;
1193 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1194 		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
1195 		break;
1196 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1197 		switch (format)
1198 		{
1199 		case VK_FORMAT_R64_SINT:
1200 			decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
1201 			break;
1202 		case VK_FORMAT_R64_UINT:
1203 			decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
1204 			break;
1205 		default:
1206 			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
1207 			break;
1208 		}
1209 		break;
1210 	case VERTEX_ATTRIBUTE_FETCH:
1211 		if (formatIsR64(format))
1212 		{
1213 			decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
1214 		}
1215 		else
1216 		{
1217 			decls << "layout(location = 0) in " << vecType << " attr;\n";
1218 		}
1219 		break;
1220 	default: DE_ASSERT(0);
1221 	}
1222 
1223 	string expectedOOB;
1224 	string defaultw;
1225 
1226 	switch (m_data.descriptorType)
1227 	{
1228 	default: DE_ASSERT(0); // Fallthrough
1229 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1230 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1231 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1232 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1233 		expectedOOB = "zzzz";
1234 		defaultw = "0";
1235 		break;
1236 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1237 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1238 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1239 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1240 	case VERTEX_ATTRIBUTE_FETCH:
1241 		if (numComponents == 1)
1242 		{
1243 			expectedOOB = "zzzo";
1244 		}
1245 		else if (numComponents == 2)
1246 		{
1247 			expectedOOB = "zzzo";
1248 		}
1249 		else
1250 		{
1251 			expectedOOB = "zzzz";
1252 		}
1253 		defaultw = "1";
1254 		break;
1255 	}
1256 
1257 	string idx;
1258 	switch (m_data.descriptorType)
1259 	{
1260 	default: DE_ASSERT(0); // Fallthrough
1261 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1262 	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1263 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1264 	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1265 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1266 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1267 	case VERTEX_ATTRIBUTE_FETCH:
1268 		idx = "idx";
1269 		break;
1270 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1271 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1272 		idx = "0";
1273 		break;
1274 	}
1275 
1276 	if (m_data.nullDescriptor)
1277 	{
1278 		checks << "    expectedIB = zzzz;\n";
1279 		checks << "    inboundcoords = 0;\n";
1280 		checks << "    int paddedinboundcoords = 0;\n";
1281 		// Vertex attribute fetch still gets format conversion applied
1282 		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
1283 			expectedOOB = "zzzz";
1284 	}
1285 	else
1286 	{
1287 		checks << "    expectedIB.x = refData[" << idx << "];\n";
1288 		if (numComponents > 1)
1289 		{
1290 			checks << "    expectedIB.y = refData[" << idx << "+1];\n";
1291 		}
1292 		else
1293 		{
1294 			checks << "    expectedIB.y = 0;\n";
1295 		}
1296 		if (numComponents > 2)
1297 		{
1298 			checks << "    expectedIB.z = refData[" << idx << "+2];\n";
1299 			checks << "    expectedIB.w = refData[" << idx << "+3];\n";
1300 		}
1301 		else
1302 		{
1303 			checks << "    expectedIB.z = 0;\n";
1304 			checks << "    expectedIB.w = " << defaultw << ";\n";
1305 		}
1306 
1307 		switch (m_data.descriptorType)
1308 		{
1309 		default: DE_ASSERT(0); // Fallthrough
1310 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1311 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1312 			// UBOs can either strictly bounds check against inboundcoords, or can
1313 			// return the contents from memory for the range padded up to paddedinboundcoords.
1314 			checks << "    int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
1315 			// fallthrough
1316 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1317 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1318 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1319 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1320 		case VERTEX_ATTRIBUTE_FETCH:
1321 			checks << "    inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
1322 			break;
1323 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1324 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1325 			// set per-component below
1326 			break;
1327 		}
1328 	}
1329 
1330 	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1331 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1332 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1333 		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1334 		 !m_data.readOnly)
1335 	{
1336 		for (int i = 0; i < numCoords; ++i)
1337 		{
1338 			// Treat i==3 coord (cube array layer) like i == 2
1339 			deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1340 			if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1341 				checks << "    inboundcoords = " << coordDim << ";\n";
1342 
1343 			string coord = genCoord("c", numCoords, m_data.samples, i);
1344 			string inboundcoords =
1345 				m_data.nullDescriptor ? "0" :
1346 				(m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";
1347 
1348 			checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
1349 			if (m_data.formatQualifier &&
1350 				(format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
1351 			{
1352 				checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
1353 			}
1354 		}
1355 	}
1356 
1357 	for (int i = 0; i < numCoords; ++i)
1358 	{
1359 		// Treat i==3 coord (cube array layer) like i == 2
1360 		deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1361 		if (!m_data.nullDescriptor)
1362 		{
1363 			switch (m_data.descriptorType)
1364 			{
1365 			default:
1366 				break;
1367 			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1368 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1369 				checks << "    inboundcoords = " << coordDim << ";\n";
1370 				break;
1371 			}
1372 		}
1373 
1374 		string coord = genCoord("c", numCoords, m_data.samples, i);
1375 
1376 		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1377 		{
1378 			if (formatIsR64(format))
1379 			{
1380 				checks << "    temp.x = attr;\n";
1381 				checks << "    temp.y = 0l;\n";
1382 				checks << "    temp.z = 0l;\n";
1383 				checks << "    temp.w = 0l;\n";
1384 				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
1385 			}
1386 			else
1387 			{
1388 				checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1389 				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
1390 			}
1391 			// Accumulate any incorrect values.
1392 			checks << "    accum += abs(temp);\n";
1393 		}
1394 		// Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1395 		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1396 			!(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1397 			  (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1398 		{
1399 			checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1400 
1401 			checks << "    expectedIB2 = expectedIB;\n";
1402 
1403 			// Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1404 			if (dataDependsOnLayer && i == numNormalizedCoords)
1405 				checks << "    if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1406 
1407 			if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1408 			{
1409 				if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1410 				{
1411 					checks << "    if (temp == zzzz) temp = " << vecType << "(0);\n";
1412 					if (m_data.formatQualifier && numComponents < 4)
1413 						checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1414 					checks << "    else temp = " << vecType << "(1);\n";
1415 				}
1416 				else
1417 					// multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1418 					checks << "    if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1419 			}
1420 			else
1421 			{
1422 				// Storage buffers may be split into per-component loads. Generate a second
1423 				// expected out of bounds value where some subset of the components are
1424 				// actually in-bounds. If both loads and stores are split into per-component
1425 				// accesses, then the result value can be a mix of storeValue and zero.
1426 				string expectedOOB2 = expectedOOB;
1427 				string expectedOOB3 = expectedOOB;
1428 				if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1429 					 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1430 					 !m_data.nullDescriptor)
1431 				{
1432 					int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1433 					int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
1434 					string sstoreValue = de::toString(storeValue);
1435 					switch (mod)
1436 					{
1437 					case 0:
1438 						break;
1439 					case 1:
1440 						expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1441 						expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1442 						break;
1443 					case 2:
1444 						expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1445 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1446 						break;
1447 					case 3:
1448 						expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1449 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1450 						break;
1451 					}
1452 				}
1453 
1454 				// Entirely in-bounds.
1455 				checks << "    if (c >= 0 && c < inboundcoords) {\n"
1456 						  "       if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
1457 						  "    }\n";
1458 
1459 				// normal out-of-bounds value
1460 				if (m_data.testRobustness2)
1461 					checks << "    else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1462 				else
1463 					// image_robustness relaxes alpha which is allowed to be zero or one
1464 					checks << "    else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1465 
1466 				if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1467 					m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1468 				{
1469 					checks << "    else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
1470 				}
1471 
1472 				// null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1473 				if (m_data.nullDescriptor && m_data.formatQualifier &&
1474 					(m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1475 					numComponents < 4)
1476 					checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1477 
1478 				// non-volatile value replaced with stored value
1479 				if (supportsStores(m_data.descriptorType) && !m_data.vol)
1480 					checks << "    else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";
1481 
1482 				// value straddling the boundary, returning a partial vector
1483 				if (expectedOOB2 != expectedOOB)
1484 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
1485 				if (expectedOOB3 != expectedOOB)
1486 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";
1487 
1488 				// failure
1489 				checks << "    else temp = " << vecType << "(1);\n";
1490 			}
1491 			// Accumulate any incorrect values.
1492 			checks << "    accum += abs(temp);\n";
1493 
1494 			// Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1495 			if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1496 			{
1497 				// Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1498 				string coord0 = genCoord("0", numCoords, m_data.samples, i);
1499 				checks << "    if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
1500 				checks << "    if (c != 0) temp -= " << expectedOOB << ";\n";
1501 				checks << "    accum += abs(temp);\n";
1502 			}
1503 		}
1504 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1505 			m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1506 		{
1507 			string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1508 
1509 			checks << "    expectedIB2 = expectedIB;\n";
1510 
1511 			// Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1512 			if (dataDependsOnLayer && i == numNormalizedCoords)
1513 			{
1514 				checks << "    clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
1515 				checks << "    expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1516 			}
1517 
1518 			stringstream normexpected;
1519 			// Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
1520 			if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
1521 				m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1522 				(layered && i == numCoords-1))
1523 				normexpected << "    temp -= expectedIB2;\n";
1524 			else
1525 			{
1526 				normexpected << "    if (c >= 0 && c < inboundcoords)\n";
1527 				normexpected << "        temp -= expectedIB2;\n";
1528 				normexpected << "    else\n";
1529 				if (m_data.testRobustness2)
1530 					normexpected << "        temp -= " << expectedOOB << ";\n";
1531 				else
1532 					// image_robustness relaxes alpha which is allowed to be zero or one
1533 					normexpected << "        temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1534 			}
1535 
1536 			checks << "    temp = texture(texture0_1, " << coordNorm << ");\n";
1537 			checks << normexpected.str();
1538 			checks << "    accum += abs(temp);\n";
1539 			checks << "    temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1540 			checks << normexpected.str();
1541 			checks << "    accum += abs(temp);\n";
1542 			checks << "    temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1543 			checks << normexpected.str();
1544 			checks << "    accum += abs(temp);\n";
1545 		}
1546 		if (m_data.nullDescriptor)
1547 		{
1548 			const char *sizeswiz;
1549 			switch (m_data.viewType)
1550 			{
1551 				default: DE_ASSERT(0); // Fallthrough
1552 				case VK_IMAGE_VIEW_TYPE_1D:			sizeswiz = ".xxxx";	break;
1553 				case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	sizeswiz = ".xyxx";	break;
1554 				case VK_IMAGE_VIEW_TYPE_2D:			sizeswiz = ".xyxx";	break;
1555 				case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	sizeswiz = ".xyzx";	break;
1556 				case VK_IMAGE_VIEW_TYPE_3D:			sizeswiz = ".xyzx";	break;
1557 				case VK_IMAGE_VIEW_TYPE_CUBE:		sizeswiz = ".xyxx";	break;
1558 				case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	sizeswiz = ".xyzx";	break;
1559 			}
1560 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1561 			{
1562 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1563 				{
1564 					checks << "    temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
1565 					checks << "    accum += abs(temp);\n";
1566 
1567 					// checking textureSize with clearly out of range LOD values
1568 					checks << "    temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
1569 					checks << "    accum += abs(temp);\n";
1570 					checks << "    temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
1571 					checks << "    accum += abs(temp);\n";
1572 				}
1573 				else
1574 				{
1575 					checks << "    temp = textureSize(texture0_1)" << sizeswiz <<";\n";
1576 					checks << "    accum += abs(temp);\n";
1577 					checks << "    temp = textureSamples(texture0_1).xxxx;\n";
1578 					checks << "    accum += abs(temp);\n";
1579 				}
1580 			}
1581 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1582 			{
1583 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1584 				{
1585 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1586 					checks << "    accum += abs(temp);\n";
1587 				}
1588 				else
1589 				{
1590 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1591 					checks << "    accum += abs(temp);\n";
1592 					checks << "    temp = imageSamples(image0_1).xxxx;\n";
1593 					checks << "    accum += abs(temp);\n";
1594 				}
1595 			}
1596 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1597 				m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1598 			{
1599 				// expect zero for runtime-sized array .length()
1600 				checks << "    temp = " << vecType << "(ssbo0_1.val.length());\n";
1601 				checks << "    accum += abs(temp);\n";
1602 				checks << "    temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1603 				checks << "    accum += abs(temp);\n";
1604 			}
1605 		}
1606 	}
1607 	checks << "  }\n";
1608 
1609 	// outside the coordinates loop because we only need to call it once
1610 	if (m_data.nullDescriptor &&
1611 		m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1612 		m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1613 	{
1614 		checks << "  temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1615 		checks << "  temp = " << vecType << "(temp_ql);\n";
1616 		checks << "  accum += abs(temp);\n";
1617 
1618 		if (m_data.stage == STAGE_FRAGMENT)
1619 		{
1620 			// as here we only want to check that textureQueryLod returns 0 when
1621 			// texture0_1 is null, we don't need to use the actual texture coordinates
1622 			// (and modify the vertex shader below to do so). Any coordinates are fine.
1623 			// gl_FragCoord has been selected "randomly", instead of selecting 0 for example.
1624 			std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1625 			checks << "  vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1626 			checks << "  temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1627 			checks << "  temp = " << vecType << "(temp_ql);\n";
1628 			checks << "  accum += abs(temp);\n";
1629 		}
1630 	}
1631 
1632 
1633 	const bool		needsScalarLayout	= m_data.needsScalarBlockLayout();
1634 	const uint32_t	shaderBuildOptions	= (needsScalarLayout
1635 										? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS)
1636 										: 0u);
1637 
1638 	const bool is64BitFormat = formatIsR64(m_data.format);
1639 	std::string support =	"#version 460 core\n"
1640 							"#extension GL_EXT_nonuniform_qualifier : enable\n" +
1641 							(needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1642 							"#extension GL_EXT_samplerless_texture_functions : enable\n"
1643 							"#extension GL_EXT_control_flow_attributes : enable\n"
1644 							"#extension GL_EXT_shader_image_load_formatted : enable\n";
1645 	std::string SupportR64 =	"#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1646 								"#extension GL_EXT_shader_image_int64 : require\n";
1647 	if (is64BitFormat)
1648 		support += SupportR64;
1649 	if (m_data.stage == STAGE_RAYGEN)
1650 		support += "#extension GL_NV_ray_tracing : require\n";
1651 
1652 	std::string code =	"  " + vecType + " accum = " + vecType + "(0);\n"
1653 						"  " + vecType + " temp;\n"
1654 						"  " + qLevelType + " temp_ql;\n" +
1655 						checks.str() +
1656 						"  " + vecType + " color = (accum != " + vecType + "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
1657 
1658 	switch (m_data.stage)
1659 	{
1660 	default: DE_ASSERT(0); // Fallthrough
1661 	case STAGE_COMPUTE:
1662 		{
1663 			std::stringstream css;
1664 			css << support
1665 				<< decls.str() <<
1666 				"layout(local_size_x = 1, local_size_y = 1) in;\n"
1667 				"void main()\n"
1668 				"{\n"
1669 				<< code <<
1670 				"  imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1671 				"}\n";
1672 
1673 			programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1674 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1675 			break;
1676 		}
1677 	case STAGE_RAYGEN:
1678 		{
1679 			std::stringstream css;
1680 			css << support
1681 				<< decls.str() <<
1682 				"void main()\n"
1683 				"{\n"
1684 				<< code <<
1685 				"  imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1686 				"}\n";
1687 
1688 			programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1689 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1690 			break;
1691 		}
1692 	case STAGE_VERTEX:
1693 		{
1694 			std::stringstream vss;
1695 			vss << support
1696 				<< decls.str() <<
1697 				"void main()\n"
1698 				"{\n"
1699 				<< code <<
1700 				"  imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1701 				"  gl_PointSize = 1.0f;\n"
1702 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1703 				"}\n";
1704 
1705 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1706 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1707 			break;
1708 		}
1709 	case STAGE_FRAGMENT:
1710 		{
1711 			std::stringstream vss;
1712 			vss <<
1713 				"#version 450 core\n"
1714 				"void main()\n"
1715 				"{\n"
1716 				// full-viewport quad
1717 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1718 				"}\n";
1719 
1720 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1721 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1722 
1723 			std::stringstream fss;
1724 			fss << support
1725 				<< decls.str() <<
1726 				"void main()\n"
1727 				"{\n"
1728 				<< code <<
1729 				"  imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1730 				"}\n";
1731 
1732 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1733 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1734 			break;
1735 		}
1736 	}
1737 
1738 	// The 64-bit conditions below are redundant. Can we support the below shader for other than 64-bit formats?
1739 	if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1740 	{
1741 		const std::string	ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1742 		std::stringstream	fillShader;
1743 
1744 		fillShader <<
1745 			"#version 450\n"
1746 			<< SupportR64
1747 			<< "\n"
1748 			"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1749 			"layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1750 			<< string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1751 			"\n"
1752 			"layout(std430, binding = 1) buffer inputBuffer\n"
1753 			"{\n"
1754 			"  int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1755 			"} inBuffer;\n"
1756 			"\n"
1757 			"void main(void)\n"
1758 			"{\n"
1759 			"  int gx = int(gl_GlobalInvocationID.x);\n"
1760 			"  int gy = int(gl_GlobalInvocationID.y);\n"
1761 			"  int gz = int(gl_GlobalInvocationID.z);\n"
1762 			"  uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1763 
1764 			for(int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1765 			{
1766 				fillShader << "  imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1767 			}
1768 
1769 			fillShader << "}\n";
1770 
1771 		programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1772 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1773 	}
1774 
1775 }
1776 
imageViewTypeToImageType(VkImageViewType type)1777 VkImageType imageViewTypeToImageType (VkImageViewType type)
1778 {
1779 	switch (type)
1780 	{
1781 		case VK_IMAGE_VIEW_TYPE_1D:
1782 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:		return VK_IMAGE_TYPE_1D;
1783 		case VK_IMAGE_VIEW_TYPE_2D:
1784 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1785 		case VK_IMAGE_VIEW_TYPE_CUBE:
1786 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:		return VK_IMAGE_TYPE_2D;
1787 		case VK_IMAGE_VIEW_TYPE_3D:				return VK_IMAGE_TYPE_3D;
1788 		default:
1789 			DE_ASSERT(false);
1790 	}
1791 
1792 	return VK_IMAGE_TYPE_2D;
1793 }
1794 
createInstance(Context & context) const1795 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1796 {
1797 	return new RobustnessExtsTestInstance(context, m_data);
1798 }
1799 
iterate(void)1800 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1801 {
1802 	const VkInstance			instance			= m_context.getInstance();
1803 	const InstanceInterface&	vki					= m_context.getInstanceInterface();
1804 	const VkDevice				device				= getLogicalDevice(m_context, m_data);
1805 	const vk::DeviceInterface&	vk					= getDeviceInterface(m_context, m_data);
1806 	const VkPhysicalDevice		physicalDevice		= chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1807 	SimpleAllocator				allocator			(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1808 
1809 	Layout layout;
1810 	generateLayout(layout, m_data);
1811 
1812 	// Get needed properties.
1813 	VkPhysicalDeviceProperties2 properties;
1814 	deMemset(&properties, 0, sizeof(properties));
1815 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1816 	void** pNextTail = &properties.pNext;
1817 
1818 #ifndef CTS_USES_VULKANSC
1819 	VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1820 	deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1821 	rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1822 #endif
1823 
1824 	VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1825 	deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1826 	robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1827 
1828 #ifndef CTS_USES_VULKANSC
1829 	if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1830 	{
1831 		*pNextTail = &rayTracingProperties;
1832 		pNextTail = &rayTracingProperties.pNext;
1833 	}
1834 #endif
1835 
1836 	if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1837 	{
1838 		*pNextTail = &robustness2Properties;
1839 		pNextTail = &robustness2Properties.pNext;
1840 	}
1841 
1842 	vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1843 
1844 	if (m_data.testRobustness2)
1845 	{
1846 		if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1847 			robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1848 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1849 
1850 		if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1851 			robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1852 			!deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1853 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1854 	}
1855 
1856 	VkPipelineBindPoint bindPoint;
1857 
1858 	switch (m_data.stage)
1859 	{
1860 	case STAGE_COMPUTE:
1861 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1862 		break;
1863 #ifndef CTS_USES_VULKANSC
1864 	case STAGE_RAYGEN:
1865 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1866 		break;
1867 #endif
1868 	default:
1869 		bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1870 		break;
1871 	}
1872 
1873 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
1874 	Move<vk::VkDescriptorPool>		descriptorPool;
1875 	Move<vk::VkDescriptorSet>		descriptorSet;
1876 
1877 	int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1878 	int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1879 
1880 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1881 
1882 	VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1883 
1884 #ifndef CTS_USES_VULKANSC
1885 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1886 #else
1887 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1888 #endif
1889 
1890 	// Create a layout and allocate a descriptor set for it.
1891 
1892 	const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1893 	{
1894 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1895 		DE_NULL,
1896 
1897 		layoutCreateFlags,
1898 		(deUint32)bindings.size(),
1899 		bindings.empty() ? DE_NULL : bindings.data()
1900 	};
1901 
1902 	descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1903 
1904 	vk::DescriptorPoolBuilder poolBuilder;
1905 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1906 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1907 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1908 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1909 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1910 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1911 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1912 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1913 
1914 	descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1915 
1916 	const void *pNext = DE_NULL;
1917 
1918 	if (!m_data.pushDescriptor)
1919 		descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1920 
1921 	de::MovePtr<BufferWithMemory> buffer;
1922 
1923 	deUint8 *bufferPtr = DE_NULL;
1924 	if (!m_data.nullDescriptor)
1925 	{
1926 		// Create a buffer to hold data for all descriptors.
1927 		VkDeviceSize	size = de::max(
1928 			(VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1929 			(VkDeviceSize)256);
1930 
1931 		VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1932 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1933 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1934 		{
1935 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1936 			usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
1937 		}
1938 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1939 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1940 		{
1941 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1942 			usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1943 		}
1944 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1945 		{
1946 			usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1947 		}
1948 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
1949 		{
1950 			usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
1951 		}
1952 		else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1953 		{
1954 			size = m_data.bufferLen;
1955 		}
1956 
1957 		buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1958 			vk, device, allocator, makeBufferCreateInfo(size, usage), MemoryRequirement::HostVisible));
1959 		bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1960 
1961 		deMemset(bufferPtr, 0x3f, (size_t)size);
1962 
1963 		deMemset(bufferPtr, 0, m_data.bufferLen);
1964 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1965 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1966 		{
1967 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1968 		}
1969 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1970 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1971 		{
1972 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1973 		}
1974 	}
1975 
1976 	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1977 
1978 	Move<VkDescriptorSetLayout>		descriptorSetLayoutR64;
1979 	Move<VkDescriptorPool>			descriptorPoolR64;
1980 	Move<VkDescriptorSet>			descriptorSetFillImage;
1981 	Move<VkShaderModule>			shaderModuleFillImage;
1982 	Move<VkPipelineLayout>			pipelineLayoutFillImage;
1983 	Move<VkPipeline>				pipelineFillImage;
1984 
1985 	Move<VkCommandPool>				cmdPool		= createCommandPool(vk, device, 0, queueFamilyIndex);
1986 	Move<VkCommandBuffer>			cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1987 	VkQueue							queue;
1988 
1989 	vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1990 
1991 	const VkImageSubresourceRange	barrierRange				=
1992 	{
1993 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1994 		0u,							// deUint32				baseMipLevel;
1995 		VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
1996 		0u,							// deUint32				baseArrayLayer;
1997 		VK_REMAINING_ARRAY_LAYERS	// deUint32				layerCount;
1998 	};
1999 
2000 	VkImageMemoryBarrier			preImageBarrier				=
2001 	{
2002 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
2003 		DE_NULL,											// const void*			pNext
2004 		0u,													// VkAccessFlags		srcAccessMask
2005 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
2006 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
2007 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
2008 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
2009 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
2010 		DE_NULL,											// VkImage				image
2011 		barrierRange,										// VkImageSubresourceRange	subresourceRange;
2012 	};
2013 
2014 	VkImageMemoryBarrier			postImageBarrier			=
2015 	{
2016 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
2017 		DE_NULL,									// const void*				pNext;
2018 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
2019 		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
2020 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
2021 		VK_IMAGE_LAYOUT_GENERAL,					// VkImageLayout			newLayout;
2022 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
2023 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
2024 		DE_NULL,									// VkImage					image;
2025 		barrierRange,								// VkImageSubresourceRange	subresourceRange;
2026 	};
2027 
2028 	vk::VkClearColorValue			clearValue;
2029 	clearValue.uint32[0] = 0u;
2030 	clearValue.uint32[1] = 0u;
2031 	clearValue.uint32[2] = 0u;
2032 	clearValue.uint32[3] = 0u;
2033 
2034 	beginCommandBuffer(vk, *cmdBuffer, 0u);
2035 
2036 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
2037 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
2038 	typedef de::SharedPtr<ImageWithMemory>		ImageWithMemorySp;
2039 	typedef de::SharedPtr<Unique<VkImageView> >	VkImageViewSp;
2040 	typedef de::MovePtr<BufferWithMemory>		BufferWithMemoryMp;
2041 
2042 	vector<BufferViewHandleSp>					bufferViews(1);
2043 
2044 	VkImageCreateFlags mutableFormatFlag = 0;
2045 	// The 64-bit image tests use a view format which differs from the image.
2046 	if (formatIsR64(m_data.format))
2047 		mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2048 	VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2049 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2050 		imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2051 
2052 	const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(vki,
2053 										physicalDevice,
2054 										m_data.format).optimalTilingFeatures &
2055 										VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2056 
2057 	const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2058 
2059 	const VkImageCreateInfo			outputImageCreateInfo			=
2060 	{
2061 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2062 		DE_NULL,								// const void*				pNext;
2063 		mutableFormatFlag,						// VkImageCreateFlags		flags;
2064 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
2065 		m_data.format,							// VkFormat					format;
2066 		{
2067 			DIM,								// deUint32	width;
2068 			DIM,								// deUint32	height;
2069 			1u									// deUint32	depth;
2070 		},										// VkExtent3D				extent;
2071 		1u,										// deUint32					mipLevels;
2072 		1u,										// deUint32					arrayLayers;
2073 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
2074 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2075 		VK_IMAGE_USAGE_STORAGE_BIT
2076 		| usageSampledImage
2077 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2078 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2079 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2080 		0u,										// deUint32					queueFamilyIndexCount;
2081 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2082 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2083 	};
2084 
2085 	deUint32 width = m_data.imageDim[0];
2086 	deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
2087 	deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2088 	deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2089 						m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
2090 						m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2091 						m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2092 
2093 	const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
2094 
2095 	const VkImageCreateInfo			imageCreateInfo			=
2096 	{
2097 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2098 		DE_NULL,								// const void*				pNext;
2099 		imageCreateFlags,						// VkImageCreateFlags		flags;
2100 		imageViewTypeToImageType(m_data.viewType),	// VkImageType				imageType;
2101 		m_data.format,							// VkFormat					format;
2102 		{
2103 			width,								// deUint32	width;
2104 			height,								// deUint32	height;
2105 			depth								// deUint32	depth;
2106 		},										// VkExtent3D				extent;
2107 		1u,										// deUint32					mipLevels;
2108 		layers,									// deUint32					arrayLayers;
2109 		m_data.samples,							// VkSampleCountFlagBits	samples;
2110 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2111 		usageImage
2112 		| usageSampledImage
2113 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2114 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2115 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2116 		0u,										// deUint32					queueFamilyIndexCount;
2117 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2118 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2119 	};
2120 
2121 	VkImageViewCreateInfo		imageViewCreateInfo		=
2122 	{
2123 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
2124 		DE_NULL,									// const void*				pNext;
2125 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
2126 		DE_NULL,									// VkImage					image;
2127 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
2128 		m_data.format,								// VkFormat					format;
2129 		{
2130 			VK_COMPONENT_SWIZZLE_IDENTITY,
2131 			VK_COMPONENT_SWIZZLE_IDENTITY,
2132 			VK_COMPONENT_SWIZZLE_IDENTITY,
2133 			VK_COMPONENT_SWIZZLE_IDENTITY
2134 		},											// VkComponentMapping		 components;
2135 		{
2136 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
2137 			0u,										// deUint32				baseMipLevel;
2138 			VK_REMAINING_MIP_LEVELS,				// deUint32				levelCount;
2139 			0u,										// deUint32				baseArrayLayer;
2140 			VK_REMAINING_ARRAY_LAYERS				// deUint32				layerCount;
2141 		}											// VkImageSubresourceRange	subresourceRange;
2142 	};
2143 
2144 	vector<ImageWithMemorySp> images(2);
2145 	vector<VkImageViewSp> imageViews(2);
2146 
2147 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2148 	{
2149 		deUint32 *ptr = (deUint32 *)bufferPtr;
2150 		deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2151 	}
2152 
2153 	BufferWithMemoryMp				bufferImageR64;
2154 	BufferWithMemoryMp				bufferOutputImageR64;
2155 	const VkDeviceSize				sizeOutputR64	= 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
2156 	const VkDeviceSize				sizeOneLayers	= 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2157 	const VkDeviceSize				sizeImageR64	= sizeOneLayers * layers;
2158 
2159 	if (formatIsR64(m_data.format))
2160 	{
2161 		bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2162 			vk, device, allocator,
2163 			makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2164 			MemoryRequirement::HostVisible));
2165 
2166 		deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
2167 
2168 		for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2169 		{
2170 			bufferUint64Ptr[ndx] = 0;
2171 		}
2172 		flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2173 
2174 		bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2175 			vk, device, allocator,
2176 			makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2177 			MemoryRequirement::HostVisible));
2178 
2179 		for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
2180 		{
2181 			bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
2182 			bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2183 
2184 			for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2185 			{
2186 				bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
2187 			}
2188 		}
2189 		flushAlloc(vk, device, bufferImageR64->getAllocation());
2190 	}
2191 
2192 	for (size_t b = 0; b < bindings.size(); ++b)
2193 	{
2194 		VkDescriptorSetLayoutBinding &binding = bindings[b];
2195 
2196 		if (binding.descriptorCount == 0)
2197 			continue;
2198 		if (b == 1 && m_data.nullDescriptor)
2199 			continue;
2200 
2201 		DE_ASSERT(binding.descriptorCount == 1);
2202 		switch (binding.descriptorType)
2203 		{
2204 		default: DE_ASSERT(0); // Fallthrough
2205 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2206 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2207 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2208 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2209 			{
2210 				deUint32 *ptr = (deUint32 *)bufferPtr;
2211 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2212 			}
2213 			break;
2214 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2215 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2216 			{
2217 				deUint32 *ptr = (deUint32 *)bufferPtr;
2218 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2219 
2220 				const vk::VkBufferViewCreateInfo viewCreateInfo =
2221 				{
2222 					vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2223 					DE_NULL,
2224 					(vk::VkBufferViewCreateFlags)0,
2225 					**buffer,								// buffer
2226 					m_data.format,							// format
2227 					(vk::VkDeviceSize)0,					// offset
2228 					(vk::VkDeviceSize)m_data.bufferLen		// range
2229 				};
2230 				vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2231 				bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2232 			}
2233 			break;
2234 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2235 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2236 			{
2237 				if (bindings.size() > 1 &&
2238 					bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2239 				{
2240 					if (m_data.format == VK_FORMAT_R64_SINT)
2241 						imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2242 
2243 					if (m_data.format == VK_FORMAT_R64_UINT)
2244 						imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2245 				}
2246 
2247 				if (b == 0)
2248 				{
2249 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2250 					imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2251 				}
2252 				else
2253 				{
2254 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2255 					imageViewCreateInfo.viewType = m_data.viewType;
2256 				}
2257 				imageViewCreateInfo.image = **images[b];
2258 				imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2259 
2260 				VkImage						img			= **images[b];
2261 				const VkBuffer&				bufferR64= ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2262 				const VkImageCreateInfo&	imageInfo	= ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2263 				const deUint32				clearLayers	= b == 0 ? 1 : layers;
2264 
2265 				if (!formatIsR64(m_data.format))
2266 				{
2267 					preImageBarrier.image	= img;
2268 					if (b == 1)
2269 					{
2270 						if (formatIsFloat(m_data.format))
2271 						{
2272 							deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2273 						}
2274 						else if (formatIsSignedInt(m_data.format))
2275 						{
2276 							deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2277 						}
2278 						else
2279 						{
2280 							deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2281 						}
2282 					}
2283 					postImageBarrier.image	= img;
2284 
2285 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2286 
2287 					for (unsigned int i = 0; i < clearLayers; ++i)
2288 					{
2289 						const VkImageSubresourceRange	clearRange				=
2290 						{
2291 							VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2292 							0u,							// deUint32				baseMipLevel;
2293 							VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2294 							i,							// deUint32				baseArrayLayer;
2295 							1							// deUint32				layerCount;
2296 						};
2297 
2298 						vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2299 
2300 						// Use same data for all faces for cube(array), otherwise make value a function of the layer
2301 						if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2302 						{
2303 							if (formatIsFloat(m_data.format))
2304 								clearValue.float32[0] += 1;
2305 							else if (formatIsSignedInt(m_data.format))
2306 								clearValue.int32[0] += 1;
2307 							else
2308 								clearValue.uint32[0] += 1;
2309 						}
2310 					}
2311 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2312 				}
2313 				else
2314 				{
2315 					if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2316 					{
2317 						const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2318 						const VkImageMemoryBarrier		imageBarrierPre		= makeImageMemoryBarrier(0,
2319 																				VK_ACCESS_SHADER_WRITE_BIT,
2320 																				VK_IMAGE_LAYOUT_UNDEFINED,
2321 																				VK_IMAGE_LAYOUT_GENERAL,
2322 																				img,
2323 																				subresourceRange);
2324 						const VkImageMemoryBarrier		imageBarrierPost	= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2325 																				VK_ACCESS_SHADER_READ_BIT,
2326 																				VK_IMAGE_LAYOUT_GENERAL,
2327 																				VK_IMAGE_LAYOUT_GENERAL,
2328 																				img,
2329 																				subresourceRange);
2330 
2331 						descriptorSetLayoutR64 =
2332 							DescriptorSetLayoutBuilder()
2333 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2334 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2335 							.build(vk, device);
2336 
2337 						descriptorPoolR64 =
2338 							DescriptorPoolBuilder()
2339 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2340 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,1)
2341 							.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2342 
2343 						descriptorSetFillImage = makeDescriptorSet(vk,
2344 							device,
2345 							*descriptorPoolR64,
2346 							*descriptorSetLayoutR64);
2347 
2348 						shaderModuleFillImage	= createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2349 						pipelineLayoutFillImage	= makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2350 						pipelineFillImage		= makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2351 
2352 						const VkDescriptorImageInfo		descResultImageInfo		= makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2353 						const VkDescriptorBufferInfo	descResultBufferInfo	= makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2354 
2355 						DescriptorSetUpdateBuilder()
2356 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2357 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2358 							.update(vk, device);
2359 
2360 						vk.cmdPipelineBarrier(*cmdBuffer,
2361 							VK_PIPELINE_STAGE_HOST_BIT,
2362 							VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2363 							(VkDependencyFlags)0,
2364 							0, (const VkMemoryBarrier*)DE_NULL,
2365 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2366 							1, &imageBarrierPre);
2367 
2368 						vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2369 						vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2370 
2371 						vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2372 
2373 						vk.cmdPipelineBarrier(*cmdBuffer,
2374 									VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2375 									VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2376 									(VkDependencyFlags)0,
2377 									0, (const VkMemoryBarrier*)DE_NULL,
2378 									0, (const VkBufferMemoryBarrier*)DE_NULL,
2379 									1, &imageBarrierPost);
2380 					}
2381 					else
2382 					{
2383 						VkDeviceSize					size			= ((b == 0) ? sizeOutputR64 : sizeImageR64);
2384 						const vector<VkBufferImageCopy>	bufferImageCopy	(1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2385 
2386 						copyBufferToImage(vk,
2387 							*cmdBuffer,
2388 							bufferR64,
2389 							size,
2390 							bufferImageCopy,
2391 							VK_IMAGE_ASPECT_COLOR_BIT,
2392 							1,
2393 							clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2394 					}
2395 				}
2396 			}
2397 			break;
2398 		}
2399 	}
2400 
2401 	const VkSamplerCreateInfo	samplerParams	=
2402 	{
2403 		VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType			sType;
2404 		DE_NULL,									// const void*				pNext;
2405 		0,											// VkSamplerCreateFlags		flags;
2406 		VK_FILTER_NEAREST,							// VkFilter					magFilter:
2407 		VK_FILTER_NEAREST,							// VkFilter					minFilter;
2408 		VK_SAMPLER_MIPMAP_MODE_NEAREST,				// VkSamplerMipmapMode		mipmapMode;
2409 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeU;
2410 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeV;
2411 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeW;
2412 		0.0f,										// float					mipLodBias;
2413 		VK_FALSE,									// VkBool32					anistoropyEnable;
2414 		1.0f,										// float					maxAnisotropy;
2415 		VK_FALSE,									// VkBool32					compareEnable;
2416 		VK_COMPARE_OP_ALWAYS,						// VkCompareOp				compareOp;
2417 		0.0f,										// float					minLod;
2418 		0.0f,										// float					maxLod;
2419 		formatIsFloat(m_data.format) ?
2420 			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2421 			VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,	// VkBorderColor			borderColor;
2422 		VK_FALSE									// VkBool32					unnormalizedCoordinates;
2423 	};
2424 
2425 	Move<VkSampler>				sampler			(createSampler(vk, device, &samplerParams));
2426 
2427 	// Flush modified memory.
2428 	if (!m_data.nullDescriptor)
2429 		flushAlloc(vk, device, buffer->getAllocation());
2430 
2431 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2432 	{
2433 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// sType
2434 		DE_NULL,													// pNext
2435 		(VkPipelineLayoutCreateFlags)0,
2436 		1u,															// setLayoutCount
2437 		&descriptorSetLayout.get(),									// pSetLayouts
2438 		0u,															// pushConstantRangeCount
2439 		DE_NULL,													// pPushConstantRanges
2440 	};
2441 
2442 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2443 
2444 	de::MovePtr<BufferWithMemory> copyBuffer;
2445 	copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2446 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2447 
2448 	{
2449 		vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2450 		vector<VkDescriptorImageInfo> imageInfoVec(2);
2451 		vector<VkBufferView> bufferViewVec(2);
2452 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2453 		int vecIndex = 0;
2454 		int numDynamic = 0;
2455 
2456 #ifndef CTS_USES_VULKANSC
2457 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2458 												bufTemplateEntriesBefore,
2459 												texelBufTemplateEntriesBefore;
2460 #endif
2461 
2462 		for (size_t b = 0; b < bindings.size(); ++b)
2463 		{
2464 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2465 			// Construct the declaration for the binding
2466 			if (binding.descriptorCount > 0)
2467 			{
2468 				// output image
2469 				switch (binding.descriptorType)
2470 				{
2471 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2472 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2473 					// Output image.
2474 					if (b == 1 && m_data.nullDescriptor)
2475 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2476 					else
2477 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2478 					break;
2479 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2480 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2481 					if (b == 1 && m_data.nullDescriptor)
2482 						bufferViewVec[vecIndex] = DE_NULL;
2483 					else
2484 						bufferViewVec[vecIndex] = **bufferViews[0];
2485 					break;
2486 				default:
2487 					// Other descriptor types.
2488 					if (b == 1 && m_data.nullDescriptor)
2489 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2490 					else
2491 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2492 					break;
2493 				}
2494 
2495 				VkWriteDescriptorSet w =
2496 				{
2497 					VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,				// sType
2498 					DE_NULL,											// pNext
2499 					m_data.pushDescriptor ? DE_NULL : *descriptorSet,	// dstSet
2500 					(deUint32)b,										// binding
2501 					0,													// dstArrayElement
2502 					1u,													// descriptorCount
2503 					binding.descriptorType,								// descriptorType
2504 					&imageInfoVec[vecIndex],							// pImageInfo
2505 					&bufferInfoVec[vecIndex],							// pBufferInfo
2506 					&bufferViewVec[vecIndex],							// pTexelBufferView
2507 				};
2508 
2509 #ifndef CTS_USES_VULKANSC
2510 				VkDescriptorUpdateTemplateEntry templateEntry =
2511 				{
2512 					(deUint32)b,				// uint32_t				dstBinding;
2513 					0,							// uint32_t				dstArrayElement;
2514 					1u,							// uint32_t				descriptorCount;
2515 					binding.descriptorType,		// VkDescriptorType		descriptorType;
2516 					0,							// size_t				offset;
2517 					0,							// size_t				stride;
2518 				};
2519 
2520 				switch (binding.descriptorType)
2521 				{
2522 				default: DE_ASSERT(0); // Fallthrough
2523 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2524 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2525 					templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2526 					imgTemplateEntriesBefore.push_back(templateEntry);
2527 					break;
2528 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2529 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2530 					templateEntry.offset = vecIndex * sizeof(VkBufferView);
2531 					texelBufTemplateEntriesBefore.push_back(templateEntry);
2532 					break;
2533 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2534 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2535 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2536 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2537 					templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2538 					bufTemplateEntriesBefore.push_back(templateEntry);
2539 					break;
2540 				}
2541 #endif
2542 
2543 				vecIndex++;
2544 
2545 				writesBeforeBindVec.push_back(w);
2546 
2547 				// Count the number of dynamic descriptors in this set.
2548 				if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2549 					binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2550 				{
2551 					numDynamic++;
2552 				}
2553 			}
2554 		}
2555 
2556 		// Make zeros have at least one element so &zeros[0] works
2557 		vector<deUint32> zeros(de::max(1,numDynamic));
2558 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2559 
2560 		// Randomly select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate
2561 		if (m_data.useTemplate)
2562 		{
2563 #ifndef CTS_USES_VULKANSC
2564 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2565 			{
2566 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2567 				NULL,														// void*									pNext;
2568 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2569 				0,															// uint32_t									descriptorUpdateEntryCount;
2570 				DE_NULL,													// uint32_t									descriptorUpdateEntryCount;
2571 				m_data.pushDescriptor ?
2572 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2573 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,		// VkDescriptorUpdateTemplateType			templateType;
2574 				descriptorSetLayout.get(),									// VkDescriptorSetLayout					descriptorSetLayout;
2575 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2576 				*pipelineLayout,											// VkPipelineLayout							pipelineLayout;
2577 				0,															// uint32_t									set;
2578 			};
2579 
2580 			void *templateVectorData[] =
2581 			{
2582 				imageInfoVec.data(),
2583 				bufferInfoVec.data(),
2584 				bufferViewVec.data(),
2585 			};
2586 
2587 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2588 			{
2589 				&imgTemplateEntriesBefore,
2590 				&bufTemplateEntriesBefore,
2591 				&texelBufTemplateEntriesBefore,
2592 			};
2593 
2594 			if (m_data.pushDescriptor)
2595 			{
2596 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2597 				{
2598 					if (templateVectorsBefore[i]->size())
2599 					{
2600 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2601 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2602 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2603 						vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2604 					}
2605 				}
2606 			}
2607 			else
2608 			{
2609 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2610 				{
2611 					if (templateVectorsBefore[i]->size())
2612 					{
2613 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2614 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2615 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2616 						vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2617 					}
2618 				}
2619 
2620 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2621 			}
2622 #endif
2623 		}
2624 		else
2625 		{
2626 			if (m_data.pushDescriptor)
2627 			{
2628 #ifndef CTS_USES_VULKANSC
2629 				if (writesBeforeBindVec.size())
2630 				{
2631 					vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2632 				}
2633 #endif
2634 			}
2635 			else
2636 			{
2637 				if (writesBeforeBindVec.size())
2638 				{
2639 					vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2640 				}
2641 
2642 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2643 			}
2644 		}
2645 	}
2646 
2647 	Move<VkPipeline> pipeline;
2648 	Move<VkRenderPass> renderPass;
2649 	Move<VkFramebuffer> framebuffer;
2650 
2651 	de::MovePtr<BufferWithMemory> sbtBuffer;
2652 
2653 	if (m_data.stage == STAGE_COMPUTE)
2654 	{
2655 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2656 
2657 		pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2658 
2659 	}
2660 #ifndef CTS_USES_VULKANSC
2661 	else if (m_data.stage == STAGE_RAYGEN)
2662 	{
2663 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2664 
2665 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2666 		{
2667 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2668 			DE_NULL,
2669 			(VkPipelineShaderStageCreateFlags)0,
2670 			VK_SHADER_STAGE_RAYGEN_BIT_NV,								// stage
2671 			*shader,													// shader
2672 			"main",
2673 			DE_NULL,													// pSpecializationInfo
2674 		};
2675 
2676 		VkRayTracingShaderGroupCreateInfoNV group =
2677 		{
2678 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2679 			DE_NULL,
2680 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,			// type
2681 			0,														// generalShader
2682 			VK_SHADER_UNUSED_KHR,									// closestHitShader
2683 			VK_SHADER_UNUSED_KHR,									// anyHitShader
2684 			VK_SHADER_UNUSED_KHR,									// intersectionShader
2685 		};
2686 
2687 		VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2688 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV,	// sType
2689 			DE_NULL,												// pNext
2690 			0,														// flags
2691 			1,														// stageCount
2692 			&shaderCreateInfo,										// pStages
2693 			1,														// groupCount
2694 			&group,													// pGroups
2695 			0,														// maxRecursionDepth
2696 			*pipelineLayout,										// layout
2697 			(vk::VkPipeline)0,										// basePipelineHandle
2698 			0u,														// basePipelineIndex
2699 		};
2700 
2701 		pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2702 
2703 		sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2704 			vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2705 
2706 		deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2707 		invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2708 
2709 		vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
2710 	}
2711 #endif
2712 	else
2713 	{
2714 		const VkSubpassDescription		subpassDesc				=
2715 		{
2716 			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
2717 			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
2718 			0u,																		// deUint32						inputAttachmentCount
2719 			DE_NULL,																// const VkAttachmentReference*	pInputAttachments
2720 			0u,																		// deUint32						colorAttachmentCount
2721 			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
2722 			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
2723 			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
2724 			0u,																		// deUint32						preserveAttachmentCount
2725 			DE_NULL																	// const deUint32*				pPreserveAttachments
2726 		};
2727 
2728 		const VkSubpassDependency		subpassDependency		=
2729 		{
2730 			VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
2731 			0,												// deUint32				dstSubpass
2732 			VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
2733 			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,			// VkPipelineStageFlags	dstStageMask
2734 			VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
2735 			VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT,	//	dstAccessMask
2736 			VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
2737 		};
2738 
2739 		const VkRenderPassCreateInfo	renderPassParams		=
2740 		{
2741 			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureTypei					sType
2742 			DE_NULL,												// const void*						pNext
2743 			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
2744 			0u,														// deUint32							attachmentCount
2745 			DE_NULL,												// const VkAttachmentDescription*	pAttachments
2746 			1u,														// deUint32							subpassCount
2747 			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
2748 			1u,														// deUint32							dependencyCount
2749 			&subpassDependency										// const VkSubpassDependency*		pDependencies
2750 		};
2751 
2752 		renderPass = createRenderPass(vk, device, &renderPassParams);
2753 
2754 		const vk::VkFramebufferCreateInfo	framebufferParams	=
2755 		{
2756 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
2757 			DE_NULL,												// pNext
2758 			(vk::VkFramebufferCreateFlags)0,
2759 			*renderPass,											// renderPass
2760 			0u,														// attachmentCount
2761 			DE_NULL,												// pAttachments
2762 			DIM,													// width
2763 			DIM,													// height
2764 			1u,														// layers
2765 		};
2766 
2767 		framebuffer = createFramebuffer(vk, device, &framebufferParams);
2768 
2769 		const VkVertexInputBindingDescription			vertexInputBindingDescription		=
2770 		{
2771 			0u,								// deUint32			 binding
2772 			(deUint32)formatBytes,			// deUint32			 stride
2773 			VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate
2774 		};
2775 
2776 		const VkVertexInputAttributeDescription			vertexInputAttributeDescription		=
2777 		{
2778 			0u,								// deUint32	location
2779 			0u,								// deUint32	binding
2780 			m_data.format,					// VkFormat	format
2781 			0u								// deUint32	offset
2782 		};
2783 
2784 		deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2785 
2786 		VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
2787 		{
2788 			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2789 			DE_NULL,													// const void*								pNext;
2790 			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
2791 			numAttribs,													// deUint32									vertexBindingDescriptionCount;
2792 			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2793 			numAttribs,													// deUint32									vertexAttributeDescriptionCount;
2794 			&vertexInputAttributeDescription							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2795 		};
2796 
2797 		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
2798 		{
2799 			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
2800 			DE_NULL,														// const void*								pNext;
2801 			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
2802 			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
2803 			VK_FALSE														// VkBool32									primitiveRestartEnable;
2804 		};
2805 
2806 		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
2807 		{
2808 			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
2809 			DE_NULL,														// const void*								pNext;
2810 			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
2811 			VK_FALSE,														// VkBool32									depthClampEnable;
2812 			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
2813 			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
2814 			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
2815 			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
2816 			VK_FALSE,														// VkBool32									depthBiasEnable;
2817 			0.0f,															// float									depthBiasConstantFactor;
2818 			0.0f,															// float									depthBiasClamp;
2819 			0.0f,															// float									depthBiasSlopeFactor;
2820 			1.0f															// float									lineWidth;
2821 		};
2822 
2823 		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
2824 		{
2825 			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
2826 			DE_NULL,													// const void*								pNext
2827 			0u,															// VkPipelineMultisampleStateCreateFlags	flags
2828 			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
2829 			VK_FALSE,													// VkBool32									sampleShadingEnable
2830 			1.0f,														// float									minSampleShading
2831 			DE_NULL,													// const VkSampleMask*						pSampleMask
2832 			VK_FALSE,													// VkBool32									alphaToCoverageEnable
2833 			VK_FALSE													// VkBool32									alphaToOneEnable
2834 		};
2835 
2836 		VkViewport viewport = makeViewport(DIM, DIM);
2837 		VkRect2D scissor = makeRect2D(DIM, DIM);
2838 
2839 		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
2840 		{
2841 			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
2842 			DE_NULL,												// const void*								pNext
2843 			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
2844 			1u,														// deUint32									viewportCount
2845 			&viewport,												// const VkViewport*						pViewports
2846 			1u,														// deUint32									scissorCount
2847 			&scissor												// const VkRect2D*							pScissors
2848 		};
2849 
2850 		Move<VkShaderModule> fs;
2851 		Move<VkShaderModule> vs;
2852 
2853 		deUint32 numStages;
2854 		if (m_data.stage == STAGE_VERTEX)
2855 		{
2856 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2857 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus
2858 			numStages = 1u;
2859 		}
2860 		else
2861 		{
2862 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2863 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2864 			numStages = 2u;
2865 		}
2866 
2867 		VkPipelineShaderStageCreateInfo	shaderCreateInfo[2] =
2868 		{
2869 			{
2870 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2871 				DE_NULL,
2872 				(VkPipelineShaderStageCreateFlags)0,
2873 				VK_SHADER_STAGE_VERTEX_BIT,									// stage
2874 				*vs,														// shader
2875 				"main",
2876 				DE_NULL,													// pSpecializationInfo
2877 			},
2878 			{
2879 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2880 				DE_NULL,
2881 				(VkPipelineShaderStageCreateFlags)0,
2882 				VK_SHADER_STAGE_FRAGMENT_BIT,								// stage
2883 				*fs,														// shader
2884 				"main",
2885 				DE_NULL,													// pSpecializationInfo
2886 			}
2887 		};
2888 
2889 		VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
2890 		{
2891 			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
2892 			DE_NULL,											// const void*										pNext;
2893 			(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
2894 			numStages,											// deUint32											stageCount;
2895 			&shaderCreateInfo[0],								// const VkPipelineShaderStageCreateInfo*			pStages;
2896 			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
2897 			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
2898 			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
2899 			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
2900 			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
2901 			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
2902 			DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
2903 			DE_NULL,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
2904 			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
2905 			pipelineLayout.get(),								// VkPipelineLayout									layout;
2906 			renderPass.get(),									// VkRenderPass										renderPass;
2907 			0u,													// deUint32											subpass;
2908 			DE_NULL,											// VkPipeline										basePipelineHandle;
2909 			0													// int												basePipelineIndex;
2910 		};
2911 
2912 #ifndef CTS_USES_VULKANSC
2913 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2914 		if (m_data.testPipelineRobustness)
2915 		{
2916 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2917 
2918 			if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2919 			{
2920 				graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2921 			}
2922 			else if (m_data.stage == STAGE_VERTEX)
2923 			{
2924 				shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
2925 			}
2926 			else
2927 			{
2928 				shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
2929 			}
2930 		}
2931 #endif
2932 
2933 		pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
2934 	}
2935 
2936 	const VkImageMemoryBarrier imageBarrier =
2937 	{
2938 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
2939 		DE_NULL,											// const void*			pNext
2940 		0u,													// VkAccessFlags		srcAccessMask
2941 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
2942 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
2943 		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
2944 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
2945 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
2946 		**images[0],										// VkImage				image
2947 		{
2948 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
2949 			0u,										// uint32_t				baseMipLevel
2950 			1u,										// uint32_t				mipLevels,
2951 			0u,										// uint32_t				baseArray
2952 			1u,										// uint32_t				arraySize
2953 		}
2954 	};
2955 
2956 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2957 							(VkDependencyFlags)0,
2958 							0, (const VkMemoryBarrier*)DE_NULL,
2959 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2960 							1, &imageBarrier);
2961 
2962 	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
2963 
2964 	if (!formatIsR64(m_data.format))
2965 	{
2966 		VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2967 		VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
2968 
2969 		vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
2970 	}
2971 	else
2972 	{
2973 		const vector<VkBufferImageCopy>	bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
2974 		copyBufferToImage(vk,
2975 			*cmdBuffer,
2976 			*(*bufferOutputImageR64),
2977 			sizeOutputR64,
2978 			bufferImageCopy,
2979 			VK_IMAGE_ASPECT_COLOR_BIT,
2980 			1,
2981 			1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2982 	}
2983 
2984 	VkMemoryBarrier					memBarrier =
2985 	{
2986 		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
2987 		DE_NULL,							// pNext
2988 		0u,									// srcAccessMask
2989 		0u,									// dstAccessMask
2990 	};
2991 
2992 	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
2993 	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
2994 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
2995 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
2996 
2997 	if (m_data.stage == STAGE_COMPUTE)
2998 	{
2999 		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3000 	}
3001 #ifndef CTS_USES_VULKANSC
3002 	else if (m_data.stage == STAGE_RAYGEN)
3003 	{
3004 		vk.cmdTraceRaysNV(*cmdBuffer,
3005 			**sbtBuffer, 0,
3006 			DE_NULL, 0, 0,
3007 			DE_NULL, 0, 0,
3008 			DE_NULL, 0, 0,
3009 			DIM, DIM, 1);
3010 	}
3011 #endif
3012 	else
3013 	{
3014 		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
3015 						makeRect2D(DIM, DIM),
3016 						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
3017 		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3018 		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3019 		{
3020 			VkDeviceSize zeroOffset = 0;
3021 			VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3022 			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3023 			vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3024 		}
3025 		if (m_data.stage == STAGE_VERTEX)
3026 		{
3027 			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
3028 		}
3029 		else
3030 		{
3031 			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3032 		}
3033 		endRenderPass(vk, *cmdBuffer);
3034 	}
3035 
3036 	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3037 	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3038 	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
3039 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3040 
3041 	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
3042 															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3043 	vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
3044 
3045 	endCommandBuffer(vk, *cmdBuffer);
3046 
3047 	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3048 
3049 	void *ptr = copyBuffer->getAllocation().getHostPtr();
3050 
3051 	invalidateAlloc(vk, device, copyBuffer->getAllocation());
3052 
3053 	qpTestResult res = QP_TEST_RESULT_PASS;
3054 
3055 	for (deUint32 i = 0; i < DIM*DIM; ++i)
3056 	{
3057 		if (formatIsFloat(m_data.format))
3058 		{
3059 			if (((float *)ptr)[i * numComponents] != 1.0f)
3060 			{
3061 				res = QP_TEST_RESULT_FAIL;
3062 			}
3063 		}
3064 		else if (formatIsR64(m_data.format))
3065 		{
3066 			if (((deUint64 *)ptr)[i * numComponents] != 1)
3067 			{
3068 				res = QP_TEST_RESULT_FAIL;
3069 			}
3070 		}
3071 		else
3072 		{
3073 			if (((deUint32 *)ptr)[i * numComponents] != 1)
3074 			{
3075 				res = QP_TEST_RESULT_FAIL;
3076 			}
3077 		}
3078 	}
3079 
3080 	return tcu::TestStatus(res, qpGetTestResultName(res));
3081 }
3082 
3083 }	// anonymous
3084 
createTests(tcu::TestCaseGroup * group,bool robustness2,bool pipelineRobustness)3085 static void createTests (tcu::TestCaseGroup* group, bool robustness2, bool pipelineRobustness)
3086 {
3087 	tcu::TestContext& testCtx = group->getTestContext();
3088 
3089 	typedef struct
3090 	{
3091 		deUint32				count;
3092 		const char*				name;
3093 		const char*				description;
3094 	} TestGroupCase;
3095 
3096 	TestGroupCase fmtCases[] =
3097 	{
3098 		{ VK_FORMAT_R32_SINT,				"r32i",		""		},
3099 		{ VK_FORMAT_R32_UINT,				"r32ui",	""		},
3100 		{ VK_FORMAT_R32_SFLOAT,				"r32f",		""		},
3101 		{ VK_FORMAT_R32G32_SINT,			"rg32i",	""		},
3102 		{ VK_FORMAT_R32G32_UINT,			"rg32ui",	""		},
3103 		{ VK_FORMAT_R32G32_SFLOAT,			"rg32f",	""		},
3104 		{ VK_FORMAT_R32G32B32A32_SINT,		"rgba32i",	""		},
3105 		{ VK_FORMAT_R32G32B32A32_UINT,		"rgba32ui",	""		},
3106 		{ VK_FORMAT_R32G32B32A32_SFLOAT,	"rgba32f",	""		},
3107 		{ VK_FORMAT_R64_SINT,				"r64i",		""		},
3108 		{ VK_FORMAT_R64_UINT,				"r64ui",	""		},
3109 	};
3110 
3111 	TestGroupCase fullDescCases[] =
3112 	{
3113 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,				"uniform_buffer",			""		},
3114 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,				"storage_buffer",			""		},
3115 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,		"uniform_buffer_dynamic",	""		},
3116 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,		"storage_buffer_dynamic",	""		},
3117 		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,			"uniform_texel_buffer",		""		},
3118 		{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,			"storage_texel_buffer",		""		},
3119 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
3120 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
3121 		{ VERTEX_ATTRIBUTE_FETCH,							"vertex_attribute_fetch",	""		},
3122 	};
3123 
3124 	TestGroupCase imgDescCases[] =
3125 	{
3126 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
3127 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
3128 	};
3129 
3130 	TestGroupCase fullLenCases32Bit[] =
3131 	{
3132 		{ ~0U,			"null_descriptor",	""		},
3133 		{ 0,			"img",				""		},
3134 		{ 4,			"len_4",			""		},
3135 		{ 8,			"len_8",			""		},
3136 		{ 12,			"len_12",			""		},
3137 		{ 16,			"len_16",			""		},
3138 		{ 20,			"len_20",			""		},
3139 		{ 31,			"len_31",			""		},
3140 		{ 32,			"len_32",			""		},
3141 		{ 33,			"len_33",			""		},
3142 		{ 35,			"len_35",			""		},
3143 		{ 36,			"len_36",			""		},
3144 		{ 39,			"len_39",			""		},
3145 		{ 40,			"len_41",			""		},
3146 		{ 252,			"len_252",			""		},
3147 		{ 256,			"len_256",			""		},
3148 		{ 260,			"len_260",			""		},
3149 	};
3150 
3151 	TestGroupCase fullLenCases64Bit[] =
3152 	{
3153 		{ ~0U,			"null_descriptor",	""		},
3154 		{ 0,			"img",				""		},
3155 		{ 8,			"len_8",			""		},
3156 		{ 16,			"len_16",			""		},
3157 		{ 24,			"len_24",			""		},
3158 		{ 32,			"len_32",			""		},
3159 		{ 40,			"len_40",			""		},
3160 		{ 62,			"len_62",			""		},
3161 		{ 64,			"len_64",			""		},
3162 		{ 66,			"len_66",			""		},
3163 		{ 70,			"len_70",			""		},
3164 		{ 72,			"len_72",			""		},
3165 		{ 78,			"len_78",			""		},
3166 		{ 80,			"len_80",			""		},
3167 		{ 504,			"len_504",			""		},
3168 		{ 512,			"len_512",			""		},
3169 		{ 520,			"len_520",			""		},
3170 	};
3171 
3172 	TestGroupCase imgLenCases[] =
3173 	{
3174 		{ 0,	"img",	""		},
3175 	};
3176 
3177 	TestGroupCase viewCases[] =
3178 	{
3179 		{ VK_IMAGE_VIEW_TYPE_1D,			"1d",			""		},
3180 		{ VK_IMAGE_VIEW_TYPE_2D,			"2d",			""		},
3181 		{ VK_IMAGE_VIEW_TYPE_3D,			"3d",			""		},
3182 		{ VK_IMAGE_VIEW_TYPE_CUBE,			"cube",			""		},
3183 		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		"1d_array",		""		},
3184 		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		"2d_array",		""		},
3185 		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	"cube_array",	""		},
3186 	};
3187 
3188 	TestGroupCase sampCases[] =
3189 	{
3190 		{ VK_SAMPLE_COUNT_1_BIT,			"samples_1",	""		},
3191 		{ VK_SAMPLE_COUNT_4_BIT,			"samples_4",	""		},
3192 	};
3193 
3194 	TestGroupCase stageCases[] =
3195 	{
3196 		{ STAGE_COMPUTE,	"comp",		"compute"	},
3197 		{ STAGE_FRAGMENT,	"frag",		"fragment"	},
3198 		{ STAGE_VERTEX,		"vert",		"vertex"	},
3199 #ifndef CTS_USES_VULKANSC
3200 		{ STAGE_RAYGEN,		"rgen",		"raygen"	},
3201 #endif
3202 	};
3203 
3204 	TestGroupCase volCases[] =
3205 	{
3206 		{ 0,			"nonvolatile",	""		},
3207 		{ 1,			"volatile",		""		},
3208 	};
3209 
3210 	TestGroupCase unrollCases[] =
3211 	{
3212 		{ 0,			"dontunroll",	""		},
3213 		{ 1,			"unroll",		""		},
3214 	};
3215 
3216 	TestGroupCase tempCases[] =
3217 	{
3218 		{ 0,			"notemplate",	""		},
3219 #ifndef CTS_USES_VULKANSC
3220 		{ 1,			"template",		""		},
3221 #endif
3222 	};
3223 
3224 	TestGroupCase pushCases[] =
3225 	{
3226 		{ 0,			"bind",			""		},
3227 #ifndef CTS_USES_VULKANSC
3228 		{ 1,			"push",			""		},
3229 #endif
3230 	};
3231 
3232 	TestGroupCase fmtQualCases[] =
3233 	{
3234 		{ 0,			"no_fmt_qual",	""		},
3235 		{ 1,			"fmt_qual",		""		},
3236 	};
3237 
3238 	TestGroupCase readOnlyCases[] =
3239 	{
3240 		{ 0,			"readwrite",	""		},
3241 		{ 1,			"readonly",		""		},
3242 	};
3243 
3244 	for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3245 	{
3246 		de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
3247 		for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3248 		{
3249 			de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
3250 			for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3251 			{
3252 				de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));
3253 
3254 				// Avoid too much duplication by excluding certain test cases
3255 				if (pipelineRobustness &&
3256 				    !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT || fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT || fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3257 				{
3258 					continue;
3259 				}
3260 
3261 				int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3262 
3263 				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3264 				{
3265 					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
3266 
3267 					// Avoid too much duplication by excluding certain test cases
3268 					if (unrollNdx > 0 && pipelineRobustness)
3269 						continue;
3270 
3271 					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3272 					{
3273 						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));
3274 
3275 						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3276 						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3277 
3278 						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3279 						{
3280 							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
3281 
3282 							// Avoid too much duplication by excluding certain test cases
3283 							if (pipelineRobustness &&
3284 								!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3285 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3286 							{
3287 								continue;
3288 							}
3289 
3290 							for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3291 							{
3292 								de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));
3293 
3294 								// readonly cases are just for storage_buffer
3295 								if (readOnlyCases[roNdx].count != 0 &&
3296 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3297 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3298 									continue;
3299 
3300 								if (pipelineRobustness &&
3301 									readOnlyCases[roNdx].count != 0)
3302 								{
3303 									continue;
3304 								}
3305 
3306 								for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3307 								{
3308 									de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
3309 
3310 									// format qualifier is only used for storage image and storage texel buffers
3311 									if (fmtQualCases[fmtQualNdx].count &&
3312 										!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3313 										continue;
3314 
3315 									if (pushCases[pushNdx].count &&
3316 										(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3317 										continue;
3318 
3319 									const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3320 									int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
3321 									TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3322 
3323 									for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3324 									{
3325 										if (lenCases[lenNdx].count != ~0U)
3326 										{
3327 											bool bufferLen = lenCases[lenNdx].count != 0;
3328 											bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3329 											if (bufferLen != bufferDesc)
3330 												continue;
3331 
3332 											// Add template tests cases only for null_descriptor cases
3333 											if (tempCases[tempNdx].count)
3334 												continue;
3335 										}
3336 
3337 										if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3338 											((lenCases[lenNdx].count % fmtSize) != 0) &&
3339 											lenCases[lenNdx].count != ~0U)
3340 										{
3341 											continue;
3342 										}
3343 
3344 										// Avoid too much duplication by excluding certain test cases
3345 										if (pipelineRobustness && robustness2 &&
3346 											(lenCases[lenNdx].count == 0 || ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
3347 										{
3348 											continue;
3349 										}
3350 
3351 										// "volatile" only applies to storage images/buffers
3352 										if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3353 											continue;
3354 
3355 
3356 										de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
3357 										for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3358 										{
3359 											de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
3360 
3361 											// Avoid too much duplication by excluding certain test cases
3362 											if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3363 											    continue;
3364 
3365 											for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3366 											{
3367 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3368 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3369 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3370 												{
3371 													// buffer descriptors don't have different dimensionalities. Only test "1D"
3372 													continue;
3373 												}
3374 
3375 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3376 													sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3377 												{
3378 													continue;
3379 												}
3380 
3381 												// Avoid too much duplication by excluding certain test cases
3382 												if (pipelineRobustness &&
3383 													!(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
3384 												{
3385 													continue;
3386 												}
3387 
3388 												de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
3389 												for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
3390 												{
3391 													Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
3392 													VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
3393 													VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3394 #ifndef CTS_USES_VULKANSC
3395 													if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
3396 													{
3397 														allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
3398 														allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
3399 
3400 														if (pipelineRobustness)
3401 															continue;
3402 													}
3403 #endif // CTS_USES_VULKANSC
3404 													if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
3405 														continue;
3406 
3407 													if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
3408 														currentStage != STAGE_VERTEX)
3409 														continue;
3410 
3411 													deUint32 imageDim[3] = {5, 11, 6};
3412 													if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
3413 														viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
3414 														imageDim[1] = imageDim[0];
3415 
3416 													CaseDef c =
3417 													{
3418 														(VkFormat)fmtCases[fmtNdx].count,								// VkFormat format;
3419 														currentStage,													// Stage stage;
3420 														allShaderStages,												// VkFlags allShaderStages;
3421 														allPipelineStages,												// VkFlags allPipelineStages;
3422 														(int)descCases[descNdx].count,									// VkDescriptorType descriptorType;
3423 														(VkImageViewType)viewCases[viewNdx].count,						// VkImageViewType viewType;
3424 														(VkSampleCountFlagBits)sampCases[sampNdx].count,				// VkSampleCountFlagBits samples;
3425 														(int)lenCases[lenNdx].count,									// int bufferLen;
3426 														(bool)unrollCases[unrollNdx].count,								// bool unroll;
3427 														(bool)volCases[volNdx].count,									// bool vol;
3428 														(bool)(lenCases[lenNdx].count == ~0U),							// bool nullDescriptor
3429 														(bool)tempCases[tempNdx].count,									// bool useTemplate
3430 														(bool)fmtQualCases[fmtQualNdx].count,							// bool formatQualifier
3431 														(bool)pushCases[pushNdx].count,									// bool pushDescriptor;
3432 														(bool)robustness2,												// bool testRobustness2;
3433 														(bool)pipelineRobustness,										// bool testPipelineRobustness;
3434 														{ imageDim[0], imageDim[1], imageDim[2] },						// deUint32 imageDim[3];
3435 														(bool)(readOnlyCases[roNdx].count == 1),						// bool readOnly;
3436 													};
3437 
3438 													viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
3439 												}
3440 												sampGroup->addChild(viewGroup.release());
3441 											}
3442 											lenGroup->addChild(sampGroup.release());
3443 										}
3444 										fmtQualGroup->addChild(lenGroup.release());
3445 									}
3446 									// Put storage_buffer tests in separate readonly vs readwrite groups. Other types
3447 									// go directly into descGroup
3448 									if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3449 										descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3450 										rwGroup->addChild(fmtQualGroup.release());
3451 									} else {
3452 										descGroup->addChild(fmtQualGroup.release());
3453 									}
3454 								}
3455 								if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3456 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3457 									descGroup->addChild(rwGroup.release());
3458 								}
3459 							}
3460 							volGroup->addChild(descGroup.release());
3461 						}
3462 						unrollGroup->addChild(volGroup.release());
3463 					}
3464 					fmtGroup->addChild(unrollGroup.release());
3465 				}
3466 				tempGroup->addChild(fmtGroup.release());
3467 			}
3468 			pushGroup->addChild(tempGroup.release());
3469 		}
3470 		group->addChild(pushGroup.release());
3471 	}
3472 }
3473 
createRobustness2Tests(tcu::TestCaseGroup * group)3474 static void createRobustness2Tests (tcu::TestCaseGroup* group)
3475 {
3476 	createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
3477 }
3478 
createImageRobustnessTests(tcu::TestCaseGroup * group)3479 static void createImageRobustnessTests (tcu::TestCaseGroup* group)
3480 {
3481 	createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
3482 }
3483 
3484 #ifndef CTS_USES_VULKANSC
createPipelineRobustnessTests(tcu::TestCaseGroup * group)3485 static void createPipelineRobustnessTests (tcu::TestCaseGroup* group)
3486 {
3487 	tcu::TestContext& testCtx = group->getTestContext();
3488 
3489 	tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2", "robustness2");
3490 
3491 	createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
3492 
3493 	group->addChild(robustness2Group);
3494 
3495 	tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness", "image_robustness");
3496 
3497 	createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
3498 
3499 	group->addChild(imageRobustness2Group);
3500 }
3501 #endif
3502 
cleanupGroup(tcu::TestCaseGroup * group)3503 static void cleanupGroup (tcu::TestCaseGroup* group)
3504 {
3505 	DE_UNREF(group);
3506 	// Destroy singleton objects.
3507 	Robustness2Int64AtomicsSingleton::destroy();
3508 	ImageRobustnessInt64AtomicsSingleton::destroy();
3509 	ImageRobustnessSingleton::destroy();
3510 	Robustness2Singleton::destroy();
3511 	PipelineRobustnessImageRobustnessSingleton::destroy();
3512 	PipelineRobustnessRobustness2Singleton::destroy();
3513 	PipelineRobustnessImageRobustnessInt64AtomicsSingleton::destroy();
3514 	PipelineRobustnessRobustness2Int64AtomicsSingleton::destroy();
3515 	Robustness2Int64AtomicsScalarSingleton::destroy();
3516 	ImageRobustnessInt64AtomicsScalarSingleton::destroy();
3517 	ImageRobustnessScalarSingleton::destroy();
3518 	Robustness2ScalarSingleton::destroy();
3519 	PipelineRobustnessImageRobustnessScalarSingleton::destroy();
3520 	PipelineRobustnessRobustness2ScalarSingleton::destroy();
3521 	PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::destroy();
3522 	PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::destroy();
3523 }
3524 
createRobustness2Tests(tcu::TestContext & testCtx)3525 tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
3526 {
3527 	return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
3528 							createRobustness2Tests, cleanupGroup);
3529 }
3530 
createImageRobustnessTests(tcu::TestContext & testCtx)3531 tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
3532 {
3533 	return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
3534 							createImageRobustnessTests, cleanupGroup);
3535 }
3536 
3537 #ifndef CTS_USES_VULKANSC
createPipelineRobustnessTests(tcu::TestContext & testCtx)3538 tcu::TestCaseGroup* createPipelineRobustnessTests (tcu::TestContext& testCtx)
3539 {
3540 	return createTestGroup(testCtx, "pipeline_robustness", "VK_EXT_pipeline_robustness tests",
3541 							createPipelineRobustnessTests, cleanupGroup);
3542 }
3543 #endif
3544 
3545 }	// robustness
3546 }	// vkt
3547