/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/

#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <limits>

namespace vkt
{
namespace robustness
{
namespace
{
using namespace vk;
using namespace std;
using de::SharedPtr;

enum RobustnessFeatureBits
{
	RF_IMG_ROBUSTNESS		= (1		),
	RF_ROBUSTNESS2			= (1 << 1	),
	SIF_INT64ATOMICS		= (1 << 2	),
	RF_PIPELINE_ROBUSTNESS	= (1 << 3	),
	SBL_SCALAR_BLOCK_LAYOUT	= (1 << 4	),
};

using RobustnessFeatures = deUint32;
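
// For illustration (a hypothetical combination): a robustness2 test that also needs
// 64-bit image atomics would request a device with the mask
//   RobustnessFeatures features = RF_ROBUSTNESS2 | SIF_INT64ATOMICS; // == 0x02 | 0x04
// Each distinct mask selects its own SingletonDevice instantiation below.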

// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
class SingletonDevice
{
	SingletonDevice	(Context& context)
		: m_context(context)
		, m_logicalDevice()
	{
		// Note we are already checking the needed features are available in checkSupport().
		VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
		VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
		VkPhysicalDeviceScalarBlockLayoutFeatures			scalarBlockLayoutFeatures		= initVulkanStructure();
		VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT	shaderImageAtomicInt64Features	= initVulkanStructure();
		VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();

		if (FEATURES & SBL_SCALAR_BLOCK_LAYOUT)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"));
			scalarBlockLayoutFeatures.pNext = features2.pNext;
			features2.pNext = &scalarBlockLayoutFeatures;
		}

		if (FEATURES & RF_IMG_ROBUSTNESS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));

			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
			{
				imageRobustnessFeatures.pNext = features2.pNext;
				features2.pNext = &imageRobustnessFeatures;
			}
		}

		if (FEATURES & RF_ROBUSTNESS2)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));

			if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
			{
				robustness2Features.pNext = features2.pNext;
				features2.pNext = &robustness2Features;
			}
		}

#ifndef CTS_USES_VULKANSC
		VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures = initVulkanStructure();
		if (FEATURES & RF_PIPELINE_ROBUSTNESS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"));
			pipelineRobustnessFeatures.pNext = features2.pNext;
			features2.pNext = &pipelineRobustnessFeatures;
		}
#endif

		if (FEATURES & SIF_INT64ATOMICS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
			shaderImageAtomicInt64Features.pNext = features2.pNext;
			features2.pNext = &shaderImageAtomicInt64Features;
		}
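
		// Each block above prepends its struct to the features2 pNext chain; e.g. for
		// FEATURES == (RF_ROBUSTNESS2 | SIF_INT64ATOMICS) the chain ends up as
		//   features2 -> shaderImageAtomicInt64Features -> robustness2Features -> NULL,
		// and the single getPhysicalDeviceFeatures2() call below fills in every struct.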

		const auto&	vki				= m_context.getInstanceInterface();
		const auto	instance		= m_context.getInstance();
		const auto	physicalDevice	= chooseDevice(vki, instance, context.getTestContext().getCommandLine());

		vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
		m_logicalDevice = createRobustBufferAccessDevice(context, &features2);

#ifndef CTS_USES_VULKANSC
		m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance, *m_logicalDevice));
#else
		m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
	}

public:
	~SingletonDevice()
	{
	}

	static VkDevice getDevice(Context& context)
	{
		if (!m_singletonDevice)
			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
		DE_ASSERT(m_singletonDevice);
		return m_singletonDevice->m_logicalDevice.get();
	}
	static const DeviceInterface& getDeviceInterface(Context& context)
	{
		if (!m_singletonDevice)
			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
		DE_ASSERT(m_singletonDevice);
		return *(m_singletonDevice->m_deviceDriver.get());
	}

	static void destroy()
	{
		m_singletonDevice.clear();
	}

private:
	const Context&								m_context;
	Move<vk::VkDevice>							m_logicalDevice;
#ifndef CTS_USES_VULKANSC
	de::MovePtr<vk::DeviceDriver>				m_deviceDriver;
#else
	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	m_deviceDriver;
#endif // CTS_USES_VULKANSC

	static SharedPtr<SingletonDevice<FEATURES>>	m_singletonDevice;
};

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
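
// Usage sketch: because the feature mask is a template parameter, each combination gets
// exactly one lazily created device that all matching tests share, e.g.
//   VkDevice dev = SingletonDevice<kRobustness2 | kScalarBlockLayout>::getDevice(ctx);
// (using the kXxx constants defined below).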

constexpr RobustnessFeatures kImageRobustness			= RF_IMG_ROBUSTNESS;
constexpr RobustnessFeatures kRobustness2				= RF_ROBUSTNESS2;
constexpr RobustnessFeatures kPipelineRobustness		= RF_PIPELINE_ROBUSTNESS;
constexpr RobustnessFeatures kShaderImageInt64Atomics	= SIF_INT64ATOMICS;
constexpr RobustnessFeatures kScalarBlockLayout			= SBL_SCALAR_BLOCK_LAYOUT;

using ImageRobustnessSingleton	= SingletonDevice<kImageRobustness>;
using Robustness2Singleton		= SingletonDevice<kRobustness2>;

using ImageRobustnessScalarSingleton	= SingletonDevice<kImageRobustness | kScalarBlockLayout>;
using Robustness2ScalarSingleton		= SingletonDevice<kRobustness2 | kScalarBlockLayout>;

using PipelineRobustnessImageRobustnessSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness>;
using PipelineRobustnessRobustness2Singleton		= SingletonDevice<kRobustness2 | kPipelineRobustness>;

using PipelineRobustnessImageRobustnessScalarSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kScalarBlockLayout>;
using PipelineRobustnessRobustness2ScalarSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kScalarBlockLayout>;

using ImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
using Robustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;

using ImageRobustnessInt64AtomicsScalarSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
using Robustness2Int64AtomicsScalarSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics | kScalarBlockLayout>;

using PipelineRobustnessImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics>;
using PipelineRobustnessRobustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics>;

using PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton	= SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
using PipelineRobustnessRobustness2Int64AtomicsScalarSingleton		= SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;

// Render target / compute grid dimensions
static const deUint32 DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999

typedef enum
{
	STAGE_COMPUTE = 0,
	STAGE_VERTEX,
	STAGE_FRAGMENT,
	STAGE_RAYGEN
} Stage;

struct CaseDef
{
	VkFormat format;
	Stage stage;
	VkFlags allShaderStages;
	VkFlags allPipelineStages;
	int/*VkDescriptorType*/ descriptorType;
	VkImageViewType viewType;
	VkSampleCountFlagBits samples;
	int bufferLen;
	bool unroll;
	bool vol;
	bool nullDescriptor;
	bool useTemplate;
	bool formatQualifier;
	bool pushDescriptor;
	bool testRobustness2;
	bool testPipelineRobustness;
	deUint32 imageDim[3]; // width, height, depth or layers
	bool readOnly;

	bool needsScalarBlockLayout() const
	{
		bool scalarNeeded = false;

		switch (descriptorType)
		{
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
			scalarNeeded = true;
			break;
		default:
			scalarNeeded = false;
			break;
		}

		return scalarNeeded;
	}
};
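
// A minimal sketch of filling in a case (field values are illustrative only):
//   CaseDef caseDef = {};
//   caseDef.format          = VK_FORMAT_R32G32B32A32_SFLOAT;
//   caseDef.stage           = STAGE_COMPUTE;
//   caseDef.descriptorType  = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
//   caseDef.viewType        = VK_IMAGE_VIEW_TYPE_2D;
//   caseDef.samples         = VK_SAMPLE_COUNT_1_BIT;
//   caseDef.testRobustness2 = true;
// needsScalarBlockLayout() would then return true, steering device selection below.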

static bool formatIsR64(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R64_UINT:
		return true;
	default:
		return false;
	}
}

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
{
	if (caseDef.needsScalarBlockLayout())
	{
		if (caseDef.testPipelineRobustness)
		{
			if (formatIsR64(caseDef.format))
			{
				if (caseDef.testRobustness2)
					return PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::getDevice(ctx);
				return PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);
			}

			if (caseDef.testRobustness2)
				return PipelineRobustnessRobustness2ScalarSingleton::getDevice(ctx);
			return PipelineRobustnessImageRobustnessScalarSingleton::getDevice(ctx);
		}

		if (formatIsR64(caseDef.format))
		{
			if (caseDef.testRobustness2)
				return Robustness2Int64AtomicsScalarSingleton::getDevice(ctx);
			return ImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);
		}

		if (caseDef.testRobustness2)
			return Robustness2ScalarSingleton::getDevice(ctx);
		return ImageRobustnessScalarSingleton::getDevice(ctx);
	}

	if (caseDef.testPipelineRobustness)
	{
		if (formatIsR64(caseDef.format))
		{
			if (caseDef.testRobustness2)
				return PipelineRobustnessRobustness2Int64AtomicsSingleton::getDevice(ctx);
			return PipelineRobustnessImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
		}

		if (caseDef.testRobustness2)
			return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
		return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);
	}

	if (formatIsR64(caseDef.format))
	{
		if (caseDef.testRobustness2)
			return Robustness2Int64AtomicsSingleton::getDevice(ctx);
		return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
	}

	if (caseDef.testRobustness2)
		return Robustness2Singleton::getDevice(ctx);
	return ImageRobustnessSingleton::getDevice(ctx);
}

// Returns the appropriate singleton device driver for the given case.
const DeviceInterface& getDeviceInterface(Context& ctx, const CaseDef& caseDef)
{
	// Mirror the selection logic of getLogicalDevice() so the returned driver belongs to
	// the same singleton device the test runs on (including pipeline robustness cases).
	if (caseDef.needsScalarBlockLayout())
	{
		if (caseDef.testPipelineRobustness)
		{
			if (formatIsR64(caseDef.format))
			{
				if (caseDef.testRobustness2)
					return PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::getDeviceInterface(ctx);
				return PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::getDeviceInterface(ctx);
			}
			if (caseDef.testRobustness2)
				return PipelineRobustnessRobustness2ScalarSingleton::getDeviceInterface(ctx);
			return PipelineRobustnessImageRobustnessScalarSingleton::getDeviceInterface(ctx);
		}

		if (formatIsR64(caseDef.format))
		{
			if (caseDef.testRobustness2)
				return Robustness2Int64AtomicsScalarSingleton::getDeviceInterface(ctx);
			return ImageRobustnessInt64AtomicsScalarSingleton::getDeviceInterface(ctx);
		}

		if (caseDef.testRobustness2)
			return Robustness2ScalarSingleton::getDeviceInterface(ctx);
		return ImageRobustnessScalarSingleton::getDeviceInterface(ctx);
	}

	if (caseDef.testPipelineRobustness)
	{
		if (formatIsR64(caseDef.format))
		{
			if (caseDef.testRobustness2)
				return PipelineRobustnessRobustness2Int64AtomicsSingleton::getDeviceInterface(ctx);
			return PipelineRobustnessImageRobustnessInt64AtomicsSingleton::getDeviceInterface(ctx);
		}
		if (caseDef.testRobustness2)
			return PipelineRobustnessRobustness2Singleton::getDeviceInterface(ctx);
		return PipelineRobustnessImageRobustnessSingleton::getDeviceInterface(ctx);
	}

	if (formatIsR64(caseDef.format))
	{
		if (caseDef.testRobustness2)
			return Robustness2Int64AtomicsSingleton::getDeviceInterface(ctx);
		return ImageRobustnessInt64AtomicsSingleton::getDeviceInterface(ctx);
	}

	if (caseDef.testRobustness2)
		return Robustness2Singleton::getDeviceInterface(ctx);
	return ImageRobustnessSingleton::getDeviceInterface(ctx);
}


class Layout
{
public:
	vector<VkDescriptorSetLayoutBinding> layoutBindings;
	vector<deUint8> refData;
};


class RobustnessExtsTestInstance : public TestInstance
{
public:
						RobustnessExtsTestInstance		(Context& context, const CaseDef& data);
						~RobustnessExtsTestInstance	(void);
	tcu::TestStatus		iterate								(void);
private:
	CaseDef				m_data;
};

RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
	: vkt::TestInstance		(context)
	, m_data				(data)
{
}

RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
{
}

class RobustnessExtsTestCase : public TestCase
{
	public:
								RobustnessExtsTestCase		(tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
								~RobustnessExtsTestCase	(void);
	virtual	void				initPrograms					(SourceCollections& programCollection) const;
	virtual TestInstance*		createInstance					(Context& context) const;
	virtual void				checkSupport					(Context& context) const;

private:
	CaseDef					m_data;
};

RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
	: vkt::TestCase	(context, name, desc)
	, m_data		(data)
{
}

RobustnessExtsTestCase::~RobustnessExtsTestCase	(void)
{
}

static bool formatIsFloat(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SFLOAT:
	case VK_FORMAT_R32G32_SFLOAT:
	case VK_FORMAT_R32G32B32A32_SFLOAT:
		return true;
	default:
		return false;
	}
}

static bool formatIsSignedInt(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SINT:
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R32G32_SINT:
	case VK_FORMAT_R32G32B32A32_SINT:
		return true;
	default:
		return false;
	}
}

static bool supportsStores(int descriptorType)
{
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		return true;
	default:
		return false;
	}
}

#ifndef CTS_USES_VULKANSC
static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
{
	VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();

	switch (descriptorType)
	{
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			robustnessCreateInfo.storageBuffers	= (robustness2
												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
			break;

		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			robustnessCreateInfo.images	= (robustness2
										? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT
										: VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);
			break;

		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			robustnessCreateInfo.uniformBuffers	= (robustness2
												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
			break;

		case VERTEX_ATTRIBUTE_FETCH:
			robustnessCreateInfo.vertexInputs	= (robustness2
												? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
												: VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
			break;

		default:
			DE_ASSERT(0);
	}

	return robustnessCreateInfo;
}
#endif
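
// Usage sketch (hypothetical local names): the returned struct is chained into a pipeline
// create info so the robustness behavior applies per pipeline rather than per device:
//   VkPipelineRobustnessCreateInfoEXT pr = getPipelineRobustnessInfo(true, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
//   pipelineCreateInfo.pNext = &pr;	// e.g. a VkComputePipelineCreateInfo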

void RobustnessExtsTestCase::checkSupport(Context& context) const
{
	const auto&	vki				= context.getInstanceInterface();
	const auto	physicalDevice	= context.getPhysicalDevice();

	// We need to query feature support using the physical device instead of using the reported context features because robustness2
	// and image robustness are always disabled in the default device but they may be available.
	VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
	VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
	VkPhysicalDeviceScalarBlockLayoutFeatures			scalarLayoutFeatures			= initVulkanStructure();
	VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();

	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");

	if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
	{
		scalarLayoutFeatures.pNext = features2.pNext;
		features2.pNext = &scalarLayoutFeatures;
	}

	if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
	{
		imageRobustnessFeatures.pNext = features2.pNext;
		features2.pNext = &imageRobustnessFeatures;
	}

	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
	{
		robustness2Features.pNext = features2.pNext;
		features2.pNext = &robustness2Features;
	}

#ifndef CTS_USES_VULKANSC
	VkPhysicalDevicePipelineRobustnessFeaturesEXT		pipelineRobustnessFeatures = initVulkanStructure();
	if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
	{
		pipelineRobustnessFeatures.pNext = features2.pNext;
		features2.pNext = &pipelineRobustnessFeatures;
	}
#endif

	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

	if (formatIsR64(m_data.format))
	{
		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

		VkFormatProperties formatProperties;
		vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);

#ifndef CTS_USES_VULKANSC
		const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
#endif // CTS_USES_VULKANSC

		switch (m_data.descriptorType)
		{
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
#ifndef CTS_USES_VULKANSC
			if ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) != VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
#endif // CTS_USES_VULKANSC
			break;
		case VERTEX_ATTRIBUTE_FETCH:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
			break;
		default: break; // Other descriptor types need no additional R64 format support checks.
		}

		if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
		{
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
		}
	}

	// Check needed properties and features
	if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
		TCU_THROW(NotSupportedError, "Scalar block layout not supported");

	if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

	if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
		TCU_THROW(NotSupportedError, "Fragment shader stores not supported");

	if (m_data.stage == STAGE_RAYGEN)
		context.requireDeviceFunctionality("VK_NV_ray_tracing");

	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustBufferAccess2)
				TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
		}
		else
		{
			// This case is not tested here.
			DE_ASSERT(false);
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustImageAccess2)
				TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
		}
		else
		{
			if (!imageRobustnessFeatures.robustImageAccess)
				TCU_THROW(NotSupportedError, "robustImageAccess not supported");
		}
		break;
	}

	if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
		TCU_THROW(NotSupportedError, "nullDescriptor not supported");

	// The fill shader for 64-bit multisample image tests uses a storage image.
	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
		!features2.features.shaderStorageImageMultisample)
		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
		!features2.features.shaderStorageImageMultisample)
		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

	if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
		TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

#ifndef CTS_USES_VULKANSC
	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		!m_data.formatQualifier)
	{
		const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
			TCU_THROW(NotSupportedError, "Format does not support reading without format");
		if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
			TCU_THROW(NotSupportedError, "Format does not support writing without format");
	}
#else
	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		!m_data.formatQualifier &&
		(!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
		TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
#endif // CTS_USES_VULKANSC

	if (m_data.pushDescriptor)
		context.requireDeviceFunctionality("VK_KHR_push_descriptor");

	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
		TCU_THROW(NotSupportedError, "Cube array image view type not supported");

	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");

#ifndef CTS_USES_VULKANSC
	if (m_data.testPipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
		TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
#endif
}

void generateLayout(Layout &layout, const CaseDef &caseDef)
{
	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
	int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
	bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

	for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
	{
		VkDescriptorSetLayoutBinding &binding = bindings[b];
		binding.binding = b;
		binding.pImmutableSamplers = NULL;
		binding.stageFlags = caseDef.allShaderStages;
		binding.descriptorCount = 1;

		// Output image
		if (b == 0)
			binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
		else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
	}

	if (caseDef.nullDescriptor)
		return;

	if (caseDef.bufferLen == 0)
	{
		// Clear color values for image tests
		static deUint32 urefData[4]		= { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
		static deUint64 urefData64[4]	= { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
		static float frefData[4]		= { 123.f, 234.f, 345.f, 456.f };

		if (formatIsR64(caseDef.format))
		{
			layout.refData.resize(32);
			deUint64 *ptr = (deUint64 *)layout.refData.data();

			for (unsigned int i = 0; i < 4; ++i)
			{
				ptr[i] = urefData64[i];
			}
		}
		else
		{
			layout.refData.resize(16);
			deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
		}
	}
	else
	{
		layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7: ~3));
		for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
		{
			if (formatIsFloat(caseDef.format))
			{
				float *f = (float *)layout.refData.data() + i;
				*f = 2.0f*(float)i + 3.0f;
			}
			else if (formatIsR64(caseDef.format))
			{
				deUint64 *u = (deUint64 *)layout.refData.data() + i;
				*u = 2 * i + 3;
			}
			else
			{
				int *u = (int *)layout.refData.data() + i;
				*u = 2*i + 3;
			}
		}
	}
}
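
// For illustration: with VK_FORMAT_R32_SINT, descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
// and bufferLen == 16, generateLayout() produces two bindings (binding 0 = storage-image output,
// binding 1 = the descriptor under test) and reference data {3, 5, 7, 9} (2*i + 3).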

static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
{
	std::stringstream s;
	// Fetch from the descriptor.
	switch (caseDef.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		s << vecType << "(ubo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType << "(ssbo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		s << "texelFetch(texbo0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
			s << "texelFetch(texture0_1, " << coord << ")";
		else
			s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		s << "attr";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
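
// For illustration: genFetch(caseDef, 2, "uvec4", "idx", "0") for a uniform buffer
// descriptor yields the GLSL snippet "uvec4(ubo0_1.val[idx], 0, 0)" -- missing
// components are padded with zeros.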

static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
{
	std::stringstream s;
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType  << "(" << bufType << "(" << storeValue << ")";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << vecType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << vecType << "(" << storeValue << ")";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}

static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Store to the descriptor.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
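
// For illustration: genStore(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "ivec4", "int", "ivec2(c, 0)")
// yields "imageStore(image0_1, ivec2(c, 0), ivec4(123))".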

static string genAtomic(int descriptorType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}
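
// Similarly, genAtomic(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "int", "idx") yields
// "atomicAdd(ssbo0_1.val[idx], int(10))".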

static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
{
	const char* orderPart;
	const char* typePart;

	switch (format.order)
	{
		case tcu::TextureFormat::R:		orderPart = "r";	break;
		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;

		default:
			DE_FATAL("Impossible");
			orderPart = DE_NULL;
	}

	switch (format.type)
	{
		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;

		case tcu::TextureFormat::UNSIGNED_INT64:	typePart = "64ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;

		case tcu::TextureFormat::SIGNED_INT64:		typePart = "64i";		break;
		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;

		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;

		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;

		default:
			DE_FATAL("Impossible");
			typePart = DE_NULL;
	}

	return std::string() + orderPart + typePart;
}
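
// For illustration: order RGBA with type UNSIGNED_INT32 maps to "rgba32ui", and
// order R with type SIGNED_INT64 maps to "r64i".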

string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
{
	if (numCoords == 1)
		return c;

	if (samples != VK_SAMPLE_COUNT_1_BIT)
		numCoords--;

	string coord = "ivec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";

	// Append sample coordinate
	if (samples != VK_SAMPLE_COUNT_1_BIT)
	{
		coord += ", ";
		if (dim == numCoords)
			coord += c;
		else
			coord += "0";
	}
	return coord;
}
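
// For illustration: genCoord("c", 3, VK_SAMPLE_COUNT_1_BIT, 1) yields "ivec3(0, c, 0)";
// with multisampling, genCoord("c", 3, VK_SAMPLE_COUNT_4_BIT, 2) yields "ivec2(0, 0), c",
// where the trailing term becomes the sample-index argument of the image access.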

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
{
	// dim can be 3 for cube_array. Reuse the number of layers in that case.
	dim = std::min(dim, 2);

	if (numCoords == 1)
		return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

	string coord = "vec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0.25";
		if (i < numNormalizedCoords)
			coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";
	return coord;
}
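
// For illustration, assuming imageDim[1] == 8: genCoordNorm(caseDef, "c", 2, 2, 1) yields
// "vec2(0.25 / float(8), c / float(8))" -- normalized coordinates nudged off the texel
// boundary by the 0.25 offset.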

void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
{
	VkFormat format = m_data.format;

	Layout layout;
	generateLayout(layout, m_data);

	if (layout.layoutBindings.size() > 1 &&
		layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
	{
		if (format == VK_FORMAT_R64_SINT)
			format = VK_FORMAT_R32G32_SINT;

		if (format == VK_FORMAT_R64_UINT)
			format = VK_FORMAT_R32G32_UINT;
	}

	std::stringstream decls, checks;

	const string	r64			= formatIsR64(format) ? "64" : "";
	const string	i64Type		= formatIsR64(format) ? "64_t" : "";
	const string	vecType		= formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
	const string	qLevelType	= vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);

	decls << "uvec4 abs(uvec4 x) { return x; }\n";
	if (formatIsR64(format))
		decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
	decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
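
	// The emitted smod() is a non-negative modulo; e.g. smod(-3, 8) biases -3 by
	// 8*(abs(-3)/8 + 1) == 8 to get 5, then returns 5 % 8 == 5, so negative loop
	// counters below still index refData[] in bounds.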


	const int	componentsSize = (formatIsR64(format) ? 8 : 4);
	int			refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componentsSize), 4);
	// Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
	// robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
	{
		refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
	}
	if (m_data.nullDescriptor)
		refDataNumElements = 4;

	if (formatIsFloat(format))
	{
		decls << "float refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const float *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}
	else if (formatIsR64(format))
	{
		decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 8; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const deUint64 *)layout.refData.data())[i] << "l";
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0l";
			i++;
		}
	}
	else
	{
		decls << "int" << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const int *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}

	decls << "};\n";
	decls << vecType << " zzzz = " << vecType << "(0);\n";
	decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
	decls << vecType << " expectedIB;\n";

	string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
	string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
	string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));

	string imageDim = "";
	int numCoords, numNormalizedCoords;
	bool layered = false;
	switch (m_data.viewType)
	{
		default: DE_ASSERT(0); // Fallthrough
		case VK_IMAGE_VIEW_TYPE_1D:			imageDim = "1D";		numCoords = 1;	numNormalizedCoords = 1;	break;
		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	imageDim = "1DArray";	numCoords = 2;	numNormalizedCoords = 1;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2D";		numCoords = 2;	numNormalizedCoords = 2;	break;
		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DArray";	numCoords = 3;	numNormalizedCoords = 2;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_3D:			imageDim = "3D";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE:		imageDim = "Cube";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	imageDim = "CubeArray";	numCoords = 4;	numNormalizedCoords = 3;	layered = true;	break;
	}
	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
	{
		switch (m_data.viewType)
		{
			default: DE_ASSERT(0); // Fallthrough
			case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2DMS";		break;
			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DMSArray";	break;
		}
		numCoords++;
	}
	bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;

	// Special case imageLoad(imageCubeArray, ...) which uses ivec3
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
		m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
	{
		numCoords = 3;
	}

	int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
	string bufType;
	if (numComponents == 1)
		bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
	else
		bufType = imgprefix + "vec" + std::to_string(numComponents);

	// For UBOs, which have a declared size in the shader, don't access outside that size.
	bool declaredSize = false;
	switch (m_data.descriptorType) {
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		declaredSize = true;
		break;
	default:
		break;
	}

	checks << "  int inboundcoords, clampedLayer;\n";
	checks << "  " << vecType << " expectedIB2;\n";
	if (m_data.unroll)
	{
		if (declaredSize)
			checks << "  [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
		else
			checks << "  [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
	}
	else
	{
		if (declaredSize)
			checks << "  [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
		else
			checks << "  [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
	}

	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		checks << "    int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
	else
		checks << "    int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";

	decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";

	const char *vol = m_data.vol ? "volatile " : "";
	const char *ro = m_data.readOnly ? "readonly " : "";

	// Construct the declaration for the binding
	switch (m_data.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
		decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		switch(format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		switch (format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
			break;
		}
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		if (formatIsR64(format))
		{
			decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
		}
		else
		{
			decls << "layout(location = 0) in " << vecType << " attr;\n";
		}
		break;
	default: DE_ASSERT(0);
	}

	string expectedOOB;
	string defaultw;

	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		expectedOOB = "zzzz";
		defaultw = "0";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (numComponents == 1)
		{
			expectedOOB = "zzzo";
		}
		else if (numComponents == 2)
		{
			expectedOOB = "zzzo";
		}
		else
		{
			expectedOOB = "zzzz";
		}
		defaultw = "1";
		break;
	}
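
	// For illustration: an out-of-bounds read of a one- or two-component texel buffer,
	// image or vertex attribute is expected to return zzzo (missing components default
	// to w = 1), while four-component formats and plain buffer views return zzzz.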

	string idx;
	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		idx = "idx";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		idx = "0";
		break;
	}

	if (m_data.nullDescriptor)
	{
		checks << "    expectedIB = zzzz;\n";
		checks << "    inboundcoords = 0;\n";
		checks << "    int paddedinboundcoords = 0;\n";
		// Vertex attribute fetch still gets format conversion applied
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			expectedOOB = "zzzz";
	}
	else
	{
		checks << "    expectedIB.x = refData[" << idx << "];\n";
		if (numComponents > 1)
		{
			checks << "    expectedIB.y = refData[" << idx << "+1];\n";
		}
		else
		{
			checks << "    expectedIB.y = 0;\n";
		}
		if (numComponents > 2)
		{
			checks << "    expectedIB.z = refData[" << idx << "+2];\n";
			checks << "    expectedIB.w = refData[" << idx << "+3];\n";
		}
		else
		{
			checks << "    expectedIB.z = 0;\n";
			checks << "    expectedIB.w = " << defaultw << ";\n";
		}

		switch (m_data.descriptorType)
		{
		default: DE_ASSERT(0); // Fallthrough
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			// UBOs can either strictly bounds check against inboundcoords, or can
			// return the contents from memory for the range padded up to paddedinboundcoords.
			checks << "    int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
			// fallthrough
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VERTEX_ATTRIBUTE_FETCH:
			checks << "    inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			// set per-component below
			break;
		}
	}

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
		 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
		 !m_data.readOnly)
	{
		for (int i = 0; i < numCoords; ++i)
		{
			// Treat i==3 coord (cube array layer) like i == 2
			deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
			if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
				checks << "    inboundcoords = " << coordDim << ";\n";

			string coord = genCoord("c", numCoords, m_data.samples, i);
			string inboundcoords =
				m_data.nullDescriptor ? "0" :
				(m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";

			checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
			if (m_data.formatQualifier &&
				(format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
			{
				checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
			}
		}
	}

	for (int i = 0; i < numCoords; ++i)
	{
		// Treat i==3 coord (cube array layer) like i == 2
		deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
		if (!m_data.nullDescriptor)
		{
			switch (m_data.descriptorType)
			{
			default:
				break;
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				checks << "    inboundcoords = " << coordDim << ";\n";
				break;
			}
		}

		string coord = genCoord("c", numCoords, m_data.samples, i);

		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		{
			if (formatIsR64(format))
			{
				checks << "    temp.x = attr;\n";
				checks << "    temp.y = 0l;\n";
				checks << "    temp.z = 0l;\n";
				checks << "    temp.w = 0l;\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
			}
			else
			{
				checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
			}
			// Accumulate any incorrect values.
			checks << "    accum += abs(temp);\n";
		}
		// Skip texelFetch testing for cube(array) - texelFetch doesn't support it
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
			!(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
			  (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
		{
			checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";

			checks << "    expectedIB2 = expectedIB;\n";

			// Expected data is a function of layer for array images: fold the layer index into
			// expectedIB2 for in-bounds coordinates so the comparison below subtracts it out.
1405 			if (dataDependsOnLayer && i == numNormalizedCoords)
1406 				checks << "    if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1407 
1408 			if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1409 			{
1410 				if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1411 				{
1412 					checks << "    if (temp == zzzz) temp = " << vecType << "(0);\n";
1413 					if (m_data.formatQualifier && numComponents < 4)
1414 						checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1415 					checks << "    else temp = " << vecType << "(1);\n";
1416 				}
1417 				else
1418 					// multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1419 					checks << "    if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1420 			}
1421 			else
1422 			{
1423 				// Storage buffers may be split into per-component loads. Generate a second
1424 				// expected out of bounds value where some subset of the components are
1425 				// actually in-bounds. If both loads and stores are split into per-component
1426 				// accesses, then the result value can be a mix of storeValue and zero.
1427 				string expectedOOB2 = expectedOOB;
1428 				string expectedOOB3 = expectedOOB;
1429 				if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1430 					 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1431 					 !m_data.nullDescriptor)
1432 				{
1433 					int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1434 					int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
1435 					string sstoreValue = de::toString(storeValue);
1436 					switch (mod)
1437 					{
1438 					case 0:
1439 						break;
1440 					case 1:
1441 						expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1442 						expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1443 						break;
1444 					case 2:
1445 						expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1446 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1447 						break;
1448 					case 3:
1449 						expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1450 						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1451 						break;
1452 					}
1453 				}
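				// Worked example (illustrative values, not taken from the test data):
				// a 20-byte uvec4 SSBO gives len = 20 & ~3 = 20 and
				// mod = (20 / 4) % 4 = 1, so a load straddling the bound may
				// legally return just .x of the in-bounds data (expectedOOB2),
				// or storeValue in .x if stores were also split (expectedOOB3).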
1454 
1455 				// Entirely in-bounds.
1456 				checks << "    if (c >= 0 && c < inboundcoords) {\n"
1457 						  "       if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
1458 						  "    }\n";
1459 
1460 				// normal out-of-bounds value
1461 				if (m_data.testRobustness2)
1462 					checks << "    else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1463 				else
1464 					// image_robustness relaxes alpha, which is allowed to be zero or one
1465 					checks << "    else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1466 
1467 				if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1468 					m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1469 				{
1470 					checks << "    else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
1471 				}
1472 
1473 				// null descriptor loads with an image format layout qualifier that doesn't include alpha may return alpha=1
1474 				if (m_data.nullDescriptor && m_data.formatQualifier &&
1475 					(m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1476 					numComponents < 4)
1477 					checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
1478 
1479 				// non-volatile value replaced with stored value
1480 				if (supportsStores(m_data.descriptorType) && !m_data.vol) {
1481 					checks << "    else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";
1482 
1483 					if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
1484 
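						// Enumerate the partial store patterns: each bit of 'mask' selects
						// whether that component reads back storeValue or 0. Note that
						// numComponents*numComponents equals (1 << numComponents) for the
						// 2- and 4-component cases, covering all non-trivial masks.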
1485 						for (int mask = (numComponents*numComponents) - 2; mask > 0; mask--) {
1486 							checks << "    else if (temp == " << vecType << "(";
1487 							for (int vecIdx = 0; vecIdx < 4; vecIdx++) {
1488 								if (mask & (1 << vecIdx)) checks << storeValue;
1489 								else checks << "0";
1490 
1491 								if (vecIdx != 3) checks << ",";
1492 							}
1493 							checks << ")) temp = " << vecType << "(0);\n";
1494 						}
1495 					}
1496 				}
1497 
1498 				// value straddling the boundary, returning a partial vector
1499 				if (expectedOOB2 != expectedOOB)
1500 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
1501 				if (expectedOOB3 != expectedOOB)
1502 					checks << "    else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";
1503 
1504 				// failure
1505 				checks << "    else temp = " << vecType << "(1);\n";
1506 			}
1507 			// Accumulate any incorrect values.
1508 			checks << "    accum += abs(temp);\n";
1509 
1510 			// Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1511 			if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1512 			{
1513 				// Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1514 				string coord0 = genCoord("0", numCoords, m_data.samples, i);
1515 				checks << "    if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
1516 				checks << "    if (c != 0) temp -= " << expectedOOB << ";\n";
1517 				checks << "    accum += abs(temp);\n";
1518 			}
1519 		}
1520 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1521 			m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1522 		{
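			// The (c+0.25) offset samples inside texel c rather than on a texel
			// boundary, so NEAREST filtering unambiguously selects that texel.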
1523 			string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1524 
1525 			checks << "    expectedIB2 = expectedIB;\n";
1526 
1527 			// For array images the data is a function of the layer; fold the clamped layer into the expected value so it cancels out for in-bounds coordinates.
1528 			if (dataDependsOnLayer && i == numNormalizedCoords)
1529 			{
1530 				checks << "    clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
1531 				checks << "    expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1532 			}
1533 
1534 			stringstream normexpected;
1535 			// Cubemap fetches are always in-bounds. The layer coordinate is clamped, so it is always in-bounds.
1536 			if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
1537 				m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1538 				(layered && i == numCoords-1))
1539 				normexpected << "    temp -= expectedIB2;\n";
1540 			else
1541 			{
1542 				normexpected << "    if (c >= 0 && c < inboundcoords)\n";
1543 				normexpected << "        temp -= expectedIB2;\n";
1544 				normexpected << "    else\n";
1545 				if (m_data.testRobustness2)
1546 					normexpected << "        temp -= " << expectedOOB << ";\n";
1547 				else
1548 					// image_robustness relaxes alpha, which is allowed to be zero or one
1549 					normexpected << "        temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1550 			}
1551 
1552 			checks << "    temp = texture(texture0_1, " << coordNorm << ");\n";
1553 			checks << normexpected.str();
1554 			checks << "    accum += abs(temp);\n";
1555 			checks << "    temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1556 			checks << normexpected.str();
1557 			checks << "    accum += abs(temp);\n";
1558 			checks << "    temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1559 			checks << normexpected.str();
1560 			checks << "    accum += abs(temp);\n";
1561 		}
1562 		if (m_data.nullDescriptor)
1563 		{
1564 			const char *sizeswiz;
1565 			switch (m_data.viewType)
1566 			{
1567 				default: DE_ASSERT(0); // Fallthrough
1568 				case VK_IMAGE_VIEW_TYPE_1D:			sizeswiz = ".xxxx";	break;
1569 				case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	sizeswiz = ".xyxx";	break;
1570 				case VK_IMAGE_VIEW_TYPE_2D:			sizeswiz = ".xyxx";	break;
1571 				case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	sizeswiz = ".xyzx";	break;
1572 				case VK_IMAGE_VIEW_TYPE_3D:			sizeswiz = ".xyzx";	break;
1573 				case VK_IMAGE_VIEW_TYPE_CUBE:		sizeswiz = ".xyxx";	break;
1574 				case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	sizeswiz = ".xyzx";	break;
1575 			}
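			// The swizzle pads the size query result out to four components so it
			// can be accumulated into 'temp'; a null descriptor is expected to
			// report zero for all dimensions, samples and levels.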
1576 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1577 			{
1578 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1579 				{
1580 					checks << "    temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
1581 					checks << "    accum += abs(temp);\n";
1582 
1583 					// Check textureSize with clearly out-of-range LOD values.
1584 					checks << "    temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
1585 					checks << "    accum += abs(temp);\n";
1586 					checks << "    temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
1587 					checks << "    accum += abs(temp);\n";
1588 				}
1589 				else
1590 				{
1591 					checks << "    temp = textureSize(texture0_1)" << sizeswiz <<";\n";
1592 					checks << "    accum += abs(temp);\n";
1593 					checks << "    temp = textureSamples(texture0_1).xxxx;\n";
1594 					checks << "    accum += abs(temp);\n";
1595 				}
1596 			}
1597 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1598 			{
1599 				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1600 				{
1601 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1602 					checks << "    accum += abs(temp);\n";
1603 				}
1604 				else
1605 				{
1606 					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
1607 					checks << "    accum += abs(temp);\n";
1608 					checks << "    temp = imageSamples(image0_1).xxxx;\n";
1609 					checks << "    accum += abs(temp);\n";
1610 				}
1611 			}
1612 			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1613 				m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1614 			{
1615 				// expect zero for runtime-sized array .length()
1616 				checks << "    temp = " << vecType << "(ssbo0_1.val.length());\n";
1617 				checks << "    accum += abs(temp);\n";
1618 				checks << "    temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1619 				checks << "    accum += abs(temp);\n";
1620 			}
1621 		}
1622 	}
1623 	checks << "  }\n";
1624 
1625 	// outside the coordinates loop because we only need to call it once
1626 	if (m_data.nullDescriptor &&
1627 		m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1628 		m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1629 	{
1630 		checks << "  temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1631 		checks << "  temp = " << vecType << "(temp_ql);\n";
1632 		checks << "  accum += abs(temp);\n";
1633 
1634 		if (m_data.stage == STAGE_FRAGMENT)
1635 		{
1636 			// Here we only want to check that textureQueryLod returns 0 when
1637 			// texture0_1 is null, so we don't need the actual texture coordinates
1638 			// (which would require modifying the vertex shader below). Any
1639 			// coordinates are fine; gl_FragCoord was chosen arbitrarily rather than, say, 0.
1640 			std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1641 			checks << "  vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1642 			checks << "  temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1643 			checks << "  temp = " << vecType << "(temp_ql);\n";
1644 			checks << "  accum += abs(temp);\n";
1645 		}
1646 	}
1647 
1648 
1649 	const bool		needsScalarLayout	= m_data.needsScalarBlockLayout();
1650 	const uint32_t	shaderBuildOptions	= (needsScalarLayout
1651 										? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS)
1652 										: 0u);
1653 
1654 	const bool is64BitFormat = formatIsR64(m_data.format);
1655 	std::string support =	"#version 460 core\n"
1656 							"#extension GL_EXT_nonuniform_qualifier : enable\n" +
1657 							(needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1658 							"#extension GL_EXT_samplerless_texture_functions : enable\n"
1659 							"#extension GL_EXT_control_flow_attributes : enable\n"
1660 							"#extension GL_EXT_shader_image_load_formatted : enable\n";
1661 	std::string SupportR64 =	"#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1662 								"#extension GL_EXT_shader_image_int64 : require\n";
1663 	if (is64BitFormat)
1664 		support += SupportR64;
1665 	if (m_data.stage == STAGE_RAYGEN)
1666 		support += "#extension GL_NV_ray_tracing : require\n";
1667 
1668 	std::string code =	"  " + vecType + " accum = " + vecType + "(0);\n"
1669 						"  " + vecType + " temp;\n"
1670 						"  " + qLevelType + " temp_ql;\n" +
1671 						checks.str() +
1672 						"  " + vecType + " color = (accum != " + vecType + "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
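	// Encode the result in the output color: any nonzero accumulated
	// difference yields (0,0,0,0), while full success writes (1,0,0,1).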
1673 
1674 	switch (m_data.stage)
1675 	{
1676 	default: DE_ASSERT(0); // Fallthrough
1677 	case STAGE_COMPUTE:
1678 		{
1679 			std::stringstream css;
1680 			css << support
1681 				<< decls.str() <<
1682 				"layout(local_size_x = 1, local_size_y = 1) in;\n"
1683 				"void main()\n"
1684 				"{\n"
1685 				<< code <<
1686 				"  imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1687 				"}\n";
1688 
1689 			programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1690 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1691 			break;
1692 		}
1693 	case STAGE_RAYGEN:
1694 		{
1695 			std::stringstream css;
1696 			css << support
1697 				<< decls.str() <<
1698 				"void main()\n"
1699 				"{\n"
1700 				<< code <<
1701 				"  imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1702 				"}\n";
1703 
1704 			programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1705 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1706 			break;
1707 		}
1708 	case STAGE_VERTEX:
1709 		{
1710 			std::stringstream vss;
1711 			vss << support
1712 				<< decls.str() <<
1713 				"void main()\n"
1714 				"{\n"
1715 				<< code <<
1716 				"  imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1717 				"  gl_PointSize = 1.0f;\n"
1718 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1719 				"}\n";
1720 
1721 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1722 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1723 			break;
1724 		}
1725 	case STAGE_FRAGMENT:
1726 		{
1727 			std::stringstream vss;
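			// Positions are derived from gl_VertexIndex so no vertex buffer is
			// needed; presumably this is drawn as a small strip covering the
			// whole viewport (see the "full-viewport quad" comment below).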
1728 			vss <<
1729 				"#version 450 core\n"
1730 				"void main()\n"
1731 				"{\n"
1732 				// full-viewport quad
1733 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1734 				"}\n";
1735 
1736 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1737 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1738 
1739 			std::stringstream fss;
1740 			fss << support
1741 				<< decls.str() <<
1742 				"void main()\n"
1743 				"{\n"
1744 				<< code <<
1745 				"  imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1746 				"}\n";
1747 
1748 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1749 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1750 			break;
1751 		}
1752 	}
1753 
1754 	// The 64-bit conditions below are redundant. Can we support the below shader for other than 64-bit formats?
1755 	if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1756 	{
1757 		const std::string	ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1758 		std::stringstream	fillShader;
1759 
1760 		fillShader <<
1761 			"#version 450\n"
1762 			<< SupportR64
1763 			<< "\n"
1764 			"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1765 			"layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1766 			<< string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1767 			"\n"
1768 			"layout(std430, binding = 1) buffer inputBuffer\n"
1769 			"{\n"
1770 			"  int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1771 			"} inBuffer;\n"
1772 			"\n"
1773 			"void main(void)\n"
1774 			"{\n"
1775 			"  int gx = int(gl_GlobalInvocationID.x);\n"
1776 			"  int gy = int(gl_GlobalInvocationID.y);\n"
1777 			"  int gz = int(gl_GlobalInvocationID.z);\n"
1778 			"  uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
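			// Each invocation fills one texel; the unrolled loop below stores the
			// same 64-bit value to every sample of that texel.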
1779 
1780 			for (int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1781 			{
1782 				fillShader << "  imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1783 			}
1784 
1785 			fillShader << "}\n";
1786 
1787 		programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1788 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1789 	}
1790 
1791 }
1792 
1793 VkImageType imageViewTypeToImageType (VkImageViewType type)
1794 {
1795 	switch (type)
1796 	{
1797 		case VK_IMAGE_VIEW_TYPE_1D:
1798 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:		return VK_IMAGE_TYPE_1D;
1799 		case VK_IMAGE_VIEW_TYPE_2D:
1800 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1801 		case VK_IMAGE_VIEW_TYPE_CUBE:
1802 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:		return VK_IMAGE_TYPE_2D;
1803 		case VK_IMAGE_VIEW_TYPE_3D:				return VK_IMAGE_TYPE_3D;
1804 		default:
1805 			DE_ASSERT(false);
1806 	}
1807 
1808 	return VK_IMAGE_TYPE_2D;
1809 }
1810 
1811 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1812 {
1813 	return new RobustnessExtsTestInstance(context, m_data);
1814 }
1815 
1816 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1817 {
1818 	const VkInstance			instance			= m_context.getInstance();
1819 	const InstanceInterface&	vki					= m_context.getInstanceInterface();
1820 	const VkDevice				device				= getLogicalDevice(m_context, m_data);
1821 	const vk::DeviceInterface&	vk					= getDeviceInterface(m_context, m_data);
1822 	const VkPhysicalDevice		physicalDevice		= chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1823 	SimpleAllocator				allocator			(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1824 
1825 	Layout layout;
1826 	generateLayout(layout, m_data);
1827 
1828 	// Get needed properties.
1829 	VkPhysicalDeviceProperties2 properties;
1830 	deMemset(&properties, 0, sizeof(properties));
1831 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1832 	void** pNextTail = &properties.pNext;
1833 
1834 #ifndef CTS_USES_VULKANSC
1835 	VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1836 	deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1837 	rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1838 #endif
1839 
1840 	VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1841 	deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1842 	robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1843 
1844 #ifndef CTS_USES_VULKANSC
1845 	if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1846 	{
1847 		*pNextTail = &rayTracingProperties;
1848 		pNextTail = &rayTracingProperties.pNext;
1849 	}
1850 #endif
1851 
1852 	if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1853 	{
1854 		*pNextTail = &robustness2Properties;
1855 		pNextTail = &robustness2Properties.pNext;
1856 	}
1857 
1858 	vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1859 
1860 	if (m_data.testRobustness2)
1861 	{
1862 		if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1863 			robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1864 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1865 
1866 		if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1867 			robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1868 			!deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1869 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1870 	}
1871 
1872 	VkPipelineBindPoint bindPoint;
1873 
1874 	switch (m_data.stage)
1875 	{
1876 	case STAGE_COMPUTE:
1877 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1878 		break;
1879 #ifndef CTS_USES_VULKANSC
1880 	case STAGE_RAYGEN:
1881 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1882 		break;
1883 #endif
1884 	default:
1885 		bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1886 		break;
1887 	}
1888 
1889 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
1890 	Move<vk::VkDescriptorPool>		descriptorPool;
1891 	Move<vk::VkDescriptorSet>		descriptorSet;
1892 
1893 	int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1894 	int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1895 
1896 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1897 
1898 	VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1899 
1900 #ifndef CTS_USES_VULKANSC
1901 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1902 #else
1903 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1904 #endif
1905 
1906 	// Create a layout and allocate a descriptor set for it.
1907 
1908 	const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1909 	{
1910 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1911 		DE_NULL,
1912 
1913 		layoutCreateFlags,
1914 		(deUint32)bindings.size(),
1915 		bindings.empty() ? DE_NULL : bindings.data()
1916 	};
1917 
1918 	descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1919 
1920 	vk::DescriptorPoolBuilder poolBuilder;
1921 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1922 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1923 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1924 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1925 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1926 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1927 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1928 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1929 
1930 	descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1931 
1932 	const void *pNext = DE_NULL;
1933 
1934 	if (!m_data.pushDescriptor)
1935 		descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1936 
1937 	de::MovePtr<BufferWithMemory> buffer;
1938 
1939 	deUint8 *bufferPtr = DE_NULL;
1940 	if (!m_data.nullDescriptor)
1941 	{
1942 		// Create a buffer to hold data for all descriptors.
1943 		VkDeviceSize	size = de::max(
1944 			(VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1945 			(VkDeviceSize)256);
1946 
1947 		VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1948 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1949 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1950 		{
1951 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1952 			usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
1953 		}
1954 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1955 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1956 		{
1957 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1958 			usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1959 		}
1960 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1961 		{
1962 			usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1963 		}
1964 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
1965 		{
1966 			usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
1967 		}
1968 		else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1969 		{
1970 			size = m_data.bufferLen;
1971 		}
1972 
1973 		buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1974 			vk, device, allocator, makeBufferCreateInfo(size, usage), MemoryRequirement::HostVisible));
1975 		bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1976 
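		// Fill the whole allocation with a nonzero poison byte first, so that
		// out-of-bounds reads which wrongly return real buffer memory are
		// distinguishable from the expected zeros, then zero the valid
		// (robust-aligned) range below.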
1977 		deMemset(bufferPtr, 0x3f, (size_t)size);
1978 
1979 		deMemset(bufferPtr, 0, m_data.bufferLen);
1980 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1981 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1982 		{
1983 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1984 		}
1985 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1986 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1987 		{
1988 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1989 		}
1990 	}
1991 
1992 	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1993 
1994 	Move<VkDescriptorSetLayout>		descriptorSetLayoutR64;
1995 	Move<VkDescriptorPool>			descriptorPoolR64;
1996 	Move<VkDescriptorSet>			descriptorSetFillImage;
1997 	Move<VkShaderModule>			shaderModuleFillImage;
1998 	Move<VkPipelineLayout>			pipelineLayoutFillImage;
1999 	Move<VkPipeline>				pipelineFillImage;
2000 
2001 	Move<VkCommandPool>				cmdPool		= createCommandPool(vk, device, 0, queueFamilyIndex);
2002 	Move<VkCommandBuffer>			cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2003 	VkQueue							queue;
2004 
2005 	vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
2006 
2007 	const VkImageSubresourceRange	barrierRange				=
2008 	{
2009 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2010 		0u,							// deUint32				baseMipLevel;
2011 		VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2012 		0u,							// deUint32				baseArrayLayer;
2013 		VK_REMAINING_ARRAY_LAYERS	// deUint32				layerCount;
2014 	};
2015 
2016 	VkImageMemoryBarrier			preImageBarrier				=
2017 	{
2018 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
2019 		DE_NULL,											// const void*			pNext
2020 		0u,													// VkAccessFlags		srcAccessMask
2021 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
2022 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
2023 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
2024 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
2025 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
2026 		DE_NULL,											// VkImage				image
2027 		barrierRange,										// VkImageSubresourceRange	subresourceRange;
2028 	};
2029 
2030 	VkImageMemoryBarrier			postImageBarrier			=
2031 	{
2032 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
2033 		DE_NULL,									// const void*				pNext;
2034 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
2035 		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
2036 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
2037 		VK_IMAGE_LAYOUT_GENERAL,					// VkImageLayout			newLayout;
2038 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
2039 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
2040 		DE_NULL,									// VkImage					image;
2041 		barrierRange,								// VkImageSubresourceRange	subresourceRange;
2042 	};
2043 
2044 	vk::VkClearColorValue			clearValue;
2045 	clearValue.uint32[0] = 0u;
2046 	clearValue.uint32[1] = 0u;
2047 	clearValue.uint32[2] = 0u;
2048 	clearValue.uint32[3] = 0u;
2049 
2050 	beginCommandBuffer(vk, *cmdBuffer, 0u);
2051 
2052 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
2053 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
2054 	typedef de::SharedPtr<ImageWithMemory>		ImageWithMemorySp;
2055 	typedef de::SharedPtr<Unique<VkImageView> >	VkImageViewSp;
2056 	typedef de::MovePtr<BufferWithMemory>		BufferWithMemoryMp;
2057 
2058 	vector<BufferViewHandleSp>					bufferViews(1);
2059 
2060 	VkImageCreateFlags mutableFormatFlag = 0;
2061 	// The 64-bit image tests use a view format which differs from the image.
2062 	if (formatIsR64(m_data.format))
2063 		mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2064 	VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2065 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2066 		imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2067 
2068 	const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(vki,
2069 										physicalDevice,
2070 										m_data.format).optimalTilingFeatures &
2071 										VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2072 
2073 	const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2074 
2075 	const VkImageCreateInfo			outputImageCreateInfo			=
2076 	{
2077 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2078 		DE_NULL,								// const void*				pNext;
2079 		mutableFormatFlag,						// VkImageCreateFlags		flags;
2080 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
2081 		m_data.format,							// VkFormat					format;
2082 		{
2083 			DIM,								// deUint32	width;
2084 			DIM,								// deUint32	height;
2085 			1u									// deUint32	depth;
2086 		},										// VkExtent3D				extent;
2087 		1u,										// deUint32					mipLevels;
2088 		1u,										// deUint32					arrayLayers;
2089 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
2090 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2091 		VK_IMAGE_USAGE_STORAGE_BIT
2092 		| usageSampledImage
2093 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2094 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2095 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2096 		0u,										// deUint32					queueFamilyIndexCount;
2097 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2098 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2099 	};
2100 
2101 	deUint32 width = m_data.imageDim[0];
2102 	deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
2103 	deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
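	// For arrayed and cube views the layer count comes from imageDim[1]
	// (1D arrays) or imageDim[2] (2D arrays and cubes); all other view
	// types use a single layer.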
2104 	deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2105 						m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
2106 						m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2107 						m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2108 
2109 	const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
2110 
2111 	const VkImageCreateInfo			imageCreateInfo			=
2112 	{
2113 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
2114 		DE_NULL,								// const void*				pNext;
2115 		imageCreateFlags,						// VkImageCreateFlags		flags;
2116 		imageViewTypeToImageType(m_data.viewType),	// VkImageType				imageType;
2117 		m_data.format,							// VkFormat					format;
2118 		{
2119 			width,								// deUint32	width;
2120 			height,								// deUint32	height;
2121 			depth								// deUint32	depth;
2122 		},										// VkExtent3D				extent;
2123 		1u,										// deUint32					mipLevels;
2124 		layers,									// deUint32					arrayLayers;
2125 		m_data.samples,							// VkSampleCountFlagBits	samples;
2126 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
2127 		usageImage
2128 		| usageSampledImage
2129 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2130 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
2131 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
2132 		0u,										// deUint32					queueFamilyIndexCount;
2133 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
2134 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
2135 	};
2136 
2137 	VkImageViewCreateInfo		imageViewCreateInfo		=
2138 	{
2139 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
2140 		DE_NULL,									// const void*				pNext;
2141 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
2142 		DE_NULL,									// VkImage					image;
2143 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
2144 		m_data.format,								// VkFormat					format;
2145 		{
2146 			VK_COMPONENT_SWIZZLE_IDENTITY,
2147 			VK_COMPONENT_SWIZZLE_IDENTITY,
2148 			VK_COMPONENT_SWIZZLE_IDENTITY,
2149 			VK_COMPONENT_SWIZZLE_IDENTITY
2150 		},											// VkComponentMapping		 components;
2151 		{
2152 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
2153 			0u,										// deUint32				baseMipLevel;
2154 			VK_REMAINING_MIP_LEVELS,				// deUint32				levelCount;
2155 			0u,										// deUint32				baseArrayLayer;
2156 			VK_REMAINING_ARRAY_LAYERS				// deUint32				layerCount;
2157 		}											// VkImageSubresourceRange	subresourceRange;
2158 	};
2159 
2160 	vector<ImageWithMemorySp> images(2);
2161 	vector<VkImageViewSp> imageViews(2);
2162 
2163 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2164 	{
2165 		deUint32 *ptr = (deUint32 *)bufferPtr;
2166 		deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2167 	}
2168 
2169 	BufferWithMemoryMp				bufferImageR64;
2170 	BufferWithMemoryMp				bufferOutputImageR64;
2171 	const VkDeviceSize				sizeOutputR64	= 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
2172 	const VkDeviceSize				sizeOneLayers	= 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2173 	const VkDeviceSize				sizeImageR64	= sizeOneLayers * layers;
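	// R64 formats are 8 bytes per texel, hence the factor of 8 in the
	// staging-buffer sizes above.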
2174 
2175 	if (formatIsR64(m_data.format))
2176 	{
2177 		bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2178 			vk, device, allocator,
2179 			makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2180 			MemoryRequirement::HostVisible));
2181 
2182 		deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
2183 
2184 		for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2185 		{
2186 			bufferUint64Ptr[ndx] = 0;
2187 		}
2188 		flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2189 
2190 		bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2191 			vk, device, allocator,
2192 			makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2193 			MemoryRequirement::HostVisible));
2194 
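		// Seed each layer with a recognizable 64-bit pattern; for non-cube
		// views the layer index is added so the expected data is a function
		// of the layer.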
2195 		for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
2196 		{
2197 			bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
2198 			bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2199 
2200 			for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2201 			{
2202 				bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
2203 			}
2204 		}
2205 		flushAlloc(vk, device, bufferImageR64->getAllocation());
2206 	}
2207 
2208 	for (size_t b = 0; b < bindings.size(); ++b)
2209 	{
2210 		VkDescriptorSetLayoutBinding &binding = bindings[b];
2211 
2212 		if (binding.descriptorCount == 0)
2213 			continue;
2214 		if (b == 1 && m_data.nullDescriptor)
2215 			continue;
2216 
2217 		DE_ASSERT(binding.descriptorCount == 1);
2218 		switch (binding.descriptorType)
2219 		{
2220 		default: DE_ASSERT(0); // Fallthrough
2221 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2222 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2223 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2224 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2225 			{
2226 				deUint32 *ptr = (deUint32 *)bufferPtr;
2227 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2228 			}
2229 			break;
2230 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2231 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2232 			{
2233 				deUint32 *ptr = (deUint32 *)bufferPtr;
2234 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2235 
2236 				const vk::VkBufferViewCreateInfo viewCreateInfo =
2237 				{
2238 					vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2239 					DE_NULL,
2240 					(vk::VkBufferViewCreateFlags)0,
2241 					**buffer,								// buffer
2242 					m_data.format,							// format
2243 					(vk::VkDeviceSize)0,					// offset
2244 					(vk::VkDeviceSize)m_data.bufferLen		// range
2245 				};
2246 				vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2247 				bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2248 			}
2249 			break;
2250 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2251 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2252 			{
2253 				if (bindings.size() > 1 &&
2254 					bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2255 				{
2256 					if (m_data.format == VK_FORMAT_R64_SINT)
2257 						imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2258 
2259 					if (m_data.format == VK_FORMAT_R64_UINT)
2260 						imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2261 				}
2262 
2263 				if (b == 0)
2264 				{
2265 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2266 					imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2267 				}
2268 				else
2269 				{
2270 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2271 					imageViewCreateInfo.viewType = m_data.viewType;
2272 				}
2273 				imageViewCreateInfo.image = **images[b];
2274 				imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2275 
2276 				VkImage						img			= **images[b];
2277 				const VkBuffer&				bufferR64	= ((b == 0) ? *(*bufferOutputImageR64) : *(*bufferImageR64));
2278 				const VkImageCreateInfo&	imageInfo	= ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2279 				const deUint32				clearLayers	= b == 0 ? 1 : layers;
2280 
2281 				if (!formatIsR64(m_data.format))
2282 				{
2283 					preImageBarrier.image	= img;
2284 					if (b == 1)
2285 					{
2286 						if (formatIsFloat(m_data.format))
2287 						{
2288 							deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2289 						}
2290 						else if (formatIsSignedInt(m_data.format))
2291 						{
2292 							deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2293 						}
2294 						else
2295 						{
2296 							deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2297 						}
2298 					}
2299 					postImageBarrier.image	= img;
2300 
2301 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2302 
2303 					for (unsigned int i = 0; i < clearLayers; ++i)
2304 					{
2305 						const VkImageSubresourceRange	clearRange				=
2306 						{
2307 							VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2308 							0u,							// deUint32				baseMipLevel;
2309 							VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2310 							i,							// deUint32				baseArrayLayer;
2311 							1							// deUint32				layerCount;
2312 						};
2313 
2314 						vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2315 
2316 						// Use the same data for all faces of a cube (array); otherwise make the value a function of the layer.
2317 						if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2318 						{
2319 							if (formatIsFloat(m_data.format))
2320 								clearValue.float32[0] += 1;
2321 							else if (formatIsSignedInt(m_data.format))
2322 								clearValue.int32[0] += 1;
2323 							else
2324 								clearValue.uint32[0] += 1;
2325 						}
2326 					}
2327 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2328 				}
2329 				else
2330 				{
2331 					if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2332 					{
2333 						const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2334 						const VkImageMemoryBarrier		imageBarrierPre		= makeImageMemoryBarrier(0,
2335 																				VK_ACCESS_SHADER_WRITE_BIT,
2336 																				VK_IMAGE_LAYOUT_UNDEFINED,
2337 																				VK_IMAGE_LAYOUT_GENERAL,
2338 																				img,
2339 																				subresourceRange);
2340 						const VkImageMemoryBarrier		imageBarrierPost	= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2341 																				VK_ACCESS_SHADER_READ_BIT,
2342 																				VK_IMAGE_LAYOUT_GENERAL,
2343 																				VK_IMAGE_LAYOUT_GENERAL,
2344 																				img,
2345 																				subresourceRange);
2346 
2347 						descriptorSetLayoutR64 =
2348 							DescriptorSetLayoutBuilder()
2349 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2350 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2351 							.build(vk, device);
2352 
2353 						descriptorPoolR64 =
2354 							DescriptorPoolBuilder()
2355 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2356 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)
2357 							.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2358 
2359 						descriptorSetFillImage = makeDescriptorSet(vk,
2360 							device,
2361 							*descriptorPoolR64,
2362 							*descriptorSetLayoutR64);
2363 
2364 						shaderModuleFillImage	= createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2365 						pipelineLayoutFillImage	= makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2366 						pipelineFillImage		= makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2367 
2368 						const VkDescriptorImageInfo		descResultImageInfo		= makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2369 						const VkDescriptorBufferInfo	descResultBufferInfo	= makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2370 
2371 						DescriptorSetUpdateBuilder()
2372 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2373 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2374 							.update(vk, device);
2375 
2376 						vk.cmdPipelineBarrier(*cmdBuffer,
2377 							VK_PIPELINE_STAGE_HOST_BIT,
2378 							VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2379 							(VkDependencyFlags)0,
2380 							0, (const VkMemoryBarrier*)DE_NULL,
2381 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2382 							1, &imageBarrierPre);
2383 
2384 						vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2385 						vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2386 
2387 						vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2388 
2389 						vk.cmdPipelineBarrier(*cmdBuffer,
2390 									VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2391 									VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2392 									(VkDependencyFlags)0,
2393 									0, (const VkMemoryBarrier*)DE_NULL,
2394 									0, (const VkBufferMemoryBarrier*)DE_NULL,
2395 									1, &imageBarrierPost);
2396 					}
2397 					else
2398 					{
2399 						VkDeviceSize					size			= ((b == 0) ? sizeOutputR64 : sizeImageR64);
2400 						const vector<VkBufferImageCopy>	bufferImageCopy	(1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2401 
2402 						copyBufferToImage(vk,
2403 							*cmdBuffer,
2404 							bufferR64,
2405 							size,
2406 							bufferImageCopy,
2407 							VK_IMAGE_ASPECT_COLOR_BIT,
2408 							1,
2409 							clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2410 					}
2411 				}
2412 			}
2413 			break;
2414 		}
2415 	}
2416 
2417 	const VkSamplerCreateInfo	samplerParams	=
2418 	{
2419 		VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType			sType;
2420 		DE_NULL,									// const void*				pNext;
2421 		0,											// VkSamplerCreateFlags		flags;
2422 		VK_FILTER_NEAREST,							// VkFilter					magFilter;
2423 		VK_FILTER_NEAREST,							// VkFilter					minFilter;
2424 		VK_SAMPLER_MIPMAP_MODE_NEAREST,				// VkSamplerMipmapMode		mipmapMode;
2425 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeU;
2426 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeV;
2427 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeW;
2428 		0.0f,										// float					mipLodBias;
2429 		VK_FALSE,									// VkBool32					anisotropyEnable;
2430 		1.0f,										// float					maxAnisotropy;
2431 		VK_FALSE,									// VkBool32					compareEnable;
2432 		VK_COMPARE_OP_ALWAYS,						// VkCompareOp				compareOp;
2433 		0.0f,										// float					minLod;
2434 		0.0f,										// float					maxLod;
2435 		formatIsFloat(m_data.format) ?
2436 			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2437 			VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,	// VkBorderColor			borderColor;
2438 		VK_FALSE									// VkBool32					unnormalizedCoordinates;
2439 	};
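	// CLAMP_TO_BORDER with a transparent-black border matches the zero
	// vector the shader checks expect for out-of-bounds sampled reads.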
2440 
2441 	Move<VkSampler>				sampler			(createSampler(vk, device, &samplerParams));
2442 
2443 	// Flush modified memory.
2444 	if (!m_data.nullDescriptor)
2445 		flushAlloc(vk, device, buffer->getAllocation());
2446 
2447 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2448 	{
2449 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// sType
2450 		DE_NULL,													// pNext
2451 		(VkPipelineLayoutCreateFlags)0,
2452 		1u,															// setLayoutCount
2453 		&descriptorSetLayout.get(),									// pSetLayouts
2454 		0u,															// pushConstantRangeCount
2455 		DE_NULL,													// pPushConstantRanges
2456 	};
2457 
2458 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2459 
2460 	de::MovePtr<BufferWithMemory> copyBuffer;
2461 	copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2462 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2463 
2464 	{
2465 		vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2466 		vector<VkDescriptorImageInfo> imageInfoVec(2);
2467 		vector<VkBufferView> bufferViewVec(2);
2468 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2469 		int vecIndex = 0;
2470 		int numDynamic = 0;
2471 
2472 #ifndef CTS_USES_VULKANSC
2473 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2474 												bufTemplateEntriesBefore,
2475 												texelBufTemplateEntriesBefore;
2476 #endif
2477 
2478 		for (size_t b = 0; b < bindings.size(); ++b)
2479 		{
2480 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2481 			// Fill out the descriptor info to be written for this binding.
2482 			if (binding.descriptorCount > 0)
2483 			{
2484 				// Select the resource info to write, based on the descriptor type.
2485 				switch (binding.descriptorType)
2486 				{
2487 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2488 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2489 					// Image descriptors: binding 0 is the output image, binding 1 the test image (left null when testing nullDescriptor).
2490 					if (b == 1 && m_data.nullDescriptor)
2491 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2492 					else
2493 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2494 					break;
2495 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2496 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2497 					if (b == 1 && m_data.nullDescriptor)
2498 						bufferViewVec[vecIndex] = DE_NULL;
2499 					else
2500 						bufferViewVec[vecIndex] = **bufferViews[0];
2501 					break;
2502 				default:
2503 					// Other descriptor types.
2504 					if (b == 1 && m_data.nullDescriptor)
2505 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2506 					else
2507 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2508 					break;
2509 				}
2510 
2511 				VkWriteDescriptorSet w =
2512 				{
2513 					VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,				// sType
2514 					DE_NULL,											// pNext
2515 					m_data.pushDescriptor ? DE_NULL : *descriptorSet,	// dstSet
2516 					(deUint32)b,										// binding
2517 					0,													// dstArrayElement
2518 					1u,													// descriptorCount
2519 					binding.descriptorType,								// descriptorType
2520 					&imageInfoVec[vecIndex],							// pImageInfo
2521 					&bufferInfoVec[vecIndex],							// pBufferInfo
2522 					&bufferViewVec[vecIndex],							// pTexelBufferView
2523 				};
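				// The pointer members that don't apply to descriptorType are
				// ignored by the implementation, so it is safe to point all
				// three at the staging vectors here.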
2524 
2525 #ifndef CTS_USES_VULKANSC
2526 				VkDescriptorUpdateTemplateEntry templateEntry =
2527 				{
2528 					(deUint32)b,				// uint32_t				dstBinding;
2529 					0,							// uint32_t				dstArrayElement;
2530 					1u,							// uint32_t				descriptorCount;
2531 					binding.descriptorType,		// VkDescriptorType		descriptorType;
2532 					0,							// size_t				offset;
2533 					0,							// size_t				stride;
2534 				};
2535 
2536 				switch (binding.descriptorType)
2537 				{
2538 				default: DE_ASSERT(0); // Fallthrough
2539 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2540 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2541 					templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2542 					imgTemplateEntriesBefore.push_back(templateEntry);
2543 					break;
2544 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2545 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2546 					templateEntry.offset = vecIndex * sizeof(VkBufferView);
2547 					texelBufTemplateEntriesBefore.push_back(templateEntry);
2548 					break;
2549 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2550 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2551 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2552 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2553 					templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2554 					bufTemplateEntriesBefore.push_back(templateEntry);
2555 					break;
2556 				}
2557 #endif
2558 
2559 				vecIndex++;
2560 
2561 				writesBeforeBindVec.push_back(w);
2562 
2563 				// Count the number of dynamic descriptors in this set.
2564 				if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2565 					binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2566 				{
2567 					numDynamic++;
2568 				}
2569 			}
2570 		}
2571 
2572 		// Make zeros have at least one element so &zeros[0] works
2573 		vector<deUint32> zeros(de::max(1,numDynamic));
2574 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2575 
2576 		// Use either vkUpdateDescriptorSets or vkUpdateDescriptorSetWithTemplate, depending on the useTemplate test parameter.
2577 		if (m_data.useTemplate)
2578 		{
2579 #ifndef CTS_USES_VULKANSC
2580 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2581 			{
2582 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2583 				NULL,														// const void*								pNext;
2584 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2585 				0,															// uint32_t									descriptorUpdateEntryCount;
2586 				DE_NULL,													// const VkDescriptorUpdateTemplateEntry*	pDescriptorUpdateEntries;
2587 				m_data.pushDescriptor ?
2588 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2589 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,		// VkDescriptorUpdateTemplateType			templateType;
2590 				descriptorSetLayout.get(),									// VkDescriptorSetLayout					descriptorSetLayout;
2591 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2592 				*pipelineLayout,											// VkPipelineLayout							pipelineLayout;
2593 				0,															// uint32_t									set;
2594 			};
2595 
2596 			void *templateVectorData[] =
2597 			{
2598 				imageInfoVec.data(),
2599 				bufferInfoVec.data(),
2600 				bufferViewVec.data(),
2601 			};
2602 
2603 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2604 			{
2605 				&imgTemplateEntriesBefore,
2606 				&bufTemplateEntriesBefore,
2607 				&texelBufTemplateEntriesBefore,
2608 			};
2609 
2610 			if (m_data.pushDescriptor)
2611 			{
2612 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2613 				{
2614 					if (templateVectorsBefore[i]->size())
2615 					{
2616 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2617 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2618 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2619 						vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2620 					}
2621 				}
2622 			}
2623 			else
2624 			{
2625 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2626 				{
2627 					if (templateVectorsBefore[i]->size())
2628 					{
2629 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2630 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2631 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2632 						vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2633 					}
2634 				}
2635 
2636 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2637 			}
2638 #endif
2639 		}
2640 		else
2641 		{
2642 			if (m_data.pushDescriptor)
2643 			{
2644 #ifndef CTS_USES_VULKANSC
2645 				if (writesBeforeBindVec.size())
2646 				{
2647 					vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2648 				}
2649 #endif
2650 			}
2651 			else
2652 			{
2653 				if (writesBeforeBindVec.size())
2654 				{
2655 					vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2656 				}
2657 
2658 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2659 			}
2660 		}
2661 	}
2662 
2663 	Move<VkPipeline> pipeline;
2664 	Move<VkRenderPass> renderPass;
2665 	Move<VkFramebuffer> framebuffer;
2666 
2667 	de::MovePtr<BufferWithMemory> sbtBuffer;
2668 
2669 	if (m_data.stage == STAGE_COMPUTE)
2670 	{
2671 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2672 
2673 		const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
2674 		{
2675 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType						sType;
2676 			nullptr,												// const void*							pNext;
2677 			static_cast<VkPipelineShaderStageCreateFlags>(0u),		// VkPipelineShaderStageCreateFlags		flags;
2678 			VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits				stage;
2679 			*shader,												// VkShaderModule						module;
2680 			"main",													// const char*							pName;
2681 			nullptr,												// const VkSpecializationInfo*			pSpecializationInfo;
2682 		};
2683 
2684 		VkComputePipelineCreateInfo pipelineCreateInfo =
2685 		{
2686 			VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,			// VkStructureType					sType;
2687 			nullptr,												// const void*						pNext;
2688 			static_cast<VkPipelineCreateFlags>(0u),					// VkPipelineCreateFlags			flags;
2689 			pipelineShaderStageParams,								// VkPipelineShaderStageCreateInfo	stage;
2690 			*pipelineLayout,										// VkPipelineLayout					layout;
2691 			DE_NULL,												// VkPipeline						basePipelineHandle;
2692 			0,														// deInt32							basePipelineIndex;
2693 		};
2694 
2695 #ifndef CTS_USES_VULKANSC
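		// When testing VK_EXT_pipeline_robustness, robustness behavior is
		// requested per pipeline by chaining VkPipelineRobustnessCreateInfoEXT,
		// rather than via the device-level features.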
2696 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2697 		if (m_data.testPipelineRobustness)
2698 		{
2699 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2700 			pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2701 		}
2702 #endif
2703 
2704 		pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
2705 
2706 	}
2707 #ifndef CTS_USES_VULKANSC
2708 	else if (m_data.stage == STAGE_RAYGEN)
2709 	{
2710 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2711 
2712 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2713 		{
2714 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2715 			DE_NULL,
2716 			(VkPipelineShaderStageCreateFlags)0,
2717 			VK_SHADER_STAGE_RAYGEN_BIT_NV,								// stage
2718 			*shader,													// shader
2719 			"main",
2720 			DE_NULL,													// pSpecializationInfo
2721 		};
2722 
2723 		VkRayTracingShaderGroupCreateInfoNV group =
2724 		{
2725 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2726 			DE_NULL,
2727 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,			// type
2728 			0,														// generalShader
2729 			VK_SHADER_UNUSED_KHR,									// closestHitShader
2730 			VK_SHADER_UNUSED_KHR,									// anyHitShader
2731 			VK_SHADER_UNUSED_KHR,									// intersectionShader
2732 		};
2733 
2734 		VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2735 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV,	// sType
2736 			DE_NULL,												// pNext
2737 			0,														// flags
2738 			1,														// stageCount
2739 			&shaderCreateInfo,										// pStages
2740 			1,														// groupCount
2741 			&group,													// pGroups
2742 			0,														// maxRecursionDepth
2743 			*pipelineLayout,										// layout
2744 			(vk::VkPipeline)0,										// basePipelineHandle
2745 			0u,														// basePipelineIndex
2746 		};
2747 
2748 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2749 		if (m_data.testPipelineRobustness)
2750 		{
2751 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2752 			pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2753 		}
2754 
2755 		pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2756 
2757 		sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2758 			vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2759 
2760 		deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2761 		invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2762 
2763 		vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
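		// The raygen group handle queried above is written straight into the host-visible SBT
		// buffer; that single entry is all cmdTraceRaysNV needs for a one-group pipeline.
		// (This assumes the allocation is host-coherent; a non-coherent allocation would also
		// need a flush after the write.)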
2764 	}
2765 #endif
2766 	else
2767 	{
2768 		const VkSubpassDescription		subpassDesc				=
2769 		{
2770 			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
2771 			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
2772 			0u,																		// deUint32						inputAttachmentCount
2773 			DE_NULL,																// const VkAttachmentReference*	pInputAttachments
2774 			0u,																		// deUint32						colorAttachmentCount
2775 			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
2776 			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
2777 			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
2778 			0u,																		// deUint32						preserveAttachmentCount
2779 			DE_NULL																	// const deUint32*				pPreserveAttachments
2780 		};
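		// The subpass uses no attachments: results are written through storage descriptors
		// rather than color outputs, so the render pass and framebuffer below only define
		// the DIMxDIM render area.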
2781 
2782 		const VkSubpassDependency		subpassDependency		=
2783 		{
2784 			VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
2785 			0,												// deUint32				dstSubpass
2786 			VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
2787 			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,			// VkPipelineStageFlags	dstStageMask
2788 			VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
2789 			VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT,	// VkAccessFlags		dstAccessMask
2790 			VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
2791 		};
2792 
2793 		const VkRenderPassCreateInfo	renderPassParams		=
2794 		{
2795 			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureType					sType
2796 			DE_NULL,												// const void*						pNext
2797 			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
2798 			0u,														// deUint32							attachmentCount
2799 			DE_NULL,												// const VkAttachmentDescription*	pAttachments
2800 			1u,														// deUint32							subpassCount
2801 			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
2802 			1u,														// deUint32							dependencyCount
2803 			&subpassDependency										// const VkSubpassDependency*		pDependencies
2804 		};
2805 
2806 		renderPass = createRenderPass(vk, device, &renderPassParams);
2807 
2808 		const vk::VkFramebufferCreateInfo	framebufferParams	=
2809 		{
2810 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
2811 			DE_NULL,												// pNext
2812 			(vk::VkFramebufferCreateFlags)0,
2813 			*renderPass,											// renderPass
2814 			0u,														// attachmentCount
2815 			DE_NULL,												// pAttachments
2816 			DIM,													// width
2817 			DIM,													// height
2818 			1u,														// layers
2819 		};
2820 
2821 		framebuffer = createFramebuffer(vk, device, &framebufferParams);
2822 
2823 		const VkVertexInputBindingDescription			vertexInputBindingDescription		=
2824 		{
2825 			0u,								// deUint32			 binding
2826 			(deUint32)formatBytes,			// deUint32			 stride
2827 			VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate
2828 		};
2829 
2830 		const VkVertexInputAttributeDescription			vertexInputAttributeDescription		=
2831 		{
2832 			0u,								// deUint32	location
2833 			0u,								// deUint32	binding
2834 			m_data.format,					// VkFormat	format
2835 			0u								// deUint32	offset
2836 		};
2837 
2838 		deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2839 
2840 		VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
2841 		{
2842 			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2843 			DE_NULL,													// const void*								pNext;
2844 			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
2845 			numAttribs,													// deUint32									vertexBindingDescriptionCount;
2846 			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2847 			numAttribs,													// deUint32									vertexAttributeDescriptionCount;
2848 			&vertexInputAttributeDescription							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2849 		};
2850 
2851 		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
2852 		{
2853 			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
2854 			DE_NULL,														// const void*								pNext;
2855 			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
2856 			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
2857 			VK_FALSE														// VkBool32									primitiveRestartEnable;
2858 		};
2859 
2860 		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
2861 		{
2862 			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
2863 			DE_NULL,														// const void*								pNext;
2864 			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
2865 			VK_FALSE,														// VkBool32									depthClampEnable;
2866 			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
2867 			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
2868 			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
2869 			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
2870 			VK_FALSE,														// VkBool32									depthBiasEnable;
2871 			0.0f,															// float									depthBiasConstantFactor;
2872 			0.0f,															// float									depthBiasClamp;
2873 			0.0f,															// float									depthBiasSlopeFactor;
2874 			1.0f															// float									lineWidth;
2875 		};
2876 
2877 		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
2878 		{
2879 			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
2880 			DE_NULL,													// const void*								pNext
2881 			0u,															// VkPipelineMultisampleStateCreateFlags	flags
2882 			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
2883 			VK_FALSE,													// VkBool32									sampleShadingEnable
2884 			1.0f,														// float									minSampleShading
2885 			DE_NULL,													// const VkSampleMask*						pSampleMask
2886 			VK_FALSE,													// VkBool32									alphaToCoverageEnable
2887 			VK_FALSE													// VkBool32									alphaToOneEnable
2888 		};
2889 
2890 		VkViewport viewport = makeViewport(DIM, DIM);
2891 		VkRect2D scissor = makeRect2D(DIM, DIM);
2892 
2893 		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
2894 		{
2895 			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
2896 			DE_NULL,												// const void*								pNext
2897 			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
2898 			1u,														// deUint32									viewportCount
2899 			&viewport,												// const VkViewport*						pViewports
2900 			1u,														// deUint32									scissorCount
2901 			&scissor												// const VkRect2D*							pScissors
2902 		};
2903 
2904 		Move<VkShaderModule> fs;
2905 		Move<VkShaderModule> vs;
2906 
2907 		deUint32 numStages;
2908 		if (m_data.stage == STAGE_VERTEX)
2909 		{
2910 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2911 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // placeholder; unused since rasterization is discarded for STAGE_VERTEX
2912 			numStages = 1u;
2913 		}
2914 		else
2915 		{
2916 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2917 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2918 			numStages = 2u;
2919 		}
2920 
2921 		VkPipelineShaderStageCreateInfo	shaderCreateInfo[2] =
2922 		{
2923 			{
2924 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2925 				DE_NULL,
2926 				(VkPipelineShaderStageCreateFlags)0,
2927 				VK_SHADER_STAGE_VERTEX_BIT,									// stage
2928 				*vs,														// shader
2929 				"main",
2930 				DE_NULL,													// pSpecializationInfo
2931 			},
2932 			{
2933 				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2934 				DE_NULL,
2935 				(VkPipelineShaderStageCreateFlags)0,
2936 				VK_SHADER_STAGE_FRAGMENT_BIT,								// stage
2937 				*fs,														// shader
2938 				"main",
2939 				DE_NULL,													// pSpecializationInfo
2940 			}
2941 		};
2942 
2943 		VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
2944 		{
2945 			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
2946 			DE_NULL,											// const void*										pNext;
2947 			(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
2948 			numStages,											// deUint32											stageCount;
2949 			&shaderCreateInfo[0],								// const VkPipelineShaderStageCreateInfo*			pStages;
2950 			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
2951 			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
2952 			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
2953 			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
2954 			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
2955 			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
2956 			DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
2957 			DE_NULL,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
2958 			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
2959 			pipelineLayout.get(),								// VkPipelineLayout									layout;
2960 			renderPass.get(),									// VkRenderPass										renderPass;
2961 			0u,													// deUint32											subpass;
2962 			DE_NULL,											// VkPipeline										basePipelineHandle;
2963 			0													// int												basePipelineIndex;
2964 		};
2965 
2966 #ifndef CTS_USES_VULKANSC
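		// For graphics pipelines the robustness info can be chained at two scopes: on the
		// pipeline itself for vertex attribute fetch (robustness of the fixed-function
		// vertex input stage), or on the individual shader stage that performs the
		// out-of-bounds descriptor access.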
2967 		VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2968 		if (m_data.testPipelineRobustness)
2969 		{
2970 			pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2971 
2972 			if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2973 			{
2974 				graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2975 			}
2976 			else if (m_data.stage == STAGE_VERTEX)
2977 			{
2978 				shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
2979 			}
2980 			else
2981 			{
2982 				shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
2983 			}
2984 		}
2985 #endif
2986 
2987 		pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
2988 	}
2989 
2990 	const VkImageMemoryBarrier imageBarrier =
2991 	{
2992 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
2993 		DE_NULL,											// const void*			pNext
2994 		0u,													// VkAccessFlags		srcAccessMask
2995 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
2996 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
2997 		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
2998 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
2999 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
3000 		**images[0],										// VkImage				image
3001 		{
3002 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
3003 			0u,										// uint32_t				baseMipLevel
3004 			1u,										// uint32_t				levelCount
3005 			0u,										// uint32_t				baseArrayLayer
3006 			1u,										// uint32_t				layerCount
3007 		}
3008 	};
3009 
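	// Transition the output image from UNDEFINED to GENERAL before it is initialized by
	// the clear or buffer-to-image copy below.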
3010 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
3011 							(VkDependencyFlags)0,
3012 							0, (const VkMemoryBarrier*)DE_NULL,
3013 							0, (const VkBufferMemoryBarrier*)DE_NULL,
3014 							1, &imageBarrier);
3015 
3016 	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
3017 
3018 	if (!formatIsR64(m_data.format))
3019 	{
3020 		VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3021 		VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
3022 
3023 		vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
3024 	}
3025 	else
3026 	{
3027 		const vector<VkBufferImageCopy>	bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
3028 		copyBufferToImage(vk,
3029 			*cmdBuffer,
3030 			*(*bufferOutputImageR64),
3031 			sizeOutputR64,
3032 			bufferImageCopy,
3033 			VK_IMAGE_ASPECT_COLOR_BIT,
3034 			1,
3035 			1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
3036 	}
3037 
3038 	VkMemoryBarrier					memBarrier =
3039 	{
3040 		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
3041 		DE_NULL,							// pNext
3042 		0u,									// srcAccessMask
3043 		0u,									// dstAccessMask
3044 	};
3045 
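	// Make the transfer writes that initialized the output image visible to all shader
	// stages used by the test before the pipeline reads and writes it.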
3046 	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3047 	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3048 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
3049 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3050 
3051 	if (m_data.stage == STAGE_COMPUTE)
3052 	{
3053 		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3054 	}
3055 #ifndef CTS_USES_VULKANSC
3056 	else if (m_data.stage == STAGE_RAYGEN)
3057 	{
3058 		vk.cmdTraceRaysNV(*cmdBuffer,
3059 			**sbtBuffer, 0,
3060 			DE_NULL, 0, 0,
3061 			DE_NULL, 0, 0,
3062 			DE_NULL, 0, 0,
3063 			DIM, DIM, 1);
3064 	}
3065 #endif
3066 	else
3067 	{
3068 		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
3069 						makeRect2D(DIM, DIM),
3070 						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
3071 		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3072 		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3073 		{
3074 			VkDeviceSize zeroOffset = 0;
3075 			VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3076 			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3077 			vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3078 		}
3079 		if (m_data.stage == STAGE_VERTEX)
3080 		{
3081 			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
3082 		}
3083 		else
3084 		{
3085 			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3086 		}
3087 		endRenderPass(vk, *cmdBuffer);
3088 	}
3089 
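	// Make the shader writes available to the transfer stage, then read the output image
	// back into a host-visible buffer for verification.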
3090 	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3091 	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3092 	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
3093 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3094 
3095 	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
3096 															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3097 	vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
3098 
3099 	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3100 	memBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
3101 	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
3102 		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3103 
3104 	endCommandBuffer(vk, *cmdBuffer);
3105 
3106 	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3107 
3108 	void *ptr = copyBuffer->getAllocation().getHostPtr();
3109 
3110 	invalidateAlloc(vk, device, copyBuffer->getAllocation());
3111 
3112 	qpTestResult res = QP_TEST_RESULT_PASS;
3113 
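	// Each invocation is expected to have written 1 into the first component of its texel;
	// any other value in component 0 fails the test.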
3114 	for (deUint32 i = 0; i < DIM*DIM; ++i)
3115 	{
3116 		if (formatIsFloat(m_data.format))
3117 		{
3118 			if (((float *)ptr)[i * numComponents] != 1.0f)
3119 			{
3120 				res = QP_TEST_RESULT_FAIL;
3121 			}
3122 		}
3123 		else if (formatIsR64(m_data.format))
3124 		{
3125 			if (((deUint64 *)ptr)[i * numComponents] != 1)
3126 			{
3127 				res = QP_TEST_RESULT_FAIL;
3128 			}
3129 		}
3130 		else
3131 		{
3132 			if (((deUint32 *)ptr)[i * numComponents] != 1)
3133 			{
3134 				res = QP_TEST_RESULT_FAIL;
3135 			}
3136 		}
3137 	}
3138 
3139 	return tcu::TestStatus(res, qpGetTestResultName(res));
3140 }
3141 
3142 }	// anonymous
3143 
3144 static void createTests (tcu::TestCaseGroup* group, bool robustness2, bool pipelineRobustness)
3145 {
3146 	tcu::TestContext& testCtx = group->getTestContext();
3147 
3148 	typedef struct
3149 	{
3150 		deUint32				count;
3151 		const char*				name;
3152 		const char*				description;
3153 	} TestGroupCase;
3154 
3155 	TestGroupCase fmtCases[] =
3156 	{
3157 		{ VK_FORMAT_R32_SINT,				"r32i",		""		},
3158 		{ VK_FORMAT_R32_UINT,				"r32ui",	""		},
3159 		{ VK_FORMAT_R32_SFLOAT,				"r32f",		""		},
3160 		{ VK_FORMAT_R32G32_SINT,			"rg32i",	""		},
3161 		{ VK_FORMAT_R32G32_UINT,			"rg32ui",	""		},
3162 		{ VK_FORMAT_R32G32_SFLOAT,			"rg32f",	""		},
3163 		{ VK_FORMAT_R32G32B32A32_SINT,		"rgba32i",	""		},
3164 		{ VK_FORMAT_R32G32B32A32_UINT,		"rgba32ui",	""		},
3165 		{ VK_FORMAT_R32G32B32A32_SFLOAT,	"rgba32f",	""		},
3166 		{ VK_FORMAT_R64_SINT,				"r64i",		""		},
3167 		{ VK_FORMAT_R64_UINT,				"r64ui",	""		},
3168 	};
3169 
3170 	TestGroupCase fullDescCases[] =
3171 	{
3172 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,				"uniform_buffer",			""		},
3173 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,				"storage_buffer",			""		},
3174 		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,		"uniform_buffer_dynamic",	""		},
3175 		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,		"storage_buffer_dynamic",	""		},
3176 		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,			"uniform_texel_buffer",		""		},
3177 		{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,			"storage_texel_buffer",		""		},
3178 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
3179 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
3180 		{ VERTEX_ATTRIBUTE_FETCH,							"vertex_attribute_fetch",	""		},
3181 	};
3182 
3183 	TestGroupCase imgDescCases[] =
3184 	{
3185 		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
3186 		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
3187 	};
3188 
3189 	TestGroupCase fullLenCases32Bit[] =
3190 	{
3191 		{ ~0U,			"null_descriptor",	""		},
3192 		{ 0,			"img",				""		},
3193 		{ 4,			"len_4",			""		},
3194 		{ 8,			"len_8",			""		},
3195 		{ 12,			"len_12",			""		},
3196 		{ 16,			"len_16",			""		},
3197 		{ 20,			"len_20",			""		},
3198 		{ 31,			"len_31",			""		},
3199 		{ 32,			"len_32",			""		},
3200 		{ 33,			"len_33",			""		},
3201 		{ 35,			"len_35",			""		},
3202 		{ 36,			"len_36",			""		},
3203 		{ 39,			"len_39",			""		},
3204 		{ 40,			"len_40",			""		},
3205 		{ 252,			"len_252",			""		},
3206 		{ 256,			"len_256",			""		},
3207 		{ 260,			"len_260",			""		},
3208 	};
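	// Buffer length cases are in bytes: ~0U selects the null-descriptor variant, 0 the
	// image-only variant, and the remaining values exercise aligned and misaligned binding
	// sizes around typical element strides. fullLenCases64Bit below mirrors these values,
	// doubled for 64-bit texel formats.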
3209 
3210 	TestGroupCase fullLenCases64Bit[] =
3211 	{
3212 		{ ~0U,			"null_descriptor",	""		},
3213 		{ 0,			"img",				""		},
3214 		{ 8,			"len_8",			""		},
3215 		{ 16,			"len_16",			""		},
3216 		{ 24,			"len_24",			""		},
3217 		{ 32,			"len_32",			""		},
3218 		{ 40,			"len_40",			""		},
3219 		{ 62,			"len_62",			""		},
3220 		{ 64,			"len_64",			""		},
3221 		{ 66,			"len_66",			""		},
3222 		{ 70,			"len_70",			""		},
3223 		{ 72,			"len_72",			""		},
3224 		{ 78,			"len_78",			""		},
3225 		{ 80,			"len_80",			""		},
3226 		{ 504,			"len_504",			""		},
3227 		{ 512,			"len_512",			""		},
3228 		{ 520,			"len_520",			""		},
3229 	};
3230 
3231 	TestGroupCase imgLenCases[] =
3232 	{
3233 		{ 0,	"img",	""		},
3234 	};
3235 
3236 	TestGroupCase viewCases[] =
3237 	{
3238 		{ VK_IMAGE_VIEW_TYPE_1D,			"1d",			""		},
3239 		{ VK_IMAGE_VIEW_TYPE_2D,			"2d",			""		},
3240 		{ VK_IMAGE_VIEW_TYPE_3D,			"3d",			""		},
3241 		{ VK_IMAGE_VIEW_TYPE_CUBE,			"cube",			""		},
3242 		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		"1d_array",		""		},
3243 		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		"2d_array",		""		},
3244 		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	"cube_array",	""		},
3245 	};
3246 
3247 	TestGroupCase sampCases[] =
3248 	{
3249 		{ VK_SAMPLE_COUNT_1_BIT,			"samples_1",	""		},
3250 		{ VK_SAMPLE_COUNT_4_BIT,			"samples_4",	""		},
3251 	};
3252 
3253 	TestGroupCase stageCases[] =
3254 	{
3255 		{ STAGE_COMPUTE,	"comp",		"compute"	},
3256 		{ STAGE_FRAGMENT,	"frag",		"fragment"	},
3257 		{ STAGE_VERTEX,		"vert",		"vertex"	},
3258 #ifndef CTS_USES_VULKANSC
3259 		{ STAGE_RAYGEN,		"rgen",		"raygen"	},
3260 #endif
3261 	};
3262 
3263 	TestGroupCase volCases[] =
3264 	{
3265 		{ 0,			"nonvolatile",	""		},
3266 		{ 1,			"volatile",		""		},
3267 	};
3268 
3269 	TestGroupCase unrollCases[] =
3270 	{
3271 		{ 0,			"dontunroll",	""		},
3272 		{ 1,			"unroll",		""		},
3273 	};
3274 
3275 	TestGroupCase tempCases[] =
3276 	{
3277 		{ 0,			"notemplate",	""		},
3278 #ifndef CTS_USES_VULKANSC
3279 		{ 1,			"template",		""		},
3280 #endif
3281 	};
3282 
3283 	TestGroupCase pushCases[] =
3284 	{
3285 		{ 0,			"bind",			""		},
3286 #ifndef CTS_USES_VULKANSC
3287 		{ 1,			"push",			""		},
3288 #endif
3289 	};
3290 
3291 	TestGroupCase fmtQualCases[] =
3292 	{
3293 		{ 0,			"no_fmt_qual",	""		},
3294 		{ 1,			"fmt_qual",		""		},
3295 	};
3296 
3297 	TestGroupCase readOnlyCases[] =
3298 	{
3299 		{ 0,			"readwrite",	""		},
3300 		{ 1,			"readonly",		""		},
3301 	};
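	// Build the test hierarchy: push/bind -> template -> format -> unroll -> volatility ->
	// descriptor type (with a readonly/readwrite split for storage buffers) -> format
	// qualifier -> buffer length -> sample count -> view type -> shader stage.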
3302 
3303 	for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3304 	{
3305 		de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
3306 		for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3307 		{
3308 			de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
3309 			for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3310 			{
3311 				de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));
3312 
3313 				// Avoid too much duplication by excluding certain test cases
3314 				if (pipelineRobustness &&
3315 				    !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT || fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT || fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3316 				{
3317 					continue;
3318 				}
3319 
3320 				int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3321 
3322 				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3323 				{
3324 					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
3325 
3326 					// Avoid too much duplication by excluding certain test cases
3327 					if (unrollNdx > 0 && pipelineRobustness)
3328 						continue;
3329 
3330 					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3331 					{
3332 						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));
3333 
3334 						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3335 						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3336 
3337 						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3338 						{
3339 							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
3340 
3341 							// Avoid too much duplication by excluding certain test cases
3342 							if (pipelineRobustness &&
3343 								!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3344 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3345 							{
3346 								continue;
3347 							}
3348 
3349 							for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3350 							{
3351 								de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));
3352 
3353 								// readonly cases only apply to storage buffer descriptors
3354 								if (readOnlyCases[roNdx].count != 0 &&
3355 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3356 									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3357 									continue;
3358 
3359 								if (pipelineRobustness &&
3360 									readOnlyCases[roNdx].count != 0)
3361 								{
3362 									continue;
3363 								}
3364 
3365 								for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3366 								{
3367 									de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
3368 
3369 									// the format qualifier is only used for storage images and storage texel buffers
3370 									if (fmtQualCases[fmtQualNdx].count &&
3371 										!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3372 										continue;
3373 
3374 									if (pushCases[pushNdx].count &&
3375 										(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3376 										continue;
3377 
3378 									const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3379 									int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
3380 									TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3381 
3382 									for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3383 									{
3384 										if (lenCases[lenNdx].count != ~0U)
3385 										{
3386 											bool bufferLen = lenCases[lenNdx].count != 0;
3387 											bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3388 											if (bufferLen != bufferDesc)
3389 												continue;
3390 
3391 											// Add template test cases only for null_descriptor cases
3392 											if (tempCases[tempNdx].count)
3393 												continue;
3394 										}
3395 
3396 										if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3397 											((lenCases[lenNdx].count % fmtSize) != 0) &&
3398 											lenCases[lenNdx].count != ~0U)
3399 										{
3400 											continue;
3401 										}
3402 
3403 										// Avoid too much duplication by excluding certain test cases
3404 										if (pipelineRobustness && robustness2 &&
3405 											(lenCases[lenNdx].count == 0 || ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
3406 										{
3407 											continue;
3408 										}
3409 
3410 										// "volatile" only applies to storage images/buffers
3411 										if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3412 											continue;
3413 
3414 
3415 										de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
3416 										for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3417 										{
3418 											de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
3419 
3420 											// Avoid too much duplication by excluding certain test cases
3421 											if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3422 												continue;
3423 
3424 											for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3425 											{
3426 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3427 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3428 													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3429 												{
3430 													// Buffer descriptors have no dimensionality; only test the "1D" variant
3431 													continue;
3432 												}
3433 
3434 												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3435 													sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3436 												{
3437 													continue;
3438 												}
3439 
3440 												// Avoid too much duplication by excluding certain test cases
3441 												if (pipelineRobustness &&
3442 													!(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
3443 												{
3444 													continue;
3445 												}
3446 
3447 												de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
3448 												for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
3449 												{
3450 													Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
3451 													VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
3452 													VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3453 #ifndef CTS_USES_VULKANSC
3454 													if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
3455 													{
3456 														allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
3457 														allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
3458 
3459 														if (pipelineRobustness)
3460 															continue;
3461 													}
3462 #endif // CTS_USES_VULKANSC
3463 													if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
3464 														continue;
3465 
3466 													if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
3467 														currentStage != STAGE_VERTEX)
3468 														continue;
3469 
3470 													deUint32 imageDim[3] = {5, 11, 6};
3471 													if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
3472 														viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
3473 														imageDim[1] = imageDim[0];
3474 
3475 													CaseDef c =
3476 													{
3477 														(VkFormat)fmtCases[fmtNdx].count,								// VkFormat format;
3478 														currentStage,													// Stage stage;
3479 														allShaderStages,												// VkFlags allShaderStages;
3480 														allPipelineStages,												// VkFlags allPipelineStages;
3481 														(int)descCases[descNdx].count,									// VkDescriptorType descriptorType;
3482 														(VkImageViewType)viewCases[viewNdx].count,						// VkImageViewType viewType;
3483 														(VkSampleCountFlagBits)sampCases[sampNdx].count,				// VkSampleCountFlagBits samples;
3484 														(int)lenCases[lenNdx].count,									// int bufferLen;
3485 														(bool)unrollCases[unrollNdx].count,								// bool unroll;
3486 														(bool)volCases[volNdx].count,									// bool vol;
3487 														(bool)(lenCases[lenNdx].count == ~0U),							// bool nullDescriptor
3488 														(bool)tempCases[tempNdx].count,									// bool useTemplate
3489 														(bool)fmtQualCases[fmtQualNdx].count,							// bool formatQualifier
3490 														(bool)pushCases[pushNdx].count,									// bool pushDescriptor;
3491 														(bool)robustness2,												// bool testRobustness2;
3492 														(bool)pipelineRobustness,										// bool testPipelineRobustness;
3493 														{ imageDim[0], imageDim[1], imageDim[2] },						// deUint32 imageDim[3];
3494 														(bool)(readOnlyCases[roNdx].count == 1),						// bool readOnly;
3495 													};
3496 
3497 													viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
3498 												}
3499 												sampGroup->addChild(viewGroup.release());
3500 											}
3501 											lenGroup->addChild(sampGroup.release());
3502 										}
3503 										fmtQualGroup->addChild(lenGroup.release());
3504 									}
3505 									// Put storage_buffer tests in separate readonly vs readwrite groups. Other types
3506 									// go directly into descGroup.
3507 									if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3508 										descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3509 										rwGroup->addChild(fmtQualGroup.release());
3510 									} else {
3511 										descGroup->addChild(fmtQualGroup.release());
3512 									}
3513 								}
3514 								if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3515 									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3516 									descGroup->addChild(rwGroup.release());
3517 								}
3518 							}
3519 							volGroup->addChild(descGroup.release());
3520 						}
3521 						unrollGroup->addChild(volGroup.release());
3522 					}
3523 					fmtGroup->addChild(unrollGroup.release());
3524 				}
3525 				tempGroup->addChild(fmtGroup.release());
3526 			}
3527 			pushGroup->addChild(tempGroup.release());
3528 		}
3529 		group->addChild(pushGroup.release());
3530 	}
3531 }
3532 
3533 static void createRobustness2Tests (tcu::TestCaseGroup* group)
3534 {
3535 	createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
3536 }
3537 
3538 static void createImageRobustnessTests (tcu::TestCaseGroup* group)
3539 {
3540 	createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
3541 }
3542 
3543 #ifndef CTS_USES_VULKANSC
3544 static void createPipelineRobustnessTests (tcu::TestCaseGroup* group)
3545 {
3546 	tcu::TestContext& testCtx = group->getTestContext();
3547 
3548 	tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2", "robustness2");
3549 
3550 	createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
3551 
3552 	group->addChild(robustness2Group);
3553 
3554 	tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness", "image_robustness");
3555 
3556 	createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
3557 
3558 	group->addChild(imageRobustness2Group);
3559 }
3560 #endif
3561 
3562 static void cleanupGroup (tcu::TestCaseGroup* group)
3563 {
3564 	DE_UNREF(group);
3565 	// Destroy singleton objects.
3566 	Robustness2Int64AtomicsSingleton::destroy();
3567 	ImageRobustnessInt64AtomicsSingleton::destroy();
3568 	ImageRobustnessSingleton::destroy();
3569 	Robustness2Singleton::destroy();
3570 	PipelineRobustnessImageRobustnessSingleton::destroy();
3571 	PipelineRobustnessRobustness2Singleton::destroy();
3572 	PipelineRobustnessImageRobustnessInt64AtomicsSingleton::destroy();
3573 	PipelineRobustnessRobustness2Int64AtomicsSingleton::destroy();
3574 	Robustness2Int64AtomicsScalarSingleton::destroy();
3575 	ImageRobustnessInt64AtomicsScalarSingleton::destroy();
3576 	ImageRobustnessScalarSingleton::destroy();
3577 	Robustness2ScalarSingleton::destroy();
3578 	PipelineRobustnessImageRobustnessScalarSingleton::destroy();
3579 	PipelineRobustnessRobustness2ScalarSingleton::destroy();
3580 	PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::destroy();
3581 	PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::destroy();
3582 }
3583 
3584 tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
3585 {
3586 	return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
3587 							createRobustness2Tests, cleanupGroup);
3588 }
3589 
3590 tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
3591 {
3592 	return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
3593 							createImageRobustnessTests, cleanupGroup);
3594 }
3595 
3596 #ifndef CTS_USES_VULKANSC
3597 tcu::TestCaseGroup* createPipelineRobustnessTests (tcu::TestContext& testCtx)
3598 {
3599 	return createTestGroup(testCtx, "pipeline_robustness", "VK_EXT_pipeline_robustness tests",
3600 							createPipelineRobustnessTests, cleanupGroup);
3601 }
3602 #endif
3603 
3604 }	// robustness
3605 }	// vkt
3606