• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 The Khronos Group Inc.
6  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7  * Copyright (c) 2016 The Android Open Source Project
8  *
9  * Licensed under the Apache License, Version 2.0 (the "License");
10  * you may not use this file except in compliance with the License.
11  * You may obtain a copy of the License at
12  *
13  *      http://www.apache.org/licenses/LICENSE-2.0
14  *
15  * Unless required by applicable law or agreed to in writing, software
16  * distributed under the License is distributed on an "AS IS" BASIS,
17  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  * See the License for the specific language governing permissions and
19  * limitations under the License.
20  *
21  *//*!
22  * \file
23  * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
24  *//*--------------------------------------------------------------------*/
25 
26 #include "vktOpaqueTypeIndexingTests.hpp"
27 
28 #include "vkRefUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkTypeUtil.hpp"
32 #include "vkQueryUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 
35 #include "tcuTexture.hpp"
36 #include "tcuTestLog.hpp"
37 #include "tcuVectorUtil.hpp"
38 #include "tcuTextureUtil.hpp"
39 
40 #include "deStringUtil.hpp"
41 #include "deSharedPtr.hpp"
42 #include "deRandom.hpp"
43 #include "deSTLUtil.hpp"
44 
45 #include "vktShaderExecutor.hpp"
46 
47 #include <sstream>
48 
49 namespace vkt
50 {
51 namespace shaderexecutor
52 {
53 
54 namespace
55 {
56 
57 using de::UniquePtr;
58 using de::MovePtr;
59 using de::SharedPtr;
60 using std::vector;
61 
62 using namespace vk;
63 
64 typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
65 
66 // Buffer helper
67 
// RAII wrapper owning a VkBuffer together with its host-visible backing memory.
// Creation, memory allocation and binding happen in the constructor; all Vulkan
// objects are released automatically via the Unique<>/UniquePtr<> members.
class Buffer
{
public:
								Buffer				(Context& context, VkBufferUsageFlags usage, size_t size);

	// Raw buffer handle (non-owning view; lifetime tied to this object).
	VkBuffer					getBuffer			(void) const { return *m_buffer;					}
	// CPU pointer to the mapped allocation backing the buffer.
	void*						getHostPtr			(void) const { return m_allocation->getHostPtr();	}
	// Flush host writes so they become visible to the device.
	void						flush				(void);
	// Invalidate host caches so device writes become visible to the host.
	void						invalidate			(void);

private:
	const DeviceInterface&		m_vkd;
	const VkDevice				m_device;
	const Unique<VkBuffer>		m_buffer;		// destroyed after m_allocation is freed (reverse decl order)
	const UniquePtr<Allocation>	m_allocation;
};
84 
85 typedef de::SharedPtr<Buffer> BufferSp;
86 
createBuffer(const DeviceInterface & vkd,VkDevice device,VkDeviceSize size,VkBufferUsageFlags usageFlags)87 Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
88 {
89 	const VkBufferCreateInfo	createInfo		=
90 	{
91 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
92 		DE_NULL,
93 		(VkBufferCreateFlags)0,
94 		size,
95 		usageFlags,
96 		VK_SHARING_MODE_EXCLUSIVE,
97 		0u,
98 		DE_NULL
99 	};
100 	return createBuffer(vkd, device, &createInfo);
101 }
102 
allocateAndBindMemory(const DeviceInterface & vkd,VkDevice device,Allocator & allocator,VkBuffer buffer)103 MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
104 {
105 	MovePtr<Allocation>		alloc	(allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
106 
107 	VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
108 
109 	return alloc;
110 }
111 
// Creates the buffer, allocates host-visible memory for it and binds the two
// together. Note: the buffer must be created before allocateAndBindMemory is
// evaluated, which the member declaration order in the class guarantees.
Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
	: m_vkd			(context.getDeviceInterface())
	, m_device		(context.getDevice())
	, m_buffer		(createBuffer			(context.getDeviceInterface(),
											 context.getDevice(),
											 (VkDeviceSize)size,
											 usage))
	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(),
											 context.getDevice(),
											 context.getDefaultAllocator(),
											 *m_buffer))
{
}
125 
flush(void)126 void Buffer::flush (void)
127 {
128 	flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
129 }
130 
invalidate(void)131 void Buffer::invalidate (void)
132 {
133 	invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
134 }
135 
createUniformIndexBuffer(Context & context,int numIndices,const int * indices)136 MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
137 {
138 	MovePtr<Buffer>		buffer	(new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
139 	int* const			bufPtr	= (int*)buffer->getHostPtr();
140 
141 	for (int ndx = 0; ndx < numIndices; ++ndx)
142 		bufPtr[ndx] = indices[ndx];
143 
144 	buffer->flush();
145 
146 	return buffer;
147 }
148 
149 // Tests
150 
// How the array index expression in the shader is formed.
enum IndexExprType
{
	INDEX_EXPR_TYPE_CONST_LITERAL	= 0,	// literal constant, e.g. sampler[1]
	INDEX_EXPR_TYPE_CONST_EXPRESSION,		// compile-time constant expression
	INDEX_EXPR_TYPE_UNIFORM,				// value read from a uniform
	INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,		// dynamically uniform value (per-invocation input)

	INDEX_EXPR_TYPE_LAST
};
160 
// Texture dimensionalities covered by the sampler indexing tests.
enum TextureType
{
	TEXTURE_TYPE_1D = 0,
	TEXTURE_TYPE_1D_ARRAY,
	TEXTURE_TYPE_2D,
	TEXTURE_TYPE_CUBE,
	TEXTURE_TYPE_2D_ARRAY,
	TEXTURE_TYPE_3D,

	TEXTURE_TYPE_LAST
};
172 
// Base test case for opaque-type indexing. Concrete subclasses fill in
// m_shaderSpec; shader sources are generated from it at program-build time.
class OpaqueTypeIndexingCase : public TestCase
{
public:
										OpaqueTypeIndexingCase		(tcu::TestContext&			testCtx,
																	 const char*				name,
																	 const char*				description,
																	 const glu::ShaderType		shaderType,
																	 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingCase		(void);

	// Emits shader sources for m_shaderType based on m_shaderSpec.
	virtual void						initPrograms				(vk::SourceCollections& programCollection) const
										{
											generateSources(m_shaderType, m_shaderSpec, programCollection);
										}

	virtual	void						checkSupport				(Context& context) const;

protected:
	const char*							m_name;				// retained for passing on to the test instance
	const glu::ShaderType				m_shaderType;
	const IndexExprType					m_indexExprType;
	ShaderSpec							m_shaderSpec;		// populated by subclasses
};
196 
// Stores the case parameters; m_shaderSpec is left for subclasses to fill in.
OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												const glu::ShaderType		shaderType,
												const IndexExprType			indexExprType)
	: TestCase			(testCtx, name, description)
	, m_name			(name)
	, m_shaderType		(shaderType)
	, m_indexExprType	(indexExprType)
{
}
208 
// No resources beyond the base class; nothing to release explicitly.
OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
{
}
212 
// Throws NotSupportedError if the implementation lacks the shader stage this case targets.
void OpaqueTypeIndexingCase::checkSupport (Context& context) const
{
	checkSupportShader(context, m_shaderType);
}
217 
// Base instance for opaque-type indexing tests; subclasses implement iterate().
class OpaqueTypeIndexingTestInstance : public TestInstance
{
public:
										OpaqueTypeIndexingTestInstance		(Context&					context,
																			 const glu::ShaderType		shaderType,
																			 const ShaderSpec&			shaderSpec,
																			 const char*				name,
																			 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingTestInstance		(void);

	virtual tcu::TestStatus				iterate								(void) = 0;

protected:
	// Throws NotSupportedError when dynamic indexing of the given descriptor
	// type is required by m_indexExprType but not supported by the device.
	void								checkSupported						(const VkDescriptorType descriptorType);

protected:
	tcu::TestContext&					m_testCtx;
	const glu::ShaderType				m_shaderType;
	const ShaderSpec&					m_shaderSpec;	// NOTE(review): reference — assumes owning case outlives the instance
	const char*							m_name;
	const IndexExprType					m_indexExprType;
};
240 
// Captures the parameters shared by all opaque-type indexing test instances.
OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context&					context,
																const glu::ShaderType		shaderType,
																const ShaderSpec&			shaderSpec,
																const char*					name,
																const IndexExprType			indexExprType)
	: TestInstance		(context)
	, m_testCtx			(context.getTestContext())
	, m_shaderType		(shaderType)
	, m_shaderSpec		(shaderSpec)
	, m_name			(name)
	, m_indexExprType	(indexExprType)
{
}
254 
// No owned resources; defined out-of-line to anchor the vtable.
OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance (void)
{
}
258 
checkSupported(const VkDescriptorType descriptorType)259 void OpaqueTypeIndexingTestInstance::checkSupported (const VkDescriptorType descriptorType)
260 {
261 	const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
262 
263 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
264 	{
265 		switch (descriptorType)
266 		{
267 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
268 				if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
269 					TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
270 				break;
271 
272 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
273 				if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
274 					TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
275 				break;
276 
277 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
278 				if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
279 					TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
280 				break;
281 
282 			default:
283 				break;
284 		}
285 	}
286 }
287 
declareUniformIndexVars(std::ostream & str,deUint32 bindingLocation,const char * varPrefix,int numVars)288 static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
289 {
290 	str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
291 
292 	for (int varNdx = 0; varNdx < numVars; varNdx++)
293 		str << "\thighp int " << varPrefix << varNdx << ";\n";
294 
295 	str << "};\n";
296 }
297 
getTextureType(glu::DataType samplerType)298 static TextureType getTextureType (glu::DataType samplerType)
299 {
300 	switch (samplerType)
301 	{
302 		case glu::TYPE_SAMPLER_1D:
303 		case glu::TYPE_INT_SAMPLER_1D:
304 		case glu::TYPE_UINT_SAMPLER_1D:
305 		case glu::TYPE_SAMPLER_1D_SHADOW:
306 			return TEXTURE_TYPE_1D;
307 
308 		case glu::TYPE_SAMPLER_1D_ARRAY:
309 		case glu::TYPE_INT_SAMPLER_1D_ARRAY:
310 		case glu::TYPE_UINT_SAMPLER_1D_ARRAY:
311 		case glu::TYPE_SAMPLER_1D_ARRAY_SHADOW:
312 			return TEXTURE_TYPE_1D_ARRAY;
313 
314 		case glu::TYPE_SAMPLER_2D:
315 		case glu::TYPE_INT_SAMPLER_2D:
316 		case glu::TYPE_UINT_SAMPLER_2D:
317 		case glu::TYPE_SAMPLER_2D_SHADOW:
318 			return TEXTURE_TYPE_2D;
319 
320 		case glu::TYPE_SAMPLER_CUBE:
321 		case glu::TYPE_INT_SAMPLER_CUBE:
322 		case glu::TYPE_UINT_SAMPLER_CUBE:
323 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
324 			return TEXTURE_TYPE_CUBE;
325 
326 		case glu::TYPE_SAMPLER_2D_ARRAY:
327 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
328 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
329 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
330 			return TEXTURE_TYPE_2D_ARRAY;
331 
332 		case glu::TYPE_SAMPLER_3D:
333 		case glu::TYPE_INT_SAMPLER_3D:
334 		case glu::TYPE_UINT_SAMPLER_3D:
335 			return TEXTURE_TYPE_3D;
336 
337 		default:
338 			throw tcu::InternalError("Invalid sampler type");
339 	}
340 }
341 
isShadowSampler(glu::DataType samplerType)342 static bool isShadowSampler (glu::DataType samplerType)
343 {
344 	return	samplerType == glu::TYPE_SAMPLER_1D_SHADOW			||
345 			samplerType == glu::TYPE_SAMPLER_1D_ARRAY_SHADOW	||
346 			samplerType == glu::TYPE_SAMPLER_2D_SHADOW			||
347 			samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW	||
348 			samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
349 }
350 
getSamplerOutputType(glu::DataType samplerType)351 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
352 {
353 	switch (samplerType)
354 	{
355 		case glu::TYPE_SAMPLER_1D:
356 		case glu::TYPE_SAMPLER_1D_ARRAY:
357 		case glu::TYPE_SAMPLER_2D:
358 		case glu::TYPE_SAMPLER_CUBE:
359 		case glu::TYPE_SAMPLER_2D_ARRAY:
360 		case glu::TYPE_SAMPLER_3D:
361 			return glu::TYPE_FLOAT_VEC4;
362 
363 		case glu::TYPE_SAMPLER_1D_SHADOW:
364 		case glu::TYPE_SAMPLER_1D_ARRAY_SHADOW:
365 		case glu::TYPE_SAMPLER_2D_SHADOW:
366 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
367 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
368 			return glu::TYPE_FLOAT;
369 
370 		case glu::TYPE_INT_SAMPLER_1D:
371 		case glu::TYPE_INT_SAMPLER_1D_ARRAY:
372 		case glu::TYPE_INT_SAMPLER_2D:
373 		case glu::TYPE_INT_SAMPLER_CUBE:
374 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
375 		case glu::TYPE_INT_SAMPLER_3D:
376 			return glu::TYPE_INT_VEC4;
377 
378 		case glu::TYPE_UINT_SAMPLER_1D:
379 		case glu::TYPE_UINT_SAMPLER_1D_ARRAY:
380 		case glu::TYPE_UINT_SAMPLER_2D:
381 		case glu::TYPE_UINT_SAMPLER_CUBE:
382 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
383 		case glu::TYPE_UINT_SAMPLER_3D:
384 			return glu::TYPE_UINT_VEC4;
385 
386 		default:
387 			throw tcu::InternalError("Invalid sampler type");
388 	}
389 }
390 
getSamplerTextureFormat(glu::DataType samplerType)391 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
392 {
393 	const glu::DataType		outType			= getSamplerOutputType(samplerType);
394 	const glu::DataType		outScalarType	= glu::getDataTypeScalarType(outType);
395 
396 	switch (outScalarType)
397 	{
398 		case glu::TYPE_FLOAT:
399 			if (isShadowSampler(samplerType))
400 				return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
401 			else
402 				return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
403 
404 		case glu::TYPE_INT:		return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
405 		case glu::TYPE_UINT:	return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
406 
407 		default:
408 			throw tcu::InternalError("Invalid sampler type");
409 	}
410 }
411 
getSamplerCoordType(glu::DataType samplerType)412 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
413 {
414 	const TextureType	texType		= getTextureType(samplerType);
415 	int					numCoords	= 0;
416 
417 	switch (texType)
418 	{
419 		case TEXTURE_TYPE_1D:		numCoords = 1;	break;
420 		case TEXTURE_TYPE_1D_ARRAY:	numCoords = 2;	break;
421 		case TEXTURE_TYPE_2D:		numCoords = 2;	break;
422 		case TEXTURE_TYPE_2D_ARRAY:	numCoords = 3;	break;
423 		case TEXTURE_TYPE_CUBE:		numCoords = 3;	break;
424 		case TEXTURE_TYPE_3D:		numCoords = 3;	break;
425 		default:
426 			DE_ASSERT(false);
427 	}
428 
429 	if (samplerType == glu::TYPE_SAMPLER_1D_SHADOW)
430 		numCoords = 3;
431 	else if (isShadowSampler(samplerType))
432 		numCoords += 1;
433 
434 	DE_ASSERT(de::inRange(numCoords, 1, 4));
435 
436 	return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
437 }
438 
fillTextureData(const tcu::PixelBufferAccess & access,de::Random & rnd)439 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
440 {
441 	DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
442 
443 	if (access.getFormat().order == tcu::TextureFormat::D)
444 	{
445 		// \note Texture uses odd values, lookup even values to avoid precision issues.
446 		const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
447 
448 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
449 			access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
450 	}
451 	else
452 	{
453 		TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
454 
455 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
456 			*((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
457 	}
458 }
459 
getVkImageType(TextureType texType)460 static vk::VkImageType getVkImageType (TextureType texType)
461 {
462 	switch (texType)
463 	{
464 		case TEXTURE_TYPE_1D:
465 		case TEXTURE_TYPE_1D_ARRAY:		return vk::VK_IMAGE_TYPE_1D;
466 		case TEXTURE_TYPE_2D:
467 		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_TYPE_2D;
468 		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_TYPE_2D;
469 		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_TYPE_3D;
470 		default:
471 			DE_FATAL("Impossible");
472 			return (vk::VkImageType)0;
473 	}
474 }
475 
getVkImageViewType(TextureType texType)476 static vk::VkImageViewType getVkImageViewType (TextureType texType)
477 {
478 	switch (texType)
479 	{
480 		case TEXTURE_TYPE_1D:			return vk::VK_IMAGE_VIEW_TYPE_1D;
481 		case TEXTURE_TYPE_1D_ARRAY:		return vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY;
482 		case TEXTURE_TYPE_2D:			return vk::VK_IMAGE_VIEW_TYPE_2D;
483 		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
484 		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_VIEW_TYPE_CUBE;
485 		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_VIEW_TYPE_3D;
486 		default:
487 			DE_FATAL("Impossible");
488 			return (vk::VkImageViewType)0;
489 	}
490 }
491 
492 //! Test image with 1-pixel dimensions and no mipmaps
// RAII wrapper owning a 1x1 (per layer) sampled image, its device memory and
// an image view; the constructor also uploads the given color value.
class TestImage
{
public:
								TestImage		(Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);

	// View handle for binding the image to a descriptor (non-owning).
	VkImageView					getImageView	(void) const { return *m_imageView; }

private:
	const Unique<VkImage>		m_image;
	const UniquePtr<Allocation>	m_allocation;	// bound to m_image before m_imageView is created
	const Unique<VkImageView>	m_imageView;
};
505 
createTestImage(const DeviceInterface & vkd,VkDevice device,TextureType texType,tcu::TextureFormat format)506 Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
507 {
508 	const VkImageCreateInfo		createInfo		=
509 	{
510 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
511 		DE_NULL,
512 		(texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
513 		getVkImageType(texType),
514 		mapTextureFormat(format),
515 		makeExtent3D(1, 1, 1),
516 		1u,
517 		(texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
518 		VK_SAMPLE_COUNT_1_BIT,
519 		VK_IMAGE_TILING_OPTIMAL,
520 		VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,
521 		VK_SHARING_MODE_EXCLUSIVE,
522 		0u,
523 		DE_NULL,
524 		VK_IMAGE_LAYOUT_UNDEFINED
525 	};
526 
527 	return createImage(vkd, device, &createInfo);
528 }
529 
allocateAndBindMemory(const DeviceInterface & vkd,VkDevice device,Allocator & allocator,VkImage image)530 de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
531 {
532 	de::MovePtr<Allocation>		alloc	= allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
533 
534 	VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
535 
536 	return alloc;
537 }
538 
createTestImageView(const DeviceInterface & vkd,VkDevice device,VkImage image,TextureType texType,tcu::TextureFormat format)539 Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
540 {
541 	const bool					isDepthImage	= format.order == tcu::TextureFormat::D;
542 	const VkImageViewCreateInfo	createInfo		=
543 	{
544 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
545 		DE_NULL,
546 		(VkImageViewCreateFlags)0,
547 		image,
548 		getVkImageViewType(texType),
549 		mapTextureFormat(format),
550 		{
551 			VK_COMPONENT_SWIZZLE_IDENTITY,
552 			VK_COMPONENT_SWIZZLE_IDENTITY,
553 			VK_COMPONENT_SWIZZLE_IDENTITY,
554 			VK_COMPONENT_SWIZZLE_IDENTITY,
555 		},
556 		{
557 			(VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
558 			0u,
559 			1u,
560 			0u,
561 			(texType == TEXTURE_TYPE_CUBE ? 6u : 1u)
562 		}
563 	};
564 
565 	return createImageView(vkd, device, &createInfo);
566 }
567 
TestImage(Context & context,TextureType texType,tcu::TextureFormat format,const void * colorValue)568 TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
569 	: m_image		(createTestImage		(context.getDeviceInterface(), context.getDevice(), texType, format))
570 	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
571 	, m_imageView	(createTestImageView	(context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
572 {
573 	const DeviceInterface&		vkd					= context.getDeviceInterface();
574 	const VkDevice				device				= context.getDevice();
575 
576 	const size_t				pixelSize			= (size_t)format.getPixelSize();
577 	const deUint32				numLayers			= (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
578 	const size_t				numReplicas			= (size_t)numLayers;
579 	const size_t				stagingBufferSize	= pixelSize*numReplicas;
580 
581 	const VkBufferCreateInfo	stagingBufferInfo	=
582 	{
583 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
584 		DE_NULL,
585 		(VkBufferCreateFlags)0u,
586 		(VkDeviceSize)stagingBufferSize,
587 		(VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
588 		VK_SHARING_MODE_EXCLUSIVE,
589 		0u,
590 		DE_NULL,
591 	};
592 	const Unique<VkBuffer>		stagingBuffer		(createBuffer(vkd, device, &stagingBufferInfo));
593 	const UniquePtr<Allocation>	alloc				(context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));
594 
595 	VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));
596 
597 	for (size_t ndx = 0; ndx < numReplicas; ++ndx)
598 		deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);
599 
600 	flushMappedMemoryRange(vkd, device, alloc->getMemory(), alloc->getOffset(), VK_WHOLE_SIZE);
601 
602 	{
603 		const VkImageAspectFlags		imageAspect		= (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
604 		const VkBufferImageCopy			copyInfo		=
605 		{
606 			0u,
607 			1u,
608 			1u,
609 			{
610 				imageAspect,
611 				0u,
612 				0u,
613 				numLayers
614 			},
615 			{ 0u, 0u, 0u },
616 			{ 1u, 1u, 1u }
617 		};
618 
619 		copyBufferToImage(vkd, device, context.getUniversalQueue(), context.getUniversalQueueFamilyIndex(), *stagingBuffer, stagingBufferSize, vector<VkBufferImageCopy>(1, copyInfo), DE_NULL, imageAspect, 1u, numLayers, *m_image);
620 	}
621 }
622 
623 typedef SharedPtr<TestImage> TestImageSp;
624 
625 // SamplerIndexingCaseInstance
626 
// Test instance for sampler-array indexing: builds NUM_SAMPLERS 1x1 textures,
// performs NUM_LOOKUPS indexed lookups per invocation and compares against a
// CPU reference.
class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 64,	// shader invocations per execution
		NUM_SAMPLERS		= 8,	// size of the sampler array
		NUM_LOOKUPS			= 4		// indexed lookups per invocation
	};

								SamplerIndexingCaseInstance		(Context&					context,
																 const glu::ShaderType		shaderType,
																 const ShaderSpec&			shaderSpec,
																 const char*				name,
																 glu::DataType				samplerType,
																 const IndexExprType		indexExprType,
																 const std::vector<int>&	lookupIndices);
	virtual						~SamplerIndexingCaseInstance	(void);

	virtual tcu::TestStatus		iterate							(void);

protected:
	const glu::DataType			m_samplerType;		// GLSL sampler type under test
	const std::vector<int>		m_lookupIndices;	// one array index per lookup
};
652 
// Stores the sampler type and the per-lookup index list (copied by value).
SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context&						context,
														  const glu::ShaderType			shaderType,
														  const ShaderSpec&				shaderSpec,
														  const char*					name,
														  glu::DataType					samplerType,
														  const IndexExprType			indexExprType,
														  const std::vector<int>&		lookupIndices)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_samplerType						(samplerType)
	, m_lookupIndices					(lookupIndices)
{
}
665 
// No owned resources beyond the base class.
SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance (void)
{
}
669 
isIntegerFormat(const tcu::TextureFormat & format)670 bool isIntegerFormat (const tcu::TextureFormat& format)
671 {
672 	const tcu::TextureChannelClass	chnClass	= tcu::getTextureChannelClass(format.type);
673 
674 	return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ||
675 		   chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
676 }
677 
iterate(void)678 tcu::TestStatus SamplerIndexingCaseInstance::iterate (void)
679 {
680 	const int						numInvocations		= SamplerIndexingCaseInstance::NUM_INVOCATIONS;
681 	const int						numSamplers			= SamplerIndexingCaseInstance::NUM_SAMPLERS;
682 	const int						numLookups			= SamplerIndexingCaseInstance::NUM_LOOKUPS;
683 	const glu::DataType				coordType			= getSamplerCoordType(m_samplerType);
684 	const glu::DataType				outputType			= getSamplerOutputType(m_samplerType);
685 	const tcu::TextureFormat		texFormat			= getSamplerTextureFormat(m_samplerType);
686 	const int						outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
687 	vector<float>					coords;
688 	vector<deUint32>				outData;
689 	vector<deUint8>					texData				(numSamplers * texFormat.getPixelSize());
690 	const tcu::PixelBufferAccess	refTexAccess		(texFormat, numSamplers, 1, 1, &texData[0]);
691 	de::Random						rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
692 	const TextureType				texType				= getTextureType(m_samplerType);
693 	const tcu::Sampler::FilterMode	filterMode			= (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
694 
695 	// The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
696 	const tcu::Sampler				refSampler			= isShadowSampler(m_samplerType)
697 																? tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
698 																				filterMode, filterMode, 0.0f, false /* non-normalized */,
699 																				tcu::Sampler::COMPAREMODE_LESS, 0, tcu::Vec4(0.0f), true)
700 																: tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
701 																				filterMode, filterMode, 0.0f, true,
702 																				tcu::Sampler::COMPAREMODE_NONE, 0, tcu::Vec4(0.0f), true);
703 
704 	const DeviceInterface&			vkd					= m_context.getDeviceInterface();
705 	const VkDevice					device				= m_context.getDevice();
706 	vector<TestImageSp>				images;
707 	vector<VkSamplerSp>				samplers;
708 	MovePtr<Buffer>					indexBuffer;
709 	Move<VkDescriptorSetLayout>		extraResourcesLayout;
710 	Move<VkDescriptorPool>			extraResourcesSetPool;
711 	Move<VkDescriptorSet>			extraResourcesSet;
712 
713 	checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
714 
715 	coords.resize(numInvocations * getDataTypeScalarSize(coordType));
716 
717 	if (texType == TEXTURE_TYPE_CUBE)
718 	{
719 		if (isShadowSampler(m_samplerType))
720 		{
721 			for (size_t i = 0; i < coords.size() / 4; i++)
722 			{
723 				coords[4 * i] = 1.0f;
724 				coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
725 			}
726 		}
727 		else
728 		{
729 			for (size_t i = 0; i < coords.size() / 3; i++)
730 			{
731 				coords[3 * i] = 1.0f;
732 				coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
733 			}
734 		}
735 	}
736 
737 	if (isShadowSampler(m_samplerType))
738 	{
739 		// Use different comparison value per invocation.
740 		// \note Texture uses odd values, comparison even values.
741 		const int	numCoordComps	= getDataTypeScalarSize(coordType);
742 		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
743 
744 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
745 			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
746 	}
747 
748 	fillTextureData(refTexAccess, rnd);
749 
750 	outData.resize(numLookups*outLookupStride);
751 
752 	for (int ndx = 0; ndx < numSamplers; ++ndx)
753 	{
754 		images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
755 
756 		{
757 			tcu::Sampler	samplerCopy	(refSampler);
758 			samplerCopy.normalizedCoords = true;
759 
760 			{
761 				const VkSamplerCreateInfo	samplerParams	= mapSampler(samplerCopy, texFormat);
762 				samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
763 			}
764 		}
765 	}
766 
767 	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
768 		indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
769 
770 	{
771 		const VkDescriptorSetLayoutBinding		bindings[]	=
772 		{
773 			{ 0u,						VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers,		VK_SHADER_STAGE_ALL,	DE_NULL		},
774 			{ (deUint32)numSamplers,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,							VK_SHADER_STAGE_ALL,	DE_NULL		}
775 		};
776 		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
777 		{
778 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
779 			DE_NULL,
780 			(VkDescriptorSetLayoutCreateFlags)0u,
781 			DE_LENGTH_OF_ARRAY(bindings),
782 			bindings,
783 		};
784 
785 		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
786 	}
787 
788 	{
789 		const VkDescriptorPoolSize			poolSizes[]	=
790 		{
791 			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers	},
792 			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,						}
793 		};
794 		const VkDescriptorPoolCreateInfo	poolInfo	=
795 		{
796 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
797 			DE_NULL,
798 			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
799 			1u,		// maxSets
800 			DE_LENGTH_OF_ARRAY(poolSizes),
801 			poolSizes,
802 		};
803 
804 		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
805 	}
806 
807 	{
808 		const VkDescriptorSetAllocateInfo	allocInfo	=
809 		{
810 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
811 			DE_NULL,
812 			*extraResourcesSetPool,
813 			1u,
814 			&extraResourcesLayout.get(),
815 		};
816 
817 		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
818 	}
819 
820 	{
821 		vector<VkDescriptorImageInfo>	imageInfos			(numSamplers);
822 		const VkWriteDescriptorSet		descriptorWrite		=
823 		{
824 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
825 			DE_NULL,
826 			*extraResourcesSet,
827 			0u,		// dstBinding
828 			0u,		// dstArrayElement
829 			(deUint32)numSamplers,
830 			VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
831 			&imageInfos[0],
832 			(const VkDescriptorBufferInfo*)DE_NULL,
833 			(const VkBufferView*)DE_NULL,
834 		};
835 
836 		for (int ndx = 0; ndx < numSamplers; ++ndx)
837 		{
838 			imageInfos[ndx].sampler		= **samplers[ndx];
839 			imageInfos[ndx].imageView	= images[ndx]->getImageView();
840 			imageInfos[ndx].imageLayout	= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
841 		}
842 
843 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
844 	}
845 
846 	if (indexBuffer)
847 	{
848 		const VkDescriptorBufferInfo	bufferInfo	=
849 		{
850 			indexBuffer->getBuffer(),
851 			0u,
852 			VK_WHOLE_SIZE
853 		};
854 		const VkWriteDescriptorSet		descriptorWrite		=
855 		{
856 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
857 			DE_NULL,
858 			*extraResourcesSet,
859 			(deUint32)numSamplers,	// dstBinding
860 			0u,						// dstArrayElement
861 			1u,
862 			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
863 			(const VkDescriptorImageInfo*)DE_NULL,
864 			&bufferInfo,
865 			(const VkBufferView*)DE_NULL,
866 		};
867 
868 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
869 	}
870 
871 	{
872 		std::vector<void*>			inputs;
873 		std::vector<void*>			outputs;
874 		std::vector<int>			expandedIndices;
875 		UniquePtr<ShaderExecutor>	executor		(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
876 
877 		inputs.push_back(&coords[0]);
878 
879 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
880 		{
881 			expandedIndices.resize(numInvocations * m_lookupIndices.size());
882 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
883 			{
884 				for (int invNdx = 0; invNdx < numInvocations; invNdx++)
885 					expandedIndices[lookupNdx*numInvocations + invNdx] = m_lookupIndices[lookupNdx];
886 			}
887 
888 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
889 				inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
890 		}
891 
892 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
893 			outputs.push_back(&outData[outLookupStride*lookupNdx]);
894 
895 		executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
896 	}
897 
898 	{
899 		tcu::TestLog&		log				= m_context.getTestContext().getLog();
900 		tcu::TestStatus		testResult		= tcu::TestStatus::pass("Pass");
901 
902 		if (isShadowSampler(m_samplerType))
903 		{
904 			const int			numCoordComps	= getDataTypeScalarSize(coordType);
905 
906 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
907 
908 			// Each invocation may have different results.
909 			for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
910 			{
911 				const float	coord	= coords[invocationNdx*numCoordComps + (numCoordComps-1)];
912 
913 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
914 				{
915 					const int		texNdx		= m_lookupIndices[lookupNdx];
916 					const float		result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
917 					const float		reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
918 
919 					if (de::abs(result-reference) > 0.005f)
920 					{
921 						log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
922 							<< reference << ", got " << result
923 							<< tcu::TestLog::EndMessage;
924 
925 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
926 							testResult = tcu::TestStatus::fail("Got invalid lookup result");
927 					}
928 				}
929 			}
930 		}
931 		else
932 		{
933 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
934 
935 			// Validate results from first invocation
936 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
937 			{
938 				const int		texNdx	= m_lookupIndices[lookupNdx];
939 				const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];
940 				bool			isOk;
941 
942 				if (outputType == glu::TYPE_FLOAT_VEC4)
943 				{
944 					const float			threshold		= 1.0f / 256.0f;
945 					const tcu::Vec4		reference		= refTexAccess.getPixel(texNdx, 0);
946 					const float*		floatPtr		= (const float*)resPtr;
947 					const tcu::Vec4		result			(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
948 
949 					isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
950 
951 					if (!isOk)
952 					{
953 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
954 							<< reference << ", got " << result
955 							<< tcu::TestLog::EndMessage;
956 					}
957 				}
958 				else
959 				{
960 					const tcu::UVec4	reference		= refTexAccess.getPixelUint(texNdx, 0);
961 					const deUint32*		uintPtr			= (const deUint32*)resPtr;
962 					const tcu::UVec4	result			(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
963 
964 					isOk = boolAll(equal(reference, result));
965 
966 					if (!isOk)
967 					{
968 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
969 							<< reference << ", got " << result
970 							<< tcu::TestLog::EndMessage;
971 					}
972 				}
973 
974 				if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
975 					testResult = tcu::TestStatus::fail("Got invalid lookup result");
976 			}
977 
978 			// Check results of other invocations against first one
979 			for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
980 			{
981 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
982 				{
983 					const deUint32*		refPtr		= &outData[lookupNdx*outLookupStride];
984 					const deUint32*		resPtr		= refPtr + invocationNdx*4;
985 					bool				isOk		= true;
986 
987 					for (int ndx = 0; ndx < 4; ndx++)
988 						isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
989 
990 					if (!isOk)
991 					{
992 						log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
993 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
994 							<< " for lookup " << lookupNdx << " doesn't match result from first invocation "
995 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
996 							<< tcu::TestLog::EndMessage;
997 
998 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
999 							testResult = tcu::TestStatus::fail("Inconsistent lookup results");
1000 					}
1001 				}
1002 			}
1003 		}
1004 
1005 		return testResult;
1006 	}
1007 }
1008 
// Test case for indexing into an array of combined image samplers.
// Generates the shader source in createShaderSpec(); per-context execution
// is handled by SamplerIndexingCaseInstance created in createInstance().
class SamplerIndexingCase : public OpaqueTypeIndexingCase
{
public:
								SamplerIndexingCase			(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 const glu::ShaderType		shaderType,
															 glu::DataType				samplerType,
															 IndexExprType				indexExprType);
	virtual						~SamplerIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;

private:
								SamplerIndexingCase			(const SamplerIndexingCase&);	// not allowed
	SamplerIndexingCase&		operator=					(const SamplerIndexingCase&);	// not allowed

	void						createShaderSpec			(void);

	const glu::DataType			m_samplerType;
	const int					m_numSamplers;	// size of the sampler array in the shader
	const int					m_numLookups;	// number of texture() lookups performed
	std::vector<int>			m_lookupIndices;	// per-lookup sampler index, randomized in createShaderSpec()
};
1033 
// Constructor: stores the test configuration, generates the randomized
// shader spec and registers the shader sources via init().
SamplerIndexingCase::SamplerIndexingCase (tcu::TestContext&			testCtx,
										  const char*				name,
										  const char*				description,
										  const glu::ShaderType		shaderType,
										  glu::DataType				samplerType,
										  IndexExprType				indexExprType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_samplerType				(samplerType)
	, m_numSamplers				(SamplerIndexingCaseInstance::NUM_SAMPLERS)
	, m_numLookups				(SamplerIndexingCaseInstance::NUM_LOOKUPS)
	, m_lookupIndices			(m_numLookups)
{
	createShaderSpec();
	init();
}
1049 
// Destructor: no resources beyond what the base class releases.
SamplerIndexingCase::~SamplerIndexingCase (void)
{
}
1053 
// Creates the per-context test instance; the instance keeps a reference to
// m_lookupIndices, which stays alive for the lifetime of this case.
TestInstance* SamplerIndexingCase::createInstance (Context& ctx) const
{
	return new SamplerIndexingCaseInstance(ctx,
										   m_shaderType,
										   m_shaderSpec,
										   m_name,
										   m_samplerType,
										   m_indexExprType,
										   m_lookupIndices);
}
1064 
createShaderSpec(void)1065 void SamplerIndexingCase::createShaderSpec (void)
1066 {
1067 	de::Random			rnd				(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1068 	const char*			samplersName	= "texSampler";
1069 	const char*			coordsName		= "coords";
1070 	const char*			indicesPrefix	= "index";
1071 	const char*			resultPrefix	= "result";
1072 	const glu::DataType	coordType		= getSamplerCoordType(m_samplerType);
1073 	const glu::DataType	outType			= getSamplerOutputType(m_samplerType);
1074 	std::ostringstream	global, code;
1075 
1076 	for (int ndx = 0; ndx < m_numLookups; ndx++)
1077 		m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers-1);
1078 
1079 	m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));
1080 
1081 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1082 		global << "#extension GL_EXT_gpu_shader5 : require\n";
1083 
1084 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1085 		global << "const highp int indexBase = 1;\n";
1086 
1087 	global <<
1088 		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
1089 
1090 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1091 	{
1092 		for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1093 		{
1094 			const std::string varName = indicesPrefix + de::toString(lookupNdx);
1095 			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1096 		}
1097 	}
1098 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1099 		declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
1100 
1101 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1102 	{
1103 		const std::string varName = resultPrefix + de::toString(lookupNdx);
1104 		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
1105 	}
1106 
1107 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1108 	{
1109 		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
1110 
1111 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1112 			code << m_lookupIndices[lookupNdx];
1113 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1114 			code << "indexBase + " << (m_lookupIndices[lookupNdx]-1);
1115 		else
1116 			code << indicesPrefix << lookupNdx;
1117 
1118 		code << "], " << coordsName << ");\n";
1119 	}
1120 
1121 	m_shaderSpec.globalDeclarations	= global.str();
1122 	m_shaderSpec.source				= code.str();
1123 }
1124 
// Kind of interface block the test indexes into.
enum BlockType
{
	BLOCKTYPE_UNIFORM = 0,	// uniform block (UBO)
	BLOCKTYPE_BUFFER,		// readonly buffer block (SSBO)

	BLOCKTYPE_LAST
};
1132 
// Per-context instance that executes an interface-block-array indexing test.
// Holds references to the read indices and input values owned by the parent
// case, so the case must outlive this instance.
class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// shader invocations per run
		NUM_INSTANCES		= 4,	// number of block-array elements
		NUM_READS			= 4		// reads (indexing operations) per invocation
	};

	enum Flags
	{
		FLAG_USE_STORAGE_BUFFER	= (1<<0)	// Use VK_KHR_storage_buffer_storage_class
	};

									BlockArrayIndexingCaseInstance	(Context&						context,
																	 const glu::ShaderType			shaderType,
																	 const ShaderSpec&				shaderSpec,
																	 const char*					name,
																	 BlockType						blockType,
																	 const deUint32					flags,
																	 const IndexExprType			indexExprType,
																	 const std::vector<int>&		readIndices,
																	 const std::vector<deUint32>&	inValues);
	virtual							~BlockArrayIndexingCaseInstance	(void);

	virtual tcu::TestStatus			iterate							(void);

private:
	const BlockType					m_blockType;
	const deUint32					m_flags;		// combination of Flags bits
	const std::vector<int>&			m_readIndices;	// which block instance each read targets (owned by the case)
	const std::vector<deUint32>&	m_inValues;		// value stored in each block instance (owned by the case)
};
1167 
// Constructor: stores configuration and aliases the case-owned index/value
// vectors; no Vulkan work happens until iterate().
BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context&						context,
																const glu::ShaderType			shaderType,
																const ShaderSpec&				shaderSpec,
																const char*						name,
																BlockType						blockType,
																const deUint32					flags,
																const IndexExprType				indexExprType,
																const std::vector<int>&			readIndices,
																const std::vector<deUint32>&	inValues)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_blockType						(blockType)
	, m_flags							(flags)
	, m_readIndices						(readIndices)
	, m_inValues						(inValues)
{
}
1184 
// Destructor: nothing to release; all Vulkan objects in iterate() are RAII-scoped.
BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance (void)
{
}
1188 
// Runs the block-array indexing test:
//  - uploads one host-visible buffer per block-array element (m_inValues),
//  - builds a descriptor set with the block array at bindings [0..N-1 as one
//    arrayed binding] and, for uniform-indexed variants, an index UBO after it,
//  - executes the shader for all invocations and checks every read against
//    the host-side expected value.
tcu::TestStatus BlockArrayIndexingCaseInstance::iterate (void)
{
	const int					numInvocations		= NUM_INVOCATIONS;
	const int					numReads			= NUM_READS;
	std::vector<deUint32>		outValues			(numInvocations*numReads);

	tcu::TestLog&				log					= m_context.getTestContext().getLog();
	tcu::TestStatus				testResult			= tcu::TestStatus::pass("Pass");

	std::vector<int>			expandedIndices;
	std::vector<void*>			inputs;
	std::vector<void*>			outputs;
	// Buffer usage and descriptor type follow the tested block type (UBO vs SSBO).
	const VkBufferUsageFlags	bufferUsage			= m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
	const VkDescriptorType		descriptorType		= m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

	const DeviceInterface&		vkd					= m_context.getDeviceInterface();
	const VkDevice				device				= m_context.getDevice();

	// \note Using separate buffer per element - might want to test
	// offsets & single buffer in the future.
	vector<BufferSp>			buffers				(m_inValues.size());
	MovePtr<Buffer>				indexBuffer;

	Move<VkDescriptorSetLayout>	extraResourcesLayout;
	Move<VkDescriptorPool>		extraResourcesSetPool;
	Move<VkDescriptorSet>		extraResourcesSet;

	checkSupported(descriptorType);

	if ((m_flags & FLAG_USE_STORAGE_BUFFER) != 0)
	{
		if (!m_context.isDeviceFunctionalitySupported("VK_KHR_storage_buffer_storage_class"))
			TCU_THROW(NotSupportedError, "VK_KHR_storage_buffer_storage_class is not supported");
	}

	// Upload one 32-bit value per block instance to its own buffer.
	for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
	{
		buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
		*(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
		buffers[bufferNdx]->flush();
	}

	// Uniform-indexed variant reads the indices from a separate UBO.
	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);

	{
		// Binding 0: arrayed block binding; binding N: index uniform buffer.
		const VkDescriptorSetLayoutBinding		bindings[]	=
		{
			{ 0u,							descriptorType,						(deUint32)m_inValues.size(),	VK_SHADER_STAGE_ALL,	DE_NULL		},
			{ (deUint32)m_inValues.size(),	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,								VK_SHADER_STAGE_ALL,	DE_NULL		}
		};
		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,
			(VkDescriptorSetLayoutCreateFlags)0u,
			DE_LENGTH_OF_ARRAY(bindings),
			bindings,
		};

		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
	}

	{
		const VkDescriptorPoolSize			poolSizes[]	=
		{
			{ descriptorType,						(deUint32)m_inValues.size()	},
			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,							}
		};
		const VkDescriptorPoolCreateInfo	poolInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
			1u,		// maxSets
			DE_LENGTH_OF_ARRAY(poolSizes),
			poolSizes,
		};

		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
	}

	{
		const VkDescriptorSetAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,
			*extraResourcesSetPool,
			1u,
			&extraResourcesLayout.get(),
		};

		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
	}

	{
		// Point each array element of binding 0 at its dedicated buffer.
		vector<VkDescriptorBufferInfo>	bufferInfos			(m_inValues.size());
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			0u,		// dstBinding
			0u,		// dstArrayElement
			(deUint32)m_inValues.size(),
			descriptorType,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfos[0],
			(const VkBufferView*)DE_NULL,
		};

		for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
		{
			bufferInfos[ndx].buffer		= buffers[ndx]->getBuffer();
			bufferInfos[ndx].offset		= 0u;
			bufferInfos[ndx].range		= VK_WHOLE_SIZE;
		}

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Bind the index UBO (only created for INDEX_EXPR_TYPE_UNIFORM).
	if (indexBuffer)
	{
		const VkDescriptorBufferInfo	bufferInfo	=
		{
			indexBuffer->getBuffer(),
			0u,
			VK_WHOLE_SIZE
		};
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			(deUint32)m_inValues.size(),	// dstBinding
			0u,								// dstArrayElement
			1u,
			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfo,
			(const VkBufferView*)DE_NULL,
		};

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Dynamic-uniform variant passes the indices as shader inputs: replicate
	// each read index across all invocations.
	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		expandedIndices.resize(numInvocations * m_readIndices.size());

		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			int* dst = &expandedIndices[numInvocations*readNdx];
			std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
		}

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			inputs.push_back(&expandedIndices[readNdx*numInvocations]);
	}

	for (int readNdx = 0; readNdx < numReads; readNdx++)
		outputs.push_back(&outValues[readNdx*numInvocations]);

	{
		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
	}

	// Every invocation must read back exactly the value written on the host.
	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const deUint32	refValue	= m_inValues[m_readIndices[readNdx]];
			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];

			if (refValue != resValue)
			{
				log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
					<< ", read " << readNdx << ": expected "
					<< tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
					<< tcu::TestLog::EndMessage;

				// Keep logging all mismatches but only set the failure once.
				if (testResult.getCode() == QP_TEST_RESULT_PASS)
					testResult = tcu::TestStatus::fail("Invalid result value");
			}
		}
	}

	return testResult;
}
1380 
// Test case for indexing into an array of interface blocks (UBO or SSBO).
// Generates the shader source in createShaderSpec(); execution is delegated
// to BlockArrayIndexingCaseInstance, which references m_readIndices and
// m_inValues owned here.
class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
{
public:
								BlockArrayIndexingCase		(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 BlockType					blockType,
															 IndexExprType				indexExprType,
															 const glu::ShaderType		shaderType,
															 deUint32					flags = 0u);
	virtual						~BlockArrayIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;
	virtual	void				checkSupport				(Context& context) const;

private:
								BlockArrayIndexingCase		(const BlockArrayIndexingCase&);	// not allowed
	BlockArrayIndexingCase&		operator=					(const BlockArrayIndexingCase&);	// not allowed

	void						createShaderSpec			(void);

	const BlockType				m_blockType;
	const deUint32				m_flags;		// BlockArrayIndexingCaseInstance::Flags bits
	std::vector<int>			m_readIndices;	// per-read block index, randomized in createShaderSpec()
	std::vector<deUint32>		m_inValues;		// per-instance block contents, randomized in createShaderSpec()
};
1407 
// Constructor: stores configuration, generates the randomized shader spec
// and registers the shader sources via init().
BlockArrayIndexingCase::BlockArrayIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												BlockType					blockType,
												IndexExprType				indexExprType,
												const glu::ShaderType		shaderType,
												deUint32					flags)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_blockType				(blockType)
	, m_flags					(flags)
	, m_readIndices				(BlockArrayIndexingCaseInstance::NUM_READS)
	, m_inValues				(BlockArrayIndexingCaseInstance::NUM_INSTANCES)
{
	createShaderSpec();
	init();
}
1424 
// Destructor: no resources beyond what the base class releases.
BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
{
}
1428 
checkSupport(Context & context) const1429 void BlockArrayIndexingCase::checkSupport(Context &context) const
1430 {
1431 	OpaqueTypeIndexingCase::checkSupport(context);
1432 
1433 	uint32_t maxDescriptorStorageBuffers = (uint32_t)(m_inValues.size());
1434 
1435 	switch (m_shaderType)
1436 	{
1437 	case glu::SHADERTYPE_VERTEX:
1438 	case glu::SHADERTYPE_TESSELLATION_CONTROL:
1439 	case glu::SHADERTYPE_TESSELLATION_EVALUATION:
1440 	case glu::SHADERTYPE_GEOMETRY:
1441 	case glu::SHADERTYPE_FRAGMENT:
1442 		// No extra storage buffers
1443 		break;
1444 	case glu::SHADERTYPE_COMPUTE:
1445 		// From ComputerShaderExecutor class
1446 		maxDescriptorStorageBuffers += 2u;
1447 		break;
1448 	default:
1449 		TCU_THROW(InternalError, "Unsupported shader type");
1450 	}
1451 
1452 	if (maxDescriptorStorageBuffers > context.getDeviceProperties2().properties.limits.maxPerStageDescriptorStorageBuffers)
1453 		TCU_THROW(NotSupportedError, "Driver supports less maxPerStageDescriptorStorageBuffers than the ones required");
1454 }
1455 
// Creates the per-context test instance; the instance aliases m_readIndices
// and m_inValues, which stay alive for the lifetime of this case.
TestInstance* BlockArrayIndexingCase::createInstance (Context& ctx) const
{
	return new BlockArrayIndexingCaseInstance(ctx,
											  m_shaderType,
											  m_shaderSpec,
											  m_name,
											  m_blockType,
											  m_flags,
											  m_indexExprType,
											  m_readIndices,
											  m_inValues);
}
1468 
// Builds the shader spec: declares the block array (uniform or readonly
// buffer), randomizes the per-read indices and the per-instance values, and
// emits one ".value" read per lookup. Note the exact generated GLSL and the
// RNG call order are part of the test's determinism.
void BlockArrayIndexingCase::createShaderSpec (void)
{
	const int			numInstances	= BlockArrayIndexingCaseInstance::NUM_INSTANCES;
	const int			numReads		= BlockArrayIndexingCaseInstance::NUM_READS;
	de::Random			rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
	const char*			blockName		= "Block";
	const char*			instanceName	= "block";
	const char*			indicesPrefix	= "index";
	const char*			resultPrefix	= "result";
	// Interface keyword depends on the tested block type (UBO vs SSBO).
	const char*			interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "readonly buffer";
	std::ostringstream	global, code;

	// Choose which block instance each read targets.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		m_readIndices[readNdx] = rnd.getInt(0, numInstances-1);

	// Random payload for each block instance; verified by iterate().
	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		m_inValues[instanceNdx] = rnd.getUint32();

	// Anything but a literal index needs GL_EXT_gpu_shader5 indexing rules.
	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "const highp int indexBase = 1;\n";

	global <<
		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
		"{\n"
		"	highp uint value;\n"
		"} " << instanceName << "[" << numInstances << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		// One int shader input per read carries the dynamic index.
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const std::string varName = indicesPrefix + de::toString(readNdx);
			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
		}
	}
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);

	// One uint output per read.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		const std::string varName = resultPrefix + de::toString(readNdx);
		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
	}

	// Emit: resultN = block[<index expr>].value;
	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		code << resultPrefix << readNdx << " = " << instanceName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << m_readIndices[readNdx];
		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
			code << "indexBase + " << (m_readIndices[readNdx]-1);
		else
			code << indicesPrefix << readNdx;

		code << "].value;\n";
	}

	m_shaderSpec.globalDeclarations	= global.str();
	m_shaderSpec.source				= code.str();

	// SPIR-V StorageBuffer storage class variant (VK_KHR_storage_buffer_storage_class).
	if ((m_flags & BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER) != 0)
		m_shaderSpec.buildOptions.flags |= vk::ShaderBuildOptions::FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS;
}
1536 
// Per-context instance that executes an atomic-counter-array indexing test.
// Holds a reference to the per-op counter indices owned by the parent case,
// so the case must outlive this instance.
class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// shader invocations per run
		NUM_COUNTERS		= 4,	// number of counters in the array
		NUM_OPS				= 4		// atomic operations per invocation
	};

								AtomicCounterIndexingCaseInstance	(Context&					context,
																	 const glu::ShaderType		shaderType,
																	 const ShaderSpec&			shaderSpec,
																	 const char*				name,
																	 const std::vector<int>&	opIndices,
																	 const IndexExprType		indexExprType);
	virtual						~AtomicCounterIndexingCaseInstance	(void);

	virtual	tcu::TestStatus		iterate								(void);

private:
	const std::vector<int>&		m_opIndices;	// which counter each op targets (owned by the case)
};
1560 
// Constructor: stores configuration and aliases the case-owned op-index
// vector; no Vulkan work happens until iterate().
AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context&					context,
																	  const glu::ShaderType		shaderType,
																	  const ShaderSpec&			shaderSpec,
																	  const char*				name,
																	  const std::vector<int>&	opIndices,
																	  const IndexExprType		indexExprType)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_opIndices						(opIndices)
{
}
1571 
// Destructor: nothing to release; all Vulkan objects in iterate() are RAII-scoped.
AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance (void)
{
}
1575 
iterate(void)1576 tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate (void)
1577 {
1578 	const int					numInvocations		= NUM_INVOCATIONS;
1579 	const int					numCounters			= NUM_COUNTERS;
1580 	const int					numOps				= NUM_OPS;
1581 	std::vector<int>			expandedIndices;
1582 	std::vector<void*>			inputs;
1583 	std::vector<void*>			outputs;
1584 	std::vector<deUint32>		outValues			(numInvocations*numOps);
1585 
1586 	const DeviceInterface&			vkd				= m_context.getDeviceInterface();
1587 	const VkDevice					device			= m_context.getDevice();
1588 	const VkPhysicalDeviceFeatures& deviceFeatures	= m_context.getDeviceFeatures();
1589 
1590 	//Check stores and atomic operation support.
1591 	switch (m_shaderType)
1592 	{
1593 		case glu::SHADERTYPE_VERTEX:
1594 		case glu::SHADERTYPE_TESSELLATION_CONTROL:
1595 		case glu::SHADERTYPE_TESSELLATION_EVALUATION:
1596 		case glu::SHADERTYPE_GEOMETRY:
1597 			if(!deviceFeatures.vertexPipelineStoresAndAtomics)
1598 				TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in Vertex, Tessellation, and Geometry shader.");
1599 			break;
1600 		case glu::SHADERTYPE_FRAGMENT:
1601 			if(!deviceFeatures.fragmentStoresAndAtomics)
1602 				TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in fragment shader.");
1603 			break;
1604 		case glu::SHADERTYPE_COMPUTE:
1605 			break;
1606 		default:
1607 			throw tcu::InternalError("Unsupported shader type");
1608 	}
1609 
1610 	// \note Using separate buffer per element - might want to test
1611 	// offsets & single buffer in the future.
1612 	Buffer						atomicOpBuffer		(m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
1613 	MovePtr<Buffer>				indexBuffer;
1614 
1615 	Move<VkDescriptorSetLayout>	extraResourcesLayout;
1616 	Move<VkDescriptorPool>		extraResourcesSetPool;
1617 	Move<VkDescriptorSet>		extraResourcesSet;
1618 
1619 	checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1620 
1621 	deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
1622 	atomicOpBuffer.flush();
1623 
1624 	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1625 		indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);
1626 
1627 	{
1628 		const VkDescriptorSetLayoutBinding		bindings[]	=
1629 		{
1630 			{ 0u,	VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		},
1631 			{ 1u,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		}
1632 		};
1633 		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
1634 		{
1635 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1636 			DE_NULL,
1637 			(VkDescriptorSetLayoutCreateFlags)0u,
1638 			DE_LENGTH_OF_ARRAY(bindings),
1639 			bindings,
1640 		};
1641 
1642 		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1643 	}
1644 
1645 	{
1646 		const VkDescriptorPoolSize			poolSizes[]	=
1647 		{
1648 			{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	},
1649 			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	}
1650 		};
1651 		const VkDescriptorPoolCreateInfo	poolInfo	=
1652 		{
1653 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1654 			DE_NULL,
1655 			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1656 			1u,		// maxSets
1657 			DE_LENGTH_OF_ARRAY(poolSizes),
1658 			poolSizes,
1659 		};
1660 
1661 		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1662 	}
1663 
1664 	{
1665 		const VkDescriptorSetAllocateInfo	allocInfo	=
1666 		{
1667 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1668 			DE_NULL,
1669 			*extraResourcesSetPool,
1670 			1u,
1671 			&extraResourcesLayout.get(),
1672 		};
1673 
1674 		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
1675 	}
1676 
1677 	{
1678 		const VkDescriptorBufferInfo	bufferInfo			=
1679 		{
1680 			atomicOpBuffer.getBuffer(),
1681 			0u,
1682 			VK_WHOLE_SIZE
1683 		};
1684 		const VkWriteDescriptorSet		descriptorWrite		=
1685 		{
1686 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1687 			DE_NULL,
1688 			*extraResourcesSet,
1689 			0u,		// dstBinding
1690 			0u,		// dstArrayElement
1691 			1u,
1692 			VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1693 			(const VkDescriptorImageInfo*)DE_NULL,
1694 			&bufferInfo,
1695 			(const VkBufferView*)DE_NULL,
1696 		};
1697 
1698 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1699 	}
1700 
1701 	if (indexBuffer)
1702 	{
1703 		const VkDescriptorBufferInfo	bufferInfo	=
1704 		{
1705 			indexBuffer->getBuffer(),
1706 			0u,
1707 			VK_WHOLE_SIZE
1708 		};
1709 		const VkWriteDescriptorSet		descriptorWrite		=
1710 		{
1711 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1712 			DE_NULL,
1713 			*extraResourcesSet,
1714 			1u,		// dstBinding
1715 			0u,		// dstArrayElement
1716 			1u,
1717 			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1718 			(const VkDescriptorImageInfo*)DE_NULL,
1719 			&bufferInfo,
1720 			(const VkBufferView*)DE_NULL,
1721 		};
1722 
1723 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1724 	}
1725 
1726 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1727 	{
1728 		expandedIndices.resize(numInvocations * m_opIndices.size());
1729 
1730 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1731 		{
1732 			int* dst = &expandedIndices[numInvocations*opNdx];
1733 			std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
1734 		}
1735 
1736 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1737 			inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1738 	}
1739 
1740 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1741 		outputs.push_back(&outValues[opNdx*numInvocations]);
1742 
1743 	{
1744 		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1745 
1746 		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
1747 	}
1748 
1749 	{
1750 		tcu::TestLog&						  log						= m_context.getTestContext().getLog();
1751 		tcu::TestStatus						  testResult					= tcu::TestStatus::pass("Pass");
1752 		std::vector<int>					  numHits					(numCounters, 0);	// Number of hits per counter.
1753 		std::vector<deUint32>				  counterValues				(numCounters);
1754 		std::vector<std::map<deUint32, int> > resultValueHitCountMaps	(numCounters);
1755 
1756 
1757 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1758 			numHits[m_opIndices[opNdx]] += 1;
1759 
1760 		// Read counter values
1761 		{
1762 			const void* mapPtr = atomicOpBuffer.getHostPtr();
1763 			DE_ASSERT(mapPtr != DE_NULL);
1764 			atomicOpBuffer.invalidate();
1765 			std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
1766 		}
1767 
1768 		// Verify counter values
1769 		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1770 		{
1771 			const deUint32		refCount	= (deUint32)(numHits[counterNdx]*numInvocations);
1772 			const deUint32		resCount	= counterValues[counterNdx];
1773 
1774 			bool foundInvalidCtrValue = false;
1775 
1776 			if(resCount < refCount)
1777 			{
1778 				log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1779 					<< ", expected value greater than or equal to " << refCount
1780 					<< tcu::TestLog::EndMessage;
1781 
1782 				foundInvalidCtrValue = true;
1783 			}
1784 			else if (refCount == 0 && resCount != 0)
1785 			{
1786 				log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1787 					<< ", expected " << refCount
1788 					<< tcu::TestLog::EndMessage;
1789 
1790 				foundInvalidCtrValue = true;
1791 			}
1792 
1793 			if (foundInvalidCtrValue == true)
1794 			{
1795 				if (testResult.getCode() == QP_TEST_RESULT_PASS)
1796 					testResult = tcu::TestStatus::fail("Invalid atomic counter value");
1797 			}
1798 		}
1799 
1800 		// Verify result values from shaders
1801 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1802 		{
1803 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1804 			{
1805 				const int		counterNdx	= m_opIndices[opNdx];
1806 				const deUint32	resValue	= outValues[opNdx*numInvocations + invocationNdx];
1807 				const bool		rangeOk		= de::inBounds(resValue, 0u, counterValues[counterNdx]);
1808 
1809 				if (resultValueHitCountMaps[counterNdx].count(resValue) == 0)
1810 					resultValueHitCountMaps[counterNdx][resValue] = 1;
1811 				else
1812 					resultValueHitCountMaps[counterNdx][resValue] += 1;
1813 
1814 				if (!rangeOk)
1815 				{
1816 					log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1817 						<< ", op " << opNdx << ": got invalid result value "
1818 						<< resValue
1819 						<< tcu::TestLog::EndMessage;
1820 
1821 					if (testResult.getCode() == QP_TEST_RESULT_PASS)
1822 						testResult = tcu::TestStatus::fail("Invalid result value");
1823 				}
1824 			}
1825 		}
1826 
1827 		for (int ctrIdx = 0; ctrIdx < numCounters; ctrIdx++)
1828 		{
1829 			std::map<deUint32, int>::iterator hitCountItr;
1830 			for (hitCountItr = resultValueHitCountMaps[ctrIdx].begin(); hitCountItr != resultValueHitCountMaps[ctrIdx].end(); hitCountItr++)
1831 			{
1832 				if(hitCountItr->second > 1)
1833 				{
1834 					log << tcu::TestLog::Message << "ERROR: Duplicate result value from counter " << ctrIdx << "."
1835 						<<" Value " << hitCountItr->first << " found " << hitCountItr->second << " times."
1836 						<< tcu::TestLog::EndMessage;
1837 
1838 					if (testResult.getCode() == QP_TEST_RESULT_PASS)
1839 						testResult = tcu::TestStatus::fail("Invalid result value");
1840 				}
1841 			}
1842 		}
1843 
1844 		return testResult;
1845 	}
1846 }
1847 
1848 class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
1849 {
1850 public:
1851 								AtomicCounterIndexingCase	(tcu::TestContext&			testCtx,
1852 															 const char*				name,
1853 															 const char*				description,
1854 															 IndexExprType				indexExprType,
1855 															 const glu::ShaderType		shaderType);
1856 	virtual						~AtomicCounterIndexingCase	(void);
1857 
1858 	virtual TestInstance*		createInstance				(Context& ctx) const;
1859 
1860 private:
1861 								AtomicCounterIndexingCase	(const BlockArrayIndexingCase&);
1862 	AtomicCounterIndexingCase&	operator=					(const BlockArrayIndexingCase&);
1863 
1864 	void						createShaderSpec			(void);
1865 
1866 	std::vector<int>			m_opIndices;
1867 };
1868 
// Constructor: sizes m_opIndices for one index per op, generates the shader
// spec (which also randomizes the indices), then calls the inherited init()
// (presumably finalizing shader program registration — see OpaqueTypeIndexingCase).
AtomicCounterIndexingCase::AtomicCounterIndexingCase (tcu::TestContext&			testCtx,
													  const char*				name,
													  const char*				description,
													  IndexExprType				indexExprType,
													  const glu::ShaderType		shaderType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_opIndices				(AtomicCounterIndexingCaseInstance::NUM_OPS)
{
	createShaderSpec();
	init();
}
1880 
// Destructor: nothing to release; members clean up via their own destructors.
AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
{
}
1884 
createInstance(Context & ctx) const1885 TestInstance* AtomicCounterIndexingCase::createInstance (Context& ctx) const
1886 {
1887 	return new AtomicCounterIndexingCaseInstance(ctx,
1888 												 m_shaderType,
1889 												 m_shaderSpec,
1890 												 m_name,
1891 												 m_opIndices,
1892 												 m_indexExprType);
1893 }
1894 
createShaderSpec(void)1895 void AtomicCounterIndexingCase::createShaderSpec (void)
1896 {
1897 	const int				numCounters		= AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
1898 	const int				numOps			= AtomicCounterIndexingCaseInstance::NUM_OPS;
1899 	de::Random				rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1900 
1901 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1902 		m_opIndices[opNdx] = rnd.getInt(0, numOps-1);
1903 
1904 	{
1905 		const char*			indicesPrefix	= "index";
1906 		const char*			resultPrefix	= "result";
1907 		std::ostringstream	global, code;
1908 
1909 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1910 			global << "#extension GL_EXT_gpu_shader5 : require\n";
1911 
1912 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1913 			global << "const highp int indexBase = 1;\n";
1914 
1915 		global <<
1916 			"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
1917 
1918 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1919 		{
1920 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1921 			{
1922 				const std::string varName = indicesPrefix + de::toString(opNdx);
1923 				m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1924 			}
1925 		}
1926 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1927 			declareUniformIndexVars(global, 1, indicesPrefix, numOps);
1928 
1929 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1930 		{
1931 			const std::string varName = resultPrefix + de::toString(opNdx);
1932 			m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1933 		}
1934 
1935 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1936 		{
1937 			code << resultPrefix << opNdx << " = atomicAdd(counter[";
1938 
1939 			if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1940 				code << m_opIndices[opNdx];
1941 			else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1942 				code << "indexBase + " << (m_opIndices[opNdx]-1);
1943 			else
1944 				code << indicesPrefix << opNdx;
1945 
1946 			code << "], uint(1));\n";
1947 		}
1948 
1949 		m_shaderSpec.globalDeclarations	= global.str();
1950 		m_shaderSpec.source				= code.str();
1951 	}
1952 }
1953 
// Root test group for all opaque type indexing tests; the child hierarchy
// (.sampler, .ubo, .ssbo, .ssbo_storage_buffer_decoration, .atomic_counter)
// is populated in init().
class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
{
public:
								OpaqueTypeIndexingTests		(tcu::TestContext& testCtx);
	virtual						~OpaqueTypeIndexingTests	(void);

	virtual void				init						(void);

private:
	// Not copyable: declared but never defined.
								OpaqueTypeIndexingTests		(const OpaqueTypeIndexingTests&);
	OpaqueTypeIndexingTests&	operator=					(const OpaqueTypeIndexingTests&);
};
1966 
// Constructor: registers the group under the fixed name "opaque_type_indexing";
// children are added later by init().
OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (tcu::TestContext& testCtx)
	: tcu::TestCaseGroup(testCtx, "opaque_type_indexing", "Opaque Type Indexing Tests")
{
}
1971 
// Destructor: child test cases are owned and freed by the base TestCaseGroup.
OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
{
}
1975 
// Builds the full test hierarchy:
//   .sampler                          - sampler array indexing, per index type / stage / sampler type
//   .ubo / .ssbo /
//   .ssbo_storage_buffer_decoration /
//   .atomic_counter                   - block instance array and atomic counter indexing,
//                                       per index type / stage
void OpaqueTypeIndexingTests::init (void)
{
	// Index expression variants shared by every sub-group.
	static const struct
	{
		IndexExprType	type;
		const char*		name;
		const char*		description;
	} indexingTypes[] =
	{
		{ INDEX_EXPR_TYPE_CONST_LITERAL,	"const_literal",		"Indexing by constant literal"					},
		{ INDEX_EXPR_TYPE_CONST_EXPRESSION,	"const_expression",		"Indexing by constant expression"				},
		{ INDEX_EXPR_TYPE_UNIFORM,			"uniform",				"Indexing by uniform value"						},
		{ INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	"dynamically_uniform",	"Indexing by dynamically uniform expression"	}
	};

	// Shader stages shared by every sub-group.
	static const struct
	{
		glu::ShaderType	type;
		const char*		name;
	} shaderTypes[] =
	{
		{ glu::SHADERTYPE_VERTEX,					"vertex"	},
		{ glu::SHADERTYPE_FRAGMENT,					"fragment"	},
		{ glu::SHADERTYPE_GEOMETRY,					"geometry"	},
		{ glu::SHADERTYPE_TESSELLATION_CONTROL,		"tess_ctrl"	},
		{ glu::SHADERTYPE_TESSELLATION_EVALUATION,	"tess_eval"	},
		{ glu::SHADERTYPE_COMPUTE,					"compute"	}
	};

	// .sampler
	{
		static const glu::DataType samplerTypes[] =
		{
			glu::TYPE_SAMPLER_1D,
			glu::TYPE_SAMPLER_1D_ARRAY,
			glu::TYPE_SAMPLER_1D_ARRAY_SHADOW,
			glu::TYPE_SAMPLER_2D,
			glu::TYPE_SAMPLER_CUBE,
			glu::TYPE_SAMPLER_2D_ARRAY,
			glu::TYPE_SAMPLER_3D,
			glu::TYPE_SAMPLER_1D_SHADOW,
			glu::TYPE_SAMPLER_2D_SHADOW,
			glu::TYPE_SAMPLER_CUBE_SHADOW,
			glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
			glu::TYPE_INT_SAMPLER_1D,
			glu::TYPE_INT_SAMPLER_1D_ARRAY,
			glu::TYPE_INT_SAMPLER_2D,
			glu::TYPE_INT_SAMPLER_CUBE,
			glu::TYPE_INT_SAMPLER_2D_ARRAY,
			glu::TYPE_INT_SAMPLER_3D,
			glu::TYPE_UINT_SAMPLER_1D,
			glu::TYPE_UINT_SAMPLER_1D_ARRAY,
			glu::TYPE_UINT_SAMPLER_2D,
			glu::TYPE_UINT_SAMPLER_CUBE,
			glu::TYPE_UINT_SAMPLER_2D_ARRAY,
			glu::TYPE_UINT_SAMPLER_3D,
		};

		tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
		addChild(samplerGroup);

		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType			indexExprType	= indexingTypes[indexTypeNdx].type;
			tcu::TestCaseGroup* const	indexGroup		= new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
			samplerGroup->addChild(indexGroup);

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const glu::ShaderType		shaderType		= shaderTypes[shaderTypeNdx].type;
				tcu::TestCaseGroup* const	shaderGroup		= new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
				indexGroup->addChild(shaderGroup);

				// \note [pyry] In Vulkan CTS 1.0.2 sampler groups should not cover tess/geom stages
				// The (empty) per-stage group is added above BEFORE this filter,
				// so skipped stages still appear in the hierarchy with no cases.
				if ((shaderType != glu::SHADERTYPE_VERTEX)		&&
					(shaderType != glu::SHADERTYPE_FRAGMENT)	&&
					(shaderType != glu::SHADERTYPE_COMPUTE))
					continue;

				for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
				{
					const glu::DataType	samplerType	= samplerTypes[samplerTypeNdx];
					const char*			samplerName	= getDataTypeName(samplerType);
					const std::string	caseName	= de::toLower(samplerName);

					shaderGroup->addChild(new SamplerIndexingCase(m_testCtx, caseName.c_str(), "", shaderType, samplerType, indexExprType));
				}
			}
		}
	}

	// .ubo / .ssbo / .atomic_counter
	{
		tcu::TestCaseGroup* const	uboGroup			= new tcu::TestCaseGroup(m_testCtx, "ubo",								"Uniform Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	ssboGroup			= new tcu::TestCaseGroup(m_testCtx, "ssbo",								"Buffer Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	ssboStorageBufGroup	= new tcu::TestCaseGroup(m_testCtx, "ssbo_storage_buffer_decoration",	"Buffer Block (new StorageBuffer decoration) Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	acGroup				= new tcu::TestCaseGroup(m_testCtx, "atomic_counter",					"Atomic Counter Array Indexing Tests");
		addChild(uboGroup);
		addChild(ssboGroup);
		addChild(ssboStorageBufGroup);
		addChild(acGroup);

		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType		indexExprType		= indexingTypes[indexTypeNdx].type;
			const char*				indexExprName		= indexingTypes[indexTypeNdx].name;
			const char*				indexExprDesc		= indexingTypes[indexTypeNdx].description;

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const glu::ShaderType	shaderType		= shaderTypes[shaderTypeNdx].type;
				// Case names here are flat: "<index_type>_<stage>".
				const std::string		name			= std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;

				uboGroup->addChild	(new BlockArrayIndexingCase		(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_UNIFORM,	indexExprType, shaderType));
				acGroup->addChild	(new AtomicCounterIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, indexExprType, shaderType));
				ssboGroup->addChild	(new BlockArrayIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType));
				ssboStorageBufGroup->addChild	(new BlockArrayIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType, (deUint32)BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER));
			}
		}
	}
}
2097 
2098 } // anonymous
2099 
createOpaqueTypeIndexingTests(tcu::TestContext & testCtx)2100 tcu::TestCaseGroup* createOpaqueTypeIndexingTests (tcu::TestContext& testCtx)
2101 {
2102 	return new OpaqueTypeIndexingTests(testCtx);
2103 }
2104 
2105 } // shaderexecutor
2106 } // vkt
2107