• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 The Khronos Group Inc.
6  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7  * Copyright (c) 2016 The Android Open Source Project
8  *
9  * Licensed under the Apache License, Version 2.0 (the "License");
10  * you may not use this file except in compliance with the License.
11  * You may obtain a copy of the License at
12  *
13  *      http://www.apache.org/licenses/LICENSE-2.0
14  *
15  * Unless required by applicable law or agreed to in writing, software
16  * distributed under the License is distributed on an "AS IS" BASIS,
17  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  * See the License for the specific language governing permissions and
19  * limitations under the License.
20  *
21  *//*!
22  * \file
23  * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
24  *//*--------------------------------------------------------------------*/
25 
26 #include "vktOpaqueTypeIndexingTests.hpp"
27 
28 #include "vkRefUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkTypeUtil.hpp"
32 #include "vkQueryUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 
35 #include "tcuTexture.hpp"
36 #include "tcuTestLog.hpp"
37 #include "tcuVectorUtil.hpp"
38 #include "tcuTextureUtil.hpp"
39 
40 #include "deStringUtil.hpp"
41 #include "deSharedPtr.hpp"
42 #include "deRandom.hpp"
43 #include "deSTLUtil.hpp"
44 
45 #include "vktShaderExecutor.hpp"
46 
47 #include <sstream>
48 
49 namespace vkt
50 {
51 namespace shaderexecutor
52 {
53 
54 namespace
55 {
56 
57 using de::UniquePtr;
58 using de::MovePtr;
59 using de::SharedPtr;
60 using std::vector;
61 
62 using namespace vk;
63 
64 typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
65 
66 // Buffer helper
67 
//! Host-visible buffer wrapper: owns a VkBuffer together with its bound
//! backing allocation (both released automatically via Unique/UniquePtr).
class Buffer
{
public:
								Buffer				(Context& context, VkBufferUsageFlags usage, size_t size);

	// Non-owning handle to the underlying buffer.
	VkBuffer					getBuffer			(void) const { return *m_buffer;					}
	// CPU-side pointer into the mapped, host-visible backing memory.
	void*						getHostPtr			(void) const { return m_allocation->getHostPtr();	}
	void						flush				(void);		//!< Flush mapped range: host writes -> device.
	void						invalidate			(void);		//!< Invalidate mapped range: device writes -> host.

private:
	const DeviceInterface&		m_vkd;
	const VkDevice				m_device;
	const Unique<VkBuffer>		m_buffer;
	const UniquePtr<Allocation>	m_allocation;
};
84 
85 typedef de::SharedPtr<Buffer> BufferSp;
86 
createBuffer(const DeviceInterface & vkd,VkDevice device,VkDeviceSize size,VkBufferUsageFlags usageFlags)87 Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
88 {
89 	const VkBufferCreateInfo	createInfo		=
90 	{
91 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
92 		DE_NULL,
93 		(VkBufferCreateFlags)0,
94 		size,
95 		usageFlags,
96 		VK_SHARING_MODE_EXCLUSIVE,
97 		0u,
98 		DE_NULL
99 	};
100 	return createBuffer(vkd, device, &createInfo);
101 }
102 
allocateAndBindMemory(const DeviceInterface & vkd,VkDevice device,Allocator & allocator,VkBuffer buffer)103 MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
104 {
105 	MovePtr<Allocation>		alloc	(allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
106 
107 	VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
108 
109 	return alloc;
110 }
111 
// Create the buffer and immediately back it with bound, host-visible memory.
Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
	: m_vkd			(context.getDeviceInterface())
	, m_device		(context.getDevice())
	, m_buffer		(createBuffer			(context.getDeviceInterface(),
											 context.getDevice(),
											 (VkDeviceSize)size,
											 usage))
	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(),
											 context.getDevice(),
											 context.getDefaultAllocator(),
											 *m_buffer))
{
}
125 
// Flush the entire mapped range so host writes become visible to the device.
void Buffer::flush (void)
{
	flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}
130 
// Invalidate the entire mapped range so device writes become visible to the host.
void Buffer::invalidate (void)
{
	invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}
135 
createUniformIndexBuffer(Context & context,int numIndices,const int * indices)136 MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
137 {
138 	MovePtr<Buffer>		buffer	(new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
139 	int* const			bufPtr	= (int*)buffer->getHostPtr();
140 
141 	for (int ndx = 0; ndx < numIndices; ++ndx)
142 		bufPtr[ndx] = indices[ndx];
143 
144 	buffer->flush();
145 
146 	return buffer;
147 }
148 
149 // Tests
150 
// How the array index expression is formed in the shader under test.
enum IndexExprType
{
	INDEX_EXPR_TYPE_CONST_LITERAL	= 0,	//!< Index is a literal constant
	INDEX_EXPR_TYPE_CONST_EXPRESSION,		//!< Index is a constant expression
	INDEX_EXPR_TYPE_UNIFORM,				//!< Index is read from a uniform buffer
	INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,		//!< Index is supplied as a per-invocation shader input

	INDEX_EXPR_TYPE_LAST
};
160 
// Dimensionality of the texture backing a sampler under test.
enum TextureType
{
	TEXTURE_TYPE_1D = 0,
	TEXTURE_TYPE_2D,
	TEXTURE_TYPE_CUBE,
	TEXTURE_TYPE_2D_ARRAY,
	TEXTURE_TYPE_3D,

	TEXTURE_TYPE_LAST
};
171 
// Base test case for opaque-type indexing; subclasses populate m_shaderSpec,
// from which the shader programs are generated.
class OpaqueTypeIndexingCase : public TestCase
{
public:
										OpaqueTypeIndexingCase		(tcu::TestContext&			testCtx,
																	 const char*				name,
																	 const char*				description,
																	 const glu::ShaderType		shaderType,
																	 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingCase		(void);

	// Generate shader sources for m_shaderSpec in the selected shader stage.
	virtual void						initPrograms				(vk::SourceCollections& programCollection) const
										{
											generateSources(m_shaderType, m_shaderSpec, programCollection);
										}

protected:
	const char*							m_name;
	const glu::ShaderType				m_shaderType;
	const IndexExprType					m_indexExprType;
	ShaderSpec							m_shaderSpec;
};
193 
// Store the test parameters; m_shaderSpec is filled in by subclasses.
OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												const glu::ShaderType		shaderType,
												const IndexExprType			indexExprType)
	: TestCase			(testCtx, name, description)
	, m_name			(name)
	, m_shaderType		(shaderType)
	, m_indexExprType	(indexExprType)
{
}
205 
// Nothing to release beyond what members and the base class clean up.
OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
{
}
209 
// Base instance holding state shared by all indexing test variants;
// iterate() is implemented by the concrete subclasses.
class OpaqueTypeIndexingTestInstance : public TestInstance
{
public:
										OpaqueTypeIndexingTestInstance		(Context&					context,
																			 const glu::ShaderType		shaderType,
																			 const ShaderSpec&			shaderSpec,
																			 const char*				name,
																			 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingTestInstance		(void);

	virtual tcu::TestStatus				iterate								(void) = 0;

protected:
	// Throws NotSupportedError when m_indexExprType requires a dynamic-indexing
	// device feature for the given descriptor type that the device lacks.
	void								checkSupported						(const VkDescriptorType descriptorType);

protected:
	tcu::TestContext&					m_testCtx;
	const glu::ShaderType				m_shaderType;
	const ShaderSpec&					m_shaderSpec;	// \note reference: must outlive this instance
	const char*							m_name;
	const IndexExprType					m_indexExprType;
};
232 
// Store references/parameters used by the concrete iterate() implementations.
OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context&					context,
																const glu::ShaderType		shaderType,
																const ShaderSpec&			shaderSpec,
																const char*					name,
																const IndexExprType			indexExprType)
	: TestInstance		(context)
	, m_testCtx			(context.getTestContext())
	, m_shaderType		(shaderType)
	, m_shaderSpec		(shaderSpec)
	, m_name			(name)
	, m_indexExprType	(indexExprType)
{
}
246 
// Nothing to release; the instance owns no Vulkan objects directly.
OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance (void)
{
}
250 
checkSupported(const VkDescriptorType descriptorType)251 void OpaqueTypeIndexingTestInstance::checkSupported (const VkDescriptorType descriptorType)
252 {
253 	const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
254 
255 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
256 	{
257 		switch (descriptorType)
258 		{
259 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
260 				if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
261 					TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
262 				break;
263 
264 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
265 				if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
266 					TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
267 				break;
268 
269 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
270 				if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
271 					TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
272 				break;
273 
274 			default:
275 				break;
276 		}
277 	}
278 }
279 
declareUniformIndexVars(std::ostream & str,deUint32 bindingLocation,const char * varPrefix,int numVars)280 static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
281 {
282 	str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
283 
284 	for (int varNdx = 0; varNdx < numVars; varNdx++)
285 		str << "\thighp int " << varPrefix << varNdx << ";\n";
286 
287 	str << "};\n";
288 }
289 
getTextureType(glu::DataType samplerType)290 static TextureType getTextureType (glu::DataType samplerType)
291 {
292 	switch (samplerType)
293 	{
294 		case glu::TYPE_SAMPLER_1D:
295 		case glu::TYPE_INT_SAMPLER_1D:
296 		case glu::TYPE_UINT_SAMPLER_1D:
297 		case glu::TYPE_SAMPLER_1D_SHADOW:
298 			return TEXTURE_TYPE_1D;
299 
300 		case glu::TYPE_SAMPLER_2D:
301 		case glu::TYPE_INT_SAMPLER_2D:
302 		case glu::TYPE_UINT_SAMPLER_2D:
303 		case glu::TYPE_SAMPLER_2D_SHADOW:
304 			return TEXTURE_TYPE_2D;
305 
306 		case glu::TYPE_SAMPLER_CUBE:
307 		case glu::TYPE_INT_SAMPLER_CUBE:
308 		case glu::TYPE_UINT_SAMPLER_CUBE:
309 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
310 			return TEXTURE_TYPE_CUBE;
311 
312 		case glu::TYPE_SAMPLER_2D_ARRAY:
313 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
314 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
315 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
316 			return TEXTURE_TYPE_2D_ARRAY;
317 
318 		case glu::TYPE_SAMPLER_3D:
319 		case glu::TYPE_INT_SAMPLER_3D:
320 		case glu::TYPE_UINT_SAMPLER_3D:
321 			return TEXTURE_TYPE_3D;
322 
323 		default:
324 			throw tcu::InternalError("Invalid sampler type");
325 	}
326 }
327 
isShadowSampler(glu::DataType samplerType)328 static bool isShadowSampler (glu::DataType samplerType)
329 {
330 	return samplerType == glu::TYPE_SAMPLER_1D_SHADOW		||
331 		   samplerType == glu::TYPE_SAMPLER_2D_SHADOW		||
332 		   samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW	||
333 		   samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
334 }
335 
getSamplerOutputType(glu::DataType samplerType)336 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
337 {
338 	switch (samplerType)
339 	{
340 		case glu::TYPE_SAMPLER_1D:
341 		case glu::TYPE_SAMPLER_2D:
342 		case glu::TYPE_SAMPLER_CUBE:
343 		case glu::TYPE_SAMPLER_2D_ARRAY:
344 		case glu::TYPE_SAMPLER_3D:
345 			return glu::TYPE_FLOAT_VEC4;
346 
347 		case glu::TYPE_SAMPLER_1D_SHADOW:
348 		case glu::TYPE_SAMPLER_2D_SHADOW:
349 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
350 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
351 			return glu::TYPE_FLOAT;
352 
353 		case glu::TYPE_INT_SAMPLER_1D:
354 		case glu::TYPE_INT_SAMPLER_2D:
355 		case glu::TYPE_INT_SAMPLER_CUBE:
356 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
357 		case glu::TYPE_INT_SAMPLER_3D:
358 			return glu::TYPE_INT_VEC4;
359 
360 		case glu::TYPE_UINT_SAMPLER_1D:
361 		case glu::TYPE_UINT_SAMPLER_2D:
362 		case glu::TYPE_UINT_SAMPLER_CUBE:
363 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
364 		case glu::TYPE_UINT_SAMPLER_3D:
365 			return glu::TYPE_UINT_VEC4;
366 
367 		default:
368 			throw tcu::InternalError("Invalid sampler type");
369 	}
370 }
371 
getSamplerTextureFormat(glu::DataType samplerType)372 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
373 {
374 	const glu::DataType		outType			= getSamplerOutputType(samplerType);
375 	const glu::DataType		outScalarType	= glu::getDataTypeScalarType(outType);
376 
377 	switch (outScalarType)
378 	{
379 		case glu::TYPE_FLOAT:
380 			if (isShadowSampler(samplerType))
381 				return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
382 			else
383 				return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
384 
385 		case glu::TYPE_INT:		return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
386 		case glu::TYPE_UINT:	return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
387 
388 		default:
389 			throw tcu::InternalError("Invalid sampler type");
390 	}
391 }
392 
getSamplerCoordType(glu::DataType samplerType)393 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
394 {
395 	const TextureType	texType		= getTextureType(samplerType);
396 	int					numCoords	= 0;
397 
398 	switch (texType)
399 	{
400 		case TEXTURE_TYPE_1D:		numCoords = 1;	break;
401 		case TEXTURE_TYPE_2D:		numCoords = 2;	break;
402 		case TEXTURE_TYPE_2D_ARRAY:	numCoords = 3;	break;
403 		case TEXTURE_TYPE_CUBE:		numCoords = 3;	break;
404 		case TEXTURE_TYPE_3D:		numCoords = 3;	break;
405 		default:
406 			DE_ASSERT(false);
407 	}
408 
409 	if (isShadowSampler(samplerType))
410 		numCoords += 1;
411 
412 	DE_ASSERT(de::inRange(numCoords, 1, 4));
413 
414 	return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
415 }
416 
fillTextureData(const tcu::PixelBufferAccess & access,de::Random & rnd)417 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
418 {
419 	DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
420 
421 	if (access.getFormat().order == tcu::TextureFormat::D)
422 	{
423 		// \note Texture uses odd values, lookup even values to avoid precision issues.
424 		const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
425 
426 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
427 			access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
428 	}
429 	else
430 	{
431 		TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
432 
433 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
434 			*((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
435 	}
436 }
437 
getVkImageType(TextureType texType)438 static vk::VkImageType getVkImageType (TextureType texType)
439 {
440 	switch (texType)
441 	{
442 		case TEXTURE_TYPE_1D:			return vk::VK_IMAGE_TYPE_1D;
443 		case TEXTURE_TYPE_2D:
444 		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_TYPE_2D;
445 		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_TYPE_2D;
446 		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_TYPE_3D;
447 		default:
448 			DE_FATAL("Impossible");
449 			return (vk::VkImageType)0;
450 	}
451 }
452 
getVkImageViewType(TextureType texType)453 static vk::VkImageViewType getVkImageViewType (TextureType texType)
454 {
455 	switch (texType)
456 	{
457 		case TEXTURE_TYPE_1D:			return vk::VK_IMAGE_VIEW_TYPE_1D;
458 		case TEXTURE_TYPE_2D:			return vk::VK_IMAGE_VIEW_TYPE_2D;
459 		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
460 		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_VIEW_TYPE_CUBE;
461 		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_VIEW_TYPE_3D;
462 		default:
463 			DE_FATAL("Impossible");
464 			return (vk::VkImageViewType)0;
465 	}
466 }
467 
//! Test image with 1-pixel dimensions and no mipmaps
class TestImage
{
public:
								TestImage		(Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);

	// Non-owning view handle; valid for the lifetime of this object.
	VkImageView					getImageView	(void) const { return *m_imageView; }

private:
	const Unique<VkImage>		m_image;
	const UniquePtr<Allocation>	m_allocation;	// backing memory for m_image
	const Unique<VkImageView>	m_imageView;
};
481 
// Create a 1x1 optimal-tiling image for sampling; cube textures get six
// layers and the cube-compatible creation flag.
Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
{
	const VkImageCreateInfo		createInfo		=
	{
		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
		DE_NULL,
		(texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
		getVkImageType(texType),
		mapTextureFormat(format),
		makeExtent3D(1, 1, 1),						// single-texel image
		1u,											// mipLevels
		(texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,	// arrayLayers: six faces for cube
		VK_SAMPLE_COUNT_1_BIT,
		VK_IMAGE_TILING_OPTIMAL,
		VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,	// sampled in shaders, filled via copy
		VK_SHARING_MODE_EXCLUSIVE,
		0u,											// queueFamilyIndexCount
		DE_NULL,									// pQueueFamilyIndices
		VK_IMAGE_LAYOUT_UNDEFINED
	};

	return createImage(vkd, device, &createInfo);
}
505 
allocateAndBindMemory(const DeviceInterface & vkd,VkDevice device,Allocator & allocator,VkImage image)506 de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
507 {
508 	de::MovePtr<Allocation>		alloc	= allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
509 
510 	VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
511 
512 	return alloc;
513 }
514 
// Create a view covering the whole image; the aspect (depth vs. color) is
// chosen from the format, and cube views cover all six layers.
Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
{
	const bool					isDepthImage	= format.order == tcu::TextureFormat::D;
	const VkImageViewCreateInfo	createInfo		=
	{
		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
		DE_NULL,
		(VkImageViewCreateFlags)0,
		image,
		getVkImageViewType(texType),
		mapTextureFormat(format),
		{
			VK_COMPONENT_SWIZZLE_IDENTITY,
			VK_COMPONENT_SWIZZLE_IDENTITY,
			VK_COMPONENT_SWIZZLE_IDENTITY,
			VK_COMPONENT_SWIZZLE_IDENTITY,
		},
		{
			(VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
			0u,											// baseMipLevel
			1u,											// levelCount
			0u,											// baseArrayLayer
			(texType == TEXTURE_TYPE_CUBE ? 6u : 1u)	// layerCount
		}
	};

	return createImageView(vkd, device, &createInfo);
}
543 
TestImage(Context & context,TextureType texType,tcu::TextureFormat format,const void * colorValue)544 TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
545 	: m_image		(createTestImage		(context.getDeviceInterface(), context.getDevice(), texType, format))
546 	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
547 	, m_imageView	(createTestImageView	(context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
548 {
549 	const DeviceInterface&		vkd					= context.getDeviceInterface();
550 	const VkDevice				device				= context.getDevice();
551 
552 	const size_t				pixelSize			= (size_t)format.getPixelSize();
553 	const deUint32				numLayers			= (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
554 	const size_t				numReplicas			= (size_t)numLayers;
555 	const size_t				stagingBufferSize	= pixelSize*numReplicas;
556 
557 	const VkBufferCreateInfo	stagingBufferInfo	=
558 	{
559 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
560 		DE_NULL,
561 		(VkBufferCreateFlags)0u,
562 		(VkDeviceSize)stagingBufferSize,
563 		(VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
564 		VK_SHARING_MODE_EXCLUSIVE,
565 		0u,
566 		DE_NULL,
567 	};
568 	const Unique<VkBuffer>		stagingBuffer		(createBuffer(vkd, device, &stagingBufferInfo));
569 	const UniquePtr<Allocation>	alloc				(context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));
570 
571 	VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));
572 
573 	for (size_t ndx = 0; ndx < numReplicas; ++ndx)
574 		deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);
575 
576 	flushMappedMemoryRange(vkd, device, alloc->getMemory(), alloc->getOffset(), VK_WHOLE_SIZE);
577 
578 	{
579 		const VkImageAspectFlags		imageAspect		= (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
580 		const VkBufferImageCopy			copyInfo		=
581 		{
582 			0u,
583 			1u,
584 			1u,
585 			{
586 				imageAspect,
587 				0u,
588 				0u,
589 				numLayers
590 			},
591 			{ 0u, 0u, 0u },
592 			{ 1u, 1u, 1u }
593 		};
594 
595 		copyBufferToImage(vkd, device, context.getUniversalQueue(), context.getUniversalQueueFamilyIndex(), *stagingBuffer, stagingBufferSize, vector<VkBufferImageCopy>(1, copyInfo), DE_NULL, imageAspect, 1u, numLayers, *m_image);
596 	}
597 }
598 
599 typedef SharedPtr<TestImage> TestImageSp;
600 
601 // SamplerIndexingCaseInstance
602 
// Instance that executes shader lookups through an indexed array of combined
// image samplers and compares results against a CPU reference.
class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 64,	//!< Shader invocations per execution
		NUM_SAMPLERS		= 8,	//!< Size of the sampler array
		NUM_LOOKUPS			= 4		//!< Lookups (and output variables) per invocation
	};

								SamplerIndexingCaseInstance		(Context&					context,
																 const glu::ShaderType		shaderType,
																 const ShaderSpec&			shaderSpec,
																 const char*				name,
																 glu::DataType				samplerType,
																 const IndexExprType		indexExprType,
																 const std::vector<int>&	lookupIndices);
	virtual						~SamplerIndexingCaseInstance	(void);

	virtual tcu::TestStatus		iterate							(void);

protected:
	const glu::DataType			m_samplerType;
	const std::vector<int>		m_lookupIndices;	//!< Sampler array index used by each lookup
};
628 
// Store the sampler type and the per-lookup index list used by iterate().
SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context&						context,
														  const glu::ShaderType			shaderType,
														  const ShaderSpec&				shaderSpec,
														  const char*					name,
														  glu::DataType					samplerType,
														  const IndexExprType			indexExprType,
														  const std::vector<int>&		lookupIndices)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_samplerType						(samplerType)
	, m_lookupIndices					(lookupIndices)
{
}
641 
// Nothing to release; Vulkan objects are created and destroyed within iterate().
SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance (void)
{
}
645 
isIntegerFormat(const tcu::TextureFormat & format)646 bool isIntegerFormat (const tcu::TextureFormat& format)
647 {
648 	const tcu::TextureChannelClass	chnClass	= tcu::getTextureChannelClass(format.type);
649 
650 	return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ||
651 		   chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
652 }
653 
iterate(void)654 tcu::TestStatus SamplerIndexingCaseInstance::iterate (void)
655 {
656 	const int						numInvocations		= SamplerIndexingCaseInstance::NUM_INVOCATIONS;
657 	const int						numSamplers			= SamplerIndexingCaseInstance::NUM_SAMPLERS;
658 	const int						numLookups			= SamplerIndexingCaseInstance::NUM_LOOKUPS;
659 	const glu::DataType				coordType			= getSamplerCoordType(m_samplerType);
660 	const glu::DataType				outputType			= getSamplerOutputType(m_samplerType);
661 	const tcu::TextureFormat		texFormat			= getSamplerTextureFormat(m_samplerType);
662 	const int						outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
663 	vector<float>					coords;
664 	vector<deUint32>				outData;
665 	vector<deUint8>					texData				(numSamplers * texFormat.getPixelSize());
666 	const tcu::PixelBufferAccess	refTexAccess		(texFormat, numSamplers, 1, 1, &texData[0]);
667 	de::Random						rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
668 	const TextureType				texType				= getTextureType(m_samplerType);
669 	const tcu::Sampler::FilterMode	filterMode			= (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
670 
671 	// The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
672 	const tcu::Sampler				refSampler			= isShadowSampler(m_samplerType)
673 																? tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
674 																				filterMode, filterMode, 0.0f, false /* non-normalized */,
675 																				tcu::Sampler::COMPAREMODE_LESS)
676 																: tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
677 																				filterMode, filterMode);
678 
679 	const DeviceInterface&			vkd					= m_context.getDeviceInterface();
680 	const VkDevice					device				= m_context.getDevice();
681 	vector<TestImageSp>				images;
682 	vector<VkSamplerSp>				samplers;
683 	MovePtr<Buffer>					indexBuffer;
684 	Move<VkDescriptorSetLayout>		extraResourcesLayout;
685 	Move<VkDescriptorPool>			extraResourcesSetPool;
686 	Move<VkDescriptorSet>			extraResourcesSet;
687 
688 	checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
689 
690 	coords.resize(numInvocations * getDataTypeScalarSize(coordType));
691 
692 	if (texType == TEXTURE_TYPE_CUBE)
693 	{
694 		if (isShadowSampler(m_samplerType))
695 		{
696 			for (size_t i = 0; i < coords.size() / 4; i++)
697 			{
698 				coords[4 * i] = 1.0f;
699 				coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
700 			}
701 		}
702 		else
703 		{
704 			for (size_t i = 0; i < coords.size() / 3; i++)
705 			{
706 				coords[3 * i] = 1.0f;
707 				coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
708 			}
709 		}
710 	}
711 
712 	if (isShadowSampler(m_samplerType))
713 	{
714 		// Use different comparison value per invocation.
715 		// \note Texture uses odd values, comparison even values.
716 		const int	numCoordComps	= getDataTypeScalarSize(coordType);
717 		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
718 
719 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
720 			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
721 	}
722 
723 	fillTextureData(refTexAccess, rnd);
724 
725 	outData.resize(numLookups*outLookupStride);
726 
727 	for (int ndx = 0; ndx < numSamplers; ++ndx)
728 	{
729 		images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
730 
731 		{
732 			tcu::Sampler	samplerCopy	(refSampler);
733 			samplerCopy.normalizedCoords = true;
734 
735 			{
736 				const VkSamplerCreateInfo	samplerParams	= mapSampler(samplerCopy, texFormat);
737 				samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
738 			}
739 		}
740 	}
741 
742 	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
743 		indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
744 
745 	{
746 		const VkDescriptorSetLayoutBinding		bindings[]	=
747 		{
748 			{ 0u,						VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers,		VK_SHADER_STAGE_ALL,	DE_NULL		},
749 			{ (deUint32)numSamplers,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,							VK_SHADER_STAGE_ALL,	DE_NULL		}
750 		};
751 		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
752 		{
753 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
754 			DE_NULL,
755 			(VkDescriptorSetLayoutCreateFlags)0u,
756 			DE_LENGTH_OF_ARRAY(bindings),
757 			bindings,
758 		};
759 
760 		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
761 	}
762 
763 	{
764 		const VkDescriptorPoolSize			poolSizes[]	=
765 		{
766 			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers	},
767 			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,						}
768 		};
769 		const VkDescriptorPoolCreateInfo	poolInfo	=
770 		{
771 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
772 			DE_NULL,
773 			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
774 			1u,		// maxSets
775 			DE_LENGTH_OF_ARRAY(poolSizes),
776 			poolSizes,
777 		};
778 
779 		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
780 	}
781 
782 	{
783 		const VkDescriptorSetAllocateInfo	allocInfo	=
784 		{
785 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
786 			DE_NULL,
787 			*extraResourcesSetPool,
788 			1u,
789 			&extraResourcesLayout.get(),
790 		};
791 
792 		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
793 	}
794 
795 	{
796 		vector<VkDescriptorImageInfo>	imageInfos			(numSamplers);
797 		const VkWriteDescriptorSet		descriptorWrite		=
798 		{
799 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
800 			DE_NULL,
801 			*extraResourcesSet,
802 			0u,		// dstBinding
803 			0u,		// dstArrayElement
804 			(deUint32)numSamplers,
805 			VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
806 			&imageInfos[0],
807 			(const VkDescriptorBufferInfo*)DE_NULL,
808 			(const VkBufferView*)DE_NULL,
809 		};
810 
811 		for (int ndx = 0; ndx < numSamplers; ++ndx)
812 		{
813 			imageInfos[ndx].sampler		= **samplers[ndx];
814 			imageInfos[ndx].imageView	= images[ndx]->getImageView();
815 			imageInfos[ndx].imageLayout	= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
816 		}
817 
818 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
819 	}
820 
821 	if (indexBuffer)
822 	{
823 		const VkDescriptorBufferInfo	bufferInfo	=
824 		{
825 			indexBuffer->getBuffer(),
826 			0u,
827 			VK_WHOLE_SIZE
828 		};
829 		const VkWriteDescriptorSet		descriptorWrite		=
830 		{
831 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
832 			DE_NULL,
833 			*extraResourcesSet,
834 			(deUint32)numSamplers,	// dstBinding
835 			0u,						// dstArrayElement
836 			1u,
837 			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
838 			(const VkDescriptorImageInfo*)DE_NULL,
839 			&bufferInfo,
840 			(const VkBufferView*)DE_NULL,
841 		};
842 
843 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
844 	}
845 
846 	{
847 		std::vector<void*>			inputs;
848 		std::vector<void*>			outputs;
849 		std::vector<int>			expandedIndices;
850 		UniquePtr<ShaderExecutor>	executor		(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
851 
852 		inputs.push_back(&coords[0]);
853 
854 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
855 		{
856 			expandedIndices.resize(numInvocations * m_lookupIndices.size());
857 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
858 			{
859 				for (int invNdx = 0; invNdx < numInvocations; invNdx++)
860 					expandedIndices[lookupNdx*numInvocations + invNdx] = m_lookupIndices[lookupNdx];
861 			}
862 
863 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
864 				inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
865 		}
866 
867 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
868 			outputs.push_back(&outData[outLookupStride*lookupNdx]);
869 
870 		executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
871 	}
872 
873 	{
874 		tcu::TestLog&		log				= m_context.getTestContext().getLog();
875 		tcu::TestStatus		testResult		= tcu::TestStatus::pass("Pass");
876 
877 		if (isShadowSampler(m_samplerType))
878 		{
879 			const int			numCoordComps	= getDataTypeScalarSize(coordType);
880 
881 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
882 
883 			// Each invocation may have different results.
884 			for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
885 			{
886 				const float	coord	= coords[invocationNdx*numCoordComps + (numCoordComps-1)];
887 
888 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
889 				{
890 					const int		texNdx		= m_lookupIndices[lookupNdx];
891 					const float		result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
892 					const float		reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
893 
894 					if (de::abs(result-reference) > 0.005f)
895 					{
896 						log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
897 							<< reference << ", got " << result
898 							<< tcu::TestLog::EndMessage;
899 
900 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
901 							testResult = tcu::TestStatus::fail("Got invalid lookup result");
902 					}
903 				}
904 			}
905 		}
906 		else
907 		{
908 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
909 
910 			// Validate results from first invocation
911 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
912 			{
913 				const int		texNdx	= m_lookupIndices[lookupNdx];
914 				const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];
915 				bool			isOk;
916 
917 				if (outputType == glu::TYPE_FLOAT_VEC4)
918 				{
919 					const float			threshold		= 1.0f / 256.0f;
920 					const tcu::Vec4		reference		= refTexAccess.getPixel(texNdx, 0);
921 					const float*		floatPtr		= (const float*)resPtr;
922 					const tcu::Vec4		result			(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
923 
924 					isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
925 
926 					if (!isOk)
927 					{
928 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
929 							<< reference << ", got " << result
930 							<< tcu::TestLog::EndMessage;
931 					}
932 				}
933 				else
934 				{
935 					const tcu::UVec4	reference		= refTexAccess.getPixelUint(texNdx, 0);
936 					const deUint32*		uintPtr			= (const deUint32*)resPtr;
937 					const tcu::UVec4	result			(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
938 
939 					isOk = boolAll(equal(reference, result));
940 
941 					if (!isOk)
942 					{
943 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
944 							<< reference << ", got " << result
945 							<< tcu::TestLog::EndMessage;
946 					}
947 				}
948 
949 				if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
950 					testResult = tcu::TestStatus::fail("Got invalid lookup result");
951 			}
952 
953 			// Check results of other invocations against first one
954 			for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
955 			{
956 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
957 				{
958 					const deUint32*		refPtr		= &outData[lookupNdx*outLookupStride];
959 					const deUint32*		resPtr		= refPtr + invocationNdx*4;
960 					bool				isOk		= true;
961 
962 					for (int ndx = 0; ndx < 4; ndx++)
963 						isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
964 
965 					if (!isOk)
966 					{
967 						log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
968 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
969 							<< " for lookup " << lookupNdx << " doesn't match result from first invocation "
970 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
971 							<< tcu::TestLog::EndMessage;
972 
973 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
974 							testResult = tcu::TestStatus::fail("Inconsistent lookup results");
975 					}
976 				}
977 			}
978 		}
979 
980 		return testResult;
981 	}
982 }
983 
// Test case for indexing into an array of samplers.
//
// Builds the shader sources in createShaderSpec() (which also fills
// m_lookupIndices with randomized sampler indices) and delegates execution
// and result verification to SamplerIndexingCaseInstance.
class SamplerIndexingCase : public OpaqueTypeIndexingCase
{
public:
								SamplerIndexingCase			(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 const glu::ShaderType		shaderType,
															 glu::DataType				samplerType,
															 IndexExprType				indexExprType);
	virtual						~SamplerIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;

private:
	// Not copyable / not assignable (declared, never defined).
								SamplerIndexingCase			(const SamplerIndexingCase&);
	SamplerIndexingCase&		operator=					(const SamplerIndexingCase&);

	void						createShaderSpec			(void);

	const glu::DataType			m_samplerType;		// Sampler type being indexed (e.g. sampler2D).
	const int					m_numSamplers;		// SamplerIndexingCaseInstance::NUM_SAMPLERS.
	const int					m_numLookups;		// SamplerIndexingCaseInstance::NUM_LOOKUPS.
	std::vector<int>			m_lookupIndices;	// Randomized sampler index per lookup; filled in createShaderSpec().
};
1008 
// Constructor: records the case parameters, generates the shader sources
// (also filling m_lookupIndices) and calls init() on the base class.
SamplerIndexingCase::SamplerIndexingCase (tcu::TestContext&			testCtx,
										  const char*				name,
										  const char*				description,
										  const glu::ShaderType		shaderType,
										  glu::DataType				samplerType,
										  IndexExprType				indexExprType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_samplerType				(samplerType)
	, m_numSamplers				(SamplerIndexingCaseInstance::NUM_SAMPLERS)
	, m_numLookups				(SamplerIndexingCaseInstance::NUM_LOOKUPS)
	, m_lookupIndices			(m_numLookups)
{
	createShaderSpec();
	init();
}
1024 
// Destructor: no resources beyond the members; nothing to release explicitly.
SamplerIndexingCase::~SamplerIndexingCase (void)
{
}
1028 
createInstance(Context & ctx) const1029 TestInstance* SamplerIndexingCase::createInstance (Context& ctx) const
1030 {
1031 	return new SamplerIndexingCaseInstance(ctx,
1032 										   m_shaderType,
1033 										   m_shaderSpec,
1034 										   m_name,
1035 										   m_samplerType,
1036 										   m_indexExprType,
1037 										   m_lookupIndices);
1038 }
1039 
createShaderSpec(void)1040 void SamplerIndexingCase::createShaderSpec (void)
1041 {
1042 	de::Random			rnd				(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1043 	const char*			samplersName	= "texSampler";
1044 	const char*			coordsName		= "coords";
1045 	const char*			indicesPrefix	= "index";
1046 	const char*			resultPrefix	= "result";
1047 	const glu::DataType	coordType		= getSamplerCoordType(m_samplerType);
1048 	const glu::DataType	outType			= getSamplerOutputType(m_samplerType);
1049 	std::ostringstream	global, code;
1050 
1051 	for (int ndx = 0; ndx < m_numLookups; ndx++)
1052 		m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers-1);
1053 
1054 	m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));
1055 
1056 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1057 		global << "#extension GL_EXT_gpu_shader5 : require\n";
1058 
1059 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1060 		global << "const highp int indexBase = 1;\n";
1061 
1062 	global <<
1063 		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
1064 
1065 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1066 	{
1067 		for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1068 		{
1069 			const std::string varName = indicesPrefix + de::toString(lookupNdx);
1070 			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1071 		}
1072 	}
1073 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1074 		declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
1075 
1076 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1077 	{
1078 		const std::string varName = resultPrefix + de::toString(lookupNdx);
1079 		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
1080 	}
1081 
1082 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1083 	{
1084 		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
1085 
1086 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1087 			code << m_lookupIndices[lookupNdx];
1088 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1089 			code << "indexBase + " << (m_lookupIndices[lookupNdx]-1);
1090 		else
1091 			code << indicesPrefix << lookupNdx;
1092 
1093 		code << "], " << coordsName << ");\n";
1094 	}
1095 
1096 	m_shaderSpec.globalDeclarations	= global.str();
1097 	m_shaderSpec.source				= code.str();
1098 }
1099 
// Kind of interface block used by the block array indexing tests.
enum BlockType
{
	BLOCKTYPE_UNIFORM = 0,	// "uniform" block, accessed via VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER.
	BLOCKTYPE_BUFFER,		// "buffer" (storage) block, accessed via VK_DESCRIPTOR_TYPE_STORAGE_BUFFER.

	BLOCKTYPE_LAST			// Number of valid block types; not a real value.
};
1107 
// Runtime instance for the block array indexing test: binds one small buffer
// per block instance, executes the shader and validates each read.
class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// Shader invocations executed per run.
		NUM_INSTANCES		= 4,	// Elements in the block array.
		NUM_READS			= 4		// Indexed reads performed per invocation.
	};

	enum Flags
	{
		FLAG_USE_STORAGE_BUFFER	= (1<<0)	// Use VK_KHR_storage_buffer_storage_class
	};

									BlockArrayIndexingCaseInstance	(Context&						context,
																	 const glu::ShaderType			shaderType,
																	 const ShaderSpec&				shaderSpec,
																	 const char*					name,
																	 BlockType						blockType,
																	 const deUint32					flags,
																	 const IndexExprType			indexExprType,
																	 const std::vector<int>&		readIndices,
																	 const std::vector<deUint32>&	inValues);
	virtual							~BlockArrayIndexingCaseInstance	(void);

	virtual tcu::TestStatus			iterate							(void);

private:
	const BlockType					m_blockType;	// Uniform vs. storage block.
	const deUint32					m_flags;		// Combination of Flags bits.
	// \note Reference members: the vectors are owned by the creating test
	// case and must outlive this instance.
	const std::vector<int>&			m_readIndices;
	const std::vector<deUint32>&	m_inValues;
};
1142 
// Constructor: stores the test parameters; readIndices and inValues are kept
// by reference (owned by the test case), no copies are made.
BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context&						context,
																const glu::ShaderType			shaderType,
																const ShaderSpec&				shaderSpec,
																const char*						name,
																BlockType						blockType,
																const deUint32					flags,
																const IndexExprType				indexExprType,
																const std::vector<int>&			readIndices,
																const std::vector<deUint32>&	inValues)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_blockType						(blockType)
	, m_flags							(flags)
	, m_readIndices						(readIndices)
	, m_inValues						(inValues)
{
}
1159 
// Destructor: nothing owned directly; referenced vectors belong to the case.
BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance (void)
{
}
1163 
// Executes the block array indexing test: uploads one single-uint buffer per
// block instance, binds them (plus the optional index uniform buffer) to the
// extra-resources descriptor set, runs the shader for all invocations and
// checks every read result against the expected input value.
tcu::TestStatus BlockArrayIndexingCaseInstance::iterate (void)
{
	const int					numInvocations		= NUM_INVOCATIONS;
	const int					numReads			= NUM_READS;
	std::vector<deUint32>		outValues			(numInvocations*numReads);

	tcu::TestLog&				log					= m_context.getTestContext().getLog();
	tcu::TestStatus				testResult			= tcu::TestStatus::pass("Pass");

	std::vector<int>			expandedIndices;
	std::vector<void*>			inputs;
	std::vector<void*>			outputs;
	// Uniform vs. storage block decides both buffer usage and descriptor type.
	const VkBufferUsageFlags	bufferUsage			= m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
	const VkDescriptorType		descriptorType		= m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

	const DeviceInterface&		vkd					= m_context.getDeviceInterface();
	const VkDevice				device				= m_context.getDevice();

	// \note Using separate buffer per element - might want to test
	// offsets & single buffer in the future.
	vector<BufferSp>			buffers				(m_inValues.size());
	MovePtr<Buffer>				indexBuffer;

	Move<VkDescriptorSetLayout>	extraResourcesLayout;
	Move<VkDescriptorPool>		extraResourcesSetPool;
	Move<VkDescriptorSet>		extraResourcesSet;

	checkSupported(descriptorType);

	if ((m_flags & FLAG_USE_STORAGE_BUFFER) != 0)
	{
		if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_storage_buffer_storage_class"))
			TCU_THROW(NotSupportedError, "VK_KHR_storage_buffer_storage_class is not supported");
	}

	// Upload one input value per block instance into its own host-visible
	// buffer; flush makes the write visible to the device.
	for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
	{
		buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
		*(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
		buffers[bufferNdx]->flush();
	}

	// The uniform-indexed variant reads its indices from an extra uniform buffer.
	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);

	// Descriptor set layout: binding 0 is the block array (one descriptor per
	// instance), binding m_inValues.size() is the index uniform buffer.
	{
		const VkDescriptorSetLayoutBinding		bindings[]	=
		{
			{ 0u,							descriptorType,						(deUint32)m_inValues.size(),	VK_SHADER_STAGE_ALL,	DE_NULL		},
			{ (deUint32)m_inValues.size(),	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,								VK_SHADER_STAGE_ALL,	DE_NULL		}
		};
		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,
			(VkDescriptorSetLayoutCreateFlags)0u,
			DE_LENGTH_OF_ARRAY(bindings),
			bindings,
		};

		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
	}

	// Descriptor pool sized for exactly one set with the above bindings.
	{
		const VkDescriptorPoolSize			poolSizes[]	=
		{
			{ descriptorType,						(deUint32)m_inValues.size()	},
			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,							}
		};
		const VkDescriptorPoolCreateInfo	poolInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
			1u,		// maxSets
			DE_LENGTH_OF_ARRAY(poolSizes),
			poolSizes,
		};

		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
	}

	// Allocate the single extra-resources descriptor set.
	{
		const VkDescriptorSetAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,
			*extraResourcesSetPool,
			1u,
			&extraResourcesLayout.get(),
		};

		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
	}

	// Point the binding 0 array elements at the per-instance value buffers.
	{
		vector<VkDescriptorBufferInfo>	bufferInfos			(m_inValues.size());
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			0u,		// dstBinding
			0u,		// dstArrayElement
			(deUint32)m_inValues.size(),
			descriptorType,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfos[0],
			(const VkBufferView*)DE_NULL,
		};

		for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
		{
			bufferInfos[ndx].buffer		= buffers[ndx]->getBuffer();
			bufferInfos[ndx].offset		= 0u;
			bufferInfos[ndx].range		= VK_WHOLE_SIZE;
		}

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Bind the index uniform buffer (only present for the uniform-indexed variant).
	if (indexBuffer)
	{
		const VkDescriptorBufferInfo	bufferInfo	=
		{
			indexBuffer->getBuffer(),
			0u,
			VK_WHOLE_SIZE
		};
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			(deUint32)m_inValues.size(),	// dstBinding
			0u,								// dstArrayElement
			1u,
			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfo,
			(const VkBufferView*)DE_NULL,
		};

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Dynamically uniform indices are passed as shader inputs, each index
	// replicated once per invocation.
	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		expandedIndices.resize(numInvocations * m_readIndices.size());

		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			int* dst = &expandedIndices[numInvocations*readNdx];
			std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
		}

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			inputs.push_back(&expandedIndices[readNdx*numInvocations]);
	}

	// Output layout: numInvocations values per read, reads laid out consecutively.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		outputs.push_back(&outValues[readNdx*numInvocations]);

	// Run the shader for all invocations.
	{
		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
	}

	// Every invocation must have read exactly the value stored in the block
	// instance selected by the corresponding read index.
	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const deUint32	refValue	= m_inValues[m_readIndices[readNdx]];
			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];

			if (refValue != resValue)
			{
				log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
					<< ", read " << readNdx << ": expected "
					<< tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
					<< tcu::TestLog::EndMessage;

				// Report every mismatch but only record the first failure.
				if (testResult.getCode() == QP_TEST_RESULT_PASS)
					testResult = tcu::TestStatus::fail("Invalid result value");
			}
		}
	}

	return testResult;
}
1355 
// Test case for indexing into an array of interface blocks (uniform or
// storage). Generates shader sources plus randomized read indices and input
// values in createShaderSpec(); execution happens in
// BlockArrayIndexingCaseInstance.
class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
{
public:
								BlockArrayIndexingCase		(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 BlockType					blockType,
															 IndexExprType				indexExprType,
															 const glu::ShaderType		shaderType,
															 deUint32					flags = 0u);
	virtual						~BlockArrayIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;

private:
	// Not copyable / not assignable (declared, never defined).
								BlockArrayIndexingCase		(const BlockArrayIndexingCase&);
	BlockArrayIndexingCase&		operator=					(const BlockArrayIndexingCase&);

	void						createShaderSpec			(void);

	const BlockType				m_blockType;	// Uniform vs. storage block.
	const deUint32				m_flags;		// BlockArrayIndexingCaseInstance::Flags bits.
	std::vector<int>			m_readIndices;	// Randomized block index per read; filled in createShaderSpec().
	std::vector<deUint32>		m_inValues;		// Randomized value per block instance; filled in createShaderSpec().
};
1381 
// Constructor: sizes the index/value vectors from the instance constants,
// generates the shader sources (filling both vectors) and calls init().
BlockArrayIndexingCase::BlockArrayIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												BlockType					blockType,
												IndexExprType				indexExprType,
												const glu::ShaderType		shaderType,
												deUint32					flags)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_blockType				(blockType)
	, m_flags					(flags)
	, m_readIndices				(BlockArrayIndexingCaseInstance::NUM_READS)
	, m_inValues				(BlockArrayIndexingCaseInstance::NUM_INSTANCES)
{
	createShaderSpec();
	init();
}
1398 
// Destructor: no resources beyond the members; nothing to release explicitly.
BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
{
}
1402 
createInstance(Context & ctx) const1403 TestInstance* BlockArrayIndexingCase::createInstance (Context& ctx) const
1404 {
1405 	return new BlockArrayIndexingCaseInstance(ctx,
1406 											  m_shaderType,
1407 											  m_shaderSpec,
1408 											  m_name,
1409 											  m_blockType,
1410 											  m_flags,
1411 											  m_indexExprType,
1412 											  m_readIndices,
1413 											  m_inValues);
1414 }
1415 
// Builds the GLSL shader sources and input/output symbols for the block
// array indexing test.
//
// Fills m_readIndices and m_inValues with deterministic pseudo-random data
// (seeded from the case parameters), declares an array of single-uint
// interface blocks at set EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX / binding 0,
// and emits one indexed read per result output. How the index is expressed
// depends on m_indexExprType. Results are stored in m_shaderSpec.
void BlockArrayIndexingCase::createShaderSpec (void)
{
	const int			numInstances	= BlockArrayIndexingCaseInstance::NUM_INSTANCES;
	const int			numReads		= BlockArrayIndexingCaseInstance::NUM_READS;
	de::Random			rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
	const char*			blockName		= "Block";
	const char*			instanceName	= "block";
	const char*			indicesPrefix	= "index";
	const char*			resultPrefix	= "result";
	const char*			interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
	std::ostringstream	global, code;

	// Randomized (but reproducible) read indices and per-instance values.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		m_readIndices[readNdx] = rnd.getInt(0, numInstances-1);

	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		m_inValues[instanceNdx] = rnd.getUint32();

	// Non-literal indexing of block arrays requires GL_EXT_gpu_shader5.
	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "const highp int indexBase = 1;\n";

	// Declare the block array: each instance holds a single uint member.
	global <<
		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
		"{\n"
		"	highp uint value;\n"
		"} " << instanceName << "[" << numInstances << "];\n";

	// Dynamically uniform indices come in as shader inputs; uniform indices
	// are read from a uniform block declared by declareUniformIndexVars().
	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const std::string varName = indicesPrefix + de::toString(readNdx);
			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
		}
	}
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);

	// One uint output per read.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		const std::string varName = resultPrefix + de::toString(readNdx);
		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
	}

	// Emit one indexed read per output.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		code << resultPrefix << readNdx << " = " << instanceName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << m_readIndices[readNdx];
		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
			code << "indexBase + " << (m_readIndices[readNdx]-1);
		else
			code << indicesPrefix << readNdx;

		code << "].value;\n";
	}

	m_shaderSpec.globalDeclarations	= global.str();
	m_shaderSpec.source				= code.str();

	// Optionally compile blocks with the StorageBuffer storage class
	// (VK_KHR_storage_buffer_storage_class).
	if ((m_flags & BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER) != 0)
		m_shaderSpec.buildOptions.flags |= vk::ShaderBuildOptions::FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS;
}
1483 
// Runtime instance for the atomic counter indexing test: runs indexed atomic
// operations on a counter buffer and validates the resulting counts.
class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// Shader invocations executed per run.
		NUM_COUNTERS		= 4,	// Atomic counters in the buffer.
		NUM_OPS				= 4		// Indexed atomic operations per invocation.
	};

								AtomicCounterIndexingCaseInstance	(Context&					context,
																	 const glu::ShaderType		shaderType,
																	 const ShaderSpec&			shaderSpec,
																	 const char*				name,
																	 const std::vector<int>&	opIndices,
																	 const IndexExprType		indexExprType);
	virtual						~AtomicCounterIndexingCaseInstance	(void);

	virtual	tcu::TestStatus		iterate								(void);

private:
	// \note Reference member: the vector is owned by the creating test case
	// and must outlive this instance.
	const std::vector<int>&		m_opIndices;
};
1507 
// Constructor: stores the test parameters; opIndices is kept by reference
// (owned by the test case), no copy is made.
AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context&					context,
																	  const glu::ShaderType		shaderType,
																	  const ShaderSpec&			shaderSpec,
																	  const char*				name,
																	  const std::vector<int>&	opIndices,
																	  const IndexExprType		indexExprType)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_opIndices						(opIndices)
{
}
1518 
// Destructor: nothing owned directly; the referenced vector belongs to the case.
AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance (void)
{
}
1522 
iterate(void)1523 tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate (void)
1524 {
1525 	const int					numInvocations		= NUM_INVOCATIONS;
1526 	const int					numCounters			= NUM_COUNTERS;
1527 	const int					numOps				= NUM_OPS;
1528 	std::vector<int>			expandedIndices;
1529 	std::vector<void*>			inputs;
1530 	std::vector<void*>			outputs;
1531 	std::vector<deUint32>		outValues			(numInvocations*numOps);
1532 
1533 	const DeviceInterface&			vkd				= m_context.getDeviceInterface();
1534 	const VkDevice					device			= m_context.getDevice();
1535 	const VkPhysicalDeviceFeatures& deviceFeatures	= m_context.getDeviceFeatures();
1536 
1537 	//Check stores and atomic operation support.
1538 	switch (m_shaderType)
1539 	{
1540 		case glu::SHADERTYPE_VERTEX:
1541 		case glu::SHADERTYPE_TESSELLATION_CONTROL:
1542 		case glu::SHADERTYPE_TESSELLATION_EVALUATION:
1543 		case glu::SHADERTYPE_GEOMETRY:
1544 			if(!deviceFeatures.vertexPipelineStoresAndAtomics)
1545 				TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in Vertex, Tessellation, and Geometry shader.");
1546 			break;
1547 		case glu::SHADERTYPE_FRAGMENT:
1548 			if(!deviceFeatures.fragmentStoresAndAtomics)
1549 				TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in fragment shader.");
1550 			break;
1551 		case glu::SHADERTYPE_COMPUTE:
1552 			break;
1553 		default:
1554 			throw tcu::InternalError("Unsupported shader type");
1555 	}
1556 
1557 	// \note Using separate buffer per element - might want to test
1558 	// offsets & single buffer in the future.
1559 	Buffer						atomicOpBuffer		(m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
1560 	MovePtr<Buffer>				indexBuffer;
1561 
1562 	Move<VkDescriptorSetLayout>	extraResourcesLayout;
1563 	Move<VkDescriptorPool>		extraResourcesSetPool;
1564 	Move<VkDescriptorSet>		extraResourcesSet;
1565 
1566 	checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1567 
1568 	deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
1569 	atomicOpBuffer.flush();
1570 
1571 	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1572 		indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);
1573 
1574 	{
1575 		const VkDescriptorSetLayoutBinding		bindings[]	=
1576 		{
1577 			{ 0u,	VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		},
1578 			{ 1u,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		}
1579 		};
1580 		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
1581 		{
1582 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1583 			DE_NULL,
1584 			(VkDescriptorSetLayoutCreateFlags)0u,
1585 			DE_LENGTH_OF_ARRAY(bindings),
1586 			bindings,
1587 		};
1588 
1589 		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1590 	}
1591 
1592 	{
1593 		const VkDescriptorPoolSize			poolSizes[]	=
1594 		{
1595 			{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	},
1596 			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	}
1597 		};
1598 		const VkDescriptorPoolCreateInfo	poolInfo	=
1599 		{
1600 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1601 			DE_NULL,
1602 			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1603 			1u,		// maxSets
1604 			DE_LENGTH_OF_ARRAY(poolSizes),
1605 			poolSizes,
1606 		};
1607 
1608 		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1609 	}
1610 
1611 	{
1612 		const VkDescriptorSetAllocateInfo	allocInfo	=
1613 		{
1614 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1615 			DE_NULL,
1616 			*extraResourcesSetPool,
1617 			1u,
1618 			&extraResourcesLayout.get(),
1619 		};
1620 
1621 		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
1622 	}
1623 
1624 	{
1625 		const VkDescriptorBufferInfo	bufferInfo			=
1626 		{
1627 			atomicOpBuffer.getBuffer(),
1628 			0u,
1629 			VK_WHOLE_SIZE
1630 		};
1631 		const VkWriteDescriptorSet		descriptorWrite		=
1632 		{
1633 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1634 			DE_NULL,
1635 			*extraResourcesSet,
1636 			0u,		// dstBinding
1637 			0u,		// dstArrayElement
1638 			1u,
1639 			VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1640 			(const VkDescriptorImageInfo*)DE_NULL,
1641 			&bufferInfo,
1642 			(const VkBufferView*)DE_NULL,
1643 		};
1644 
1645 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1646 	}
1647 
1648 	if (indexBuffer)
1649 	{
1650 		const VkDescriptorBufferInfo	bufferInfo	=
1651 		{
1652 			indexBuffer->getBuffer(),
1653 			0u,
1654 			VK_WHOLE_SIZE
1655 		};
1656 		const VkWriteDescriptorSet		descriptorWrite		=
1657 		{
1658 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1659 			DE_NULL,
1660 			*extraResourcesSet,
1661 			1u,		// dstBinding
1662 			0u,		// dstArrayElement
1663 			1u,
1664 			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1665 			(const VkDescriptorImageInfo*)DE_NULL,
1666 			&bufferInfo,
1667 			(const VkBufferView*)DE_NULL,
1668 		};
1669 
1670 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1671 	}
1672 
1673 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1674 	{
1675 		expandedIndices.resize(numInvocations * m_opIndices.size());
1676 
1677 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1678 		{
1679 			int* dst = &expandedIndices[numInvocations*opNdx];
1680 			std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
1681 		}
1682 
1683 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1684 			inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1685 	}
1686 
1687 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1688 		outputs.push_back(&outValues[opNdx*numInvocations]);
1689 
1690 	{
1691 		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1692 
1693 		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
1694 	}
1695 
1696 	{
1697 		tcu::TestLog&					log				= m_context.getTestContext().getLog();
1698 		tcu::TestStatus					testResult		= tcu::TestStatus::pass("Pass");
1699 		std::vector<int>				numHits			(numCounters, 0);	// Number of hits per counter.
1700 		std::vector<deUint32>			counterValues	(numCounters);
1701 		std::vector<std::vector<bool> >	counterMasks	(numCounters);
1702 
1703 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1704 			numHits[m_opIndices[opNdx]] += 1;
1705 
1706 		// Read counter values
1707 		{
1708 			const void* mapPtr = atomicOpBuffer.getHostPtr();
1709 			DE_ASSERT(mapPtr != DE_NULL);
1710 			atomicOpBuffer.invalidate();
1711 			std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
1712 		}
1713 
1714 		// Verify counter values
1715 		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1716 		{
1717 			const deUint32		refCount	= (deUint32)(numHits[counterNdx]*numInvocations);
1718 			const deUint32		resCount	= counterValues[counterNdx];
1719 
1720 			if (refCount != resCount)
1721 			{
1722 				log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1723 					<< ", expected " << refCount
1724 					<< tcu::TestLog::EndMessage;
1725 
1726 				if (testResult.getCode() == QP_TEST_RESULT_PASS)
1727 					testResult = tcu::TestStatus::fail("Invalid atomic counter value");
1728 			}
1729 		}
1730 
1731 		// Allocate bitmasks - one bit per each valid result value
1732 		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1733 		{
1734 			const int	counterValue	= numHits[counterNdx]*numInvocations;
1735 			counterMasks[counterNdx].resize(counterValue, false);
1736 		}
1737 
1738 		// Verify result values from shaders
1739 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1740 		{
1741 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1742 			{
1743 				const int		counterNdx	= m_opIndices[opNdx];
1744 				const deUint32	resValue	= outValues[opNdx*numInvocations + invocationNdx];
1745 				const bool		rangeOk		= de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1746 				const bool		notSeen		= rangeOk && !counterMasks[counterNdx][resValue];
1747 				const bool		isOk		= rangeOk && notSeen;
1748 
1749 				if (!isOk)
1750 				{
1751 					log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1752 						<< ", op " << opNdx << ": got invalid result value "
1753 						<< resValue
1754 						<< tcu::TestLog::EndMessage;
1755 
1756 					if (testResult.getCode() == QP_TEST_RESULT_PASS)
1757 						testResult = tcu::TestStatus::fail("Invalid result value");
1758 				}
1759 				else
1760 				{
1761 					// Mark as used - no other invocation should see this value from same counter.
1762 					counterMasks[counterNdx][resValue] = true;
1763 				}
1764 			}
1765 		}
1766 
1767 		if (testResult.getCode() == QP_TEST_RESULT_PASS)
1768 		{
1769 			// Consistency check - all masks should be 1 now
1770 			for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1771 			{
1772 				for (std::vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1773 					TCU_CHECK_INTERNAL(*i);
1774 			}
1775 		}
1776 
1777 		return testResult;
1778 	}
1779 }
1780 
1781 class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
1782 {
1783 public:
1784 								AtomicCounterIndexingCase	(tcu::TestContext&			testCtx,
1785 															 const char*				name,
1786 															 const char*				description,
1787 															 IndexExprType				indexExprType,
1788 															 const glu::ShaderType		shaderType);
1789 	virtual						~AtomicCounterIndexingCase	(void);
1790 
1791 	virtual TestInstance*		createInstance				(Context& ctx) const;
1792 
1793 private:
1794 								AtomicCounterIndexingCase	(const BlockArrayIndexingCase&);
1795 	AtomicCounterIndexingCase&	operator=					(const BlockArrayIndexingCase&);
1796 
1797 	void						createShaderSpec			(void);
1798 
1799 	std::vector<int>			m_opIndices;
1800 };
1801 
// Constructor: generates the shader spec immediately so that sources are
// ready when init() runs.
AtomicCounterIndexingCase::AtomicCounterIndexingCase (tcu::TestContext&			testCtx,
													  const char*				name,
													  const char*				description,
													  IndexExprType				indexExprType,
													  const glu::ShaderType		shaderType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_opIndices				(AtomicCounterIndexingCaseInstance::NUM_OPS)	// one counter index per atomic op
{
	createShaderSpec();
	init();	// NOTE(review): base-class initialization; presumably registers the generated sources - confirm in OpaqueTypeIndexingCase
}
1813 
AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
{
	// Nothing owned directly; members release themselves.
}
1817 
createInstance(Context & ctx) const1818 TestInstance* AtomicCounterIndexingCase::createInstance (Context& ctx) const
1819 {
1820 	return new AtomicCounterIndexingCaseInstance(ctx,
1821 												 m_shaderType,
1822 												 m_shaderSpec,
1823 												 m_name,
1824 												 m_opIndices,
1825 												 m_indexExprType);
1826 }
1827 
createShaderSpec(void)1828 void AtomicCounterIndexingCase::createShaderSpec (void)
1829 {
1830 	const int				numCounters		= AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
1831 	const int				numOps			= AtomicCounterIndexingCaseInstance::NUM_OPS;
1832 	de::Random				rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1833 
1834 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1835 		m_opIndices[opNdx] = rnd.getInt(0, numOps-1);
1836 
1837 	{
1838 		const char*			indicesPrefix	= "index";
1839 		const char*			resultPrefix	= "result";
1840 		std::ostringstream	global, code;
1841 
1842 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1843 			global << "#extension GL_EXT_gpu_shader5 : require\n";
1844 
1845 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1846 			global << "const highp int indexBase = 1;\n";
1847 
1848 		global <<
1849 			"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
1850 
1851 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1852 		{
1853 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1854 			{
1855 				const std::string varName = indicesPrefix + de::toString(opNdx);
1856 				m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1857 			}
1858 		}
1859 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1860 			declareUniformIndexVars(global, 1, indicesPrefix, numOps);
1861 
1862 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1863 		{
1864 			const std::string varName = resultPrefix + de::toString(opNdx);
1865 			m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1866 		}
1867 
1868 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1869 		{
1870 			code << resultPrefix << opNdx << " = atomicAdd(counter[";
1871 
1872 			if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1873 				code << m_opIndices[opNdx];
1874 			else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1875 				code << "indexBase + " << (m_opIndices[opNdx]-1);
1876 			else
1877 				code << indicesPrefix << opNdx;
1878 
1879 			code << "], uint(1));\n";
1880 		}
1881 
1882 		m_shaderSpec.globalDeclarations	= global.str();
1883 		m_shaderSpec.source				= code.str();
1884 	}
1885 }
1886 
// Root group for all opaque type indexing tests; init() populates the
// .sampler / .ubo / .ssbo / .ssbo_storage_buffer_decoration /
// .atomic_counter sub-groups.
class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
{
public:
								OpaqueTypeIndexingTests		(tcu::TestContext& testCtx);
	virtual						~OpaqueTypeIndexingTests	(void);

	virtual void				init						(void);

private:
	// Not copyable.
								OpaqueTypeIndexingTests		(const OpaqueTypeIndexingTests&);
	OpaqueTypeIndexingTests&	operator=					(const OpaqueTypeIndexingTests&);
};
1899 
// Child cases are added lazily in init(), not here.
OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (tcu::TestContext& testCtx)
	: tcu::TestCaseGroup(testCtx, "opaque_type_indexing", "Opaque Type Indexing Tests")
{
}
1904 
OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
{
	// Child test nodes are owned and freed by the TestCaseGroup base class.
}
1908 
// Builds the full test hierarchy. The addChild() order below defines the
// CTS case enumeration order, so statements must not be reordered.
void OpaqueTypeIndexingTests::init (void)
{
	// Index expression variants crossed with every group below.
	static const struct
	{
		IndexExprType	type;
		const char*		name;
		const char*		description;
	} indexingTypes[] =
	{
		{ INDEX_EXPR_TYPE_CONST_LITERAL,	"const_literal",		"Indexing by constant literal"					},
		{ INDEX_EXPR_TYPE_CONST_EXPRESSION,	"const_expression",		"Indexing by constant expression"				},
		{ INDEX_EXPR_TYPE_UNIFORM,			"uniform",				"Indexing by uniform value"						},
		{ INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	"dynamically_uniform",	"Indexing by dynamically uniform expression"	}
	};

	// Shader stages crossed with every group below.
	static const struct
	{
		glu::ShaderType	type;
		const char*		name;
	} shaderTypes[] =
	{
		{ glu::SHADERTYPE_VERTEX,					"vertex"	},
		{ glu::SHADERTYPE_FRAGMENT,					"fragment"	},
		{ glu::SHADERTYPE_GEOMETRY,					"geometry"	},
		{ glu::SHADERTYPE_TESSELLATION_CONTROL,		"tess_ctrl"	},
		{ glu::SHADERTYPE_TESSELLATION_EVALUATION,	"tess_eval"	},
		{ glu::SHADERTYPE_COMPUTE,					"compute"	}
	};

	// .sampler
	{
		static const glu::DataType samplerTypes[] =
		{
			// \note 1D images will be added by a later extension.
//			glu::TYPE_SAMPLER_1D,
			glu::TYPE_SAMPLER_2D,
			glu::TYPE_SAMPLER_CUBE,
			glu::TYPE_SAMPLER_2D_ARRAY,
			glu::TYPE_SAMPLER_3D,
//			glu::TYPE_SAMPLER_1D_SHADOW,
			glu::TYPE_SAMPLER_2D_SHADOW,
			glu::TYPE_SAMPLER_CUBE_SHADOW,
			glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
//			glu::TYPE_INT_SAMPLER_1D,
			glu::TYPE_INT_SAMPLER_2D,
			glu::TYPE_INT_SAMPLER_CUBE,
			glu::TYPE_INT_SAMPLER_2D_ARRAY,
			glu::TYPE_INT_SAMPLER_3D,
//			glu::TYPE_UINT_SAMPLER_1D,
			glu::TYPE_UINT_SAMPLER_2D,
			glu::TYPE_UINT_SAMPLER_CUBE,
			glu::TYPE_UINT_SAMPLER_2D_ARRAY,
			glu::TYPE_UINT_SAMPLER_3D,
		};

		tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
		addChild(samplerGroup);

		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType			indexExprType	= indexingTypes[indexTypeNdx].type;
			tcu::TestCaseGroup* const	indexGroup		= new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
			samplerGroup->addChild(indexGroup);

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const glu::ShaderType		shaderType		= shaderTypes[shaderTypeNdx].type;
				tcu::TestCaseGroup* const	shaderGroup		= new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
				indexGroup->addChild(shaderGroup);

				// \note [pyry] In Vulkan CTS 1.0.2 sampler groups should not cover tess/geom stages
				// The group was already added above, so the skipped stages
				// deliberately leave empty groups in the hierarchy.
				if ((shaderType != glu::SHADERTYPE_VERTEX)		&&
					(shaderType != glu::SHADERTYPE_FRAGMENT)	&&
					(shaderType != glu::SHADERTYPE_COMPUTE))
					continue;

				for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
				{
					const glu::DataType	samplerType	= samplerTypes[samplerTypeNdx];
					const char*			samplerName	= getDataTypeName(samplerType);
					const std::string	caseName	= de::toLower(samplerName);

					shaderGroup->addChild(new SamplerIndexingCase(m_testCtx, caseName.c_str(), "", shaderType, samplerType, indexExprType));
				}
			}
		}
	}

	// .ubo / .ssbo / .atomic_counter
	{
		tcu::TestCaseGroup* const	uboGroup			= new tcu::TestCaseGroup(m_testCtx, "ubo",								"Uniform Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	ssboGroup			= new tcu::TestCaseGroup(m_testCtx, "ssbo",								"Buffer Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	ssboStorageBufGroup	= new tcu::TestCaseGroup(m_testCtx, "ssbo_storage_buffer_decoration",	"Buffer Block (new StorageBuffer decoration) Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	acGroup				= new tcu::TestCaseGroup(m_testCtx, "atomic_counter",					"Atomic Counter Array Indexing Tests");
		addChild(uboGroup);
		addChild(ssboGroup);
		addChild(ssboStorageBufGroup);
		addChild(acGroup);

		// These groups are flat: case names combine index type and stage
		// (e.g. "uniform_fragment") instead of nested sub-groups.
		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType		indexExprType		= indexingTypes[indexTypeNdx].type;
			const char*				indexExprName		= indexingTypes[indexTypeNdx].name;
			const char*				indexExprDesc		= indexingTypes[indexTypeNdx].description;

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const glu::ShaderType	shaderType		= shaderTypes[shaderTypeNdx].type;
				const std::string		name			= std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;

				// \note [pyry] In Vulkan CTS 1.0.2 ubo/ssbo/atomic_counter groups should not cover tess/geom stages
				if ((shaderType == glu::SHADERTYPE_VERTEX)		||
					(shaderType == glu::SHADERTYPE_FRAGMENT)	||
					(shaderType == glu::SHADERTYPE_COMPUTE))
				{
					uboGroup->addChild	(new BlockArrayIndexingCase		(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_UNIFORM,	indexExprType, shaderType));
					acGroup->addChild	(new AtomicCounterIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, indexExprType, shaderType));

					// SSBO cases support only constant index expressions here.
					if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
						ssboGroup->addChild	(new BlockArrayIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType));
				}

				// StorageBuffer-decoration variant is added for all stages
				// (no vertex/fragment/compute filter), constant indices only.
				if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
					ssboStorageBufGroup->addChild	(new BlockArrayIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType, (deUint32)BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER));
			}
		}
	}
}
2037 
2038 } // anonymous
2039 
// Factory entry point: caller (test package registry) takes ownership of
// the returned group.
tcu::TestCaseGroup* createOpaqueTypeIndexingTests (tcu::TestContext& testCtx)
{
	return new OpaqueTypeIndexingTests(testCtx);
}
2044 
2045 } // shaderexecutor
2046 } // vkt
2047