1 /*-------------------------------------------------------------------------
2  * drawElements Quality Program OpenGL ES 3.1 Module
3  * -------------------------------------------------
4  *
5  * Copyright 2014 The Android Open Source Project
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
22  *
23  * \todo [2014-03-05 pyry] Extend with following:
24  *  + sampler: different filtering modes, multiple sizes, incomplete textures
25  *  + SSBO: write, atomic op, unsized array .length()
26  *//*--------------------------------------------------------------------*/
27 
28 #include "es31fOpaqueTypeIndexingTests.hpp"
29 #include "tcuTexture.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuFormatUtil.hpp"
32 #include "tcuVectorUtil.hpp"
33 #include "gluShaderUtil.hpp"
34 #include "gluShaderProgram.hpp"
35 #include "gluObjectWrapper.hpp"
36 #include "gluTextureUtil.hpp"
37 #include "gluRenderContext.hpp"
38 #include "gluProgramInterfaceQuery.hpp"
39 #include "gluContextInfo.hpp"
40 #include "glsShaderExecUtil.hpp"
41 #include "glwFunctions.hpp"
42 #include "glwEnums.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
46 
47 #include <sstream>
48 
49 namespace deqp
50 {
51 namespace gles31
52 {
53 namespace Functional
54 {
55 
56 namespace
57 {
58 
59 using namespace gls::ShaderExecUtil;
60 using namespace glu;
61 using std::string;
62 using std::vector;
63 using tcu::TextureFormat;
64 using tcu::TestLog;
65 
66 typedef de::UniquePtr<ShaderExecutor> ShaderExecutorPtr;
67 
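// How the array index is expressed in the generated shader: a constant literal,
// a constant integer expression, a uniform, or a dynamically uniform expression
// (a per-invocation shader input that holds the same value in every invocation).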
68 enum IndexExprType
69 {
70 	INDEX_EXPR_TYPE_CONST_LITERAL	= 0,
71 	INDEX_EXPR_TYPE_CONST_EXPRESSION,
72 	INDEX_EXPR_TYPE_UNIFORM,
73 	INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,
74 
75 	INDEX_EXPR_TYPE_LAST
76 };
77 
78 enum TextureType
79 {
80 	TEXTURE_TYPE_1D = 0,
81 	TEXTURE_TYPE_2D,
82 	TEXTURE_TYPE_CUBE,
83 	TEXTURE_TYPE_2D_ARRAY,
84 	TEXTURE_TYPE_3D,
85 	TEXTURE_TYPE_CUBE_ARRAY,
86 
87 	TEXTURE_TYPE_LAST
88 };
89 
90 static void declareUniformIndexVars (std::ostream& str, const char* varPrefix, int numVars)
91 {
92 	for (int varNdx = 0; varNdx < numVars; varNdx++)
93 		str << "uniform highp int " << varPrefix << varNdx << ";\n";
94 }
95 
96 static void uploadUniformIndices (const glw::Functions& gl, deUint32 program, const char* varPrefix, int numIndices, const int* indices)
97 {
98 	for (int varNdx = 0; varNdx < numIndices; varNdx++)
99 	{
100 		const string	varName		= varPrefix + de::toString(varNdx);
101 		const int		loc			= gl.getUniformLocation(program, varName.c_str());
102 		TCU_CHECK_MSG(loc >= 0, ("No location assigned for uniform '" + varName + "'").c_str());
103 
104 		gl.uniform1i(loc, indices[varNdx]);
105 	}
106 }
107 
108 template<typename T>
109 static T maxElement (const std::vector<T>& elements)
110 {
111 	T maxElem = elements[0];
112 
113 	for (size_t ndx = 1; ndx < elements.size(); ndx++)
114 		maxElem = de::max(maxElem, elements[ndx]);
115 
116 	return maxElem;
117 }
118 
119 static TextureType getTextureType (glu::DataType samplerType)
120 {
121 	switch (samplerType)
122 	{
123 		case glu::TYPE_SAMPLER_1D:
124 		case glu::TYPE_INT_SAMPLER_1D:
125 		case glu::TYPE_UINT_SAMPLER_1D:
126 		case glu::TYPE_SAMPLER_1D_SHADOW:
127 			return TEXTURE_TYPE_1D;
128 
129 		case glu::TYPE_SAMPLER_2D:
130 		case glu::TYPE_INT_SAMPLER_2D:
131 		case glu::TYPE_UINT_SAMPLER_2D:
132 		case glu::TYPE_SAMPLER_2D_SHADOW:
133 			return TEXTURE_TYPE_2D;
134 
135 		case glu::TYPE_SAMPLER_CUBE:
136 		case glu::TYPE_INT_SAMPLER_CUBE:
137 		case glu::TYPE_UINT_SAMPLER_CUBE:
138 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
139 			return TEXTURE_TYPE_CUBE;
140 
141 		case glu::TYPE_SAMPLER_2D_ARRAY:
142 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
143 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
144 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
145 			return TEXTURE_TYPE_2D_ARRAY;
146 
147 		case glu::TYPE_SAMPLER_3D:
148 		case glu::TYPE_INT_SAMPLER_3D:
149 		case glu::TYPE_UINT_SAMPLER_3D:
150 			return TEXTURE_TYPE_3D;
151 
152 		case glu::TYPE_SAMPLER_CUBE_ARRAY:
153 		case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
154 		case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
155 		case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
156 			return TEXTURE_TYPE_CUBE_ARRAY;
157 
158 		default:
159 			TCU_THROW(InternalError, "Invalid sampler type");
160 	}
161 }
162 
163 static bool isShadowSampler (glu::DataType samplerType)
164 {
165 	return samplerType == glu::TYPE_SAMPLER_1D_SHADOW		||
166 		   samplerType == glu::TYPE_SAMPLER_2D_SHADOW		||
167 		   samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW	||
168 		   samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW		||
169 		   samplerType == glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW;
170 }
171 
172 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
173 {
174 	switch (samplerType)
175 	{
176 		case glu::TYPE_SAMPLER_1D:
177 		case glu::TYPE_SAMPLER_2D:
178 		case glu::TYPE_SAMPLER_CUBE:
179 		case glu::TYPE_SAMPLER_2D_ARRAY:
180 		case glu::TYPE_SAMPLER_3D:
181 		case glu::TYPE_SAMPLER_CUBE_ARRAY:
182 			return glu::TYPE_FLOAT_VEC4;
183 
184 		case glu::TYPE_SAMPLER_1D_SHADOW:
185 		case glu::TYPE_SAMPLER_2D_SHADOW:
186 		case glu::TYPE_SAMPLER_CUBE_SHADOW:
187 		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
188 		case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
189 			return glu::TYPE_FLOAT;
190 
191 		case glu::TYPE_INT_SAMPLER_1D:
192 		case glu::TYPE_INT_SAMPLER_2D:
193 		case glu::TYPE_INT_SAMPLER_CUBE:
194 		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
195 		case glu::TYPE_INT_SAMPLER_3D:
196 		case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
197 			return glu::TYPE_INT_VEC4;
198 
199 		case glu::TYPE_UINT_SAMPLER_1D:
200 		case glu::TYPE_UINT_SAMPLER_2D:
201 		case glu::TYPE_UINT_SAMPLER_CUBE:
202 		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
203 		case glu::TYPE_UINT_SAMPLER_3D:
204 		case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
205 			return glu::TYPE_UINT_VEC4;
206 
207 		default:
208 			TCU_THROW(InternalError, "Invalid sampler type");
209 	}
210 }
211 
212 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
213 {
214 	const glu::DataType		outType			= getSamplerOutputType(samplerType);
215 	const glu::DataType		outScalarType	= glu::getDataTypeScalarType(outType);
216 
217 	switch (outScalarType)
218 	{
219 		case glu::TYPE_FLOAT:
220 			if (isShadowSampler(samplerType))
221 				return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
222 			else
223 				return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
224 
225 		case glu::TYPE_INT:		return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
226 		case glu::TYPE_UINT:	return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
227 
228 		default:
229 			TCU_THROW(InternalError, "Invalid sampler type");
230 	}
231 }
232 
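// The coordinate vector size follows the texture dimensionality; shadow samplers
// (except samplerCubeArrayShadow) carry the compare value as one extra component.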
233 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
234 {
235 	const TextureType	texType		= getTextureType(samplerType);
236 	int					numCoords	= 0;
237 
238 	switch (texType)
239 	{
240 		case TEXTURE_TYPE_1D:			numCoords = 1;	break;
241 		case TEXTURE_TYPE_2D:			numCoords = 2;	break;
242 		case TEXTURE_TYPE_2D_ARRAY:		numCoords = 3;	break;
243 		case TEXTURE_TYPE_CUBE:			numCoords = 3;	break;
244 		case TEXTURE_TYPE_3D:			numCoords = 3;	break;
245 		case TEXTURE_TYPE_CUBE_ARRAY:	numCoords = 4;	break;
246 		default:
247 			TCU_THROW(InternalError, "Invalid texture type");
248 	}
249 
250 	if (isShadowSampler(samplerType) && samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW)
251 		numCoords += 1;
252 
253 	DE_ASSERT(de::inRange(numCoords, 1, 4));
254 
255 	return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
256 }
257 
258 static deUint32 getGLTextureTarget (TextureType texType)
259 {
260 	switch (texType)
261 	{
262 		case TEXTURE_TYPE_1D:			return GL_TEXTURE_1D;
263 		case TEXTURE_TYPE_2D:			return GL_TEXTURE_2D;
264 		case TEXTURE_TYPE_2D_ARRAY:		return GL_TEXTURE_2D_ARRAY;
265 		case TEXTURE_TYPE_CUBE:			return GL_TEXTURE_CUBE_MAP;
266 		case TEXTURE_TYPE_3D:			return GL_TEXTURE_3D;
267 		case TEXTURE_TYPE_CUBE_ARRAY:	return GL_TEXTURE_CUBE_MAP_ARRAY;
268 		default:
269 			TCU_THROW(InternalError, "Invalid texture type");
270 	}
271 }
272 
273 static void setupTexture (const glw::Functions&	gl,
274 						  deUint32				texture,
275 						  glu::DataType			samplerType,
276 						  tcu::TextureFormat	texFormat,
277 						  const void*			color)
278 {
279 	const TextureType			texType		= getTextureType(samplerType);
280 	const deUint32				texTarget	= getGLTextureTarget(texType);
281 	const deUint32				intFormat	= glu::getInternalFormat(texFormat);
282 	const glu::TransferFormat	transferFmt	= glu::getTransferFormat(texFormat);
283 
284 	// \todo [2014-03-04 pyry] Use larger than 1x1 textures?
285 
286 	gl.bindTexture(texTarget, texture);
287 
288 	switch (texType)
289 	{
290 		case TEXTURE_TYPE_1D:
291 			gl.texStorage1D(texTarget, 1, intFormat, 1);
292 			gl.texSubImage1D(texTarget, 0, 0, 1, transferFmt.format, transferFmt.dataType, color);
293 			break;
294 
295 		case TEXTURE_TYPE_2D:
296 			gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
297 			gl.texSubImage2D(texTarget, 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
298 			break;
299 
300 		case TEXTURE_TYPE_2D_ARRAY:
301 		case TEXTURE_TYPE_3D:
302 			gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 1);
303 			gl.texSubImage3D(texTarget, 0, 0, 0, 0, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
304 			break;
305 
306 		case TEXTURE_TYPE_CUBE_ARRAY:
307 			gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 6);
308 			for (int zoffset = 0; zoffset < 6; ++zoffset)
309 				for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
310 					gl.texSubImage3D(texTarget, 0, 0, 0, zoffset, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
311 			break;
312 
313 		case TEXTURE_TYPE_CUBE:
314 			gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
315 			for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
316 				gl.texSubImage2D(glu::getGLCubeFace((tcu::CubeFace)face), 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
317 			break;
318 
319 		default:
320 			TCU_THROW(InternalError, "Invalid texture type");
321 	}
322 
323 	gl.texParameteri(texTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
324 	gl.texParameteri(texTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
325 
326 	if (isShadowSampler(samplerType))
327 		gl.texParameteri(texTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
328 
329 	GLU_EXPECT_NO_ERROR(gl.getError(), "Texture setup failed");
330 }
331 
332 class SamplerIndexingCase : public TestCase
333 {
334 public:
335 							SamplerIndexingCase			(Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType);
336 							~SamplerIndexingCase		(void);
337 
338 	void					init						(void);
339 	IterateResult			iterate						(void);
340 
341 private:
342 							SamplerIndexingCase			(const SamplerIndexingCase&);
343 	SamplerIndexingCase&	operator=					(const SamplerIndexingCase&);
344 
345 	void					getShaderSpec				(ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const;
346 
347 	const glu::ShaderType	m_shaderType;
348 	const glu::DataType		m_samplerType;
349 	const IndexExprType		m_indexExprType;
350 };
351 
352 SamplerIndexingCase::SamplerIndexingCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType)
353 	: TestCase			(context, name, description)
354 	, m_shaderType		(shaderType)
355 	, m_samplerType		(samplerType)
356 	, m_indexExprType	(indexExprType)
357 {
358 }
359 
360 SamplerIndexingCase::~SamplerIndexingCase (void)
361 {
362 }
363 
364 void SamplerIndexingCase::init (void)
365 {
366 	if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
367 	{
368 		if (m_shaderType == SHADERTYPE_GEOMETRY)
369 			TCU_CHECK_AND_THROW(NotSupportedError,
370 				m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
371 				"GL_EXT_geometry_shader extension is required to run geometry shader tests.");
372 
373 		if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
374 			TCU_CHECK_AND_THROW(NotSupportedError,
375 				m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
376 				"GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
377 
378 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
379 			TCU_CHECK_AND_THROW(NotSupportedError,
380 				m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
381 				"GL_EXT_gpu_shader5 extension is required for dynamic indexing of sampler arrays.");
382 
383 		if (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
384 			|| m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
385 			|| m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
386 			|| m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY)
387 		{
388 			TCU_CHECK_AND_THROW(NotSupportedError,
389 				m_context.getContextInfo().isExtensionSupported("GL_EXT_texture_cube_map_array"),
390 				"GL_EXT_texture_cube_map_array extension is required for cube map arrays.");
391 		}
392 	}
393 }
394 
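// Builds the shader spec for the sampler lookups. With the counts used in iterate()
// and INDEX_EXPR_TYPE_UNIFORM, the generated code is roughly (illustrative sketch,
// sampler2D used as an example type):
//
//   uniform highp sampler2D sampler[8];
//   uniform highp int index0;
//   ...
//   result0 = texture(sampler[index0], coords);
//   result1 = texture(sampler[index1], coords);
//   ...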
395 void SamplerIndexingCase::getShaderSpec (ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const
396 {
397 	const char*			samplersName	= "sampler";
398 	const char*			coordsName		= "coords";
399 	const char*			indicesPrefix	= "index";
400 	const char*			resultPrefix	= "result";
401 	const DataType		coordType		= getSamplerCoordType(m_samplerType);
402 	const DataType		outType			= getSamplerOutputType(m_samplerType);
403 	const bool			isES32			= contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
404 	std::ostringstream	global;
405 	std::ostringstream	code;
406 
407 	spec->inputs.push_back(Symbol(coordsName, VarType(coordType, PRECISION_HIGHP)));
408 
409 	if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
410 		global << "#extension GL_EXT_gpu_shader5 : require\n";
411 
412 	if (!isES32
413 		&& (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
414 			|| m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
415 			|| m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
416 			|| m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY))
417 	{
418 		global << "#extension GL_EXT_texture_cube_map_array: require\n";
419 	}
420 
421 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
422 		global << "const highp int indexBase = 1;\n";
423 
424 	global <<
425 		"uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << numSamplers << "];\n";
426 
427 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
428 	{
429 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
430 		{
431 			const string varName = indicesPrefix + de::toString(lookupNdx);
432 			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
433 		}
434 	}
435 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
436 		declareUniformIndexVars(global, indicesPrefix, numLookups);
437 
438 	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
439 	{
440 		const string varName = resultPrefix + de::toString(lookupNdx);
441 		spec->outputs.push_back(Symbol(varName, VarType(outType, PRECISION_HIGHP)));
442 	}
443 
444 	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
445 	{
446 		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
447 
448 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
449 			code << lookupIndices[lookupNdx];
450 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
451 			code << "indexBase + " << (lookupIndices[lookupNdx]-1);
452 		else
453 			code << indicesPrefix << lookupNdx;
454 
455 
456 		code << "], " << coordsName << (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ? ", 0.0" : "") << ");\n";
457 	}
458 
459 	spec->version				= isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
460 	spec->globalDeclarations	= global.str();
461 	spec->source				= code.str();
462 }
463 
464 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
465 {
466 	DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
467 
468 	if (access.getFormat().order == TextureFormat::D)
469 	{
470 		// \note Texture uses odd values, lookup even values to avoid precision issues.
471 		const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
472 
473 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
474 			access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
475 	}
476 	else
477 	{
478 		TCU_CHECK_INTERNAL(access.getFormat().order == TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
479 
480 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
481 			*((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
482 	}
483 }
484 
485 SamplerIndexingCase::IterateResult SamplerIndexingCase::iterate (void)
486 {
487 	const int						numInvocations		= 64;
488 	const int						numSamplers			= 8;
489 	const int						numLookups			= 4;
490 	const DataType					coordType			= getSamplerCoordType(m_samplerType);
491 	const DataType					outputType			= getSamplerOutputType(m_samplerType);
492 	const TextureFormat				texFormat			= getSamplerTextureFormat(m_samplerType);
493 	const int						outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
494 	vector<int>						lookupIndices		(numLookups);
495 	vector<float>					coords;
496 	vector<deUint32>				outData;
497 	vector<deUint8>					texData				(numSamplers * texFormat.getPixelSize());
498 	const tcu::PixelBufferAccess	refTexAccess		(texFormat, numSamplers, 1, 1, &texData[0]);
499 
500 	ShaderSpec						shaderSpec;
501 	de::Random						rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
502 
503 	for (int ndx = 0; ndx < numLookups; ndx++)
504 		lookupIndices[ndx] = rnd.getInt(0, numSamplers-1);
505 
506 	getShaderSpec(&shaderSpec, numSamplers, numLookups, &lookupIndices[0], m_context.getRenderContext());
507 
508 	coords.resize(numInvocations * getDataTypeScalarSize(coordType));
509 
510 	if (m_samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW && isShadowSampler(m_samplerType))
511 	{
512 		// Use different comparison value per invocation.
513 		// \note Texture uses odd values, comparison even values.
514 		const int	numCoordComps	= getDataTypeScalarSize(coordType);
515 		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
516 
517 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
518 			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
519 	}
520 
521 	fillTextureData(refTexAccess, rnd);
522 
523 	outData.resize(numLookups*outLookupStride);
524 
525 	{
526 		const RenderContext&	renderCtx		= m_context.getRenderContext();
527 		const glw::Functions&	gl				= renderCtx.getFunctions();
528 		ShaderExecutorPtr		executor		(createExecutor(m_context.getRenderContext(), m_shaderType, shaderSpec));
529 		TextureVector			textures		(renderCtx, numSamplers);
530 		vector<void*>			inputs;
531 		vector<void*>			outputs;
532 		vector<int>				expandedIndices;
533 		const int				maxIndex		= maxElement(lookupIndices);
534 
535 		m_testCtx.getLog() << *executor;
536 
537 		if (!executor->isOk())
538 			TCU_FAIL("Compile failed");
539 
540 		executor->useProgram();
541 
542 		// \todo [2014-03-05 pyry] Do we want to randomize tex unit assignments?
543 		for (int samplerNdx = 0; samplerNdx < numSamplers; samplerNdx++)
544 		{
545 			const string	samplerName	= string("sampler[") + de::toString(samplerNdx) + "]";
546 			const int		samplerLoc	= gl.getUniformLocation(executor->getProgram(), samplerName.c_str());
547 
548 			if (samplerNdx > maxIndex && samplerLoc < 0)
549 				continue; // Unused uniform eliminated by compiler
550 
551 			TCU_CHECK_MSG(samplerLoc >= 0, (string("No location for uniform '") + samplerName + "' found").c_str());
552 
553 			gl.activeTexture(GL_TEXTURE0 + samplerNdx);
554 			setupTexture(gl, textures[samplerNdx], m_samplerType, texFormat, &texData[samplerNdx*texFormat.getPixelSize()]);
555 
556 			gl.uniform1i(samplerLoc, samplerNdx);
557 		}
558 
559 		inputs.push_back(&coords[0]);
560 
561 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
562 		{
563 			expandedIndices.resize(numInvocations * lookupIndices.size());
564 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
565 			{
566 				for (int invNdx = 0; invNdx < numInvocations; invNdx++)
567 					expandedIndices[lookupNdx*numInvocations + invNdx] = lookupIndices[lookupNdx];
568 			}
569 
570 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
571 				inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
572 		}
573 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
574 			uploadUniformIndices(gl, executor->getProgram(), "index", numLookups, &lookupIndices[0]);
575 
576 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
577 			outputs.push_back(&outData[outLookupStride*lookupNdx]);
578 
579 		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
580 
581 		executor->execute(numInvocations, &inputs[0], &outputs[0]);
582 	}
583 
584 	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
585 
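	// Shadow lookups return a single float compare result that may differ per invocation,
	// so each invocation is checked against a reference depth-compare lookup. Non-shadow
	// lookups return a 4-component texel: the first invocation is checked against the
	// reference texture and the remaining invocations must match the first one exactly.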
586 	if (isShadowSampler(m_samplerType))
587 	{
588 		const tcu::Sampler	refSampler		(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
589 											 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST, 0.0f, false /* non-normalized */,
590 											 tcu::Sampler::COMPAREMODE_LESS);
591 		const int			numCoordComps	= getDataTypeScalarSize(coordType);
592 
593 		TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
594 
595 		// Each invocation may have different results.
596 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
597 		{
598 			const float	coord	= coords[invocationNdx*numCoordComps + (numCoordComps-1)];
599 
600 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
601 			{
602 				const int		texNdx		= lookupIndices[lookupNdx];
603 				const float		result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
604 				const float		reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
605 
606 				if (de::abs(result-reference) > 0.005f)
607 				{
608 					m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
609 														   << reference << ", got " << result
610 									   << TestLog::EndMessage;
611 
612 					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
613 						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
614 				}
615 			}
616 		}
617 	}
618 	else
619 	{
620 		TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
621 
622 		// Validate results from first invocation
623 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
624 		{
625 			const int		texNdx	= lookupIndices[lookupNdx];
626 			const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];
627 			bool			isOk;
628 
629 			if (outputType == TYPE_FLOAT_VEC4)
630 			{
631 				const float			threshold		= 1.0f / 256.0f;
632 				const tcu::Vec4		reference		= refTexAccess.getPixel(texNdx, 0);
633 				const float*		floatPtr		= (const float*)resPtr;
634 				const tcu::Vec4		result			(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
635 
636 				isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
637 
638 				if (!isOk)
639 				{
640 					m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
641 														   << reference << ", got " << result
642 									   << TestLog::EndMessage;
643 				}
644 			}
645 			else
646 			{
647 				const tcu::UVec4	reference		= refTexAccess.getPixelUint(texNdx, 0);
648 				const deUint32*		uintPtr			= (const deUint32*)resPtr;
649 				const tcu::UVec4	result			(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
650 
651 				isOk = boolAll(equal(reference, result));
652 
653 				if (!isOk)
654 				{
655 					m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
656 														   << reference << ", got " << result
657 									   << TestLog::EndMessage;
658 				}
659 			}
660 
661 			if (!isOk && m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
662 				m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
663 		}
664 
665 		// Check results of other invocations against first one
666 		for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
667 		{
668 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
669 			{
670 				const deUint32*		refPtr		= &outData[lookupNdx*outLookupStride];
671 				const deUint32*		resPtr		= refPtr + invocationNdx*4;
672 				bool				isOk		= true;
673 
674 				for (int ndx = 0; ndx < 4; ndx++)
675 					isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
676 
677 				if (!isOk)
678 				{
679 					m_testCtx.getLog() << TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
680 														   << tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
681 														   << " for lookup " << lookupNdx << " doesn't match result from first invocation "
682 														   << tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
683 									   << TestLog::EndMessage;
684 
685 					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
686 						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Inconsistent lookup results");
687 				}
688 			}
689 		}
690 	}
691 
692 	return STOP;
693 }
694 
695 class BlockArrayIndexingCase : public TestCase
696 {
697 public:
698 	enum BlockType
699 	{
700 		BLOCKTYPE_UNIFORM = 0,
701 		BLOCKTYPE_BUFFER,
702 
703 		BLOCKTYPE_LAST
704 	};
705 								BlockArrayIndexingCase		(Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType);
706 								~BlockArrayIndexingCase		(void);
707 
708 	void						init						(void);
709 	IterateResult				iterate						(void);
710 
711 private:
712 								BlockArrayIndexingCase		(const BlockArrayIndexingCase&);
713 	BlockArrayIndexingCase&		operator=					(const BlockArrayIndexingCase&);
714 
715 	void						getShaderSpec				(ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const;
716 
717 	const BlockType				m_blockType;
718 	const IndexExprType			m_indexExprType;
719 	const ShaderType			m_shaderType;
720 
721 	const int					m_numInstances;
722 };
723 
724 BlockArrayIndexingCase::BlockArrayIndexingCase (Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType)
725 	: TestCase			(context, name, description)
726 	, m_blockType		(blockType)
727 	, m_indexExprType	(indexExprType)
728 	, m_shaderType		(shaderType)
729 	, m_numInstances	(4)
730 {
731 }
732 
733 BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
734 {
735 }
736 
737 void BlockArrayIndexingCase::init (void)
738 {
739 	if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
740 	{
741 		if (m_shaderType == SHADERTYPE_GEOMETRY)
742 			TCU_CHECK_AND_THROW(NotSupportedError,
743 				m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
744 				"GL_EXT_geometry_shader extension is required to run geometry shader tests.");
745 
746 		if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
747 			TCU_CHECK_AND_THROW(NotSupportedError,
748 				m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
749 				"GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
750 
751 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
752 			TCU_CHECK_AND_THROW(NotSupportedError,
753 				m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
754 				"GL_EXT_gpu_shader5 extension is required for dynamic indexing of interface blocks.");
755 	}
756 
757 	if (m_blockType == BLOCKTYPE_BUFFER)
758 	{
759 		const deUint32 limitPnames[] =
760 		{
761 			GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS,
762 			GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS,
763 			GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS,
764 			GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
765 			GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS,
766 			GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS
767 		};
768 
769 		const glw::Functions&	gl			= m_context.getRenderContext().getFunctions();
770 		int						maxBlocks	= 0;
771 
772 		gl.getIntegerv(limitPnames[m_shaderType], &maxBlocks);
773 		GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv()");
774 
775 		if (maxBlocks < 2 + m_numInstances)
776 			throw tcu::NotSupportedError("Not enough shader storage blocks supported for shader type");
777 	}
778 }
779 
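// Builds the shader spec for the block reads. For the uniform block variant the
// generated declarations are roughly (illustrative sketch, with m_numInstances = 4):
//
//   layout(std140, binding = 2) uniform Block { uint value; } block[4];
//   result0 = block[index0].value;
//   ...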
780 void BlockArrayIndexingCase::getShaderSpec (ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const
781 {
782 	const int			binding			= 2;
783 	const char*			blockName		= "Block";
784 	const char*			instanceName	= "block";
785 	const char*			indicesPrefix	= "index";
786 	const char*			resultPrefix	= "result";
787 	const char*			interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
788 	const char*			layout			= m_blockType == BLOCKTYPE_UNIFORM ? "std140" : "std430";
789 	const bool			isES32			= contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
790 	std::ostringstream	global;
791 	std::ostringstream	code;
792 
793 	if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
794 		global << "#extension GL_EXT_gpu_shader5 : require\n";
795 
796 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
797 		global << "const highp int indexBase = 1;\n";
798 
799 	global <<
800 		"layout(" << layout << ", binding = " << binding << ") " << interfaceName << " " << blockName << "\n"
801 		"{\n"
802 		"	uint value;\n"
803 		"} " << instanceName << "[" << numInstances << "];\n";
804 
805 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
806 	{
807 		for (int readNdx = 0; readNdx < numReads; readNdx++)
808 		{
809 			const string varName = indicesPrefix + de::toString(readNdx);
810 			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
811 		}
812 	}
813 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
814 		declareUniformIndexVars(global, indicesPrefix, numReads);
815 
816 	for (int readNdx = 0; readNdx < numReads; readNdx++)
817 	{
818 		const string varName = resultPrefix + de::toString(readNdx);
819 		spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
820 	}
821 
822 	for (int readNdx = 0; readNdx < numReads; readNdx++)
823 	{
824 		code << resultPrefix << readNdx << " = " << instanceName << "[";
825 
826 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
827 			code << readIndices[readNdx];
828 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
829 			code << "indexBase + " << (readIndices[readNdx]-1);
830 		else
831 			code << indicesPrefix << readNdx;
832 
833 		code << "].value;\n";
834 	}
835 
836 	spec->version				= isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
837 	spec->globalDeclarations	= global.str();
838 	spec->source				= code.str();
839 }
840 
841 BlockArrayIndexingCase::IterateResult BlockArrayIndexingCase::iterate (void)
842 {
843 	const int			numInvocations		= 32;
844 	const int			numInstances		= m_numInstances;
845 	const int			numReads			= 4;
846 	vector<int>			readIndices			(numReads);
847 	vector<deUint32>	inValues			(numInstances);
848 	vector<deUint32>	outValues			(numInvocations*numReads);
849 	ShaderSpec			shaderSpec;
850 	de::Random			rnd					(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
851 
852 	for (int readNdx = 0; readNdx < numReads; readNdx++)
853 		readIndices[readNdx] = rnd.getInt(0, numInstances-1);
854 
855 	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
856 		inValues[instanceNdx] = rnd.getUint32();
857 
858 	getShaderSpec(&shaderSpec, numInstances, numReads, &readIndices[0], m_context.getRenderContext());
859 
860 	{
861 		const RenderContext&	renderCtx		= m_context.getRenderContext();
862 		const glw::Functions&	gl				= renderCtx.getFunctions();
863 		const int				baseBinding		= 2;
864 		const BufferVector		buffers			(renderCtx, numInstances);
865 		const deUint32			bufTarget		= m_blockType == BLOCKTYPE_BUFFER ? GL_SHADER_STORAGE_BUFFER : GL_UNIFORM_BUFFER;
866 		ShaderExecutorPtr		shaderExecutor	(createExecutor(renderCtx, m_shaderType, shaderSpec));
867 		vector<int>				expandedIndices;
868 		vector<void*>			inputs;
869 		vector<void*>			outputs;
870 
871 		m_testCtx.getLog() << *shaderExecutor;
872 
873 		if (!shaderExecutor->isOk())
874 			TCU_FAIL("Compile failed");
875 
876 		shaderExecutor->useProgram();
877 
878 		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
879 		{
880 			gl.bindBuffer(bufTarget, buffers[instanceNdx]);
881 			gl.bufferData(bufTarget, (glw::GLsizeiptr)sizeof(deUint32), &inValues[instanceNdx], GL_STATIC_DRAW);
882 			gl.bindBufferBase(bufTarget, baseBinding+instanceNdx, buffers[instanceNdx]);
883 		}
884 
885 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
886 		{
887 			expandedIndices.resize(numInvocations * readIndices.size());
888 
889 			for (int readNdx = 0; readNdx < numReads; readNdx++)
890 			{
891 				int* dst = &expandedIndices[numInvocations*readNdx];
892 				std::fill(dst, dst+numInvocations, readIndices[readNdx]);
893 			}
894 
895 			for (int readNdx = 0; readNdx < numReads; readNdx++)
896 				inputs.push_back(&expandedIndices[readNdx*numInvocations]);
897 		}
898 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
899 			uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numReads, &readIndices[0]);
900 
901 		for (int readNdx = 0; readNdx < numReads; readNdx++)
902 			outputs.push_back(&outValues[readNdx*numInvocations]);
903 
904 		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
905 
906 		shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
907 	}
908 
909 	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
910 
911 	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
912 	{
913 		for (int readNdx = 0; readNdx < numReads; readNdx++)
914 		{
915 			const deUint32	refValue	= inValues[readIndices[readNdx]];
916 			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];
917 
918 			if (refValue != resValue)
919 			{
920 				m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
921 													   << ", read " << readNdx << ": expected "
922 													   << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
923 								   << TestLog::EndMessage;
924 
925 				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
926 					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
927 			}
928 		}
929 	}
930 
931 	return STOP;
932 }
933 
934 class AtomicCounterIndexingCase : public TestCase
935 {
936 public:
937 								AtomicCounterIndexingCase		(Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType);
938 								~AtomicCounterIndexingCase		(void);
939 
940 	void						init							(void);
941 	IterateResult				iterate							(void);
942 
943 private:
944 								AtomicCounterIndexingCase		(const AtomicCounterIndexingCase&);
945 	AtomicCounterIndexingCase&	operator=						(const AtomicCounterIndexingCase&);
946 
947 	void						getShaderSpec					(ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const;
948 
949 	const IndexExprType			m_indexExprType;
950 	const glu::ShaderType		m_shaderType;
951 	deInt32						m_numCounters;
952 };
953 
954 AtomicCounterIndexingCase::AtomicCounterIndexingCase (Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType)
955 	: TestCase			(context, name, description)
956 	, m_indexExprType	(indexExprType)
957 	, m_shaderType		(shaderType)
958 	, m_numCounters		(0)
959 {
960 }
961 
962 AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
963 {
964 }
965 
966 deUint32 getMaxAtomicCounterEnum (glu::ShaderType type)
967 {
968 	switch (type)
969 	{
970 		case glu::SHADERTYPE_VERTEX:					return GL_MAX_VERTEX_ATOMIC_COUNTERS;
971 		case glu::SHADERTYPE_FRAGMENT:					return GL_MAX_FRAGMENT_ATOMIC_COUNTERS;
972 		case glu::SHADERTYPE_GEOMETRY:					return GL_MAX_GEOMETRY_ATOMIC_COUNTERS;
973 		case glu::SHADERTYPE_COMPUTE:					return GL_MAX_COMPUTE_ATOMIC_COUNTERS;
974 		case glu::SHADERTYPE_TESSELLATION_CONTROL:		return GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS;
975 		case glu::SHADERTYPE_TESSELLATION_EVALUATION:	return GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS;
976 
977 		default:
978 			DE_FATAL("Unknown shader type");
979 			return -1;
980 	}
981 }
982 
983 void AtomicCounterIndexingCase::init (void)
984 {
985 	if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
986 	{
987 		if (m_shaderType == SHADERTYPE_GEOMETRY)
988 			TCU_CHECK_AND_THROW(NotSupportedError,
989 				m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
990 				"GL_EXT_geometry_shader extension is required to run geometry shader tests.");
991 
992 		if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
993 			TCU_CHECK_AND_THROW(NotSupportedError,
994 				m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
995 				"GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
996 
997 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
998 			TCU_CHECK_AND_THROW(NotSupportedError,
999 				m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
1000 				"GL_EXT_gpu_shader5 extension is required for dynamic indexing of atomic counters.");
1001 	}
1002 
1003 	{
1004 		m_context.getRenderContext().getFunctions().getIntegerv(getMaxAtomicCounterEnum(m_shaderType),
1005 																&m_numCounters);
1006 
1007 		if (m_numCounters < 1)
1008 		{
1009 			const string message =  "Atomic counters not supported in " + string(glu::getShaderTypeName(m_shaderType)) + " shader";
1010 			TCU_THROW(NotSupportedError, message.c_str());
1011 		}
1012 	}
1013 }
1014 
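// Builds the shader spec for the atomic counter operations. The generated code is
// roughly (illustrative sketch, N = numCounters):
//
//   layout(binding = 0) uniform atomic_uint counter[N];
//   result0 = atomicCounterIncrement(counter[index0]);
//   ...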
1015 void AtomicCounterIndexingCase::getShaderSpec (ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const
1016 {
1017 	const char*			indicesPrefix	= "index";
1018 	const char*			resultPrefix	= "result";
1019 	const bool			isES32			= contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
1020 	std::ostringstream	global;
1021 	std::ostringstream	code;
1022 
1023 	if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1024 		global << "#extension GL_EXT_gpu_shader5 : require\n";
1025 
1026 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1027 		global << "const highp int indexBase = 1;\n";
1028 
1029 	global <<
1030 		"layout(binding = 0) uniform atomic_uint counter[" << numCounters << "];\n";
1031 
1032 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1033 	{
1034 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1035 		{
1036 			const string varName = indicesPrefix + de::toString(opNdx);
1037 			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
1038 		}
1039 	}
1040 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1041 		declareUniformIndexVars(global, indicesPrefix, numOps);
1042 
1043 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1044 	{
1045 		const string varName = resultPrefix + de::toString(opNdx);
1046 		spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
1047 	}
1048 
1049 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1050 	{
1051 		code << resultPrefix << opNdx << " = atomicCounterIncrement(counter[";
1052 
1053 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1054 			code << opIndices[opNdx];
1055 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1056 			code << "indexBase + " << (opIndices[opNdx]-1);
1057 		else
1058 			code << indicesPrefix << opNdx;
1059 
1060 		code << "]);\n";
1061 	}
1062 
1063 	spec->version				= isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
1064 	spec->globalDeclarations	= global.str();
1065 	spec->source				= code.str();
1066 }
1067 
1068 AtomicCounterIndexingCase::IterateResult AtomicCounterIndexingCase::iterate (void)
1069 {
1070 	const RenderContext&	renderCtx			= m_context.getRenderContext();
1071 	const glw::Functions&	gl					= renderCtx.getFunctions();
1072 	const Buffer			counterBuffer		(renderCtx);
1073 
1074 	const int				numInvocations		= 32;
1075 	const int				numOps				= 4;
1076 	vector<int>				opIndices			(numOps);
1077 	vector<deUint32>		outValues			(numInvocations*numOps);
1078 	ShaderSpec				shaderSpec;
1079 	de::Random				rnd					(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1080 
1081 	for (int opNdx = 0; opNdx < numOps; opNdx++)
1082 		opIndices[opNdx] = rnd.getInt(0, numOps-1);
1083 
1084 	getShaderSpec(&shaderSpec, m_numCounters, numOps, &opIndices[0], m_context.getRenderContext());
1085 
1086 	{
1087 		const BufferVector		buffers			(renderCtx, m_numCounters);
1088 		ShaderExecutorPtr		shaderExecutor	(createExecutor(renderCtx, m_shaderType, shaderSpec));
1089 		vector<int>				expandedIndices;
1090 		vector<void*>			inputs;
1091 		vector<void*>			outputs;
1092 
1093 		m_testCtx.getLog() << *shaderExecutor;
1094 
1095 		if (!shaderExecutor->isOk())
1096 			TCU_FAIL("Compile failed");
1097 
1098 		{
1099 			const int				bufSize		= getProgramResourceInt(gl, shaderExecutor->getProgram(), GL_ATOMIC_COUNTER_BUFFER, 0, GL_BUFFER_DATA_SIZE);
1100 			const int				maxNdx		= maxElement(opIndices);
1101 			std::vector<deUint8>	emptyData	(m_numCounters*4, 0);
1102 
1103 			if (bufSize < (maxNdx+1)*4)
1104 				TCU_FAIL((string("GL reported invalid buffer size " + de::toString(bufSize)).c_str()));
1105 
1106 			gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, *counterBuffer);
1107 			gl.bufferData(GL_ATOMIC_COUNTER_BUFFER, (glw::GLsizeiptr)emptyData.size(), &emptyData[0], GL_STATIC_DRAW);
1108 			gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, *counterBuffer);
1109 			GLU_EXPECT_NO_ERROR(gl.getError(), "Atomic counter buffer initialization failed");
1110 		}
1111 
1112 		shaderExecutor->useProgram();
1113 
1114 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1115 		{
1116 			expandedIndices.resize(numInvocations * opIndices.size());
1117 
1118 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1119 			{
1120 				int* dst = &expandedIndices[numInvocations*opNdx];
1121 				std::fill(dst, dst+numInvocations, opIndices[opNdx]);
1122 			}
1123 
1124 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1125 				inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1126 		}
1127 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1128 			uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numOps, &opIndices[0]);
1129 
1130 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1131 			outputs.push_back(&outValues[opNdx*numInvocations]);
1132 
1133 		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
1134 
1135 		shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
1136 	}
1137 
1138 	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
1139 
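	// Verification: each counter must end up at numHits * numInvocations, and the values
	// returned by atomicCounterIncrement must be unique per counter, together covering
	// the range [0, numHits * numInvocations) exactly once.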
1140 	{
1141 		vector<int>				numHits			(m_numCounters, 0);	// Number of hits per counter.
1142 		vector<deUint32>		counterValues	(m_numCounters);
1143 		vector<vector<bool> >	counterMasks	(m_numCounters);
1144 
1145 		for (int opNdx = 0; opNdx < numOps; opNdx++)
1146 			numHits[opIndices[opNdx]] += 1;
1147 
1148 		// Read counter values
1149 		{
1150 			const void* mapPtr = DE_NULL;
1151 
1152 			try
1153 			{
1154 				mapPtr = gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, m_numCounters*4, GL_MAP_READ_BIT);
1155 				GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange(GL_ATOMIC_COUNTER_BUFFER)");
1156 				TCU_CHECK(mapPtr);
1157 				std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + m_numCounters, &counterValues[0]);
1158 				gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1159 			}
1160 			catch (...)
1161 			{
1162 				if (mapPtr)
1163 					gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1164 				throw;
1165 			}
1166 		}
1167 
1168 		// Verify counter values
1169 		for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1170 		{
1171 			const deUint32		refCount	= (deUint32)(numHits[counterNdx]*numInvocations);
1172 			const deUint32		resCount	= counterValues[counterNdx];
1173 
1174 			if (refCount != resCount)
1175 			{
1176 				m_testCtx.getLog() << TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1177 													   << ", expected " << refCount
1178 								   << TestLog::EndMessage;
1179 
1180 				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1181 					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid atomic counter value");
1182 			}
1183 		}
1184 
1185 		// Allocate bitmasks - one bit per each valid result value
1186 		for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1187 		{
1188 			const int	counterValue	= numHits[counterNdx]*numInvocations;
1189 			counterMasks[counterNdx].resize(counterValue, false);
1190 		}
1191 
1192 		// Verify result values from shaders
1193 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1194 		{
1195 			for (int opNdx = 0; opNdx < numOps; opNdx++)
1196 			{
1197 				const int		counterNdx	= opIndices[opNdx];
1198 				const deUint32	resValue	= outValues[opNdx*numInvocations + invocationNdx];
1199 				const bool		rangeOk		= de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1200 				const bool		notSeen		= rangeOk && !counterMasks[counterNdx][resValue];
1201 				const bool		isOk		= rangeOk && notSeen;
1202 
1203 				if (!isOk)
1204 				{
1205 					m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
1206 														   << ", op " << opNdx << ": got invalid result value "
1207 														   << resValue
1208 									   << TestLog::EndMessage;
1209 
1210 					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1211 						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
1212 				}
1213 				else
1214 				{
1215 					// Mark as used - no other invocation should see this value from same counter.
1216 					counterMasks[counterNdx][resValue] = true;
1217 				}
1218 			}
1219 		}
1220 
1221 		if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1222 		{
1223 			// Consistency check - all masks should be 1 now
1224 			for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1225 			{
1226 				for (vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1227 					TCU_CHECK_INTERNAL(*i);
1228 			}
1229 		}
1230 	}
1231 
1232 	return STOP;
1233 }
1234 
1235 } // anonymous
1236 
1237 OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (Context& context)
1238 	: TestCaseGroup(context, "opaque_type_indexing", "Opaque Type Indexing Tests")
1239 {
1240 }
1241 
1242 OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
1243 {
1244 }
1245 
1246 void OpaqueTypeIndexingTests::init (void)
1247 {
1248 	static const struct
1249 	{
1250 		IndexExprType	type;
1251 		const char*		name;
1252 		const char*		description;
1253 	} indexingTypes[] =
1254 	{
1255 		{ INDEX_EXPR_TYPE_CONST_LITERAL,	"const_literal",		"Indexing by constant literal"					},
1256 		{ INDEX_EXPR_TYPE_CONST_EXPRESSION,	"const_expression",		"Indexing by constant expression"				},
1257 		{ INDEX_EXPR_TYPE_UNIFORM,			"uniform",				"Indexing by uniform value"						},
1258 		{ INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	"dynamically_uniform",	"Indexing by dynamically uniform expression"	}
1259 	};
1260 
1261 	static const struct
1262 	{
1263 		ShaderType		type;
1264 		const char*		name;
1265 	} shaderTypes[] =
1266 	{
1267 		{ SHADERTYPE_VERTEX,					"vertex"					},
1268 		{ SHADERTYPE_FRAGMENT,					"fragment"					},
1269 		{ SHADERTYPE_COMPUTE,					"compute"					},
1270 		{ SHADERTYPE_GEOMETRY,					"geometry"					},
1271 		{ SHADERTYPE_TESSELLATION_CONTROL,		"tessellation_control"		},
1272 		{ SHADERTYPE_TESSELLATION_EVALUATION,	"tessellation_evaluation"	}
1273 	};
1274 
1275 	// .sampler
1276 	{
1277 		static const DataType samplerTypes[] =
1278 		{
1279 			// \note 1D images will be added by a later extension.
1280 //			TYPE_SAMPLER_1D,
1281 			TYPE_SAMPLER_2D,
1282 			TYPE_SAMPLER_CUBE,
1283 			TYPE_SAMPLER_2D_ARRAY,
1284 			TYPE_SAMPLER_3D,
1285 //			TYPE_SAMPLER_1D_SHADOW,
1286 			TYPE_SAMPLER_2D_SHADOW,
1287 			TYPE_SAMPLER_CUBE_SHADOW,
1288 			TYPE_SAMPLER_2D_ARRAY_SHADOW,
1289 //			TYPE_INT_SAMPLER_1D,
1290 			TYPE_INT_SAMPLER_2D,
1291 			TYPE_INT_SAMPLER_CUBE,
1292 			TYPE_INT_SAMPLER_2D_ARRAY,
1293 			TYPE_INT_SAMPLER_3D,
1294 //			TYPE_UINT_SAMPLER_1D,
1295 			TYPE_UINT_SAMPLER_2D,
1296 			TYPE_UINT_SAMPLER_CUBE,
1297 			TYPE_UINT_SAMPLER_2D_ARRAY,
1298 			TYPE_UINT_SAMPLER_3D,
1299 			TYPE_SAMPLER_CUBE_ARRAY,
1300 			TYPE_SAMPLER_CUBE_ARRAY_SHADOW,
1301 			TYPE_INT_SAMPLER_CUBE_ARRAY,
1302 			TYPE_UINT_SAMPLER_CUBE_ARRAY
1303 		};
1304 
1305 		tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
1306 		addChild(samplerGroup);
1307 
1308 		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1309 		{
1310 			const IndexExprType			indexExprType	= indexingTypes[indexTypeNdx].type;
1311 			tcu::TestCaseGroup* const	indexGroup		= new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
1312 			samplerGroup->addChild(indexGroup);
1313 
1314 			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1315 			{
1316 				const ShaderType			shaderType		= shaderTypes[shaderTypeNdx].type;
1317 				tcu::TestCaseGroup* const	shaderGroup		= new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
1318 				indexGroup->addChild(shaderGroup);
1319 
1320 				for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
1321 				{
1322 					const DataType	samplerType	= samplerTypes[samplerTypeNdx];
1323 					const char*		samplerName	= getDataTypeName(samplerType);
1324 					const string	caseName	= de::toLower(samplerName);
1325 
1326 					shaderGroup->addChild(new SamplerIndexingCase(m_context, caseName.c_str(), "", shaderType, samplerType, indexExprType));
1327 				}
1328 			}
1329 		}
1330 	}
1331 
1332 	// .ubo / .ssbo / .atomic_counter
1333 	{
1334 		tcu::TestCaseGroup* const	uboGroup	= new tcu::TestCaseGroup(m_testCtx, "ubo",				"Uniform Block Instance Array Indexing Tests");
1335 		tcu::TestCaseGroup* const	ssboGroup	= new tcu::TestCaseGroup(m_testCtx, "ssbo",				"Buffer Block Instance Array Indexing Tests");
1336 		tcu::TestCaseGroup* const	acGroup		= new tcu::TestCaseGroup(m_testCtx, "atomic_counter",	"Atomic Counter Array Indexing Tests");
1337 		addChild(uboGroup);
1338 		addChild(ssboGroup);
1339 		addChild(acGroup);
1340 
1341 		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1342 		{
1343 			const IndexExprType		indexExprType		= indexingTypes[indexTypeNdx].type;
1344 			const char*				indexExprName		= indexingTypes[indexTypeNdx].name;
1345 			const char*				indexExprDesc		= indexingTypes[indexTypeNdx].description;
1346 
1347 			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1348 			{
1349 				const ShaderType		shaderType		= shaderTypes[shaderTypeNdx].type;
1350 				const string			name			= string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
1351 
1352 				uboGroup->addChild	(new BlockArrayIndexingCase		(m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_UNIFORM,	indexExprType, shaderType));
1353 				acGroup->addChild	(new AtomicCounterIndexingCase	(m_context, name.c_str(), indexExprDesc, indexExprType, shaderType));
1354 
1355 				if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1356 					ssboGroup->addChild	(new BlockArrayIndexingCase		(m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_BUFFER,	indexExprType, shaderType));
1357 			}
1358 		}
1359 	}
1360 }
1361 
1362 } // Functional
1363 } // gles31
1364 } // deqp
1365