1 /*-------------------------------------------------------------------------
2 * drawElements Quality Program OpenGL ES 3.1 Module
3 * -------------------------------------------------
4 *
5 * Copyright 2014 The Android Open Source Project
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
22 *
23 * \todo [2014-03-05 pyry] Extend with following:
24 * + sampler: different filtering modes, multiple sizes, incomplete textures
25 * + SSBO: write, atomic op, unsized array .length()
26 *//*--------------------------------------------------------------------*/
27
28 #include "es31fOpaqueTypeIndexingTests.hpp"
29 #include "tcuTexture.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuFormatUtil.hpp"
32 #include "tcuVectorUtil.hpp"
33 #include "gluShaderUtil.hpp"
34 #include "gluShaderProgram.hpp"
35 #include "gluObjectWrapper.hpp"
36 #include "gluTextureUtil.hpp"
37 #include "gluRenderContext.hpp"
38 #include "gluProgramInterfaceQuery.hpp"
39 #include "gluContextInfo.hpp"
40 #include "glsShaderExecUtil.hpp"
41 #include "glwFunctions.hpp"
42 #include "glwEnums.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
46
47 #include <sstream>
48
49 namespace deqp
50 {
51 namespace gles31
52 {
53 namespace Functional
54 {
55
56 namespace
57 {
58
59 using namespace gls::ShaderExecUtil;
60 using namespace glu;
61 using std::string;
62 using std::vector;
63 using tcu::TextureFormat;
64 using tcu::TestLog;
65
66 typedef de::UniquePtr<ShaderExecutor> ShaderExecutorPtr;
67
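// How the array index expression is formed in the generated shader: a literal, a constant
// expression folded at compile time, a plain uniform, or a dynamically uniform expression
// (the index arrives as a shader input that holds the same value in every invocation).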
68 enum IndexExprType
69 {
70 INDEX_EXPR_TYPE_CONST_LITERAL = 0,
71 INDEX_EXPR_TYPE_CONST_EXPRESSION,
72 INDEX_EXPR_TYPE_UNIFORM,
73 INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,
74
75 INDEX_EXPR_TYPE_LAST
76 };
77
78 enum TextureType
79 {
80 TEXTURE_TYPE_1D = 0,
81 TEXTURE_TYPE_2D,
82 TEXTURE_TYPE_CUBE,
83 TEXTURE_TYPE_2D_ARRAY,
84 TEXTURE_TYPE_3D,
85 TEXTURE_TYPE_CUBE_ARRAY,
86
87 TEXTURE_TYPE_LAST
88 };
89
static void declareUniformIndexVars (std::ostream& str, const char* varPrefix, int numVars)
91 {
92 for (int varNdx = 0; varNdx < numVars; varNdx++)
93 str << "uniform highp int " << varPrefix << varNdx << ";\n";
94 }
95
static void uploadUniformIndices (const glw::Functions& gl, deUint32 program, const char* varPrefix, int numIndices, const int* indices)
97 {
98 for (int varNdx = 0; varNdx < numIndices; varNdx++)
99 {
100 const string varName = varPrefix + de::toString(varNdx);
101 const int loc = gl.getUniformLocation(program, varName.c_str());
102 TCU_CHECK_MSG(loc >= 0, ("No location assigned for uniform '" + varName + "'").c_str());
103
104 gl.uniform1i(loc, indices[varNdx]);
105 }
106 }
107
108 template<typename T>
static T maxElement (const std::vector<T>& elements)
110 {
111 T maxElem = elements[0];
112
113 for (size_t ndx = 1; ndx < elements.size(); ndx++)
114 maxElem = de::max(maxElem, elements[ndx]);
115
116 return maxElem;
117 }
118
static TextureType getTextureType (glu::DataType samplerType)
120 {
121 switch (samplerType)
122 {
123 case glu::TYPE_SAMPLER_1D:
124 case glu::TYPE_INT_SAMPLER_1D:
125 case glu::TYPE_UINT_SAMPLER_1D:
126 case glu::TYPE_SAMPLER_1D_SHADOW:
127 return TEXTURE_TYPE_1D;
128
129 case glu::TYPE_SAMPLER_2D:
130 case glu::TYPE_INT_SAMPLER_2D:
131 case glu::TYPE_UINT_SAMPLER_2D:
132 case glu::TYPE_SAMPLER_2D_SHADOW:
133 return TEXTURE_TYPE_2D;
134
135 case glu::TYPE_SAMPLER_CUBE:
136 case glu::TYPE_INT_SAMPLER_CUBE:
137 case glu::TYPE_UINT_SAMPLER_CUBE:
138 case glu::TYPE_SAMPLER_CUBE_SHADOW:
139 return TEXTURE_TYPE_CUBE;
140
141 case glu::TYPE_SAMPLER_2D_ARRAY:
142 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
143 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
144 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
145 return TEXTURE_TYPE_2D_ARRAY;
146
147 case glu::TYPE_SAMPLER_3D:
148 case glu::TYPE_INT_SAMPLER_3D:
149 case glu::TYPE_UINT_SAMPLER_3D:
150 return TEXTURE_TYPE_3D;
151
152 case glu::TYPE_SAMPLER_CUBE_ARRAY:
153 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
154 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
155 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
156 return TEXTURE_TYPE_CUBE_ARRAY;
157
158 default:
159 TCU_THROW(InternalError, "Invalid sampler type");
160 }
161 }
162
static bool isShadowSampler (glu::DataType samplerType)
164 {
165 return samplerType == glu::TYPE_SAMPLER_1D_SHADOW ||
166 samplerType == glu::TYPE_SAMPLER_2D_SHADOW ||
167 samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW ||
168 samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW ||
169 samplerType == glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW;
170 }
171
static glu::DataType getSamplerOutputType (glu::DataType samplerType)
173 {
174 switch (samplerType)
175 {
176 case glu::TYPE_SAMPLER_1D:
177 case glu::TYPE_SAMPLER_2D:
178 case glu::TYPE_SAMPLER_CUBE:
179 case glu::TYPE_SAMPLER_2D_ARRAY:
180 case glu::TYPE_SAMPLER_3D:
181 case glu::TYPE_SAMPLER_CUBE_ARRAY:
182 return glu::TYPE_FLOAT_VEC4;
183
184 case glu::TYPE_SAMPLER_1D_SHADOW:
185 case glu::TYPE_SAMPLER_2D_SHADOW:
186 case glu::TYPE_SAMPLER_CUBE_SHADOW:
187 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
188 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
189 return glu::TYPE_FLOAT;
190
191 case glu::TYPE_INT_SAMPLER_1D:
192 case glu::TYPE_INT_SAMPLER_2D:
193 case glu::TYPE_INT_SAMPLER_CUBE:
194 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
195 case glu::TYPE_INT_SAMPLER_3D:
196 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
197 return glu::TYPE_INT_VEC4;
198
199 case glu::TYPE_UINT_SAMPLER_1D:
200 case glu::TYPE_UINT_SAMPLER_2D:
201 case glu::TYPE_UINT_SAMPLER_CUBE:
202 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
203 case glu::TYPE_UINT_SAMPLER_3D:
204 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
205 return glu::TYPE_UINT_VEC4;
206
207 default:
208 TCU_THROW(InternalError, "Invalid sampler type");
209 }
210 }
211
static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
213 {
214 const glu::DataType outType = getSamplerOutputType(samplerType);
215 const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);
216
217 switch (outScalarType)
218 {
219 case glu::TYPE_FLOAT:
220 if (isShadowSampler(samplerType))
221 return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
222 else
223 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
224
225 case glu::TYPE_INT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
226 case glu::TYPE_UINT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
227
228 default:
229 TCU_THROW(InternalError, "Invalid sampler type");
230 }
231 }
232
static glu::DataType getSamplerCoordType (glu::DataType samplerType)
234 {
235 const TextureType texType = getTextureType(samplerType);
236 int numCoords = 0;
237
238 switch (texType)
239 {
240 case TEXTURE_TYPE_1D: numCoords = 1; break;
241 case TEXTURE_TYPE_2D: numCoords = 2; break;
242 case TEXTURE_TYPE_2D_ARRAY: numCoords = 3; break;
243 case TEXTURE_TYPE_CUBE: numCoords = 3; break;
244 case TEXTURE_TYPE_3D: numCoords = 3; break;
245 case TEXTURE_TYPE_CUBE_ARRAY: numCoords = 4; break;
246 default:
247 TCU_THROW(InternalError, "Invalid texture type");
248 }
249
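// Shadow samplers take the compare value as an extra coordinate component, except
// samplerCubeArrayShadow, where it is passed as a separate texture() argument instead.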
250 if (isShadowSampler(samplerType) && samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW)
251 numCoords += 1;
252
253 DE_ASSERT(de::inRange(numCoords, 1, 4));
254
255 return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
256 }
257
static deUint32 getGLTextureTarget (TextureType texType)
259 {
260 switch (texType)
261 {
262 case TEXTURE_TYPE_1D: return GL_TEXTURE_1D;
263 case TEXTURE_TYPE_2D: return GL_TEXTURE_2D;
264 case TEXTURE_TYPE_2D_ARRAY: return GL_TEXTURE_2D_ARRAY;
265 case TEXTURE_TYPE_CUBE: return GL_TEXTURE_CUBE_MAP;
266 case TEXTURE_TYPE_3D: return GL_TEXTURE_3D;
267 case TEXTURE_TYPE_CUBE_ARRAY: return GL_TEXTURE_CUBE_MAP_ARRAY;
268 default:
269 TCU_THROW(InternalError, "Invalid texture type");
270 }
271 }
272
static void setupTexture (const glw::Functions& gl,
                          deUint32 texture,
                          glu::DataType samplerType,
                          tcu::TextureFormat texFormat,
                          const void* color)
278 {
279 const TextureType texType = getTextureType(samplerType);
280 const deUint32 texTarget = getGLTextureTarget(texType);
281 const deUint32 intFormat = glu::getInternalFormat(texFormat);
282 const glu::TransferFormat transferFmt = glu::getTransferFormat(texFormat);
283
284 // \todo [2014-03-04 pyry] Use larger than 1x1 textures?
285
286 gl.bindTexture(texTarget, texture);
287
288 switch (texType)
289 {
290 case TEXTURE_TYPE_1D:
291 gl.texStorage1D(texTarget, 1, intFormat, 1);
292 gl.texSubImage1D(texTarget, 0, 0, 1, transferFmt.format, transferFmt.dataType, color);
293 break;
294
295 case TEXTURE_TYPE_2D:
296 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
297 gl.texSubImage2D(texTarget, 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
298 break;
299
300 case TEXTURE_TYPE_2D_ARRAY:
301 case TEXTURE_TYPE_3D:
302 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 1);
303 gl.texSubImage3D(texTarget, 0, 0, 0, 0, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
304 break;
305
306 case TEXTURE_TYPE_CUBE_ARRAY:
307 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 6);
308 for (int zoffset = 0; zoffset < 6; ++zoffset)
309 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
310 gl.texSubImage3D(texTarget, 0, 0, 0, zoffset, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
311 break;
312
313 case TEXTURE_TYPE_CUBE:
314 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
315 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
316 gl.texSubImage2D(glu::getGLCubeFace((tcu::CubeFace)face), 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
317 break;
318
319 default:
320 TCU_THROW(InternalError, "Invalid texture type");
321 }
322
323 gl.texParameteri(texTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
324 gl.texParameteri(texTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
325
326 if (isShadowSampler(samplerType))
327 gl.texParameteri(texTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
328
329 GLU_EXPECT_NO_ERROR(gl.getError(), "Texture setup failed");
330 }
331
332 class SamplerIndexingCase : public TestCase
333 {
334 public:
335 SamplerIndexingCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType);
336 ~SamplerIndexingCase (void);
337
338 void init (void);
339 IterateResult iterate (void);
340
341 private:
342 SamplerIndexingCase (const SamplerIndexingCase&);
343 SamplerIndexingCase& operator= (const SamplerIndexingCase&);
344
345 void getShaderSpec (ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const;
346
347 const glu::ShaderType m_shaderType;
348 const glu::DataType m_samplerType;
349 const IndexExprType m_indexExprType;
350 };
351
SamplerIndexingCase::SamplerIndexingCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType)
353 : TestCase (context, name, description)
354 , m_shaderType (shaderType)
355 , m_samplerType (samplerType)
356 , m_indexExprType (indexExprType)
357 {
358 }
359
SamplerIndexingCase::~SamplerIndexingCase (void)
361 {
362 }
363
void SamplerIndexingCase::init (void)
365 {
366 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
367 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
368
369 if (!supportsES32)
370 {
371 if (m_shaderType == SHADERTYPE_GEOMETRY)
372 TCU_CHECK_AND_THROW(NotSupportedError,
373 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
374 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
375
376 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
377 TCU_CHECK_AND_THROW(NotSupportedError,
378 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
379 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
380
381 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
382 TCU_CHECK_AND_THROW(NotSupportedError,
383 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
384 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of sampler arrays.");
385
386 if (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
387 || m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
388 || m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
389 || m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY)
390 {
391 TCU_CHECK_AND_THROW(NotSupportedError,
392 m_context.getContextInfo().isExtensionSupported("GL_EXT_texture_cube_map_array"),
393 "GL_EXT_texture_cube_map_array extension is required for cube map arrays.");
394 }
395 }
396 }
397
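// Builds the shader body for the sampler lookups. For example, with sampler2D,
// numSamplers = 8 and INDEX_EXPR_TYPE_DYNAMIC_UNIFORM the generated code is of the form:
//
//   uniform highp sampler2D sampler[8];
//   result0 = texture(sampler[index0], coords);
//   ...
//
// where index0..indexN-1 are per-invocation inputs and result0..resultN-1 are outputs.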
void SamplerIndexingCase::getShaderSpec (ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const
399 {
400 const char* samplersName = "sampler";
401 const char* coordsName = "coords";
402 const char* indicesPrefix = "index";
403 const char* resultPrefix = "result";
404 const DataType coordType = getSamplerCoordType(m_samplerType);
405 const DataType outType = getSamplerOutputType(m_samplerType);
406 const bool supportsES32 = contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
407 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
408 std::ostringstream global;
409 std::ostringstream code;
410
411 spec->inputs.push_back(Symbol(coordsName, VarType(coordType, PRECISION_HIGHP)));
412
413 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
414 global << "#extension GL_EXT_gpu_shader5 : require\n";
415
416 if (!supportsES32
417 && (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
418 || m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
419 || m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
420 || m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY))
421 {
422 global << "#extension GL_EXT_texture_cube_map_array: require\n";
423 }
424
425 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
426 global << "const highp int indexBase = 1;\n";
427
428 global <<
429 "uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << numSamplers << "];\n";
430
431 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
432 {
433 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
434 {
435 const string varName = indicesPrefix + de::toString(lookupNdx);
436 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
437 }
438 }
439 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
440 declareUniformIndexVars(global, indicesPrefix, numLookups);
441
442 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
443 {
444 const string varName = resultPrefix + de::toString(lookupNdx);
445 spec->outputs.push_back(Symbol(varName, VarType(outType, PRECISION_HIGHP)));
446 }
447
448 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
449 {
code << resultPrefix << lookupNdx << " = texture(" << samplersName << "[";
451
452 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
453 code << lookupIndices[lookupNdx];
454 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
455 code << "indexBase + " << (lookupIndices[lookupNdx]-1);
456 else
457 code << indicesPrefix << lookupNdx;
458
459
460 code << "], " << coordsName << (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ? ", 0.0" : "") << ");\n";
461 }
462
463 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
464 spec->globalDeclarations = global.str();
465 spec->source = code.str();
466 }
467
static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
469 {
470 DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
471
472 if (access.getFormat().order == TextureFormat::D)
473 {
474 // \note Texture uses odd values, lookup even values to avoid precision issues.
475 const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
476
477 for (int ndx = 0; ndx < access.getWidth(); ndx++)
478 access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
479 }
480 else
481 {
482 TCU_CHECK_INTERNAL(access.getFormat().order == TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
483
484 for (int ndx = 0; ndx < access.getWidth(); ndx++)
485 *((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
486 }
487 }
488
SamplerIndexingCase::IterateResult SamplerIndexingCase::iterate (void)
490 {
491 const int numInvocations = 64;
492 const int numSamplers = 8;
493 const int numLookups = 4;
494 const DataType coordType = getSamplerCoordType(m_samplerType);
495 const DataType outputType = getSamplerOutputType(m_samplerType);
496 const TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
497 const int outLookupStride = numInvocations*getDataTypeScalarSize(outputType);
498 vector<int> lookupIndices (numLookups);
499 vector<float> coords;
500 vector<deUint32> outData;
501 vector<deUint8> texData (numSamplers * texFormat.getPixelSize());
502 const tcu::PixelBufferAccess refTexAccess (texFormat, numSamplers, 1, 1, &texData[0]);
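// refTexAccess views texData as a numSamplers x 1 image: texel ndx holds the color uploaded to sampler ndx.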
503
504 ShaderSpec shaderSpec;
505 de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
506
507 for (int ndx = 0; ndx < numLookups; ndx++)
508 lookupIndices[ndx] = rnd.getInt(0, numSamplers-1);
509
510 getShaderSpec(&shaderSpec, numSamplers, numLookups, &lookupIndices[0], m_context.getRenderContext());
511
512 coords.resize(numInvocations * getDataTypeScalarSize(coordType));
513
514 if (m_samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW && isShadowSampler(m_samplerType))
515 {
516 // Use different comparison value per invocation.
517 // \note Texture uses odd values, comparison even values.
518 const int numCoordComps = getDataTypeScalarSize(coordType);
519 const float cmpValues[] = { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
520
521 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
522 coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
523 }
524
525 fillTextureData(refTexAccess, rnd);
526
527 outData.resize(numLookups*outLookupStride);
528
529 {
530 const RenderContext& renderCtx = m_context.getRenderContext();
531 const glw::Functions& gl = renderCtx.getFunctions();
532 ShaderExecutorPtr executor (createExecutor(m_context.getRenderContext(), m_shaderType, shaderSpec));
533 TextureVector textures (renderCtx, numSamplers);
534 vector<void*> inputs;
535 vector<void*> outputs;
536 vector<int> expandedIndices;
537 const int maxIndex = maxElement(lookupIndices);
538
539 m_testCtx.getLog() << *executor;
540
541 if (!executor->isOk())
542 TCU_FAIL("Compile failed");
543
544 executor->useProgram();
545
546 // \todo [2014-03-05 pyry] Do we want to randomize tex unit assignments?
547 for (int samplerNdx = 0; samplerNdx < numSamplers; samplerNdx++)
548 {
549 const string samplerName = string("sampler[") + de::toString(samplerNdx) + "]";
550 const int samplerLoc = gl.getUniformLocation(executor->getProgram(), samplerName.c_str());
551
552 if (samplerNdx > maxIndex && samplerLoc < 0)
553 continue; // Unused uniform eliminated by compiler
554
555 TCU_CHECK_MSG(samplerLoc >= 0, (string("No location for uniform '") + samplerName + "' found").c_str());
556
557 gl.activeTexture(GL_TEXTURE0 + samplerNdx);
558 setupTexture(gl, textures[samplerNdx], m_samplerType, texFormat, &texData[samplerNdx*texFormat.getPixelSize()]);
559
560 gl.uniform1i(samplerLoc, samplerNdx);
561 }
562
563 inputs.push_back(&coords[0]);
564
565 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
566 {
567 expandedIndices.resize(numInvocations * lookupIndices.size());
568 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
569 {
570 for (int invNdx = 0; invNdx < numInvocations; invNdx++)
571 expandedIndices[lookupNdx*numInvocations + invNdx] = lookupIndices[lookupNdx];
572 }
573
574 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
575 inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
576 }
577 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
578 uploadUniformIndices(gl, executor->getProgram(), "index", numLookups, &lookupIndices[0]);
579
580 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
581 outputs.push_back(&outData[outLookupStride*lookupNdx]);
582
583 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
584
585 executor->execute(numInvocations, &inputs[0], &outputs[0]);
586 }
587
588 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
589
590 if (isShadowSampler(m_samplerType))
591 {
592 const tcu::Sampler refSampler (tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
593 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST, 0.0f, false /* non-normalized */,
594 tcu::Sampler::COMPAREMODE_LESS);
595 const int numCoordComps = getDataTypeScalarSize(coordType);
596
597 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
598
599 // Each invocation may have different results.
600 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
601 {
602 const float coord = coords[invocationNdx*numCoordComps + (numCoordComps-1)];
603
604 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
605 {
606 const int texNdx = lookupIndices[lookupNdx];
607 const float result = *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
608 const float reference = refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
609
610 if (de::abs(result-reference) > 0.005f)
611 {
612 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
613 << reference << ", got " << result
614 << TestLog::EndMessage;
615
616 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
617 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
618 }
619 }
620 }
621 }
622 else
623 {
624 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
625
626 // Validate results from first invocation
627 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
628 {
629 const int texNdx = lookupIndices[lookupNdx];
630 const deUint8* resPtr = (const deUint8*)&outData[lookupNdx*outLookupStride];
631 bool isOk;
632
633 if (outputType == TYPE_FLOAT_VEC4)
634 {
635 const float threshold = 1.0f / 256.0f;
636 const tcu::Vec4 reference = refTexAccess.getPixel(texNdx, 0);
637 const float* floatPtr = (const float*)resPtr;
638 const tcu::Vec4 result (floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
639
640 isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
641
642 if (!isOk)
643 {
644 m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
645 << reference << ", got " << result
646 << TestLog::EndMessage;
647 }
648 }
649 else
650 {
651 const tcu::UVec4 reference = refTexAccess.getPixelUint(texNdx, 0);
652 const deUint32* uintPtr = (const deUint32*)resPtr;
653 const tcu::UVec4 result (uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
654
655 isOk = boolAll(equal(reference, result));
656
657 if (!isOk)
658 {
659 m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
660 << reference << ", got " << result
661 << TestLog::EndMessage;
662 }
663 }
664
665 if (!isOk && m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
666 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
667 }
668
669 // Check results of other invocations against first one
670 for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
671 {
672 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
673 {
674 const deUint32* refPtr = &outData[lookupNdx*outLookupStride];
675 const deUint32* resPtr = refPtr + invocationNdx*4;
676 bool isOk = true;
677
678 for (int ndx = 0; ndx < 4; ndx++)
679 isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
680
681 if (!isOk)
682 {
683 m_testCtx.getLog() << TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
684 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
685 << " for lookup " << lookupNdx << " doesn't match result from first invocation "
686 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
687 << TestLog::EndMessage;
688
689 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
690 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Inconsistent lookup results");
691 }
692 }
693 }
694 }
695
696 return STOP;
697 }
698
699 class BlockArrayIndexingCase : public TestCase
700 {
701 public:
702 enum BlockType
703 {
704 BLOCKTYPE_UNIFORM = 0,
705 BLOCKTYPE_BUFFER,
706
707 BLOCKTYPE_LAST
708 };
709 BlockArrayIndexingCase (Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType);
710 ~BlockArrayIndexingCase (void);
711
712 void init (void);
713 IterateResult iterate (void);
714
715 private:
716 BlockArrayIndexingCase (const BlockArrayIndexingCase&);
717 BlockArrayIndexingCase& operator= (const BlockArrayIndexingCase&);
718
719 void getShaderSpec (ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const;
720
721 const BlockType m_blockType;
722 const IndexExprType m_indexExprType;
723 const ShaderType m_shaderType;
724
725 const int m_numInstances;
726 };
727
BlockArrayIndexingCase::BlockArrayIndexingCase (Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType)
729 : TestCase (context, name, description)
730 , m_blockType (blockType)
731 , m_indexExprType (indexExprType)
732 , m_shaderType (shaderType)
733 , m_numInstances (4)
734 {
735 }
736
BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
738 {
739 }
740
void BlockArrayIndexingCase::init (void)
742 {
743 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
744 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
745
746 if (!supportsES32)
747 {
748 if (m_shaderType == SHADERTYPE_GEOMETRY)
749 TCU_CHECK_AND_THROW(NotSupportedError,
750 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
751 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
752
753 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
754 TCU_CHECK_AND_THROW(NotSupportedError,
755 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
756 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
757
758 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
759 TCU_CHECK_AND_THROW(NotSupportedError,
760 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
761 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of interface blocks.");
762 }
763
764 if (m_blockType == BLOCKTYPE_BUFFER)
765 {
766 const deUint32 limitPnames[] =
767 {
768 GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS,
769 GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS,
770 GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS,
771 GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
772 GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS,
773 GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS
774 };
775
776 const glw::Functions& gl = m_context.getRenderContext().getFunctions();
777 int maxBlocks = 0;
778
779 gl.getIntegerv(limitPnames[m_shaderType], &maxBlocks);
780 GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv()");
781
782 if (maxBlocks < 2 + m_numInstances)
783 throw tcu::NotSupportedError("Not enough shader storage blocks supported for shader type");
784 }
785 }
786
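// Builds the shader body for the block reads. The generated interface is of the form
//
//   layout(std430, binding = 2) buffer Block { uint value; } block[numInstances];
//
// (std140 / uniform for BLOCKTYPE_UNIFORM), and each read is "resultN = block[<index expr>].value;".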
void BlockArrayIndexingCase::getShaderSpec (ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const
788 {
789 const int binding = 2;
790 const char* blockName = "Block";
791 const char* instanceName = "block";
792 const char* indicesPrefix = "index";
793 const char* resultPrefix = "result";
794 const char* interfaceName = m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
795 const char* layout = m_blockType == BLOCKTYPE_UNIFORM ? "std140" : "std430";
796 const bool supportsES32 = contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
797 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
798 std::ostringstream global;
799 std::ostringstream code;
800
801 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
802 global << "#extension GL_EXT_gpu_shader5 : require\n";
803
804 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
805 global << "const highp int indexBase = 1;\n";
806
807 global <<
808 "layout(" << layout << ", binding = " << binding << ") " << interfaceName << " " << blockName << "\n"
809 "{\n"
810 " uint value;\n"
811 "} " << instanceName << "[" << numInstances << "];\n";
812
813 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
814 {
815 for (int readNdx = 0; readNdx < numReads; readNdx++)
816 {
817 const string varName = indicesPrefix + de::toString(readNdx);
818 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
819 }
820 }
821 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
822 declareUniformIndexVars(global, indicesPrefix, numReads);
823
824 for (int readNdx = 0; readNdx < numReads; readNdx++)
825 {
826 const string varName = resultPrefix + de::toString(readNdx);
827 spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
828 }
829
830 for (int readNdx = 0; readNdx < numReads; readNdx++)
831 {
832 code << resultPrefix << readNdx << " = " << instanceName << "[";
833
834 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
835 code << readIndices[readNdx];
836 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
837 code << "indexBase + " << (readIndices[readNdx]-1);
838 else
839 code << indicesPrefix << readNdx;
840
841 code << "].value;\n";
842 }
843
844 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
845 spec->globalDeclarations = global.str();
846 spec->source = code.str();
847 }
848
BlockArrayIndexingCase::IterateResult BlockArrayIndexingCase::iterate (void)
850 {
851 const int numInvocations = 32;
852 const int numInstances = m_numInstances;
853 const int numReads = 4;
854 vector<int> readIndices (numReads);
855 vector<deUint32> inValues (numInstances);
856 vector<deUint32> outValues (numInvocations*numReads);
857 ShaderSpec shaderSpec;
858 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
859
860 for (int readNdx = 0; readNdx < numReads; readNdx++)
861 readIndices[readNdx] = rnd.getInt(0, numInstances-1);
862
863 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
864 inValues[instanceNdx] = rnd.getUint32();
865
866 getShaderSpec(&shaderSpec, numInstances, numReads, &readIndices[0], m_context.getRenderContext());
867
868 {
869 const RenderContext& renderCtx = m_context.getRenderContext();
870 const glw::Functions& gl = renderCtx.getFunctions();
871 const int baseBinding = 2;
872 const BufferVector buffers (renderCtx, numInstances);
873 const deUint32 bufTarget = m_blockType == BLOCKTYPE_BUFFER ? GL_SHADER_STORAGE_BUFFER : GL_UNIFORM_BUFFER;
874 ShaderExecutorPtr shaderExecutor (createExecutor(renderCtx, m_shaderType, shaderSpec));
875 vector<int> expandedIndices;
876 vector<void*> inputs;
877 vector<void*> outputs;
878
879 m_testCtx.getLog() << *shaderExecutor;
880
881 if (!shaderExecutor->isOk())
882 TCU_FAIL("Compile failed");
883
884 shaderExecutor->useProgram();
885
886 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
887 {
888 gl.bindBuffer(bufTarget, buffers[instanceNdx]);
889 gl.bufferData(bufTarget, (glw::GLsizeiptr)sizeof(deUint32), &inValues[instanceNdx], GL_STATIC_DRAW);
890 gl.bindBufferBase(bufTarget, baseBinding+instanceNdx, buffers[instanceNdx]);
891 }
892
893 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
894 {
895 expandedIndices.resize(numInvocations * readIndices.size());
896
897 for (int readNdx = 0; readNdx < numReads; readNdx++)
898 {
899 int* dst = &expandedIndices[numInvocations*readNdx];
900 std::fill(dst, dst+numInvocations, readIndices[readNdx]);
901 }
902
903 for (int readNdx = 0; readNdx < numReads; readNdx++)
904 inputs.push_back(&expandedIndices[readNdx*numInvocations]);
905 }
906 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
907 uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numReads, &readIndices[0]);
908
909 for (int readNdx = 0; readNdx < numReads; readNdx++)
910 outputs.push_back(&outValues[readNdx*numInvocations]);
911
912 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
913
914 shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
915 }
916
917 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
918
919 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
920 {
921 for (int readNdx = 0; readNdx < numReads; readNdx++)
922 {
923 const deUint32 refValue = inValues[readIndices[readNdx]];
924 const deUint32 resValue = outValues[readNdx*numInvocations + invocationNdx];
925
926 if (refValue != resValue)
927 {
928 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
929 << ", read " << readNdx << ": expected "
930 << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
931 << TestLog::EndMessage;
932
933 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
934 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
935 }
936 }
937 }
938
939 return STOP;
940 }
941
942 class AtomicCounterIndexingCase : public TestCase
943 {
944 public:
945 AtomicCounterIndexingCase (Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType);
946 ~AtomicCounterIndexingCase (void);
947
948 void init (void);
949 IterateResult iterate (void);
950
951 private:
952 AtomicCounterIndexingCase (const AtomicCounterIndexingCase&);
953 AtomicCounterIndexingCase& operator= (const AtomicCounterIndexingCase&);
954
955 void getShaderSpec (ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const;
956
957 const IndexExprType m_indexExprType;
958 const glu::ShaderType m_shaderType;
959 deInt32 m_numCounters;
960 };
961
AtomicCounterIndexingCase::AtomicCounterIndexingCase (Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType)
963 : TestCase (context, name, description)
964 , m_indexExprType (indexExprType)
965 , m_shaderType (shaderType)
966 , m_numCounters (0)
967 {
968 }
969
AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
971 {
972 }
973
deUint32 getMaxAtomicCounterEnum (glu::ShaderType type)
975 {
976 switch (type)
977 {
978 case glu::SHADERTYPE_VERTEX: return GL_MAX_VERTEX_ATOMIC_COUNTERS;
979 case glu::SHADERTYPE_FRAGMENT: return GL_MAX_FRAGMENT_ATOMIC_COUNTERS;
980 case glu::SHADERTYPE_GEOMETRY: return GL_MAX_GEOMETRY_ATOMIC_COUNTERS;
981 case glu::SHADERTYPE_COMPUTE: return GL_MAX_COMPUTE_ATOMIC_COUNTERS;
982 case glu::SHADERTYPE_TESSELLATION_CONTROL: return GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS;
983 case glu::SHADERTYPE_TESSELLATION_EVALUATION: return GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS;
984
985 default:
986 DE_FATAL("Unknown shader type");
987 return -1;
988 }
989 }
990
void AtomicCounterIndexingCase::init (void)
992 {
993 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
994 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
995
996 if (!supportsES32)
997 {
998 if (m_shaderType == SHADERTYPE_GEOMETRY)
999 TCU_CHECK_AND_THROW(NotSupportedError,
1000 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
1001 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
1002
1003 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
1004 TCU_CHECK_AND_THROW(NotSupportedError,
1005 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
1006 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
1007
1008 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1009 TCU_CHECK_AND_THROW(NotSupportedError,
1010 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
1011 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of atomic counters.");
1012 }
1013
1014 {
1015 m_context.getRenderContext().getFunctions().getIntegerv(getMaxAtomicCounterEnum(m_shaderType),
1016 &m_numCounters);
1017
1018 if (m_numCounters < 1)
1019 {
1020 const string message = "Atomic counters not supported in " + string(glu::getShaderTypeName(m_shaderType)) + " shader";
1021 TCU_THROW(NotSupportedError, message.c_str());
1022 }
1023 }
1024 }
1025
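// Builds the shader body for the counter operations: counters are declared as
// "layout(binding = 0) uniform atomic_uint counter[numCounters];" and each op is
// "resultN = atomicCounterIncrement(counter[<index expr>]);".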
void AtomicCounterIndexingCase::getShaderSpec (ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const
1027 {
1028 const char* indicesPrefix = "index";
1029 const char* resultPrefix = "result";
1030 const bool supportsES32 = contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
1031 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
1032 std::ostringstream global;
1033 std::ostringstream code;
1034
1035 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1036 global << "#extension GL_EXT_gpu_shader5 : require\n";
1037
1038 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1039 global << "const highp int indexBase = 1;\n";
1040
1041 global <<
1042 "layout(binding = 0) uniform atomic_uint counter[" << numCounters << "];\n";
1043
1044 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1045 {
1046 for (int opNdx = 0; opNdx < numOps; opNdx++)
1047 {
1048 const string varName = indicesPrefix + de::toString(opNdx);
1049 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
1050 }
1051 }
1052 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1053 declareUniformIndexVars(global, indicesPrefix, numOps);
1054
1055 for (int opNdx = 0; opNdx < numOps; opNdx++)
1056 {
1057 const string varName = resultPrefix + de::toString(opNdx);
1058 spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
1059 }
1060
1061 for (int opNdx = 0; opNdx < numOps; opNdx++)
1062 {
1063 code << resultPrefix << opNdx << " = atomicCounterIncrement(counter[";
1064
1065 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1066 code << opIndices[opNdx];
1067 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1068 code << "indexBase + " << (opIndices[opNdx]-1);
1069 else
1070 code << indicesPrefix << opNdx;
1071
1072 code << "]);\n";
1073 }
1074
1075 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
1076 spec->globalDeclarations = global.str();
1077 spec->source = code.str();
1078 }
1079
AtomicCounterIndexingCase::IterateResult AtomicCounterIndexingCase::iterate (void)
1081 {
1082 const RenderContext& renderCtx = m_context.getRenderContext();
1083 const glw::Functions& gl = renderCtx.getFunctions();
1084 const Buffer counterBuffer (renderCtx);
1085
1086 const int numInvocations = 32;
1087 const int numOps = 4;
1088 vector<int> opIndices (numOps);
1089 vector<deUint32> outValues (numInvocations*numOps);
1090 ShaderSpec shaderSpec;
1091 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1092
1093 for (int opNdx = 0; opNdx < numOps; opNdx++)
1094 opIndices[opNdx] = rnd.getInt(0, numOps-1);
1095
1096 getShaderSpec(&shaderSpec, m_numCounters, numOps, &opIndices[0], m_context.getRenderContext());
1097
1098 {
1099 const BufferVector buffers (renderCtx, m_numCounters);
1100 ShaderExecutorPtr shaderExecutor (createExecutor(renderCtx, m_shaderType, shaderSpec));
1101 vector<int> expandedIndices;
1102 vector<void*> inputs;
1103 vector<void*> outputs;
1104
1105 m_testCtx.getLog() << *shaderExecutor;
1106
1107 if (!shaderExecutor->isOk())
1108 TCU_FAIL("Compile failed");
1109
1110 {
1111 const int bufSize = getProgramResourceInt(gl, shaderExecutor->getProgram(), GL_ATOMIC_COUNTER_BUFFER, 0, GL_BUFFER_DATA_SIZE);
1112 const int maxNdx = maxElement(opIndices);
1113 std::vector<deUint8> emptyData (m_numCounters*4, 0);
1114
1115 if (bufSize < (maxNdx+1)*4)
1116 TCU_FAIL((string("GL reported invalid buffer size " + de::toString(bufSize)).c_str()));
1117
1118 gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, *counterBuffer);
1119 gl.bufferData(GL_ATOMIC_COUNTER_BUFFER, (glw::GLsizeiptr)emptyData.size(), &emptyData[0], GL_STATIC_DRAW);
1120 gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, *counterBuffer);
1121 GLU_EXPECT_NO_ERROR(gl.getError(), "Atomic counter buffer initialization failed");
1122 }
1123
1124 shaderExecutor->useProgram();
1125
1126 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1127 {
1128 expandedIndices.resize(numInvocations * opIndices.size());
1129
1130 for (int opNdx = 0; opNdx < numOps; opNdx++)
1131 {
1132 int* dst = &expandedIndices[numInvocations*opNdx];
1133 std::fill(dst, dst+numInvocations, opIndices[opNdx]);
1134 }
1135
1136 for (int opNdx = 0; opNdx < numOps; opNdx++)
1137 inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1138 }
1139 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1140 uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numOps, &opIndices[0]);
1141
1142 for (int opNdx = 0; opNdx < numOps; opNdx++)
1143 outputs.push_back(&outValues[opNdx*numInvocations]);
1144
1145 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
1146
1147 shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
1148 }
1149
1150 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
1151
1152 {
1153 vector<int> numHits (m_numCounters, 0); // Number of hits per counter.
1154 vector<deUint32> counterValues (m_numCounters);
1155 vector<vector<bool> > counterMasks (m_numCounters);
1156
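// Each invocation increments every counter selected by an op exactly once, so counter N is
// expected to end at numHits[N]*numInvocations, and every pre-increment value returned by
// atomicCounterIncrement() must be unique within its counter.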
1157 for (int opNdx = 0; opNdx < numOps; opNdx++)
1158 numHits[opIndices[opNdx]] += 1;
1159
1160 // Read counter values
1161 {
1162 const void* mapPtr = DE_NULL;
1163
1164 try
1165 {
1166 mapPtr = gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, m_numCounters*4, GL_MAP_READ_BIT);
1167 GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange(GL_ATOMIC_COUNTER_BUFFER)");
1168 TCU_CHECK(mapPtr);
1169 std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + m_numCounters, &counterValues[0]);
1170 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1171 }
1172 catch (...)
1173 {
1174 if (mapPtr)
1175 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1176 throw;
1177 }
1178 }
1179
1180 // Verify counter values
1181 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1182 {
1183 const deUint32 refCount = (deUint32)(numHits[counterNdx]*numInvocations);
1184 const deUint32 resCount = counterValues[counterNdx];
1185
1186 if (refCount != resCount)
1187 {
1188 m_testCtx.getLog() << TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1189 << ", expected " << refCount
1190 << TestLog::EndMessage;
1191
1192 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1193 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid atomic counter value");
1194 }
1195 }
1196
1197 // Allocate bitmasks - one bit per each valid result value
1198 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1199 {
1200 const int counterValue = numHits[counterNdx]*numInvocations;
1201 counterMasks[counterNdx].resize(counterValue, false);
1202 }
1203
1204 // Verify result values from shaders
1205 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1206 {
1207 for (int opNdx = 0; opNdx < numOps; opNdx++)
1208 {
1209 const int counterNdx = opIndices[opNdx];
1210 const deUint32 resValue = outValues[opNdx*numInvocations + invocationNdx];
1211 const bool rangeOk = de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1212 const bool notSeen = rangeOk && !counterMasks[counterNdx][resValue];
1213 const bool isOk = rangeOk && notSeen;
1214
1215 if (!isOk)
1216 {
1217 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
1218 << ", op " << opNdx << ": got invalid result value "
1219 << resValue
1220 << TestLog::EndMessage;
1221
1222 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1223 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
1224 }
1225 else
1226 {
1227 // Mark as used - no other invocation should see this value from same counter.
1228 counterMasks[counterNdx][resValue] = true;
1229 }
1230 }
1231 }
1232
1233 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1234 {
1235 // Consistency check - all masks should be 1 now
1236 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1237 {
1238 for (vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1239 TCU_CHECK_INTERNAL(*i);
1240 }
1241 }
1242 }
1243
1244 return STOP;
1245 }
1246
1247 } // anonymous
1248
OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (Context& context)
1250 : TestCaseGroup(context, "opaque_type_indexing", "Opaque Type Indexing Tests")
1251 {
1252 }
1253
OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
1255 {
1256 }
1257
void OpaqueTypeIndexingTests::init (void)
1259 {
1260 static const struct
1261 {
1262 IndexExprType type;
1263 const char* name;
1264 const char* description;
1265 } indexingTypes[] =
1266 {
1267 { INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal", "Indexing by constant literal" },
1268 { INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression", "Indexing by constant expression" },
1269 { INDEX_EXPR_TYPE_UNIFORM, "uniform", "Indexing by uniform value" },
1270 { INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform", "Indexing by dynamically uniform expression" }
1271 };
1272
1273 static const struct
1274 {
1275 ShaderType type;
1276 const char* name;
1277 } shaderTypes[] =
1278 {
1279 { SHADERTYPE_VERTEX, "vertex" },
1280 { SHADERTYPE_FRAGMENT, "fragment" },
1281 { SHADERTYPE_COMPUTE, "compute" },
1282 { SHADERTYPE_GEOMETRY, "geometry" },
1283 { SHADERTYPE_TESSELLATION_CONTROL, "tessellation_control" },
1284 { SHADERTYPE_TESSELLATION_EVALUATION, "tessellation_evaluation" }
1285 };
1286
1287 // .sampler
1288 {
1289 static const DataType samplerTypes[] =
1290 {
1291 // \note 1D images will be added by a later extension.
1292 // TYPE_SAMPLER_1D,
1293 TYPE_SAMPLER_2D,
1294 TYPE_SAMPLER_CUBE,
1295 TYPE_SAMPLER_2D_ARRAY,
1296 TYPE_SAMPLER_3D,
1297 // TYPE_SAMPLER_1D_SHADOW,
1298 TYPE_SAMPLER_2D_SHADOW,
1299 TYPE_SAMPLER_CUBE_SHADOW,
1300 TYPE_SAMPLER_2D_ARRAY_SHADOW,
1301 // TYPE_INT_SAMPLER_1D,
1302 TYPE_INT_SAMPLER_2D,
1303 TYPE_INT_SAMPLER_CUBE,
1304 TYPE_INT_SAMPLER_2D_ARRAY,
1305 TYPE_INT_SAMPLER_3D,
1306 // TYPE_UINT_SAMPLER_1D,
1307 TYPE_UINT_SAMPLER_2D,
1308 TYPE_UINT_SAMPLER_CUBE,
1309 TYPE_UINT_SAMPLER_2D_ARRAY,
1310 TYPE_UINT_SAMPLER_3D,
1311 TYPE_SAMPLER_CUBE_ARRAY,
1312 TYPE_SAMPLER_CUBE_ARRAY_SHADOW,
1313 TYPE_INT_SAMPLER_CUBE_ARRAY,
1314 TYPE_UINT_SAMPLER_CUBE_ARRAY
1315 };
1316
1317 tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
1318 addChild(samplerGroup);
1319
1320 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1321 {
1322 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1323 tcu::TestCaseGroup* const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
1324 samplerGroup->addChild(indexGroup);
1325
1326 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1327 {
1328 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1329 tcu::TestCaseGroup* const shaderGroup = new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
1330 indexGroup->addChild(shaderGroup);
1331
1332 for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
1333 {
1334 const DataType samplerType = samplerTypes[samplerTypeNdx];
1335 const char* samplerName = getDataTypeName(samplerType);
1336 const string caseName = de::toLower(samplerName);
1337
1338 shaderGroup->addChild(new SamplerIndexingCase(m_context, caseName.c_str(), "", shaderType, samplerType, indexExprType));
1339 }
1340 }
1341 }
1342 }
1343
1344 // .ubo / .ssbo / .atomic_counter
1345 {
1346 tcu::TestCaseGroup* const uboGroup = new tcu::TestCaseGroup(m_testCtx, "ubo", "Uniform Block Instance Array Indexing Tests");
1347 tcu::TestCaseGroup* const ssboGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo", "Buffer Block Instance Array Indexing Tests");
1348 tcu::TestCaseGroup* const acGroup = new tcu::TestCaseGroup(m_testCtx, "atomic_counter", "Atomic Counter Array Indexing Tests");
1349 addChild(uboGroup);
1350 addChild(ssboGroup);
1351 addChild(acGroup);
1352
1353 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1354 {
1355 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1356 const char* indexExprName = indexingTypes[indexTypeNdx].name;
1357 const char* indexExprDesc = indexingTypes[indexTypeNdx].description;
1358
1359 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1360 {
1361 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1362 const string name = string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
1363
1364 uboGroup->addChild (new BlockArrayIndexingCase (m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_UNIFORM, indexExprType, shaderType));
1365 acGroup->addChild (new AtomicCounterIndexingCase (m_context, name.c_str(), indexExprDesc, indexExprType, shaderType));
1366
1367 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1368 ssboGroup->addChild (new BlockArrayIndexingCase (m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_BUFFER, indexExprType, shaderType));
1369 }
1370 }
1371 }
1372 }
1373
1374 } // Functional
1375 } // gles31
1376 } // deqp
1377