1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 Google Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief ShaderLibrary Vulkan implementation
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktShaderLibrary.hpp"
25 #include "vktTestCase.hpp"
26
27 #include "vkPrograms.hpp"
28 #include "vkRef.hpp"
29 #include "vkRefUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkTypeUtil.hpp"
34
35 #include "gluShaderLibrary.hpp"
36 #include "gluShaderUtil.hpp"
37
38 #include "tcuStringTemplate.hpp"
39 #include "tcuTexture.hpp"
40 #include "tcuTestLog.hpp"
41 #include "tcuVector.hpp"
42 #include "tcuVectorUtil.hpp"
43
44 #include "deStringUtil.hpp"
45 #include "deArrayUtil.hpp"
46 #include "deMemory.h"
47
48 #include <sstream>
49 #include <map>
50
51 namespace vkt
52 {
53
54 using std::string;
55 using std::vector;
56 using std::map;
57 using std::pair;
58 using std::ostringstream;
59
60 using de::MovePtr;
61 using de::UniquePtr;
62
63 using glu::ShaderType;
64 using glu::ProgramSources;
65 using glu::DataType;
66
67 using glu::sl::ShaderCaseSpecification;
68 using glu::sl::ProgramSpecializationParams;
69 using glu::sl::RequiredExtension;
70 using glu::sl::Value;
71 using glu::sl::ValueBlock;
72
73 using tcu::TestStatus;
74 using tcu::StringTemplate;
75 using tcu::Vec2;
76 using tcu::ConstPixelBufferAccess;
77 using tcu::TextureFormat;
78 using tcu::TestLog;
79
80 using vk::SourceCollections;
81 using vk::Move;
82 using vk::Unique;
83
84 namespace
85 {
86
87 enum
88 {
89 REFERENCE_UNIFORM_BINDING = 0,
90 USER_UNIFORM_BINDING = 1
91 };
92
string getShaderName (ShaderType shaderType, size_t progNdx)
94 {
95 ostringstream str;
96 str << glu::getShaderTypeName(shaderType);
97 if (progNdx > 0)
98 str << "_" << progNdx;
99 return str.str();
100 }
101
void genUniformBlock (ostringstream& out, const string& blockName, const string& instanceName, int setNdx, int bindingNdx, const vector<Value>& uniforms)
103 {
104 out << "layout(";
105
106 if (setNdx != 0)
107 out << "set = " << setNdx << ", ";
108
109 out << "binding = " << bindingNdx << ", std140) uniform " << blockName << "\n"
110 << "{\n";
111
112 for (vector<Value>::const_iterator val = uniforms.begin(); val != uniforms.end(); ++val)
113 out << "\t" << glu::declare(val->type, val->name, 1) << ";\n";
114
115 out << "}";
116
117 if (!instanceName.empty())
118 out << " " << instanceName;
119
120 out << ";\n";
121 }
122
void declareReferenceBlock (ostringstream& out, const ValueBlock& valueBlock)
124 {
125 if (!valueBlock.outputs.empty())
126 genUniformBlock(out, "Reference", "ref", 0, REFERENCE_UNIFORM_BINDING, valueBlock.outputs);
127 }
128
void declareUniforms (ostringstream& out, const ValueBlock& valueBlock)
130 {
131 if (!valueBlock.uniforms.empty())
132 genUniformBlock(out, "Uniforms", "", 0, USER_UNIFORM_BINDING, valueBlock.uniforms);
133 }
134
DataType getTransportType (DataType valueType)
136 {
137 if (isDataTypeBoolOrBVec(valueType))
138 return glu::getDataTypeUintVec(getDataTypeScalarSize(valueType));
139 else
140 return valueType;
141 }
142
int getNumTransportLocations (DataType valueType)
144 {
145 return isDataTypeMatrix(valueType) ? getDataTypeMatrixNumColumns(valueType) : 1;
146 }
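
// Note on the two helpers above: booleans have no native attribute/varying representation,
// so they are transported as unsigned integer vectors of the same width (bool -> uint,
// bvec3 -> uvec3) and converted back in the receiving stage, while matrices consume one
// location per column (e.g. mat4 -> 4 locations). All other types keep their own type and
// use a single location.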
147
// This function builds a matching vertex shader for a 'both' case, when
// the fragment shader is being tested.
// We need to build attributes and varyings for each 'input'.
string genVertexShader (const ShaderCaseSpecification& spec)
152 {
153 ostringstream res;
154 int curInputLoc = 0;
155 int curOutputLoc = 0;
156
157 res << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
158
159 // Declarations (position + attribute/varying for each input).
160 res << "precision highp float;\n";
161 res << "precision highp int;\n";
162 res << "\n";
163 res << "layout(location = 0) in highp vec4 dEQP_Position;\n";
164 curInputLoc += 1;
165
166 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
167 {
168 const Value& val = spec.values.inputs[ndx];
169 const DataType valueType = val.type.getBasicType();
170 const DataType transportType = getTransportType(valueType);
171 const char* const transportTypeStr = getDataTypeName(transportType);
172 const int numLocs = getNumTransportLocations(valueType);
173
174 res << "layout(location = " << curInputLoc << ") in " << transportTypeStr << " a_" << val.name << ";\n";
175 res << "layout(location = " << curOutputLoc << ") flat out " << transportTypeStr << " " << (transportType != valueType ? "v_" : "") << val.name << ";\n";
176
177 curInputLoc += numLocs;
178 curOutputLoc += numLocs;
179 }
180 res << "\n";
181
182 // Main function.
183 // - gl_Position = dEQP_Position;
184 // - for each input: write attribute directly to varying
185 res << "void main()\n";
186 res << "{\n";
187 res << " gl_Position = dEQP_Position;\n";
188 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
189 {
190 const Value& val = spec.values.inputs[ndx];
191 const string& name = val.name;
192
193 res << " " << (getTransportType(val.type.getBasicType()) != val.type.getBasicType() ? "v_" : "")
194 << name << " = a_" << name << ";\n";
195 }
196
197 res << "}\n";
198 return res.str();
199 }
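
// Rough sketch of the output for a case with a single float input "in0" (hypothetical):
//
//     <GLSL version declaration for spec.targetVersion>
//     precision highp float;
//     precision highp int;
//
//     layout(location = 0) in highp vec4 dEQP_Position;
//     layout(location = 1) in float a_in0;
//     layout(location = 0) flat out float in0;
//
//     void main()
//     {
//         gl_Position = dEQP_Position;
//         in0 = a_in0;
//     }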
200
void genCompareOp (ostringstream& output, const char* dstVec4Var, const ValueBlock& valueBlock, const char* checkVarName)
202 {
203 bool isFirstOutput = true;
204
205 for (size_t ndx = 0; ndx < valueBlock.outputs.size(); ndx++)
206 {
207 const Value& val = valueBlock.outputs[ndx];
208
209 // Check if we're only interested in one variable (then skip if not the right one).
210 if (checkVarName && val.name != checkVarName)
211 continue;
212
213 // Prefix.
214 if (isFirstOutput)
215 {
216 output << "bool RES = ";
217 isFirstOutput = false;
218 }
219 else
220 output << "RES = RES && ";
221
222 // Generate actual comparison.
223 if (getDataTypeScalarType(val.type.getBasicType()) == glu::TYPE_FLOAT)
224 output << "isOk(" << val.name << ", ref." << val.name << ", 0.05);\n";
225 else
226 output << "isOk(" << val.name << ", ref." << val.name << ");\n";
227 }
228
229 if (isFirstOutput)
230 output << dstVec4Var << " = vec4(1.0);\n";
231 else
232 output << dstVec4Var << " = vec4(RES, RES, RES, 1.0);\n";
233 }
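
// For a single float output "out0" (hypothetical), the generated code would be roughly:
//
//     bool RES = isOk(out0, ref.out0, 0.05);
//     dEQP_FragColor = vec4(RES, RES, RES, 1.0);
//
// Floating-point outputs are compared against the reference block with a fixed threshold,
// other types exactly; the combined result is broadcast to the destination color.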
234
string genFragmentShader (const ShaderCaseSpecification& spec)
236 {
237 ostringstream shader;
238 ostringstream setup;
239 int curInLoc = 0;
240
241 shader << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
242
243 shader << "precision highp float;\n";
244 shader << "precision highp int;\n";
245 shader << "\n";
246
247 shader << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
248 shader << "\n";
249
250 genCompareFunctions(shader, spec.values, false);
251 shader << "\n";
252
253 // Declarations (varying, reference for each output).
254 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
255 {
256 const Value& val = spec.values.outputs[ndx];
257 const DataType valueType = val.type.getBasicType();
258 const char* const valueTypeStr = getDataTypeName(valueType);
259 const DataType transportType = getTransportType(valueType);
260 const char* const transportTypeStr = getDataTypeName(transportType);
261 const int numLocs = getNumTransportLocations(valueType);
262
263 shader << "layout(location = " << curInLoc << ") flat in " << transportTypeStr << " " << (valueType != transportType ? "v_" : "") << val.name << ";\n";
264
265 if (valueType != transportType)
266 setup << " " << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
267
268 curInLoc += numLocs;
269 }
270
271 declareReferenceBlock(shader, spec.values);
272
273 shader << "\n";
274 shader << "void main()\n";
275 shader << "{\n";
276
277 shader << setup.str();
278
279 shader << " ";
280 genCompareOp(shader, "dEQP_FragColor", spec.values, DE_NULL);
281
282 shader << "}\n";
283 return shader.str();
284 }
285
286 // Specialize a shader for the vertex shader test case.
string specializeVertexShader (const ShaderCaseSpecification& spec, const string& src)
288 {
289 ostringstream decl;
290 ostringstream setup;
291 ostringstream output;
292 int curInputLoc = 0;
293 int curOutputLoc = 0;
294
295 // generated from "both" case
296 DE_ASSERT(spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY);
297
298 // Output (write out position).
299 output << "gl_Position = dEQP_Position;\n";
300
301 // Declarations (position + attribute for each input, varying for each output).
302 decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
303 curInputLoc += 1;
304
305 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
306 {
307 const Value& val = spec.values.inputs[ndx];
308 const DataType valueType = val.type.getBasicType();
309 const char* const valueTypeStr = getDataTypeName(valueType);
310 const DataType transportType = getTransportType(valueType);
311 const char* const transportTypeStr = getDataTypeName(transportType);
312 const int numLocs = getNumTransportLocations(valueType);
313
314 decl << "layout(location = " << curInputLoc << ") in ";
315
316 curInputLoc += numLocs;
317
318 if (valueType == transportType)
319 decl << transportTypeStr << " " << val.name << ";\n";
320 else
321 {
322 decl << transportTypeStr << " a_" << val.name << ";\n";
323 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
324 }
325 }
326
327 declareUniforms(decl, spec.values);
328
329 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
330 {
331 const Value& val = spec.values.outputs[ndx];
332 const DataType valueType = val.type.getBasicType();
333 const char* const valueTypeStr = getDataTypeName(valueType);
334 const DataType transportType = getTransportType(valueType);
335 const char* const transportTypeStr = getDataTypeName(transportType);
336 const int numLocs = getNumTransportLocations(valueType);
337
338 decl << "layout(location = " << curOutputLoc << ") flat out ";
339
340 curOutputLoc += numLocs;
341
342 if (valueType == transportType)
343 decl << transportTypeStr << " " << val.name << ";\n";
344 else
345 {
346 decl << transportTypeStr << " v_" << val.name << ";\n";
347 decl << valueTypeStr << " " << val.name << ";\n";
348
349 output << "v_" << val.name << " = " << transportTypeStr << "(" << val.name << ");\n";
350 }
351 }
352
353 // Shader specialization.
354 map<string, string> params;
355 params.insert(pair<string, string>("DECLARATIONS", decl.str()));
356 params.insert(pair<string, string>("SETUP", setup.str()));
357 params.insert(pair<string, string>("OUTPUT", output.str()));
358 params.insert(pair<string, string>("POSITION_FRAG_COLOR", "gl_Position"));
359
360 StringTemplate tmpl (src);
361 const string baseSrc = tmpl.specialize(params);
362 const string withExt = injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_VERTEX);
363
364 return withExt;
365 }
366
367 // Specialize a shader for the fragment shader test case.
string specializeFragmentShader (const ShaderCaseSpecification& spec, const string& src)
369 {
370 ostringstream decl;
371 ostringstream setup;
372 ostringstream output;
373 int curInputLoc = 0;
374
375 // generated from "both" case
376 DE_ASSERT(spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY);
377
378 genCompareFunctions(decl, spec.values, false);
379 genCompareOp(output, "dEQP_FragColor", spec.values, DE_NULL);
380
381 decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
382
383 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
384 {
385 const Value& val = spec.values.inputs[ndx];
386 const DataType valueType = val.type.getBasicType();
387 const char* const valueTypeStr = getDataTypeName(valueType);
388 const DataType transportType = getTransportType(valueType);
389 const char* const transportTypeStr = getDataTypeName(transportType);
390 const int numLocs = getNumTransportLocations(valueType);
391
392 decl << "layout(location = " << curInputLoc << ") flat in ";
393
394 curInputLoc += numLocs;
395
396 if (valueType == transportType)
397 decl << transportTypeStr << " " << val.name << ";\n";
398 else
399 {
400 decl << transportTypeStr << " v_" << val.name << ";\n";
401 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
402 }
403 }
404
405 declareUniforms(decl, spec.values);
406 declareReferenceBlock(decl, spec.values);
407
408 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
409 {
410 const Value& val = spec.values.outputs[ndx];
411 const DataType basicType = val.type.getBasicType();
412 const char* const refTypeStr = getDataTypeName(basicType);
413
414 decl << refTypeStr << " " << val.name << ";\n";
415 }
416
417 // Shader specialization.
418 map<string, string> params;
419 params.insert(pair<string, string>("DECLARATIONS", decl.str()));
420 params.insert(pair<string, string>("SETUP", setup.str()));
421 params.insert(pair<string, string>("OUTPUT", output.str()));
422 params.insert(pair<string, string>("POSITION_FRAG_COLOR", "dEQP_FragColor"));
423
424 StringTemplate tmpl (src);
425 const string baseSrc = tmpl.specialize(params);
426 const string withExt = injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_FRAGMENT);
427
428 return withExt;
429 }
430
map<string, string> generateVertexSpecialization (const ProgramSpecializationParams& specParams)
432 {
433 ostringstream decl;
434 ostringstream setup;
435 map<string, string> params;
436 int curInputLoc = 0;
437
438 decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
439 curInputLoc += 1;
440
441 for (size_t ndx = 0; ndx < specParams.caseSpec.values.inputs.size(); ndx++)
442 {
443 const Value& val = specParams.caseSpec.values.inputs[ndx];
444 const DataType valueType = val.type.getBasicType();
445 const char* const valueTypeStr = getDataTypeName(valueType);
446 const DataType transportType = getTransportType(valueType);
447 const char* const transportTypeStr = getDataTypeName(transportType);
448 const int numLocs = getNumTransportLocations(valueType);
449
450 decl << "layout(location = " << curInputLoc << ") in ";
451
452 curInputLoc += numLocs;
453
454 if (valueType == transportType)
455 decl << transportTypeStr << " " << val.name << ";\n";
456 else
457 {
458 decl << transportTypeStr << " a_" << val.name << ";\n";
459 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
460 }
461 }
462
463 declareUniforms(decl, specParams.caseSpec.values);
464
465 params.insert(pair<string, string>("VERTEX_DECLARATIONS", decl.str()));
466 params.insert(pair<string, string>("VERTEX_SETUP", setup.str()));
467 params.insert(pair<string, string>("VERTEX_OUTPUT", string("gl_Position = dEQP_Position;\n")));
468
469 return params;
470 }
471
map<string, string> generateFragmentSpecialization (const ProgramSpecializationParams& specParams)
473 {
474 ostringstream decl;
475 ostringstream output;
476 map<string, string> params;
477
478 genCompareFunctions(decl, specParams.caseSpec.values, false);
479 genCompareOp(output, "dEQP_FragColor", specParams.caseSpec.values, DE_NULL);
480
481 decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
482
483 for (size_t ndx = 0; ndx < specParams.caseSpec.values.outputs.size(); ndx++)
484 {
485 const Value& val = specParams.caseSpec.values.outputs[ndx];
486 const char* const refTypeStr = getDataTypeName(val.type.getBasicType());
487
488 decl << refTypeStr << " " << val.name << ";\n";
489 }
490
491 declareReferenceBlock(decl, specParams.caseSpec.values);
492 declareUniforms(decl, specParams.caseSpec.values);
493
494 params.insert(pair<string, string>("FRAGMENT_DECLARATIONS", decl.str()));
495 params.insert(pair<string, string>("FRAGMENT_OUTPUT", output.str()));
496 params.insert(pair<string, string>("FRAG_COLOR", "dEQP_FragColor"));
497
498 return params;
499 }
500
map<string, string> generateGeometrySpecialization (const ProgramSpecializationParams& specParams)
502 {
503 ostringstream decl;
504 map<string, string> params;
505
506 decl << "layout (triangles) in;\n";
507 decl << "layout (triangle_strip, max_vertices=3) out;\n";
508 decl << "\n";
509
510 declareUniforms(decl, specParams.caseSpec.values);
511
512 params.insert(pair<string, string>("GEOMETRY_DECLARATIONS", decl.str()));
513
514 return params;
515 }
516
map<string, string> generateTessControlSpecialization (const ProgramSpecializationParams& specParams)
518 {
519 ostringstream decl;
520 ostringstream output;
521 map<string, string> params;
522
523 decl << "layout (vertices=3) out;\n";
524 decl << "\n";
525
526 declareUniforms(decl, specParams.caseSpec.values);
527
528 output << "gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
529 "gl_TessLevelInner[0] = 2.0;\n"
530 "gl_TessLevelInner[1] = 2.0;\n"
531 "gl_TessLevelOuter[0] = 2.0;\n"
532 "gl_TessLevelOuter[1] = 2.0;\n"
533 "gl_TessLevelOuter[2] = 2.0;\n"
534 "gl_TessLevelOuter[3] = 2.0;";
535
536 params.insert(pair<string, string>("TESSELLATION_CONTROL_DECLARATIONS", decl.str()));
537 params.insert(pair<string, string>("TESSELLATION_CONTROL_OUTPUT", output.str()));
538 params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES", de::toString(specParams.maxPatchVertices)));
539
540 return params;
541 }
542
map<string, string> generateTessEvalSpecialization (const ProgramSpecializationParams& specParams)
544 {
545 ostringstream decl;
546 ostringstream output;
547 map<string, string> params;
548
549 decl << "layout (triangles) in;\n";
550 decl << "\n";
551
552 declareUniforms(decl, specParams.caseSpec.values);
553
554 output << "gl_Position = gl_TessCoord[0] * gl_in[0].gl_Position + gl_TessCoord[1] * gl_in[1].gl_Position + gl_TessCoord[2] * gl_in[2].gl_Position;\n";
555
556 params.insert(pair<string, string>("TESSELLATION_EVALUATION_DECLARATIONS", decl.str()));
557 params.insert(pair<string, string>("TESSELLATION_EVALUATION_OUTPUT", output.str()));
558 params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES", de::toString(specParams.maxPatchVertices)));
559
560 return params;
561 }
562
void specializeShaderSources (ProgramSources& dst,
564 const ProgramSources& src,
565 const ProgramSpecializationParams& specParams,
566 glu::ShaderType shaderType,
567 map<string, string> (*specializationGenerator) (const ProgramSpecializationParams& specParams))
568 {
569 if (!src.sources[shaderType].empty())
570 {
571 const map<string, string> tmplParams = specializationGenerator(specParams);
572
573 for (size_t ndx = 0; ndx < src.sources[shaderType].size(); ++ndx)
574 {
575 const StringTemplate tmpl (src.sources[shaderType][ndx]);
576 const string baseGLSLCode = tmpl.specialize(tmplParams);
577 const string sourceWithExts = injectExtensionRequirements(baseGLSLCode, specParams.requiredExtensions, shaderType);
578
579 dst << glu::ShaderSource(shaderType, sourceWithExts);
580 }
581 }
582 }
583
void specializeProgramSources (glu::ProgramSources& dst,
585 const glu::ProgramSources& src,
586 const ProgramSpecializationParams& specParams)
587 {
588 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_VERTEX, generateVertexSpecialization);
589 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_FRAGMENT, generateFragmentSpecialization);
590 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_GEOMETRY, generateGeometrySpecialization);
591 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_CONTROL, generateTessControlSpecialization);
592 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_EVALUATION, generateTessEvalSpecialization);
593
594 dst << glu::ProgramSeparable(src.separable);
595 }
596
597 struct ValueBufferLayout
598 {
599 struct Entry
600 {
601 int offset;
602 int vecStride; //! Applies to matrices only
603
Entry (void) : offset(0), vecStride(0) {}
Entry (int offset_, int vecStride_) : offset(offset_), vecStride(vecStride_) {}
606 };
607
608 vector<Entry> entries;
609 int size;
610
ValueBufferLayout (void) : size(0) {}
612 };
613
ValueBufferLayout computeStd140Layout (const vector<Value>& values)
615 {
616 ValueBufferLayout layout;
617
618 layout.entries.resize(values.size());
619
620 for (size_t ndx = 0; ndx < values.size(); ++ndx)
621 {
622 const DataType basicType = values[ndx].type.getBasicType();
623 const bool isMatrix = isDataTypeMatrix(basicType);
624 const int numVecs = isMatrix ? getDataTypeMatrixNumColumns(basicType) : 1;
625 const DataType vecType = isMatrix ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
626 const int vecSize = getDataTypeScalarSize(vecType);
627 const int alignment = ((isMatrix || vecSize == 3) ? 4 : vecSize)*int(sizeof(deUint32));
628
629 layout.size = deAlign32(layout.size, alignment);
630 layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
631 layout.size += alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
632 }
633
634 return layout;
635 }
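
// Worked example of the std140 rules above (assuming a value list of vec3 followed by mat2):
// the vec3 gets 16-byte alignment and lands at offset 0 (12 bytes of data); the mat2 is laid
// out as two vec2 columns with a 16-byte column stride (matrix strides are always rounded up
// to vec4 size in std140), so it lands at offset 16 and the total size is 16 + 16 + 8 = 40.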
636
ValueBufferLayout computeStd430Layout (const vector<Value>& values)
638 {
639 ValueBufferLayout layout;
640
641 layout.entries.resize(values.size());
642
643 for (size_t ndx = 0; ndx < values.size(); ++ndx)
644 {
645 const DataType basicType = values[ndx].type.getBasicType();
646 const int numVecs = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
647 const DataType vecType = isDataTypeMatrix(basicType) ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
648 const int vecSize = getDataTypeScalarSize(vecType);
649 const int alignment = (vecSize == 3 ? 4 : vecSize)*int(sizeof(deUint32));
650
651 layout.size = deAlign32(layout.size, alignment);
652 layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
653 layout.size += alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
654 }
655
656 return layout;
657 }
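
// This differs from computeStd140Layout() only for matrices: column strides follow the
// column type instead of being rounded up to vec4. With the same vec3 + mat2 example, the
// mat2 columns get an 8-byte stride and the total size drops to 16 + 8 + 8 = 32 bytes.
// The std430-style layout is used for the vertex input buffer, the std140 one for the
// reference and uniform blocks.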
658
void copyToLayout (void* dst, const ValueBufferLayout::Entry& entryLayout, const Value& value, int arrayNdx)
660 {
661 const DataType basicType = value.type.getBasicType();
662 const int scalarSize = getDataTypeScalarSize(basicType);
663 const int numVecs = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
664 const int numComps = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumRows(basicType) : scalarSize;
665
666 DE_ASSERT(size_t((arrayNdx+1)*scalarSize) <= value.elements.size());
667
668 if (isDataTypeBoolOrBVec(basicType))
669 {
670 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
671 {
672 for (int compNdx = 0; compNdx < numComps; compNdx++)
673 {
674 const deUint32 data = value.elements[arrayNdx*scalarSize + vecNdx*numComps + compNdx].bool32 ? ~0u : 0u;
675
676 deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride + compNdx * sizeof(deUint32),
677 &data,
678 sizeof(deUint32));
679 }
680 }
681 }
682 else
683 {
684 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
685 deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride,
686 &value.elements[arrayNdx*scalarSize + vecNdx*numComps],
687 numComps*sizeof(deUint32));
688 }
689 }
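
// Example of the boolean expansion above: a bvec2 value { true, false } (hypothetical) is
// written out as two 32-bit words, ~0u followed by 0u, matching the uint-based transport
// type chosen by getTransportType(). Non-boolean values are copied as raw 32-bit components,
// one column at a time for matrices.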
690
void copyToLayout (void* dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
692 {
693 DE_ASSERT(layout.entries.size() == values.size());
694
695 for (size_t ndx = 0; ndx < values.size(); ndx++)
696 copyToLayout(dst, layout.entries[ndx], values[ndx], arrayNdx);
697 }
698
deUint32 getShaderStages (const ShaderCaseSpecification& spec)
700 {
701 if (spec.caseType == glu::sl::CASETYPE_COMPLETE)
702 {
703 deUint32 stages = 0u;
704
705 for (size_t progNdx = 0; progNdx < spec.programs.size(); progNdx++)
706 {
707 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
708 {
709 if (!spec.programs[progNdx].sources.sources[shaderType].empty())
710 stages |= (1u << shaderType);
711 }
712 }
713
714 return stages;
715 }
716 else
717 return (1u << glu::SHADERTYPE_VERTEX) | (1u << glu::SHADERTYPE_FRAGMENT);
718 }
719
720 class PipelineProgram
721 {
722 public:
723 PipelineProgram (Context& context, const ShaderCaseSpecification& spec);
724
deUint32 getStages (void) const { return m_stages; }

bool hasShader (glu::ShaderType type) const { return (m_stages & (1u << type)) != 0; }
vk::VkShaderModule getShader (glu::ShaderType type) const { return *m_shaderModules[type]; }
729
730 private:
731 const deUint32 m_stages;
732 Move<vk::VkShaderModule> m_shaderModules[glu::SHADERTYPE_LAST];
733 };
734
PipelineProgram::PipelineProgram (Context& context, const ShaderCaseSpecification& spec)
736 : m_stages(getShaderStages(spec))
737 {
// \note Currently only a single source program is supported, as the framework lacks SPIR-V linking capability
739 TCU_CHECK_INTERNAL(spec.programs.size() == 1);
740
741 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
742 {
743 if ((m_stages & (1u << shaderType)) != 0)
744 {
745 m_shaderModules[shaderType] = vk::createShaderModule(context.getDeviceInterface(), context.getDevice(),
746 context.getBinaryCollection().get(getShaderName((glu::ShaderType)shaderType, 0)), 0u);
747 }
748 }
749 }
750
vector<vk::VkPipelineShaderStageCreateInfo> getPipelineShaderStageCreateInfo (const PipelineProgram& program)
752 {
753 vector<vk::VkPipelineShaderStageCreateInfo> infos;
754
755 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
756 {
757 if (program.hasShader((glu::ShaderType)shaderType))
758 {
759 const vk::VkPipelineShaderStageCreateInfo info =
760 {
761 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
762 DE_NULL, // pNext
763 (vk::VkPipelineShaderStageCreateFlags)0,
764 vk::getVkShaderStage((glu::ShaderType)shaderType), // stage
765 program.getShader((glu::ShaderType)shaderType), // module
766 "main",
767 DE_NULL, // pSpecializationInfo
768 };
769
770 infos.push_back(info);
771 }
772 }
773
774 return infos;
775 }
776
Move<vk::VkBuffer> createBuffer (Context& context, vk::VkDeviceSize size, vk::VkBufferUsageFlags usageFlags)
778 {
779 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
780 const vk::VkBufferCreateInfo params =
781 {
782 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
783 DE_NULL, // pNext
784 0u, // flags
785 size, // size
786 usageFlags, // usage
787 vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
788 1u, // queueFamilyCount
789 &queueFamilyIndex, // pQueueFamilyIndices
790 };
791
return vk::createBuffer(context.getDeviceInterface(), context.getDevice(), &params);
793 }
794
Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
796 {
797 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
798 const vk::VkImageCreateInfo params =
799 {
800 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
801 DE_NULL, // pNext
802 0u, // flags
803 vk::VK_IMAGE_TYPE_2D, // imageType
804 format, // format
805 { width, height, 1u }, // extent
806 1u, // mipLevels
807 1u, // arraySize
808 vk::VK_SAMPLE_COUNT_1_BIT, // samples
809 tiling, // tiling
810 usageFlags, // usage
811 vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
812 1u, // queueFamilyCount
813 &queueFamilyIndex, // pQueueFamilyIndices
814 vk::VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
815 };
816
return vk::createImage(context.getDeviceInterface(), context.getDevice(), &params);
818 }
819
Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
821 {
822 const vk::VkImageViewCreateInfo params =
823 {
824 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
825 DE_NULL, // pNext
826 0u, // flags
827 image, // image
828 vk::VK_IMAGE_VIEW_TYPE_2D, // viewType
829 format, // format
830 vk::makeComponentMappingRGBA(), // channels
831 {
832 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
833 0u, // baseMipLevel
834 1u, // mipLevels
835 0u, // baseArrayLayer
836 1u, // arraySize
837 }, // subresourceRange
838 };
839
return vk::createImageView(context.getDeviceInterface(), context.getDevice(), &params);
841 }
842
Move<vk::VkRenderPass> createRenderPass (Context& context, vk::VkFormat colorAttFormat)
844 {
845 const vk::VkAttachmentDescription colorAttDesc =
846 {
847 0u, // flags
848 colorAttFormat, // format
849 vk::VK_SAMPLE_COUNT_1_BIT, // samples
850 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
851 vk::VK_ATTACHMENT_STORE_OP_STORE, // storeOp
852 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
853 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
854 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // initialLayout
855 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // finalLayout
856 };
857 const vk::VkAttachmentReference colorAttRef =
858 {
859 0u, // attachment
860 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // layout
861 };
862 const vk::VkAttachmentReference dsAttRef =
863 {
864 VK_ATTACHMENT_UNUSED, // attachment
865 vk::VK_IMAGE_LAYOUT_GENERAL, // layout
866 };
867 const vk::VkSubpassDescription subpassDesc =
868 {
869 (vk::VkSubpassDescriptionFlags)0,
870 vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
871 0u, // inputCount
872 DE_NULL, // pInputAttachments
873 1u, // colorCount
874 &colorAttRef, // pColorAttachments
875 DE_NULL, // pResolveAttachments
876 &dsAttRef, // depthStencilAttachment
877 0u, // preserveCount
878 DE_NULL, // pPreserveAttachments
879
880 };
881 const vk::VkRenderPassCreateInfo renderPassParams =
882 {
883 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // sType
884 DE_NULL, // pNext
885 (vk::VkRenderPassCreateFlags)0,
886 1u, // attachmentCount
887 &colorAttDesc, // pAttachments
888 1u, // subpassCount
889 &subpassDesc, // pSubpasses
890 0u, // dependencyCount
891 DE_NULL, // pDependencies
892 };
893
894 return vk::createRenderPass(context.getDeviceInterface(), context.getDevice(), &renderPassParams);
895 }
896
vk::VkShaderStageFlags getVkStageFlags (deUint32 stages)
898 {
899 vk::VkShaderStageFlags vkStages = 0u;
900
901 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
902 {
903 if ((stages & (1u << shaderType)) != 0)
904 vkStages |= vk::getVkShaderStage((glu::ShaderType)shaderType);
905 }
906
907 return vkStages;
908 }
909
Move<vk::VkDescriptorSetLayout> createDescriptorSetLayout (Context& context, deUint32 shaderStages)
911 {
912 DE_STATIC_ASSERT(REFERENCE_UNIFORM_BINDING == 0);
913 DE_STATIC_ASSERT(USER_UNIFORM_BINDING == 1);
914
915 return vk::DescriptorSetLayoutBuilder()
916 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_FRAGMENT_BIT)
917 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, getVkStageFlags(shaderStages))
918 .build(context.getDeviceInterface(), context.getDevice());
919 }
920
Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
922 {
923 const vk::VkPipelineLayoutCreateInfo params =
924 {
925 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
926 DE_NULL, // pNext
927 (vk::VkPipelineLayoutCreateFlags)0,
928 1u, // descriptorSetCount
929 &descriptorSetLayout, // pSetLayouts
930 0u, // pushConstantRangeCount
931 DE_NULL, // pPushConstantRanges
932 };
933
return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), &params);
935 }
936
vk::VkFormat getVecFormat (DataType scalarType, int scalarSize)
938 {
939 switch (scalarType)
940 {
941 case glu::TYPE_FLOAT:
942 {
943 const vk::VkFormat vecFmts[] =
944 {
945 vk::VK_FORMAT_R32_SFLOAT,
946 vk::VK_FORMAT_R32G32_SFLOAT,
947 vk::VK_FORMAT_R32G32B32_SFLOAT,
948 vk::VK_FORMAT_R32G32B32A32_SFLOAT,
949 };
950 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
951 }
952
953 case glu::TYPE_INT:
954 {
955 const vk::VkFormat vecFmts[] =
956 {
957 vk::VK_FORMAT_R32_SINT,
958 vk::VK_FORMAT_R32G32_SINT,
959 vk::VK_FORMAT_R32G32B32_SINT,
960 vk::VK_FORMAT_R32G32B32A32_SINT,
961 };
962 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
963 }
964
965 case glu::TYPE_UINT:
966 {
967 const vk::VkFormat vecFmts[] =
968 {
969 vk::VK_FORMAT_R32_UINT,
970 vk::VK_FORMAT_R32G32_UINT,
971 vk::VK_FORMAT_R32G32B32_UINT,
972 vk::VK_FORMAT_R32G32B32A32_UINT,
973 };
974 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
975 }
976
977 case glu::TYPE_BOOL:
978 {
979 const vk::VkFormat vecFmts[] =
980 {
981 vk::VK_FORMAT_R32_UINT,
982 vk::VK_FORMAT_R32G32_UINT,
983 vk::VK_FORMAT_R32G32B32_UINT,
984 vk::VK_FORMAT_R32G32B32A32_UINT,
985 };
986 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
987 }
988
989 default:
990 DE_FATAL("Unknown scalar type");
991 return vk::VK_FORMAT_R8G8B8A8_UINT;
992 }
993 }
994
vector<vk::VkVertexInputAttributeDescription> getVertexAttributeDescriptions (const vector<Value>& inputValues, const ValueBufferLayout& layout)
996 {
997 vector<vk::VkVertexInputAttributeDescription> attribs;
998
999 // Position
1000 {
1001 const vk::VkVertexInputAttributeDescription posDesc =
1002 {
1003 0u, // location
1004 0u, // binding
1005 vk::VK_FORMAT_R32G32_SFLOAT, // format
1006 0u, // offset
1007 };
1008
1009 attribs.push_back(posDesc);
1010 }
1011
1012 // Input values
1013 for (size_t inputNdx = 0; inputNdx < inputValues.size(); inputNdx++)
1014 {
1015 const Value& input = inputValues[inputNdx];
1016 const ValueBufferLayout::Entry& layoutEntry = layout.entries[inputNdx];
1017 const DataType basicType = input.type.getBasicType();
1018 const int numVecs = isDataTypeMatrix(basicType)
1019 ? getDataTypeMatrixNumColumns(basicType)
1020 : 1;
1021 const int vecSize = isDataTypeMatrix(basicType)
1022 ? getDataTypeMatrixNumRows(basicType)
1023 : getDataTypeScalarSize(basicType);
1024 const DataType scalarType = getDataTypeScalarType(basicType);
1025 const vk::VkFormat vecFmt = getVecFormat(scalarType, vecSize);
1026
1027 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
1028 {
1029 const deUint32 curLoc = (deUint32)attribs.size();
1030 const deUint32 offset = (deUint32)(layoutEntry.offset + layoutEntry.vecStride*vecNdx);
1031 const vk::VkVertexInputAttributeDescription desc =
1032 {
1033 curLoc, // location
1034 1u, // binding
1035 vecFmt, // format
1036 offset, // offset
1037 };
1038
1039 attribs.push_back(desc);
1040 }
1041 }
1042
1043 return attribs;
1044 }
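
// Illustration (assuming a single mat3 input): the position attribute always takes
// location 0 in binding 0, and the matrix expands into three VK_FORMAT_R32G32B32_SFLOAT
// attributes at locations 1..3 in binding 1, with offsets of the layout entry offset plus
// 0, 1 and 2 times the entry's vecStride.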
1045
Move<vk::VkPipeline> createPipeline (Context& context,
1047 const vector<Value>& inputValues,
1048 const ValueBufferLayout& inputLayout,
1049 const PipelineProgram& program,
1050 vk::VkRenderPass renderPass,
1051 vk::VkPipelineLayout pipelineLayout,
1052 tcu::UVec2 renderSize)
1053 {
1054 const vector<vk::VkPipelineShaderStageCreateInfo> shaderStageParams (getPipelineShaderStageCreateInfo(program));
1055 const vector<vk::VkVertexInputAttributeDescription> vertexAttribParams (getVertexAttributeDescriptions(inputValues, inputLayout));
1056 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilParams =
1057 {
1058 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // sType
1059 DE_NULL, // pNext
1060 (vk::VkPipelineDepthStencilStateCreateFlags)0,
1061 VK_FALSE, // depthTestEnable
1062 VK_FALSE, // depthWriteEnable
1063 vk::VK_COMPARE_OP_ALWAYS, // depthCompareOp
1064 VK_FALSE, // depthBoundsTestEnable
1065 VK_FALSE, // stencilTestEnable
1066 {
1067 vk::VK_STENCIL_OP_KEEP, // stencilFailOp;
1068 vk::VK_STENCIL_OP_KEEP, // stencilPassOp;
1069 vk::VK_STENCIL_OP_KEEP, // stencilDepthFailOp;
1070 vk::VK_COMPARE_OP_ALWAYS, // stencilCompareOp;
1071 0u, // stencilCompareMask
1072 0u, // stencilWriteMask
1073 0u, // stencilReference
1074 }, // front;
1075 {
1076 vk::VK_STENCIL_OP_KEEP, // stencilFailOp;
1077 vk::VK_STENCIL_OP_KEEP, // stencilPassOp;
1078 vk::VK_STENCIL_OP_KEEP, // stencilDepthFailOp;
1079 vk::VK_COMPARE_OP_ALWAYS, // stencilCompareOp;
1080 0u, // stencilCompareMask
1081 0u, // stencilWriteMask
1082 0u, // stencilReference
1083 }, // back;
1084 -1.0f, // minDepthBounds
1085 +1.0f, // maxDepthBounds
1086 };
1087 const vk::VkViewport viewport0 =
1088 {
1089 0.0f, // originX
1090 0.0f, // originY
1091 (float)renderSize.x(), // width
1092 (float)renderSize.y(), // height
1093 0.0f, // minDepth
1094 1.0f, // maxDepth
1095 };
1096 const vk::VkRect2D scissor0 =
1097 {
1098 { 0u, 0u }, // offset
1099 { renderSize.x(), renderSize.y() } // extent
1100 };
1101 const vk::VkPipelineViewportStateCreateInfo viewportParams =
1102 {
1103 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // sType
1104 DE_NULL, // pNext
1105 (vk::VkPipelineViewportStateCreateFlags)0,
1106 1u, // viewportCount
1107 &viewport0, // pViewports
1108 1u, // scissorCount
1109 &scissor0, // pScissors
1110 };
1111 const vk::VkPipelineMultisampleStateCreateInfo multisampleParams =
1112 {
1113 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // sType
1114 DE_NULL, // pNext
1115 (vk::VkPipelineMultisampleStateCreateFlags)0,
1116 vk::VK_SAMPLE_COUNT_1_BIT, // rasterSamples
1117 DE_FALSE, // sampleShadingEnable
1118 0.0f, // minSampleShading
1119 DE_NULL, // pSampleMask
1120 VK_FALSE, // alphaToCoverageEnable
1121 VK_FALSE, // alphaToOneEnable
1122 };
1123 const vk::VkPipelineRasterizationStateCreateInfo rasterParams =
1124 {
1125 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
1126 DE_NULL, // pNext
1127 (vk::VkPipelineRasterizationStateCreateFlags)0,
1128 DE_TRUE, // depthClipEnable
1129 DE_FALSE, // rasterizerDiscardEnable
1130 vk::VK_POLYGON_MODE_FILL, // fillMode
1131 vk::VK_CULL_MODE_NONE, // cullMode;
1132 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE, // frontFace;
1133 VK_FALSE, // depthBiasEnable
1134 0.0f, // depthBiasConstantFactor
1135 0.0f, // depthBiasClamp
1136 0.0f, // depthBiasSlopeFactor
1137 1.0f, // lineWidth
1138 };
1139 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyParams =
1140 {
1141 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // sType
1142 DE_NULL, // pNext
1143 (vk::VkPipelineInputAssemblyStateCreateFlags)0,
1144 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // topology
1145 DE_FALSE, // primitiveRestartEnable
1146 };
1147 const vk::VkVertexInputBindingDescription vertexBindings[] =
1148 {
1149 {
1150 0u, // binding
1151 (deUint32)sizeof(tcu::Vec2), // stride
1152 vk::VK_VERTEX_INPUT_RATE_VERTEX, // stepRate
1153 },
1154 {
1155 1u, // binding
1156 0u, // stride
1157 vk::VK_VERTEX_INPUT_RATE_INSTANCE, // stepRate
1158 },
1159 };
1160 const vk::VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1161 {
1162 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType
1163 DE_NULL, // pNext
1164 (vk::VkPipelineVertexInputStateCreateFlags)0,
1165 (inputValues.empty() ? 1u : 2u), // bindingCount
1166 vertexBindings, // pVertexBindingDescriptions
1167 (deUint32)vertexAttribParams.size(), // attributeCount
1168 &vertexAttribParams[0], // pVertexAttributeDescriptions
1169 };
1170 const vk::VkColorComponentFlags allCompMask = vk::VK_COLOR_COMPONENT_R_BIT
1171 | vk::VK_COLOR_COMPONENT_G_BIT
1172 | vk::VK_COLOR_COMPONENT_B_BIT
1173 | vk::VK_COLOR_COMPONENT_A_BIT;
1174 const vk::VkPipelineColorBlendAttachmentState attBlendParams =
1175 {
1176 VK_FALSE, // blendEnable
1177 vk::VK_BLEND_FACTOR_ONE, // srcBlendColor
1178 vk::VK_BLEND_FACTOR_ZERO, // destBlendColor
1179 vk::VK_BLEND_OP_ADD, // blendOpColor
1180 vk::VK_BLEND_FACTOR_ONE, // srcBlendAlpha
1181 vk::VK_BLEND_FACTOR_ZERO, // destBlendAlpha
1182 vk::VK_BLEND_OP_ADD, // blendOpAlpha
1183 allCompMask, // componentWriteMask
1184 };
1185 const vk::VkPipelineColorBlendStateCreateInfo blendParams =
1186 {
1187 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // sType
1188 DE_NULL, // pNext
1189 (vk::VkPipelineColorBlendStateCreateFlags)0,
1190 VK_FALSE, // logicOpEnable
1191 vk::VK_LOGIC_OP_COPY, // logicOp
1192 1u, // attachmentCount
1193 &attBlendParams, // pAttachments
1194 { 0.0f, 0.0f, 0.0f, 0.0f }, // blendConstants
1195 };
1196 const vk::VkGraphicsPipelineCreateInfo pipelineParams =
1197 {
1198 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // sType
1199 DE_NULL, // pNext
1200 0u, // flags
1201 (deUint32)shaderStageParams.size(), // stageCount
1202 &shaderStageParams[0], // pStages
1203 &vertexInputStateParams, // pVertexInputState
1204 &inputAssemblyParams, // pInputAssemblyState
1205 DE_NULL, // pTessellationState
1206 &viewportParams, // pViewportState
1207 &rasterParams, // pRasterState
1208 &multisampleParams, // pMultisampleState
1209 &depthStencilParams, // pDepthStencilState
1210 &blendParams, // pColorBlendState
1211 (const vk::VkPipelineDynamicStateCreateInfo*)DE_NULL, // pDynamicState
1212 pipelineLayout, // layout
1213 renderPass, // renderPass
1214 0u, // subpass
1215 DE_NULL, // basePipelineHandle
1216 0u, // basePipelineIndex
1217 };
1218
1219 return vk::createGraphicsPipeline(context.getDeviceInterface(), context.getDevice(), DE_NULL, &pipelineParams);
1220 }
1221
Move<vk::VkFramebuffer> createFramebuffer (Context& context, vk::VkRenderPass renderPass, vk::VkImageView colorAttView, int width, int height)
1223 {
1224 const vk::VkFramebufferCreateInfo framebufferParams =
1225 {
1226 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
1227 DE_NULL, // pNext
1228 (vk::VkFramebufferCreateFlags)0,
1229 renderPass, // renderPass
1230 1u, // attachmentCount
1231 &colorAttView, // pAttachments
1232 (deUint32)width, // width
1233 (deUint32)height, // height
1234 1u, // layers
1235 };
1236
1237 return vk::createFramebuffer(context.getDeviceInterface(), context.getDevice(), &framebufferParams);
1238 }
1239
Move<vk::VkCommandPool> createCommandPool (Context& context)
1241 {
1242 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1243 const vk::VkCommandPoolCreateInfo params =
1244 {
1245 vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
1246 DE_NULL, // pNext
1247 (vk::VkCommandPoolCreateFlags)0,
1248 queueFamilyIndex, // queueFamilyIndex
1249 };
1250
return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), &params);
1252 }
1253
Move<vk::VkDescriptorPool> createDescriptorPool (Context& context)
1255 {
1256 return vk::DescriptorPoolBuilder()
1257 .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2u)
1258 .build(context.getDeviceInterface(), context.getDevice(), vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1259 }
1260
Move<vk::VkDescriptorSet> allocateDescriptorSet (Context& context, vk::VkDescriptorPool descriptorPool, vk::VkDescriptorSetLayout setLayout)
1262 {
1263 const vk::VkDescriptorSetAllocateInfo params =
1264 {
1265 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1266 DE_NULL,
1267 descriptorPool,
1268 1u,
1269 &setLayout
1270 };
1271
return vk::allocateDescriptorSet(context.getDeviceInterface(), context.getDevice(), &params);
1273 }
1274
Move<vk::VkCommandBuffer> allocateCommandBuffer (Context& context, vk::VkCommandPool cmdPool)
1276 {
1277 const vk::VkCommandBufferAllocateInfo params =
1278 {
1279 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1280 DE_NULL, // pNext
1281 cmdPool, // commandPool
1282 vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1283 1u, // bufferCount
1284 };
1285
return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), &params);
1287 }
1288
MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1290 {
1291 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1292 const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1293 MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
1294
1295 vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1296
1297 return memory;
1298 }
1299
MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1301 {
1302 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1303 const vk::VkMemoryRequirements imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1304 MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1305
1306 vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1307
1308 return memory;
1309 }
1310
void writeValuesToMem (Context& context, const vk::Allocation& dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
1312 {
1313 copyToLayout(dst.getHostPtr(), layout, values, arrayNdx);
1314
// \note Buffers are not allocated with a coherency / uncached requirement, so we need to manually flush CPU write caches
1316 flushMappedMemoryRange(context.getDeviceInterface(), context.getDevice(), dst.getMemory(), dst.getOffset(), (vk::VkDeviceSize)layout.size);
1317 }
1318
1319 class ShaderCaseInstance : public TestInstance
1320 {
1321 public:
1322 ShaderCaseInstance (Context& context, const ShaderCaseSpecification& spec);
1323 ~ShaderCaseInstance (void);
1324
1325 TestStatus iterate (void);
1326
1327 private:
1328 enum
1329 {
1330 RENDER_WIDTH = 64,
1331 RENDER_HEIGHT = 64,
1332
1333 POSITIONS_OFFSET = 0,
1334 POSITIONS_SIZE = (int)sizeof(Vec2)*4,
1335
1336 INDICES_OFFSET = POSITIONS_SIZE,
1337 INDICES_SIZE = (int)sizeof(deUint16)*6,
1338
1339 TOTAL_POS_NDX_SIZE = POSITIONS_SIZE+INDICES_SIZE
1340 };
1341
1342 const ShaderCaseSpecification& m_spec;
1343
1344 const Unique<vk::VkBuffer> m_posNdxBuffer;
1345 const UniquePtr<vk::Allocation> m_posNdxMem;
1346
1347 const ValueBufferLayout m_inputLayout;
1348 const Unique<vk::VkBuffer> m_inputBuffer; // Input values (attributes). Can be NULL if no inputs present
1349 const UniquePtr<vk::Allocation> m_inputMem; // Input memory, can be NULL if no input buffer exists
1350
1351 const ValueBufferLayout m_referenceLayout;
1352 const Unique<vk::VkBuffer> m_referenceBuffer; // Output (reference) values. Can be NULL if no outputs present
1353 const UniquePtr<vk::Allocation> m_referenceMem; // Output (reference) memory, can be NULL if no reference buffer exists
1354
1355 const ValueBufferLayout m_uniformLayout;
1356 const Unique<vk::VkBuffer> m_uniformBuffer; // Uniform values. Can be NULL if no uniforms present
1357 const UniquePtr<vk::Allocation> m_uniformMem; // Uniform memory, can be NULL if no uniform buffer exists
1358
1359 const Unique<vk::VkBuffer> m_readImageBuffer;
1360 const UniquePtr<vk::Allocation> m_readImageMem;
1361
1362 const Unique<vk::VkImage> m_rtImage;
1363 const UniquePtr<vk::Allocation> m_rtMem;
1364 const Unique<vk::VkImageView> m_rtView;
1365
1366 const Unique<vk::VkRenderPass> m_renderPass;
1367 const Unique<vk::VkFramebuffer> m_framebuffer;
1368 const PipelineProgram m_program;
1369 const Unique<vk::VkDescriptorSetLayout> m_descriptorSetLayout;
1370 const Unique<vk::VkPipelineLayout> m_pipelineLayout;
1371 const Unique<vk::VkPipeline> m_pipeline;
1372
1373 const Unique<vk::VkDescriptorPool> m_descriptorPool;
1374 const Unique<vk::VkDescriptorSet> m_descriptorSet;
1375
1376 const Unique<vk::VkCommandPool> m_cmdPool;
1377 const Unique<vk::VkCommandBuffer> m_cmdBuffer;
1378
1379 int m_subCaseNdx;
1380 };
1381
ShaderCaseInstance::ShaderCaseInstance (Context& context, const ShaderCaseSpecification& spec)
1383 : TestInstance (context)
1384 , m_spec (spec)
1385
1386 , m_posNdxBuffer (createBuffer(context, (vk::VkDeviceSize)TOTAL_POS_NDX_SIZE, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT))
1387 , m_posNdxMem (allocateAndBindMemory(context, *m_posNdxBuffer, vk::MemoryRequirement::HostVisible))
1388
1389 , m_inputLayout (computeStd430Layout(spec.values.inputs))
1390 , m_inputBuffer (m_inputLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_inputLayout.size, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) : Move<vk::VkBuffer>())
1391 , m_inputMem (m_inputLayout.size > 0 ? allocateAndBindMemory(context, *m_inputBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1392
1393 , m_referenceLayout (computeStd140Layout(spec.values.outputs))
1394 , m_referenceBuffer (m_referenceLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_referenceLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1395 , m_referenceMem (m_referenceLayout.size > 0 ? allocateAndBindMemory(context, *m_referenceBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1396
1397 , m_uniformLayout (computeStd140Layout(spec.values.uniforms))
1398 , m_uniformBuffer (m_uniformLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_uniformLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1399 , m_uniformMem (m_uniformLayout.size > 0 ? allocateAndBindMemory(context, *m_uniformBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1400
1401 , m_readImageBuffer (createBuffer(context, (vk::VkDeviceSize)(RENDER_WIDTH*RENDER_HEIGHT*4), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT))
1402 , m_readImageMem (allocateAndBindMemory(context, *m_readImageBuffer, vk::MemoryRequirement::HostVisible))
1403
1404 , m_rtImage (createImage2D(context, RENDER_WIDTH, RENDER_HEIGHT, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_IMAGE_TILING_OPTIMAL, vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT))
1405 , m_rtMem (allocateAndBindMemory(context, *m_rtImage, vk::MemoryRequirement::Any))
1406 , m_rtView (createAttachmentView(context, *m_rtImage, vk::VK_FORMAT_R8G8B8A8_UNORM))
1407
1408 , m_renderPass (createRenderPass(context, vk::VK_FORMAT_R8G8B8A8_UNORM))
1409 , m_framebuffer (createFramebuffer(context, *m_renderPass, *m_rtView, RENDER_WIDTH, RENDER_HEIGHT))
1410 , m_program (context, spec)
1411 , m_descriptorSetLayout (createDescriptorSetLayout(context, m_program.getStages()))
1412 , m_pipelineLayout (createPipelineLayout(context, *m_descriptorSetLayout))
1413 , m_pipeline (createPipeline(context, spec.values.inputs, m_inputLayout, m_program, *m_renderPass, *m_pipelineLayout, tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT)))
1414
1415 , m_descriptorPool (createDescriptorPool(context))
1416 , m_descriptorSet (allocateDescriptorSet(context, *m_descriptorPool, *m_descriptorSetLayout))
1417
1418 , m_cmdPool (createCommandPool(context))
1419 , m_cmdBuffer (allocateCommandBuffer(context, *m_cmdPool))
1420
1421 , m_subCaseNdx (0)
1422 {
1423 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1424 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1425
1426 {
1427 const Vec2 s_positions[] =
1428 {
1429 Vec2(-1.0f, -1.0f),
1430 Vec2(-1.0f, +1.0f),
1431 Vec2(+1.0f, -1.0f),
1432 Vec2(+1.0f, +1.0f)
1433 };
1434 const deUint16 s_indices[] =
1435 {
1436 0, 1, 2,
1437 1, 3, 2
1438 };
1439
1440 DE_STATIC_ASSERT(sizeof(s_positions) == POSITIONS_SIZE);
1441 DE_STATIC_ASSERT(sizeof(s_indices) == INDICES_SIZE);
1442
1443 deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + POSITIONS_OFFSET, &s_positions[0], sizeof(s_positions));
1444 deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + INDICES_OFFSET, &s_indices[0], sizeof(s_indices));
1445
1446 flushMappedMemoryRange(m_context.getDeviceInterface(), context.getDevice(), m_posNdxMem->getMemory(), m_posNdxMem->getOffset(), sizeof(s_positions)+sizeof(s_indices));
1447 }
1448
    if (!m_spec.values.uniforms.empty())
    {
        const vk::VkDescriptorBufferInfo bufInfo =
        {
            *m_uniformBuffer,
            (vk::VkDeviceSize)0,                    // offset
            (vk::VkDeviceSize)m_uniformLayout.size
        };

        vk::DescriptorSetUpdateBuilder()
            .writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(USER_UNIFORM_BINDING),
                         vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
            .update(vkd, m_context.getDevice());
    }

    if (!m_spec.values.outputs.empty())
    {
        const vk::VkDescriptorBufferInfo bufInfo =
        {
            *m_referenceBuffer,
            (vk::VkDeviceSize)0,                    // offset
            (vk::VkDeviceSize)m_referenceLayout.size
        };

        vk::DescriptorSetUpdateBuilder()
            .writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(REFERENCE_UNIFORM_BINDING),
                         vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
            .update(vkd, m_context.getDevice());
    }

    // Record command buffer

    {
        const vk::VkCommandBufferBeginInfo beginInfo =
        {
            vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,    // sType
            DE_NULL,                                            // pNext
            0u,                                                 // flags
            (const vk::VkCommandBufferInheritanceInfo*)DE_NULL,
        };

        VK_CHECK(vkd.beginCommandBuffer(*m_cmdBuffer, &beginInfo));
    }

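    // Make the host-written vertex, index and uniform data visible to the device, and move
    // the color attachment from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL before rendering.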
    {
        const vk::VkMemoryBarrier vertFlushBarrier =
        {
            vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,                                   // sType
            DE_NULL,                                                                // pNext
            vk::VK_ACCESS_HOST_WRITE_BIT,                                           // srcAccessMask
            vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT|vk::VK_ACCESS_UNIFORM_READ_BIT, // dstAccessMask
        };
        const vk::VkImageMemoryBarrier colorAttBarrier =
        {
            vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            DE_NULL,                                        // pNext
            0u,                                             // srcAccessMask
            vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,       // dstAccessMask
            vk::VK_IMAGE_LAYOUT_UNDEFINED,                  // oldLayout
            vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,   // newLayout
            queueFamilyIndex,                               // srcQueueFamilyIndex
            queueFamilyIndex,                               // destQueueFamilyIndex
            *m_rtImage,                                     // image
            {
                vk::VK_IMAGE_ASPECT_COLOR_BIT,              // aspectMask
                0u,                                         // baseMipLevel
                1u,                                         // mipLevels
                0u,                                         // baseArraySlice
                1u,                                         // arraySize
            }                                               // subresourceRange
        };

        vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
                               1, &vertFlushBarrier,
                               0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
                               1, &colorAttBarrier);
    }

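    // Begin the render pass, clearing to a non-white color so that any pixel the quad fails
    // to cover or write shows up in the all-white result check below.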
    {
        const vk::VkClearValue          clearValue    = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
        const vk::VkRenderPassBeginInfo passBeginInfo =
        {
            vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,   // sType
            DE_NULL,                                        // pNext
            *m_renderPass,                                  // renderPass
            *m_framebuffer,                                 // framebuffer
            { { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } },  // renderArea
            1u,                                             // clearValueCount
            &clearValue,                                    // pClearValues
        };

        vkd.cmdBeginRenderPass(*m_cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
    }

    vkd.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);

    if (!m_spec.values.uniforms.empty() || !m_spec.values.outputs.empty())
        vkd.cmdBindDescriptorSets(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &*m_descriptorSet, 0u, DE_NULL);

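    // Bind the shared position/index buffer, and the per-vertex input buffer as well when the
    // test declares input values (the input buffer handle is null otherwise).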
    {
        const vk::VkBuffer      buffers[]  = { *m_posNdxBuffer, *m_inputBuffer };
        const vk::VkDeviceSize  offsets[]  = { POSITIONS_OFFSET, 0u };
        const deUint32          numBuffers = buffers[1] != 0 ? 2u : 1u;
        vkd.cmdBindVertexBuffers(*m_cmdBuffer, 0u, numBuffers, buffers, offsets);
    }

    vkd.cmdBindIndexBuffer (*m_cmdBuffer, *m_posNdxBuffer, (vk::VkDeviceSize)INDICES_OFFSET, vk::VK_INDEX_TYPE_UINT16);
    vkd.cmdDrawIndexed     (*m_cmdBuffer, 6u, 1u, 0u, 0u, 0u);
    vkd.cmdEndRenderPass   (*m_cmdBuffer);

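    // Wait for rendering to complete and transition the color attachment to
    // TRANSFER_SRC_OPTIMAL so it can be copied out for verification.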
    {
        const vk::VkImageMemoryBarrier renderFinishBarrier =
        {
            vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            DE_NULL,                                        // pNext
            vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,       // srcAccessMask
            vk::VK_ACCESS_TRANSFER_READ_BIT,                // dstAccessMask
            vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,   // oldLayout
            vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,       // newLayout
            queueFamilyIndex,                               // srcQueueFamilyIndex
            queueFamilyIndex,                               // destQueueFamilyIndex
            *m_rtImage,                                     // image
            {
                vk::VK_IMAGE_ASPECT_COLOR_BIT,              // aspectMask
                0u,                                         // baseMipLevel
                1u,                                         // mipLevels
                0u,                                         // baseArraySlice
                1u,                                         // arraySize
            }                                               // subresourceRange
        };

        vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
                               0, (const vk::VkMemoryBarrier*)DE_NULL,
                               0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
                               1, &renderFinishBarrier);
    }

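    // Copy the rendered image into the host-visible readback buffer.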
    {
        const vk::VkBufferImageCopy copyParams =
        {
            (vk::VkDeviceSize)0u,                   // bufferOffset
            (deUint32)RENDER_WIDTH,                 // bufferRowLength
            (deUint32)RENDER_HEIGHT,                // bufferImageHeight
            {
                vk::VK_IMAGE_ASPECT_COLOR_BIT,      // aspect
                0u,                                 // mipLevel
                0u,                                 // arrayLayer
                1u,                                 // arraySize
            },                                      // imageSubresource
            { 0u, 0u, 0u },                         // imageOffset
            { RENDER_WIDTH, RENDER_HEIGHT, 1u }     // imageExtent
        };

        vkd.cmdCopyImageToBuffer(*m_cmdBuffer, *m_rtImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *m_readImageBuffer, 1u, &copyParams);
    }

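    // Make the transfer write visible to host reads; iterate() maps and inspects the
    // readback buffer after each submission.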
    {
        const vk::VkBufferMemoryBarrier copyFinishBarrier =
        {
            vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,        // sType
            DE_NULL,                                            // pNext
            vk::VK_ACCESS_TRANSFER_WRITE_BIT,                   // srcAccessMask
            vk::VK_ACCESS_HOST_READ_BIT,                        // dstAccessMask
            queueFamilyIndex,                                   // srcQueueFamilyIndex
            queueFamilyIndex,                                   // destQueueFamilyIndex
            *m_readImageBuffer,                                 // buffer
            0u,                                                 // offset
            (vk::VkDeviceSize)(RENDER_WIDTH*RENDER_HEIGHT*4)    // size
        };

        vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
                               0, (const vk::VkMemoryBarrier*)DE_NULL,
                               1, &copyFinishBarrier,
                               0, (const vk::VkImageMemoryBarrier*)DE_NULL);
    }

    VK_CHECK(vkd.endCommandBuffer(*m_cmdBuffer));
}

ShaderCaseInstance::~ShaderCaseInstance (void)
{
}

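// A shader case may provide several tuples of input/output values; each tuple is run as its
// own sub-case. The count is derived from the first output value's element array.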
int getNumSubCases (const ValueBlock& values)
{
    if (!values.outputs.empty())
        return int(values.outputs[0].elements.size() / values.outputs[0].type.getScalarSize());
    else
        return 1; // Always run at least one iteration even if no output values are specified
}

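// Sub-case verification: the shaders used by these cases are expected to render pure white
// when the computed values match the reference, so any non-white pixel means failure.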
bool checkResultImage (const ConstPixelBufferAccess& result)
{
    const tcu::IVec4 refPix (255, 255, 255, 255);

    for (int y = 0; y < result.getHeight(); y++)
    {
        for (int x = 0; x < result.getWidth(); x++)
        {
            const tcu::IVec4 resPix = result.getPixelInt(x, y);

            if (boolAny(notEqual(resPix, refPix)))
                return false;
        }
    }

    return true;
}

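// One iterate() call runs a single sub-case: upload that sub-case's input, reference and
// uniform values, re-submit the pre-recorded command buffer, and check the resulting image.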
TestStatus ShaderCaseInstance::iterate (void)
{
    const vk::DeviceInterface&  vkd    = m_context.getDeviceInterface();
    const vk::VkDevice          device = m_context.getDevice();
    const vk::VkQueue           queue  = m_context.getUniversalQueue();

    if (!m_spec.values.inputs.empty())
        writeValuesToMem(m_context, *m_inputMem, m_inputLayout, m_spec.values.inputs, m_subCaseNdx);

    if (!m_spec.values.outputs.empty())
        writeValuesToMem(m_context, *m_referenceMem, m_referenceLayout, m_spec.values.outputs, m_subCaseNdx);

    if (!m_spec.values.uniforms.empty())
        writeValuesToMem(m_context, *m_uniformMem, m_uniformLayout, m_spec.values.uniforms, m_subCaseNdx);

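    // Submit the command buffer and wait for completion with a fence.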
    {
        const vk::VkSubmitInfo submitInfo =
        {
            vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
            DE_NULL,
            0u,                                         // waitSemaphoreCount
            (const vk::VkSemaphore*)0,                  // pWaitSemaphores
            (const vk::VkPipelineStageFlags*)DE_NULL,
            1u,
            &m_cmdBuffer.get(),
            0u,                                         // signalSemaphoreCount
            (const vk::VkSemaphore*)0,                  // pSignalSemaphores
        };
        const vk::VkFenceCreateInfo fenceParams =
        {
            vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,    // sType
            DE_NULL,                                    // pNext
            0u,                                         // flags
        };
        const Unique<vk::VkFence> fence (vk::createFence(vkd, device, &fenceParams));

        VK_CHECK(vkd.queueSubmit   (queue, 1u, &submitInfo, *fence));
        VK_CHECK(vkd.waitForFences (device, 1u, &fence.get(), DE_TRUE, ~0ull));
    }

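    // Invalidate the mapped readback memory before reading it on the host, then compare the
    // image against the all-white reference.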
    {
        const ConstPixelBufferAccess imgAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), RENDER_WIDTH, RENDER_HEIGHT, 1, m_readImageMem->getHostPtr());

        invalidateMappedMemoryRange(vkd, device, m_readImageMem->getMemory(), m_readImageMem->getOffset(), (vk::VkDeviceSize)(RENDER_WIDTH*RENDER_HEIGHT*4));

        if (!checkResultImage(imgAccess))
        {
            TestLog& log = m_context.getTestContext().getLog();

            log << TestLog::Message << "ERROR: Got non-white pixels on sub-case " << m_subCaseNdx << TestLog::EndMessage
                << TestLog::Image("Result", "Result", imgAccess);

            dumpValues(log, m_spec.values, m_subCaseNdx);

            return TestStatus::fail(string("Got invalid pixels at sub-case ") + de::toString(m_subCaseNdx));
        }
    }

    if (++m_subCaseNdx < getNumSubCases(m_spec.values))
        return TestStatus::incomplete();
    else
        return TestStatus::pass("All sub-cases passed");
}

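// Test case node: registers the specialized shader sources via initPrograms() and creates a
// ShaderCaseInstance to execute them.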
class ShaderCase : public TestCase
{
public:
                                    ShaderCase     (tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec);

    void                            initPrograms   (SourceCollections& programCollection) const;
    TestInstance*                   createInstance (Context& context) const;

private:
    const ShaderCaseSpecification   m_spec;
};

ShaderCase::ShaderCase (tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec)
    : TestCase (testCtx, name, description)
    , m_spec   (spec)
{
}

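// Specialize the GLSL sources according to the case type: vertex-only and fragment-only cases
// get an auto-generated counterpart stage, while complete cases specialize each program as-is.
// Only EXPECT_PASS cases are supported.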
void ShaderCase::initPrograms (SourceCollections& sourceCollection) const
{
    vector<ProgramSources> specializedSources (m_spec.programs.size());

    DE_ASSERT(isValid(m_spec));

    if (m_spec.expectResult != glu::sl::EXPECT_PASS)
        TCU_THROW(InternalError, "Only EXPECT_PASS is supported");

    if (m_spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY)
    {
        DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX].size() == 1);
        specializedSources[0] << glu::VertexSource(specializeVertexShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX][0]))
                              << glu::FragmentSource(genFragmentShader(m_spec));
    }
    else if (m_spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY)
    {
        DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT].size() == 1);
        specializedSources[0] << glu::VertexSource(genVertexShader(m_spec))
                              << glu::FragmentSource(specializeFragmentShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT][0]));
    }
    else
    {
        DE_ASSERT(m_spec.caseType == glu::sl::CASETYPE_COMPLETE);

        const int maxPatchVertices = 4; // \todo [2015-08-05 pyry] Query

        for (size_t progNdx = 0; progNdx < m_spec.programs.size(); progNdx++)
        {
            const ProgramSpecializationParams progSpecParams (m_spec, m_spec.programs[progNdx].requiredExtensions, maxPatchVertices);

            specializeProgramSources(specializedSources[progNdx], m_spec.programs[progNdx].sources, progSpecParams);
        }
    }

    for (size_t progNdx = 0; progNdx < specializedSources.size(); progNdx++)
    {
        for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
        {
            if (!specializedSources[progNdx].sources[shaderType].empty())
            {
                glu::ProgramSources& curSrc = sourceCollection.glslSources.add(getShaderName((glu::ShaderType)shaderType, progNdx));
                curSrc.sources[shaderType] = specializedSources[progNdx].sources[shaderType];
            }
        }
    }
}

TestInstance* ShaderCase::createInstance (Context& context) const
{
    return new ShaderCaseInstance(context, m_spec);
}

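// Factory handed to the shader library parser; it turns parsed case specifications and groups
// into vkt test nodes.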
class ShaderCaseFactory : public glu::sl::ShaderCaseFactory
{
public:
    ShaderCaseFactory (tcu::TestContext& testCtx)
        : m_testCtx(testCtx)
    {
    }

    tcu::TestCaseGroup* createGroup (const string& name, const string& description, const vector<tcu::TestNode*>& children)
    {
        return new tcu::TestCaseGroup(m_testCtx, name.c_str(), description.c_str(), children);
    }

    tcu::TestCase* createCase (const string& name, const string& description, const ShaderCaseSpecification& spec)
    {
        return new ShaderCase(m_testCtx, name, description, spec);
    }

private:
    tcu::TestContext& m_testCtx;
};

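// Test group backed by a shader library file: the file is parsed lazily in init() and the
// resulting cases are adopted as children; any not yet added are deleted if addChild() throws.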
class ShaderLibraryGroup : public tcu::TestCaseGroup
{
public:
    ShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
        : tcu::TestCaseGroup (testCtx, name.c_str(), description.c_str())
        , m_filename         (filename)
    {
    }

    void init (void)
    {
        ShaderCaseFactory               caseFactory (m_testCtx);
        const vector<tcu::TestNode*>    children    = glu::sl::parseFile(m_testCtx.getArchive(), m_filename, &caseFactory);

        for (size_t ndx = 0; ndx < children.size(); ndx++)
        {
            try
            {
                addChild(children[ndx]);
            }
            catch (...)
            {
                for (; ndx < children.size(); ndx++)
                    delete children[ndx];
                throw;
            }
        }
    }

private:
    const string m_filename;
};

} // anonymous

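// Public entry point: creates a test group populated from the given shader library file.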
MovePtr<tcu::TestCaseGroup> createShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
{
    return MovePtr<tcu::TestCaseGroup>(new ShaderLibraryGroup(testCtx, name, description, filename));
}

} // vkt