1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief ShaderLibrary Vulkan implementation
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktShaderLibrary.hpp"
25 #include "vktTestCase.hpp"
26 
27 #include "vkPrograms.hpp"
28 #include "vkRef.hpp"
29 #include "vkRefUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkImageUtil.hpp"
35 #include "vkCmdUtil.hpp"
36 #include "vkObjUtil.hpp"
37 
38 #include "gluShaderLibrary.hpp"
39 #include "gluShaderUtil.hpp"
40 
41 #include "tcuStringTemplate.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuTestLog.hpp"
44 #include "tcuVector.hpp"
45 #include "tcuVectorUtil.hpp"
46 
47 #include "deStringUtil.hpp"
48 #include "deArrayUtil.hpp"
49 #include "deMemory.h"
50 
51 #include <sstream>
52 #include <map>
53 
54 namespace vkt
55 {
56 
57 using std::string;
58 using std::vector;
59 using std::map;
60 using std::pair;
61 using std::ostringstream;
62 
63 using de::MovePtr;
64 using de::UniquePtr;
65 
66 using glu::ShaderType;
67 using glu::ProgramSources;
68 using glu::DataType;
69 
70 using glu::sl::ShaderCaseSpecification;
71 using glu::sl::ProgramSpecializationParams;
72 using glu::sl::RequiredExtension;
73 using glu::sl::Value;
74 using glu::sl::ValueBlock;
75 
76 using tcu::TestStatus;
77 using tcu::StringTemplate;
78 using tcu::Vec2;
79 using tcu::ConstPixelBufferAccess;
80 using tcu::TextureFormat;
81 using tcu::TestLog;
82 
83 using vk::SourceCollections;
84 using vk::Move;
85 using vk::Unique;
86 
87 namespace
88 {
89 
90 enum
91 {
92 	REFERENCE_UNIFORM_BINDING	= 0,
93 	USER_UNIFORM_BINDING		= 1
94 };
95 
96 string getShaderName (ShaderType shaderType, size_t progNdx)
97 {
98 	ostringstream str;
99 	str << glu::getShaderTypeName(shaderType);
100 	if (progNdx > 0)
101 		str << "_" << progNdx;
102 	return str.str();
103 }
104 
105 void genUniformBlock (ostringstream& out, const string& blockName, const string& instanceName, int setNdx, int bindingNdx, const vector<Value>& uniforms)
106 {
107 	out << "layout(";
108 
109 	if (setNdx != 0)
110 		out << "set = " << setNdx << ", ";
111 
112 	out << "binding = " << bindingNdx << ", std140) uniform " << blockName << "\n"
113 		<< "{\n";
114 
115 	for (vector<Value>::const_iterator val = uniforms.begin(); val != uniforms.end(); ++val)
116 		out << "\t" << glu::declare(val->type, val->name, 1) << ";\n";
117 
118 	out << "}";
119 
120 	if (!instanceName.empty())
121 		out << " " << instanceName;
122 
123 	out << ";\n";
124 }
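// For illustration only (hypothetical uniform names, not taken from any real case):
// given uniforms { int ui_one; vec2 uv2; }, a call with blockName "Uniforms", an empty
// instance name, set 0 and binding 1 would emit roughly:
//
//   layout(binding = 1, std140) uniform Uniforms
//   {
//       int ui_one;
//       vec2 uv2;
//   };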
125 
126 void declareReferenceBlock (ostringstream& out, const ValueBlock& valueBlock)
127 {
128 	if (!valueBlock.outputs.empty())
129 		genUniformBlock(out, "Reference", "ref", 0, REFERENCE_UNIFORM_BINDING, valueBlock.outputs);
130 }
131 
132 void declareUniforms (ostringstream& out, const ValueBlock& valueBlock)
133 {
134 	if (!valueBlock.uniforms.empty())
135 		genUniformBlock(out, "Uniforms", "", 0, USER_UNIFORM_BINDING, valueBlock.uniforms);
136 }
137 
138 DataType getTransportType (DataType valueType)
139 {
140 	if (isDataTypeBoolOrBVec(valueType))
141 		return glu::getDataTypeUintVec(getDataTypeScalarSize(valueType));
142 	else
143 		return valueType;
144 }
145 
146 int getNumTransportLocations (DataType valueType)
147 {
148 	return isDataTypeMatrix(valueType) ? getDataTypeMatrixNumColumns(valueType) : 1;
149 }
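// Note: booleans cannot be passed as vertex attributes or varyings, so bool/bvec values
// are transported as uint vectors of the same width and converted back in the shader.
// Matrices occupy one interface location per column; all other types use a single location.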
150 
151 // This function builds a matching vertex shader for a 'both' case, when
152 // the fragment shader is being tested.
153 // We need to build attributes and varyings for each 'input'.
154 string genVertexShader (const ShaderCaseSpecification& spec)
155 {
156 	ostringstream	res;
157 	int				curInputLoc		= 0;
158 	int				curOutputLoc	= 0;
159 
160 	res << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
161 
162 	// Declarations (position + attribute/varying for each input).
163 	res << "precision highp float;\n";
164 	res << "precision highp int;\n";
165 	res << "\n";
166 	res << "layout(location = 0) in highp vec4 dEQP_Position;\n";
167 	curInputLoc += 1;
168 
169 	for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
170 	{
171 		const Value&		val					= spec.values.inputs[ndx];
172 		const DataType		valueType			= val.type.getBasicType();
173 		const DataType		transportType		= getTransportType(valueType);
174 		const char* const	transportTypeStr	= getDataTypeName(transportType);
175 		const int			numLocs				= getNumTransportLocations(valueType);
176 
177 		res << "layout(location = " << curInputLoc << ") in " << transportTypeStr << " a_" << val.name << ";\n";
178 		res << "layout(location = " << curOutputLoc << ") flat out " << transportTypeStr << " " << (transportType != valueType ? "v_" : "") << val.name << ";\n";
179 
180 		curInputLoc		+= numLocs;
181 		curOutputLoc	+= numLocs;
182 	}
183 	res << "\n";
184 
185 	// Main function.
186 	// - gl_Position = dEQP_Position;
187 	// - for each input: write attribute directly to varying
188 	res << "void main()\n";
189 	res << "{\n";
190 	res << "	gl_Position = dEQP_Position;\n";
191 	for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
192 	{
193 		const Value&	val		= spec.values.inputs[ndx];
194 		const string&	name	= val.name;
195 
196 		res << "	" << (getTransportType(val.type.getBasicType()) != val.type.getBasicType() ? "v_" : "")
197 			<< name << " = a_" << name << ";\n";
198 	}
199 
200 	res << "}\n";
201 	return res.str();
202 }
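// As a sketch (hypothetical input "in0" of type bvec2, not from any actual case), the
// generated pass-through vertex shader would look roughly like:
//
//   <version declaration from spec.targetVersion>
//   precision highp float;
//   precision highp int;
//
//   layout(location = 0) in highp vec4 dEQP_Position;
//   layout(location = 1) in uvec2 a_in0;
//   layout(location = 0) flat out uvec2 v_in0;
//
//   void main()
//   {
//       gl_Position = dEQP_Position;
//       v_in0 = a_in0;
//   }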
203 
204 void genCompareOp (ostringstream& output, const char* dstVec4Var, const ValueBlock& valueBlock, const char* checkVarName)
205 {
206 	bool isFirstOutput = true;
207 
208 	for (size_t ndx = 0; ndx < valueBlock.outputs.size(); ndx++)
209 	{
210 		const Value&	val		= valueBlock.outputs[ndx];
211 
212 		// Check if we're only interested in one variable (then skip if not the right one).
213 		if (checkVarName && val.name != checkVarName)
214 			continue;
215 
216 		// Prefix.
217 		if (isFirstOutput)
218 		{
219 			output << "bool RES = ";
220 			isFirstOutput = false;
221 		}
222 		else
223 			output << "RES = RES && ";
224 
225 		// Generate actual comparison.
226 		if (getDataTypeScalarType(val.type.getBasicType()) == glu::TYPE_FLOAT)
227 			output << "isOk(" << val.name << ", ref." << val.name << ", 0.05);\n";
228 		else
229 			output << "isOk(" << val.name << ", ref." << val.name << ");\n";
230 	}
231 
232 	if (isFirstOutput)
233 		output << dstVec4Var << " = vec4(1.0);\n";
234 	else
235 		output << dstVec4Var << " = vec4(RES, RES, RES, 1.0);\n";
236 }
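// For a single float output "out0" (hypothetical name), the generated comparison would
// look roughly like:
//
//   bool RES = isOk(out0, ref.out0, 0.05);
//   dEQP_FragColor = vec4(RES, RES, RES, 1.0);
//
// With no outputs, the destination is simply written as vec4(1.0). The isOk() overloads
// are emitted separately by genCompareFunctions().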
237 
238 string genFragmentShader (const ShaderCaseSpecification& spec)
239 {
240 	ostringstream	shader;
241 	ostringstream	setup;
242 	int				curInLoc	= 0;
243 
244 	shader << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
245 
246 	shader << "precision highp float;\n";
247 	shader << "precision highp int;\n";
248 	shader << "\n";
249 
250 	shader << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
251 	shader << "\n";
252 
253 	genCompareFunctions(shader, spec.values, false);
254 	shader << "\n";
255 
256 	// Declarations (varying, reference for each output).
257 	for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
258 	{
259 		const Value&		val					= spec.values.outputs[ndx];
260 		const DataType		valueType			= val.type.getBasicType();
261 		const char*	const	valueTypeStr		= getDataTypeName(valueType);
262 		const DataType		transportType		= getTransportType(valueType);
263 		const char* const	transportTypeStr	= getDataTypeName(transportType);
264 		const int			numLocs				= getNumTransportLocations(valueType);
265 
266 		shader << "layout(location = " << curInLoc << ") flat in " << transportTypeStr << " " << (valueType != transportType ? "v_" : "") << val.name << ";\n";
267 
268 		if (valueType != transportType)
269 			setup << "	" << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
270 
271 		curInLoc += numLocs;
272 	}
273 
274 	declareReferenceBlock(shader, spec.values);
275 
276 	shader << "\n";
277 	shader << "void main()\n";
278 	shader << "{\n";
279 
280 	shader << setup.str();
281 
282 	shader << "	";
283 	genCompareOp(shader, "dEQP_FragColor", spec.values, DE_NULL);
284 
285 	shader << "}\n";
286 	return shader.str();
287 }
288 
289 // Specialize a shader for the vertex shader test case.
290 string specializeVertexShader (const ShaderCaseSpecification& spec, const string& src)
291 {
292 	ostringstream		decl;
293 	ostringstream		setup;
294 	ostringstream		output;
295 	int					curInputLoc		= 0;
296 	int					curOutputLoc	= 0;
297 
298 	// generated from "both" case
299 	DE_ASSERT(spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY);
300 
301 	// Output (write out position).
302 	output << "gl_Position = dEQP_Position;\n";
303 
304 	// Declarations (position + attribute for each input, varying for each output).
305 	decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
306 	curInputLoc += 1;
307 
308 	for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
309 	{
310 		const Value&		val					= spec.values.inputs[ndx];
311 		const DataType		valueType			= val.type.getBasicType();
312 		const char*	const	valueTypeStr		= getDataTypeName(valueType);
313 		const DataType		transportType		= getTransportType(valueType);
314 		const char* const	transportTypeStr	= getDataTypeName(transportType);
315 		const int			numLocs				= getNumTransportLocations(valueType);
316 
317 		decl << "layout(location = " << curInputLoc << ") in ";
318 
319 		curInputLoc += numLocs;
320 
321 		if (valueType == transportType)
322 			decl << transportTypeStr << " " << val.name << ";\n";
323 		else
324 		{
325 			decl << transportTypeStr << " a_" << val.name << ";\n";
326 			setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
327 		}
328 	}
329 
330 	declareUniforms(decl, spec.values);
331 
332 	for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
333 	{
334 		const Value&		val					= spec.values.outputs[ndx];
335 		const DataType		valueType			= val.type.getBasicType();
336 		const char*	const	valueTypeStr		= getDataTypeName(valueType);
337 		const DataType		transportType		= getTransportType(valueType);
338 		const char* const	transportTypeStr	= getDataTypeName(transportType);
339 		const int			numLocs				= getNumTransportLocations(valueType);
340 
341 		decl << "layout(location = " << curOutputLoc << ") flat out ";
342 
343 		curOutputLoc += numLocs;
344 
345 		if (valueType == transportType)
346 			decl << transportTypeStr << " " << val.name << ";\n";
347 		else
348 		{
349 			decl << transportTypeStr << " v_" << val.name << ";\n";
350 			decl << valueTypeStr << " " << val.name << ";\n";
351 
352 			output << "v_" << val.name << " = " << transportTypeStr << "(" << val.name << ");\n";
353 		}
354 	}
355 
356 	// Shader specialization.
357 	map<string, string> params;
358 	params.insert(pair<string, string>("DECLARATIONS", decl.str()));
359 	params.insert(pair<string, string>("SETUP", setup.str()));
360 	params.insert(pair<string, string>("OUTPUT", output.str()));
361 	params.insert(pair<string, string>("POSITION_FRAG_COLOR", "gl_Position"));
362 
363 	StringTemplate	tmpl	(src);
364 	const string	baseSrc	= tmpl.specialize(params);
365 	const string	withExt	= injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_VERTEX);
366 
367 	return withExt;
368 }
369 
370 // Specialize a shader for the fragment shader test case.
371 string specializeFragmentShader (const ShaderCaseSpecification& spec, const string& src)
372 {
373 	ostringstream		decl;
374 	ostringstream		setup;
375 	ostringstream		output;
376 	int					curInputLoc	= 0;
377 
378 	// generated from "both" case
379 	DE_ASSERT(spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY);
380 
381 	genCompareFunctions(decl, spec.values, false);
382 	genCompareOp(output, "dEQP_FragColor", spec.values, DE_NULL);
383 
384 	decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
385 
386 	for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
387 	{
388 		const Value&		val					= spec.values.inputs[ndx];
389 		const DataType		valueType			= val.type.getBasicType();
390 		const char*	const	valueTypeStr		= getDataTypeName(valueType);
391 		const DataType		transportType		= getTransportType(valueType);
392 		const char* const	transportTypeStr	= getDataTypeName(transportType);
393 		const int			numLocs				= getNumTransportLocations(valueType);
394 
395 		decl << "layout(location = " << curInputLoc << ") flat in ";
396 
397 		curInputLoc += numLocs;
398 
399 		if (valueType == transportType)
400 			decl << transportTypeStr << " " << val.name << ";\n";
401 		else
402 		{
403 			decl << transportTypeStr << " v_" << val.name << ";\n";
404 			setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
405 		}
406 	}
407 
408 	declareUniforms(decl, spec.values);
409 	declareReferenceBlock(decl, spec.values);
410 
411 	for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
412 	{
413 		const Value&		val				= spec.values.outputs[ndx];
414 		const DataType		basicType		= val.type.getBasicType();
415 		const char* const	refTypeStr		= getDataTypeName(basicType);
416 
417 		decl << refTypeStr << " " << val.name << ";\n";
418 	}
419 
420 	// Shader specialization.
421 	map<string, string> params;
422 	params.insert(pair<string, string>("DECLARATIONS", decl.str()));
423 	params.insert(pair<string, string>("SETUP", setup.str()));
424 	params.insert(pair<string, string>("OUTPUT", output.str()));
425 	params.insert(pair<string, string>("POSITION_FRAG_COLOR", "dEQP_FragColor"));
426 
427 	StringTemplate	tmpl	(src);
428 	const string	baseSrc	= tmpl.specialize(params);
429 	const string	withExt	= injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_FRAGMENT);
430 
431 	return withExt;
432 }
433 
434 map<string, string> generateVertexSpecialization (const ProgramSpecializationParams& specParams)
435 {
436 	ostringstream			decl;
437 	ostringstream			setup;
438 	map<string, string>		params;
439 	int						curInputLoc		= 0;
440 
441 	decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
442 	curInputLoc += 1;
443 
444 	for (size_t ndx = 0; ndx < specParams.caseSpec.values.inputs.size(); ndx++)
445 	{
446 		const Value&		val					= specParams.caseSpec.values.inputs[ndx];
447 		const DataType		valueType			= val.type.getBasicType();
448 		const char*	const	valueTypeStr		= getDataTypeName(valueType);
449 		const DataType		transportType		= getTransportType(valueType);
450 		const char* const	transportTypeStr	= getDataTypeName(transportType);
451 		const int			numLocs				= getNumTransportLocations(valueType);
452 
453 		decl << "layout(location = " << curInputLoc << ") in ";
454 
455 		curInputLoc += numLocs;
456 
457 		if (valueType == transportType)
458 			decl << transportTypeStr << " " << val.name << ";\n";
459 		else
460 		{
461 			decl << transportTypeStr << " a_" << val.name << ";\n";
462 			setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
463 		}
464 	}
465 
466 	declareUniforms(decl, specParams.caseSpec.values);
467 
468 	params.insert(pair<string, string>("VERTEX_DECLARATIONS",	decl.str()));
469 	params.insert(pair<string, string>("VERTEX_SETUP",			setup.str()));
470 	params.insert(pair<string, string>("VERTEX_OUTPUT",			string("gl_Position = dEQP_Position;\n")));
471 
472 	return params;
473 }
474 
475 map<string, string> generateFragmentSpecialization (const ProgramSpecializationParams& specParams)
476 {
477 	ostringstream		decl;
478 	ostringstream		output;
479 	map<string, string>	params;
480 
481 	genCompareFunctions(decl, specParams.caseSpec.values, false);
482 	genCompareOp(output, "dEQP_FragColor", specParams.caseSpec.values, DE_NULL);
483 
484 	decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
485 
486 	for (size_t ndx = 0; ndx < specParams.caseSpec.values.outputs.size(); ndx++)
487 	{
488 		const Value&		val			= specParams.caseSpec.values.outputs[ndx];
489 		const char*	const	refTypeStr	= getDataTypeName(val.type.getBasicType());
490 
491 		decl << refTypeStr << " " << val.name << ";\n";
492 	}
493 
494 	declareReferenceBlock(decl, specParams.caseSpec.values);
495 	declareUniforms(decl, specParams.caseSpec.values);
496 
497 	params.insert(pair<string, string>("FRAGMENT_DECLARATIONS",	decl.str()));
498 	params.insert(pair<string, string>("FRAGMENT_OUTPUT",		output.str()));
499 	params.insert(pair<string, string>("FRAG_COLOR",			"dEQP_FragColor"));
500 
501 	return params;
502 }
503 
504 map<string, string> generateGeometrySpecialization (const ProgramSpecializationParams& specParams)
505 {
506 	ostringstream		decl;
507 	map<string, string>	params;
508 
509 	decl << "layout (triangles) in;\n";
510 	decl << "layout (triangle_strip, max_vertices=3) out;\n";
511 	decl << "\n";
512 
513 	declareUniforms(decl, specParams.caseSpec.values);
514 
515 	params.insert(pair<string, string>("GEOMETRY_DECLARATIONS",		decl.str()));
516 
517 	return params;
518 }
519 
520 map<string, string> generateTessControlSpecialization (const ProgramSpecializationParams& specParams)
521 {
522 	ostringstream		decl;
523 	ostringstream		output;
524 	map<string, string>	params;
525 
526 	decl << "layout (vertices=3) out;\n";
527 	decl << "\n";
528 
529 	declareUniforms(decl, specParams.caseSpec.values);
530 
531 	output <<	"gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
532 				"gl_TessLevelInner[0] = 2.0;\n"
533 				"gl_TessLevelInner[1] = 2.0;\n"
534 				"gl_TessLevelOuter[0] = 2.0;\n"
535 				"gl_TessLevelOuter[1] = 2.0;\n"
536 				"gl_TessLevelOuter[2] = 2.0;\n"
537 				"gl_TessLevelOuter[3] = 2.0;";
538 
539 	params.insert(pair<string, string>("TESSELLATION_CONTROL_DECLARATIONS",	decl.str()));
540 	params.insert(pair<string, string>("TESSELLATION_CONTROL_OUTPUT",		output.str()));
541 	params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES",				de::toString(specParams.maxPatchVertices)));
542 
543 	return params;
544 }
545 
546 map<string, string> generateTessEvalSpecialization (const ProgramSpecializationParams& specParams)
547 {
548 	ostringstream		decl;
549 	ostringstream		output;
550 	map<string, string>	params;
551 
552 	decl << "layout (triangles) in;\n";
553 	decl << "\n";
554 
555 	declareUniforms(decl, specParams.caseSpec.values);
556 
557 	output <<	"gl_Position = gl_TessCoord[0] * gl_in[0].gl_Position + gl_TessCoord[1] * gl_in[1].gl_Position + gl_TessCoord[2] * gl_in[2].gl_Position;\n";
558 
559 	params.insert(pair<string, string>("TESSELLATION_EVALUATION_DECLARATIONS",	decl.str()));
560 	params.insert(pair<string, string>("TESSELLATION_EVALUATION_OUTPUT",		output.str()));
561 	params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES",					de::toString(specParams.maxPatchVertices)));
562 
563 	return params;
564 }
565 
566 void specializeShaderSources (ProgramSources&						dst,
567 							  const ProgramSources&					src,
568 							  const ProgramSpecializationParams&	specParams,
569 							  glu::ShaderType						shaderType,
570 							  map<string, string>					(*specializationGenerator) (const ProgramSpecializationParams& specParams))
571 {
572 	if (!src.sources[shaderType].empty())
573 	{
574 		const map<string, string>	tmplParams	= specializationGenerator(specParams);
575 
576 		for (size_t ndx = 0; ndx < src.sources[shaderType].size(); ++ndx)
577 		{
578 			const StringTemplate	tmpl			(src.sources[shaderType][ndx]);
579 			const string			baseGLSLCode	= tmpl.specialize(tmplParams);
580 			const string			sourceWithExts	= injectExtensionRequirements(baseGLSLCode, specParams.requiredExtensions, shaderType);
581 
582 			dst << glu::ShaderSource(shaderType, sourceWithExts);
583 		}
584 	}
585 }
586 
587 void specializeProgramSources (glu::ProgramSources&					dst,
588 							   const glu::ProgramSources&			src,
589 							   const ProgramSpecializationParams&	specParams)
590 {
591 	specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_VERTEX,					generateVertexSpecialization);
592 	specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_FRAGMENT,					generateFragmentSpecialization);
593 	specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_GEOMETRY,					generateGeometrySpecialization);
594 	specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_CONTROL,		generateTessControlSpecialization);
595 	specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_EVALUATION,	generateTessEvalSpecialization);
596 
597 	dst << glu::ProgramSeparable(src.separable);
598 }
599 
600 struct ValueBufferLayout
601 {
602 	struct Entry
603 	{
604 		int		offset;
605 		int		vecStride;	//! Applies to matrices only
606 
607 		Entry (void) : offset(0), vecStride(0) {}
608 		Entry (int offset_, int vecStride_) : offset(offset_), vecStride(vecStride_) {}
609 	};
610 
611 	vector<Entry>	entries;
612 	int				size;
613 
614 	ValueBufferLayout (void) : size(0) {}
615 };
616 
617 ValueBufferLayout computeStd140Layout (const vector<Value>& values)
618 {
619 	ValueBufferLayout layout;
620 
621 	layout.entries.resize(values.size());
622 
623 	for (size_t ndx = 0; ndx < values.size(); ++ndx)
624 	{
625 		const DataType	basicType	= values[ndx].type.getBasicType();
626 		const bool		isMatrix	= isDataTypeMatrix(basicType);
627 		const int		numVecs		= isMatrix ? getDataTypeMatrixNumColumns(basicType) : 1;
628 		const DataType	vecType		= isMatrix ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
629 		const int		vecSize		= getDataTypeScalarSize(vecType);
630 		const int		alignment	= ((isMatrix || vecSize == 3) ? 4 : vecSize)*int(sizeof(deUint32));
631 
632 		layout.size			= deAlign32(layout.size, alignment);
633 		layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
634 		layout.size			+= alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
635 	}
636 
637 	return layout;
638 }
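// Worked example (hypothetical value list, for illustration only): for values
// { float a; vec3 b; mat2 c; } the std140 rules implemented above give
//   a -> offset 0   (4-byte alignment)
//   b -> offset 16  (vec3 rounds up to 16-byte alignment)
//   c -> offset 32  (matrix columns padded to a 16-byte stride)
// for a total buffer size of 56 bytes.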
639 
640 ValueBufferLayout computeStd430Layout (const vector<Value>& values)
641 {
642 	ValueBufferLayout layout;
643 
644 	layout.entries.resize(values.size());
645 
646 	for (size_t ndx = 0; ndx < values.size(); ++ndx)
647 	{
648 		const DataType	basicType	= values[ndx].type.getBasicType();
649 		const int		numVecs		= isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
650 		const DataType	vecType		= isDataTypeMatrix(basicType) ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
651 		const int		vecSize		= getDataTypeScalarSize(vecType);
652 		const int		alignment	= (vecSize == 3 ? 4 : vecSize)*int(sizeof(deUint32));
653 
654 		layout.size			= deAlign32(layout.size, alignment);
655 		layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
656 		layout.size			+= alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
657 	}
658 
659 	return layout;
660 }
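// std430 differs from the std140 variant above only in alignment: matrix columns and
// vectors are aligned to their natural size (vec3 still rounds up to 16 bytes), so for
// example a mat2 column stride is 8 bytes here instead of 16.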
661 
662 void copyToLayout (void* dst, const ValueBufferLayout::Entry& entryLayout, const Value& value, int arrayNdx)
663 {
664 	const DataType	basicType	= value.type.getBasicType();
665 	const int		scalarSize	= getDataTypeScalarSize(basicType);
666 	const int		numVecs		= isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
667 	const int		numComps	= isDataTypeMatrix(basicType) ? getDataTypeMatrixNumRows(basicType) : scalarSize;
668 
669 	DE_ASSERT(size_t((arrayNdx+1)*scalarSize) <= value.elements.size());
670 
671 	if (isDataTypeBoolOrBVec(basicType))
672 	{
673 		for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
674 		{
675 			for (int compNdx = 0; compNdx < numComps; compNdx++)
676 			{
677 				const deUint32 data = value.elements[arrayNdx*scalarSize + vecNdx*numComps + compNdx].bool32 ? ~0u : 0u;
678 
679 				deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride + compNdx * sizeof(deUint32),
680 						 &data,
681 						 sizeof(deUint32));
682 			}
683 		}
684 	}
685 	else
686 	{
687 		for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
688 			deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride,
689 					 &value.elements[arrayNdx*scalarSize + vecNdx*numComps],
690 					 numComps*sizeof(deUint32));
691 	}
692 }
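// Booleans are expanded to 32-bit masks (~0u for true, 0u for false) so that the uint
// transport values written here match what the generated shaders convert back to bools.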
693 
694 void copyToLayout (void* dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
695 {
696 	DE_ASSERT(layout.entries.size() == values.size());
697 
698 	for (size_t ndx = 0; ndx < values.size(); ndx++)
699 		copyToLayout(dst, layout.entries[ndx], values[ndx], arrayNdx);
700 }
701 
702 deUint32 getShaderStages (const ShaderCaseSpecification& spec)
703 {
704 	if (spec.caseType == glu::sl::CASETYPE_COMPLETE)
705 	{
706 		deUint32	stages	= 0u;
707 
708 		for (size_t progNdx = 0; progNdx < spec.programs.size(); progNdx++)
709 		{
710 			for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
711 			{
712 				if (!spec.programs[progNdx].sources.sources[shaderType].empty())
713 					stages |= (1u << shaderType);
714 			}
715 		}
716 
717 		return stages;
718 	}
719 	else
720 		return (1u << glu::SHADERTYPE_VERTEX) | (1u << glu::SHADERTYPE_FRAGMENT);
721 }
722 
723 class PipelineProgram
724 {
725 public:
726 								PipelineProgram		(Context& context, const ShaderCaseSpecification& spec);
727 
728 	deUint32					getStages			(void) const					{ return m_stages;							}
729 
730 	bool						hasShader			(glu::ShaderType type) const	{ return (m_stages & (1u << type)) != 0;	}
731 	vk::VkShaderModule			getShader			(glu::ShaderType type) const	{ return *m_shaderModules[type];			}
732 
733 private:
734 	const deUint32				m_stages;
735 	Move<vk::VkShaderModule>	m_shaderModules[glu::SHADERTYPE_LAST];
736 };
737 
738 PipelineProgram::PipelineProgram (Context& context, const ShaderCaseSpecification& spec)
739 	: m_stages(getShaderStages(spec))
740 {
741 	// \note Currently only a single source program is supported, as the framework lacks SPIR-V linking capability
742 	TCU_CHECK_INTERNAL(spec.programs.size() == 1);
743 
744 	for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
745 	{
746 		if ((m_stages & (1u << shaderType)) != 0)
747 		{
748 			m_shaderModules[shaderType]	= vk::createShaderModule(context.getDeviceInterface(), context.getDevice(),
749 																 context.getBinaryCollection().get(getShaderName((glu::ShaderType)shaderType, 0)), 0u);
750 		}
751 	}
752 }
753 
754 Move<vk::VkBuffer> createBuffer (Context& context, vk::VkDeviceSize size, vk::VkBufferUsageFlags usageFlags)
755 {
756 	const deUint32					queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
757 	const vk::VkBufferCreateInfo	params				=
758 	{
759 		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// sType
760 		DE_NULL,									// pNext
761 		0u,											// flags
762 		size,										// size
763 		usageFlags,									// usage
764 		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
765 		1u,											// queueFamilyCount
766 		&queueFamilyIndex,							// pQueueFamilyIndices
767 	};
768 
769 	return vk::createBuffer(context.getDeviceInterface(), context.getDevice(), &params);
770 }
771 
772 Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
773 {
774 	const deUint32					queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
775 	const vk::VkImageCreateInfo		params				=
776 	{
777 		vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// sType
778 		DE_NULL,									// pNext
779 		0u,											// flags
780 		vk::VK_IMAGE_TYPE_2D,						// imageType
781 		format,										// format
782 		{ width, height, 1u },						// extent
783 		1u,											// mipLevels
784 		1u,											// arraySize
785 		vk::VK_SAMPLE_COUNT_1_BIT,					// samples
786 		tiling,										// tiling
787 		usageFlags,									// usage
788 		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
789 		1u,											// queueFamilyCount
790 		&queueFamilyIndex,							// pQueueFamilyIndices
791 		vk::VK_IMAGE_LAYOUT_UNDEFINED,				// initialLayout
792 	};
793 
794 	return vk::createImage(context.getDeviceInterface(), context.getDevice(), &params);
795 }
796 
797 Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
798 {
799 	const vk::VkImageViewCreateInfo	params				=
800 	{
801 		vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// sType
802 		DE_NULL,											// pNext
803 		0u,													// flags
804 		image,												// image
805 		vk::VK_IMAGE_VIEW_TYPE_2D,							// viewType
806 		format,												// format
807 		vk::makeComponentMappingRGBA(),						// channels
808 		{
809 			vk::VK_IMAGE_ASPECT_COLOR_BIT,						// aspectMask
810 			0u,													// baseMipLevel
811 			1u,													// mipLevels
812 			0u,													// baseArrayLayer
813 			1u,													// arraySize
814 		},													// subresourceRange
815 	};
816 
817 	return vk::createImageView(context.getDeviceInterface(), context.getDevice(), &params);
818 }
819 
820 Move<vk::VkRenderPass> createRenderPass (Context& context, vk::VkFormat colorAttFormat, deUint32 size)
821 {
822 	vk::VkAttachmentDescription	colorAttDesc[4];
823 	vk::VkAttachmentReference	colorAttRef[4];
824 
825 	for (deUint32 i = 0; i < size; i++)
826 	{
827 		vk::VkAttachmentDescription	desc =
828 		{
829 			0u,														// flags
830 			colorAttFormat,											// format
831 			vk::VK_SAMPLE_COUNT_1_BIT,								// samples
832 			vk::VK_ATTACHMENT_LOAD_OP_CLEAR,						// loadOp
833 			vk::VK_ATTACHMENT_STORE_OP_STORE,						// storeOp
834 			vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,					// stencilLoadOp
835 			vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,					// stencilStoreOp
836 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,			// initialLayout
837 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,			// finalLayout
838 		};
839 		colorAttDesc[i] = desc;
840 
841 		vk::VkAttachmentReference	ref =
842 		{
843 			i,														// attachment
844 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,			// layout
845 		};
846 		colorAttRef[i] = ref;
847 	}
848 
849 	const vk::VkAttachmentReference		dsAttRef			=
850 	{
851 		VK_ATTACHMENT_UNUSED,									// attachment
852 		vk::VK_IMAGE_LAYOUT_GENERAL,							// layout
853 	};
854 	const vk::VkSubpassDescription		subpassDesc			=
855 	{
856 		(vk::VkSubpassDescriptionFlags)0,
857 		vk::VK_PIPELINE_BIND_POINT_GRAPHICS,					// pipelineBindPoint
858 		0u,														// inputCount
859 		DE_NULL,												// pInputAttachments
860 		size,													// colorCount
861 		&colorAttRef[0],										// pColorAttachments
862 		DE_NULL,												// pResolveAttachments
863 		&dsAttRef,												// depthStencilAttachment
864 		0u,														// preserveCount
865 		DE_NULL,												// pPreserveAttachments
866 
867 	};
868 	const vk::VkRenderPassCreateInfo	renderPassParams	=
869 	{
870 		vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,			// sType
871 		DE_NULL,												// pNext
872 		(vk::VkRenderPassCreateFlags)0,
873 		size,													// attachmentCount
874 		&colorAttDesc[0],										// pAttachments
875 		1u,														// subpassCount
876 		&subpassDesc,											// pSubpasses
877 		0u,														// dependencyCount
878 		DE_NULL,												// pDependencies
879 	};
880 
881 	return vk::createRenderPass(context.getDeviceInterface(), context.getDevice(), &renderPassParams);
882 }
883 
884 vk::VkShaderStageFlags getVkStageFlags (deUint32 stages)
885 {
886 	vk::VkShaderStageFlags	vkStages	= 0u;
887 
888 	for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
889 	{
890 		if ((stages & (1u << shaderType)) != 0)
891 			vkStages |= vk::getVkShaderStage((glu::ShaderType)shaderType);
892 	}
893 
894 	return vkStages;
895 }
896 
897 Move<vk::VkDescriptorSetLayout> createDescriptorSetLayout (Context& context, deUint32 shaderStages)
898 {
899 	DE_STATIC_ASSERT(REFERENCE_UNIFORM_BINDING	== 0);
900 	DE_STATIC_ASSERT(USER_UNIFORM_BINDING		== 1);
901 
902 	return vk::DescriptorSetLayoutBuilder()
903 				.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_FRAGMENT_BIT)
904 				.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, getVkStageFlags(shaderStages))
905 				.build(context.getDeviceInterface(), context.getDevice());
906 }
907 
908 Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
909 {
910 	const vk::VkPipelineLayoutCreateInfo	params	=
911 	{
912 		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// sType
913 		DE_NULL,											// pNext
914 		(vk::VkPipelineLayoutCreateFlags)0,
915 		1u,													// descriptorSetCount
916 		&descriptorSetLayout,								// pSetLayouts
917 		0u,													// pushConstantRangeCount
918 		DE_NULL,											// pPushConstantRanges
919 	};
920 
921 	return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), &params);
922 }
923 
924 vk::VkFormat getVecFormat (DataType scalarType, int scalarSize)
925 {
926 	switch (scalarType)
927 	{
928 		case glu::TYPE_FLOAT:
929 		{
930 			const vk::VkFormat vecFmts[] =
931 			{
932 				vk::VK_FORMAT_R32_SFLOAT,
933 				vk::VK_FORMAT_R32G32_SFLOAT,
934 				vk::VK_FORMAT_R32G32B32_SFLOAT,
935 				vk::VK_FORMAT_R32G32B32A32_SFLOAT,
936 			};
937 			return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
938 		}
939 
940 		case glu::TYPE_INT:
941 		{
942 			const vk::VkFormat vecFmts[] =
943 			{
944 				vk::VK_FORMAT_R32_SINT,
945 				vk::VK_FORMAT_R32G32_SINT,
946 				vk::VK_FORMAT_R32G32B32_SINT,
947 				vk::VK_FORMAT_R32G32B32A32_SINT,
948 			};
949 			return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
950 		}
951 
952 		case glu::TYPE_UINT:
953 		{
954 			const vk::VkFormat vecFmts[] =
955 			{
956 				vk::VK_FORMAT_R32_UINT,
957 				vk::VK_FORMAT_R32G32_UINT,
958 				vk::VK_FORMAT_R32G32B32_UINT,
959 				vk::VK_FORMAT_R32G32B32A32_UINT,
960 			};
961 			return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
962 		}
963 
964 		case glu::TYPE_BOOL:
965 		{
966 			const vk::VkFormat vecFmts[] =
967 			{
968 				vk::VK_FORMAT_R32_UINT,
969 				vk::VK_FORMAT_R32G32_UINT,
970 				vk::VK_FORMAT_R32G32B32_UINT,
971 				vk::VK_FORMAT_R32G32B32A32_UINT,
972 			};
973 			return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
974 		}
975 
976 		default:
977 			DE_FATAL("Unknown scalar type");
978 			return vk::VK_FORMAT_R8G8B8A8_UINT;
979 	}
980 }
981 
982 vector<vk::VkVertexInputAttributeDescription> getVertexAttributeDescriptions (const vector<Value>& inputValues, const ValueBufferLayout& layout)
983 {
984 	vector<vk::VkVertexInputAttributeDescription>	attribs;
985 
986 	// Position
987 	{
988 		const vk::VkVertexInputAttributeDescription	posDesc	=
989 		{
990 			0u,								// location
991 			0u,								// binding
992 			vk::VK_FORMAT_R32G32_SFLOAT,	// format
993 			0u,								// offset
994 		};
995 
996 		attribs.push_back(posDesc);
997 	}
998 
999 	// Input values
1000 	for (size_t inputNdx = 0; inputNdx < inputValues.size(); inputNdx++)
1001 	{
1002 		const Value&					input		= inputValues[inputNdx];
1003 		const ValueBufferLayout::Entry&	layoutEntry	= layout.entries[inputNdx];
1004 		const DataType					basicType	= input.type.getBasicType();
1005 		const int						numVecs		= isDataTypeMatrix(basicType)
1006 													? getDataTypeMatrixNumColumns(basicType)
1007 													: 1;
1008 		const int						vecSize		= isDataTypeMatrix(basicType)
1009 													? getDataTypeMatrixNumRows(basicType)
1010 													: getDataTypeScalarSize(basicType);
1011 		const DataType					scalarType	= getDataTypeScalarType(basicType);
1012 		const vk::VkFormat				vecFmt		= getVecFormat(scalarType, vecSize);
1013 
1014 		for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
1015 		{
1016 			const deUint32								curLoc	= (deUint32)attribs.size();
1017 			const deUint32								offset	= (deUint32)(layoutEntry.offset + layoutEntry.vecStride*vecNdx);
1018 			const vk::VkVertexInputAttributeDescription	desc	=
1019 			{
1020 				curLoc,		// location
1021 				1u,			// binding
1022 				vecFmt,		// format
1023 				offset,		// offset
1024 			};
1025 
1026 			attribs.push_back(desc);
1027 		}
1028 	}
1029 
1030 	return attribs;
1031 }
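// Note that location 0 / binding 0 is always the dEQP_Position quad vertex data, while
// user input values live in binding 1 and receive one attribute location per vector, so
// a matrix input consumes one location per column at vecStride offsets into its entry.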
1032 
1033 Move<vk::VkPipeline> createPipeline (Context&					context,
1034 									 const vector<Value>&		inputValues,
1035 									 const ValueBufferLayout&	inputLayout,
1036 									 const PipelineProgram&		program,
1037 									 vk::VkRenderPass			renderPass,
1038 									 vk::VkPipelineLayout		pipelineLayout,
1039 									 tcu::UVec2					renderSize,
1040 									 deUint32					size)
1041 {
1042 	const vk::VkShaderModule							vertShader				= program.hasShader(glu::SHADERTYPE_VERTEX) ? program.getShader(glu::SHADERTYPE_VERTEX) : DE_NULL;
1043 	const vk::VkShaderModule							tessControlShader		= program.hasShader(glu::SHADERTYPE_TESSELLATION_CONTROL) ? program.getShader(glu::SHADERTYPE_TESSELLATION_CONTROL) : DE_NULL;
1044 	const vk::VkShaderModule							tessEvalShader			= program.hasShader(glu::SHADERTYPE_TESSELLATION_EVALUATION) ? program.getShader(glu::SHADERTYPE_TESSELLATION_EVALUATION) : DE_NULL;
1045 	const vk::VkShaderModule							geomShader				= program.hasShader(glu::SHADERTYPE_GEOMETRY) ? program.getShader(glu::SHADERTYPE_GEOMETRY) : DE_NULL;
1046 	const vk::VkShaderModule							fragShader				= program.hasShader(glu::SHADERTYPE_FRAGMENT) ? program.getShader(glu::SHADERTYPE_FRAGMENT) : DE_NULL;
1047 	const vector<vk::VkVertexInputAttributeDescription>	vertexAttribParams		(getVertexAttributeDescriptions(inputValues, inputLayout));
1048 	const vector<vk::VkViewport>						viewports				(1, vk::makeViewport(renderSize));
1049 	const vector<vk::VkRect2D>							scissors				(1, vk::makeRect2D(renderSize));
1050 	const vk::VkVertexInputBindingDescription			vertexBindings[]		=
1051 	{
1052 		{
1053 			0u,																	// binding
1054 			(deUint32)sizeof(tcu::Vec2),										// stride
1055 			vk::VK_VERTEX_INPUT_RATE_VERTEX,									// stepRate
1056 		},
1057 		{
1058 			1u,																	// binding
1059 			0u,																	// stride
1060 			vk::VK_VERTEX_INPUT_RATE_INSTANCE,									// stepRate
1061 		},
1062 	};
1063 	const vk::VkPipelineVertexInputStateCreateInfo		vertexInputStateParams	=
1064 	{
1065 		vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,		// sType
1066 		DE_NULL,															// pNext
1067 		(vk::VkPipelineVertexInputStateCreateFlags)0,
1068 		(inputValues.empty() ? 1u : 2u),									// bindingCount
1069 		vertexBindings,														// pVertexBindingDescriptions
1070 		(deUint32)vertexAttribParams.size(),								// attributeCount
1071 		&vertexAttribParams[0],												// pVertexAttributeDescriptions
1072 	};
1073 	const vk::VkColorComponentFlags						allCompMask				= vk::VK_COLOR_COMPONENT_R_BIT
1074 																				| vk::VK_COLOR_COMPONENT_G_BIT
1075 																				| vk::VK_COLOR_COMPONENT_B_BIT
1076 																				| vk::VK_COLOR_COMPONENT_A_BIT;
1077 	vk::VkPipelineColorBlendAttachmentState				attBlendParams[4];
1078 	for (deUint32 i = 0; i < size; i++)
1079 	{
1080 		vk::VkPipelineColorBlendAttachmentState blend =
1081 		{
1082 			VK_FALSE,															// blendEnable
1083 			vk::VK_BLEND_FACTOR_ONE,											// srcBlendColor
1084 			vk::VK_BLEND_FACTOR_ZERO,											// destBlendColor
1085 			vk::VK_BLEND_OP_ADD,												// blendOpColor
1086 			vk::VK_BLEND_FACTOR_ONE,											// srcBlendAlpha
1087 			vk::VK_BLEND_FACTOR_ZERO,											// destBlendAlpha
1088 			vk::VK_BLEND_OP_ADD,												// blendOpAlpha
1089 			allCompMask,														// componentWriteMask
1090 		};
1091 		attBlendParams[i] = blend;
1092 	}
1093 
1094 	const vk::VkPipelineColorBlendStateCreateInfo		blendParams				=
1095 	{
1096 		vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,		// sType
1097 		DE_NULL,															// pNext
1098 		(vk::VkPipelineColorBlendStateCreateFlags)0,
1099 		VK_FALSE,															// logicOpEnable
1100 		vk::VK_LOGIC_OP_COPY,												// logicOp
1101 		size,																// attachmentCount
1102 		&attBlendParams[0],													// pAttachments
1103 		{ 0.0f, 0.0f, 0.0f, 0.0f },											// blendConstants
1104 	};
1105 
1106 	return vk::makeGraphicsPipeline(context.getDeviceInterface(),				// const DeviceInterface&                        vk
1107 									context.getDevice(),						// const VkDevice                                device
1108 									pipelineLayout,								// const VkPipelineLayout                        pipelineLayout
1109 									vertShader,									// const VkShaderModule                          vertexShaderModule
1110 									tessControlShader,							// const VkShaderModule                          tessellationControlShaderModule
1111 									tessEvalShader,								// const VkShaderModule                          tessellationEvalShaderModule
1112 									geomShader,									// const VkShaderModule                          geometryShaderModule
1113 									fragShader,									// const VkShaderModule                          fragmentShaderModule
1114 									renderPass,									// const VkRenderPass                            renderPass
1115 									viewports,									// const std::vector<VkViewport>&                viewports
1116 									scissors,									// const std::vector<VkRect2D>&                  scissors
1117 									vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,	// const VkPrimitiveTopology                     topology
1118 									0u,											// const deUint32                                subpass
1119 									0u,											// const deUint32                                patchControlPoints
1120 									&vertexInputStateParams,					// const VkPipelineVertexInputStateCreateInfo*   vertexInputStateCreateInfo
1121 									DE_NULL,									// const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
1122 									DE_NULL,									// const VkPipelineMultisampleStateCreateInfo*   multisampleStateCreateInfo
1123 									DE_NULL,									// const VkPipelineDepthStencilStateCreateInfo*  depthStencilStateCreateInfo
1124 									&blendParams);								// const VkPipelineColorBlendStateCreateInfo*    colorBlendStateCreateInfo
1125 }
1126 
1127 Move<vk::VkFramebuffer> createFramebuffer (Context& context, vk::VkRenderPass renderPass, Move<vk::VkImageView> colorAttView[4], deUint32 size, int width, int height)
1128 {
1129 	vk::VkImageView att[4];
1130 	for (deUint32 i = 0; i < size; i++)
1131 	{
1132 		att[i] = *colorAttView[i];
1133 	}
1134 	const vk::VkFramebufferCreateInfo	framebufferParams	=
1135 	{
1136 		vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,	// sType
1137 		DE_NULL,										// pNext
1138 		(vk::VkFramebufferCreateFlags)0,
1139 		renderPass,										// renderPass
1140 		size,											// attachmentCount
1141 		&att[0],										// pAttachments
1142 		(deUint32)width,								// width
1143 		(deUint32)height,								// height
1144 		1u,												// layers
1145 	};
1146 
1147 	return vk::createFramebuffer(context.getDeviceInterface(), context.getDevice(), &framebufferParams);
1148 }
1149 
1150 Move<vk::VkCommandPool> createCommandPool (Context& context)
1151 {
1152 	const deUint32						queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1153 
1154 	return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), (vk::VkCommandPoolCreateFlags)0u, queueFamilyIndex);
1155 }
1156 
1157 Move<vk::VkDescriptorPool> createDescriptorPool (Context& context)
1158 {
1159 	return vk::DescriptorPoolBuilder()
1160 				.addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2u)
1161 				.build(context.getDeviceInterface(), context.getDevice(), vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1162 }
1163 
1164 Move<vk::VkDescriptorSet> allocateDescriptorSet (Context& context, vk::VkDescriptorPool descriptorPool, vk::VkDescriptorSetLayout setLayout)
1165 {
1166 	const vk::VkDescriptorSetAllocateInfo	params	=
1167 	{
1168 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1169 		DE_NULL,
1170 		descriptorPool,
1171 		1u,
1172 		&setLayout
1173 	};
1174 
1175 	return vk::allocateDescriptorSet(context.getDeviceInterface(), context.getDevice(), &params);
1176 }
1177 
1178 Move<vk::VkCommandBuffer> allocateCommandBuffer (Context& context, vk::VkCommandPool cmdPool)
1179 {
1180 	return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1181 }
1182 
1183 MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1184 {
1185 	const vk::DeviceInterface&		vkd		= context.getDeviceInterface();
1186 	const vk::VkMemoryRequirements	bufReqs	= vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1187 	MovePtr<vk::Allocation>			memory	= context.getDefaultAllocator().allocate(bufReqs, memReqs);
1188 
1189 	vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1190 
1191 	return memory;
1192 }
1193 
1194 vk::VkFormat getRenderTargetFormat (DataType dataType)
1195 {
1196 	switch (dataType)
1197 	{
1198 		case glu::TYPE_FLOAT_VEC2:
1199 			return vk::VK_FORMAT_R8G8_UNORM;
1200 		case glu::TYPE_FLOAT_VEC3:
1201 			return vk::VK_FORMAT_R5G6B5_UNORM_PACK16;
1202 		case glu::TYPE_FLOAT_VEC4:
1203 			return vk::VK_FORMAT_R8G8B8A8_UNORM;
1204 		case glu::TYPE_INT_VEC2:
1205 			return vk::VK_FORMAT_R8G8_SINT;
1206 		case glu::TYPE_INT_VEC4:
1207 			return vk::VK_FORMAT_R8G8B8A8_SINT;
1208 		default:
1209 			return vk::VK_FORMAT_R8G8B8A8_UNORM;
1210 	}
1211 }
1212 
1213 MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1214 {
1215 	const vk::DeviceInterface&		vkd		= context.getDeviceInterface();
1216 	const vk::VkMemoryRequirements	imgReqs	= vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1217 	MovePtr<vk::Allocation>			memory	= context.getDefaultAllocator().allocate(imgReqs, memReqs);
1218 
1219 	vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1220 
1221 	return memory;
1222 }
1223 
1224 void writeValuesToMem (Context& context, const vk::Allocation& dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
1225 {
1226 	copyToLayout(dst.getHostPtr(), layout, values, arrayNdx);
1227 
1228 	// \note Buffers are not allocated with coherency / uncached requirement so we need to manually flush CPU write caches
1229 	flushAlloc(context.getDeviceInterface(), context.getDevice(), dst);
1230 }
1231 
1232 class ShaderCaseInstance : public TestInstance
1233 {
1234 public:
1235 													ShaderCaseInstance		(Context& context, const ShaderCaseSpecification& spec);
1236 													~ShaderCaseInstance		(void);
1237 
1238 	TestStatus										iterate					(void);
1239 
1240 private:
1241 	enum
1242 	{
1243 		RENDER_WIDTH		= 64,
1244 		RENDER_HEIGHT		= 64,
1245 
1246 		POSITIONS_OFFSET	= 0,
1247 		POSITIONS_SIZE		= (int)sizeof(Vec2)*4,
1248 
1249 		INDICES_OFFSET		= POSITIONS_SIZE,
1250 		INDICES_SIZE		= (int)sizeof(deUint16)*6,
1251 
1252 		TOTAL_POS_NDX_SIZE	= POSITIONS_SIZE+INDICES_SIZE
1253 	};
1254 
1255 	const ShaderCaseSpecification&					m_spec;
1256 
1257 	const Unique<vk::VkBuffer>						m_posNdxBuffer;
1258 	const UniquePtr<vk::Allocation>					m_posNdxMem;
1259 
1260 	const ValueBufferLayout							m_inputLayout;
1261 	const Unique<vk::VkBuffer>						m_inputBuffer;			// Input values (attributes). Can be NULL if no inputs present
1262 	const UniquePtr<vk::Allocation>					m_inputMem;				// Input memory, can be NULL if no input buffer exists
1263 
1264 	const ValueBufferLayout							m_referenceLayout;
1265 	const Unique<vk::VkBuffer>						m_referenceBuffer;		// Output (reference) values. Can be NULL if no outputs present
1266 	const UniquePtr<vk::Allocation>					m_referenceMem;			// Output (reference) memory, can be NULL if no reference buffer exists
1267 
1268 	const ValueBufferLayout							m_uniformLayout;
1269 	const Unique<vk::VkBuffer>						m_uniformBuffer;		// Uniform values. Can be NULL if no uniforms present
1270 	const UniquePtr<vk::Allocation>					m_uniformMem;			// Uniform memory, can be NULL if no uniform buffer exists
1271 
1272 	const vk::VkFormat								m_rtFormat;
1273 	deUint32										m_outputCount;
1274 	Move<vk::VkImage>								m_rtImage [4];
1275 	MovePtr<vk::Allocation>							m_rtMem[4];
1276 	Move<vk::VkImageView>							m_rtView[4];
1277 
1278 	Move<vk::VkBuffer>								m_readImageBuffer[4];
1279 	MovePtr<vk::Allocation>							m_readImageMem[4];
1280 
1281 	const Unique<vk::VkRenderPass>					m_renderPass;
1282 	Move<vk::VkFramebuffer>							m_framebuffer;
1283 	const PipelineProgram							m_program;
1284 	const Unique<vk::VkDescriptorSetLayout>			m_descriptorSetLayout;
1285 	const Unique<vk::VkPipelineLayout>				m_pipelineLayout;
1286 	const Unique<vk::VkPipeline>					m_pipeline;
1287 
1288 	const Unique<vk::VkDescriptorPool>				m_descriptorPool;
1289 	const Unique<vk::VkDescriptorSet>				m_descriptorSet;
1290 
1291 	const Unique<vk::VkCommandPool>					m_cmdPool;
1292 	const Unique<vk::VkCommandBuffer>				m_cmdBuffer;
1293 
1294 	int												m_subCaseNdx;
1295 };
1296 
1297 ShaderCaseInstance::ShaderCaseInstance (Context& context, const ShaderCaseSpecification& spec)
1298 	: TestInstance			(context)
1299 	, m_spec				(spec)
1300 
1301 	, m_posNdxBuffer		(createBuffer(context, (vk::VkDeviceSize)TOTAL_POS_NDX_SIZE, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT))
1302 	, m_posNdxMem			(allocateAndBindMemory(context, *m_posNdxBuffer, vk::MemoryRequirement::HostVisible))
1303 
1304 	, m_inputLayout			(computeStd430Layout(spec.values.inputs))
1305 	, m_inputBuffer			(m_inputLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_inputLayout.size, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) : Move<vk::VkBuffer>())
1306 	, m_inputMem			(m_inputLayout.size > 0 ? allocateAndBindMemory(context, *m_inputBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1307 
1308 	, m_referenceLayout		(computeStd140Layout(spec.values.outputs))
1309 	, m_referenceBuffer		(m_referenceLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_referenceLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1310 	, m_referenceMem		(m_referenceLayout.size > 0 ? allocateAndBindMemory(context, *m_referenceBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1311 
1312 	, m_uniformLayout		(computeStd140Layout(spec.values.uniforms))
1313 	, m_uniformBuffer		(m_uniformLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_uniformLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1314 	, m_uniformMem			(m_uniformLayout.size > 0 ? allocateAndBindMemory(context, *m_uniformBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1315 
1316 	, m_rtFormat			(getRenderTargetFormat(spec.outputFormat))
1317 	, m_outputCount			(((deUint32)m_spec.values.outputs.size() == 0 || m_spec.outputType == glu::sl::OUTPUT_RESULT) ? 1 : (deUint32)m_spec.values.outputs.size())
1318 	, m_rtImage				()
1319 	, m_rtMem				()
1320 	, m_rtView				()
1321 
1322 	, m_readImageBuffer		()
1323 	, m_readImageMem		()
1324 
1325 	, m_renderPass			(createRenderPass(context, m_rtFormat, m_outputCount))
1326 	, m_framebuffer			()
1327 	, m_program				(context, spec)
1328 	, m_descriptorSetLayout	(createDescriptorSetLayout(context, m_program.getStages()))
1329 	, m_pipelineLayout		(createPipelineLayout(context, *m_descriptorSetLayout))
1330 	, m_pipeline			(createPipeline(context, spec.values.inputs, m_inputLayout, m_program, *m_renderPass, *m_pipelineLayout, tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT), m_outputCount))
1331 
1332 	, m_descriptorPool		(createDescriptorPool(context))
1333 	, m_descriptorSet		(allocateDescriptorSet(context, *m_descriptorPool, *m_descriptorSetLayout))
1334 
1335 	, m_cmdPool				(createCommandPool(context))
1336 	, m_cmdBuffer			(allocateCommandBuffer(context, *m_cmdPool))
1337 
1338 	, m_subCaseNdx			(0)
1339 {
1340 	{
1341 		// Initialize the resources for each color attachment needed by the shader
1342 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1343 		{
1344 			m_rtImage[outNdx] = createImage2D(context, RENDER_WIDTH, RENDER_HEIGHT, m_rtFormat, vk::VK_IMAGE_TILING_OPTIMAL, vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
1345 			m_rtMem[outNdx] = allocateAndBindMemory(context, *m_rtImage[outNdx], vk::MemoryRequirement::Any);
1346 			m_rtView[outNdx] = createAttachmentView(context, *m_rtImage[outNdx], m_rtFormat);
1347 
1348 			m_readImageBuffer[outNdx] = createBuffer(context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat))), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT);
1349 			m_readImageMem[outNdx] = allocateAndBindMemory(context, *m_readImageBuffer[outNdx], vk::MemoryRequirement::HostVisible);
1350 		}
1351 		m_framebuffer = createFramebuffer(context, *m_renderPass, m_rtView, m_outputCount, RENDER_WIDTH, RENDER_HEIGHT);
1352 	}
1353 
1354 	const vk::DeviceInterface&	vkd					= context.getDeviceInterface();
1355 	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1356 
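	// Upload a full-screen quad (four corner positions, six indices forming two triangles) into the host-visible position/index buffer and flush it.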
1357 	{
1358 		const Vec2			s_positions[]	=
1359 		{
1360 			Vec2(-1.0f, -1.0f),
1361 			Vec2(-1.0f, +1.0f),
1362 			Vec2(+1.0f, -1.0f),
1363 			Vec2(+1.0f, +1.0f)
1364 		};
1365 		const deUint16		s_indices[]		=
1366 		{
1367 			0, 1, 2,
1368 			1, 3, 2
1369 		};
1370 
1371 		DE_STATIC_ASSERT(sizeof(s_positions) == POSITIONS_SIZE);
1372 		DE_STATIC_ASSERT(sizeof(s_indices) == INDICES_SIZE);
1373 
1374 		deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + POSITIONS_OFFSET,	&s_positions[0],	sizeof(s_positions));
1375 		deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + INDICES_OFFSET,		&s_indices[0],		sizeof(s_indices));
1376 
1377 		flushAlloc(m_context.getDeviceInterface(), context.getDevice(), *m_posNdxMem);
1378 	}
1379 
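	// If the case declares uniforms, point the user uniform block binding at the uniform value buffer.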
1380 	if (!m_spec.values.uniforms.empty())
1381 	{
1382 		const vk::VkDescriptorBufferInfo	bufInfo	=
1383 		{
1384 			*m_uniformBuffer,
1385 			(vk::VkDeviceSize)0,	// offset
1386 			(vk::VkDeviceSize)m_uniformLayout.size
1387 		};
1388 
1389 		vk::DescriptorSetUpdateBuilder()
1390 			.writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(USER_UNIFORM_BINDING),
1391 						 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
1392 			.update(vkd, m_context.getDevice());
1393 	}
1394 
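	// If the case declares outputs, point the reference uniform block binding at the buffer holding the expected output values.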
1395 	if (!m_spec.values.outputs.empty())
1396 	{
1397 		const vk::VkDescriptorBufferInfo	bufInfo	=
1398 		{
1399 			*m_referenceBuffer,
1400 			(vk::VkDeviceSize)0,	// offset
1401 			(vk::VkDeviceSize)m_referenceLayout.size
1402 		};
1403 
1404 		vk::DescriptorSetUpdateBuilder()
1405 			.writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(REFERENCE_UNIFORM_BINDING),
1406 						 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
1407 			.update(vkd, m_context.getDevice());
1408 	}
1409 
1410 	// Record command buffer
1411 
1412 	beginCommandBuffer(vkd, *m_cmdBuffer, 0u);
1413 
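	// Make host writes to the vertex/index/uniform data visible to the device and transition each color attachment from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL.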
1414 	{
1415 		const vk::VkMemoryBarrier		vertFlushBarrier	=
1416 		{
1417 			vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,													// sType
1418 			DE_NULL,																				// pNext
1419 			vk::VK_ACCESS_HOST_WRITE_BIT,															// srcAccessMask
1420 			vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT|vk::VK_ACCESS_UNIFORM_READ_BIT,					// dstAccessMask
1421 		};
1422 		vk::VkImageMemoryBarrier	colorAttBarrier[4];
1423 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1424 		{
1425 			vk::VkImageMemoryBarrier barrier =
1426 			{
1427 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
1428 				DE_NULL,										// pNext
1429 				0u,												// srcAccessMask
1430 				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// dstAccessMask
1431 				vk::VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout
1432 				vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
1433 				queueFamilyIndex,								// srcQueueFamilyIndex
1434 				queueFamilyIndex,								// destQueueFamilyIndex
1435 				*m_rtImage[outNdx],								// image
1436 				{
1437 					vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
1438 					0u,											// baseMipLevel
1439 					1u,											// mipLevels
1440 					0u,											// baseArraySlice
1441 					1u,											// arraySize
1442 				}												// subresourceRange
1443 			};
1444 			colorAttBarrier[outNdx]	= barrier;
1445 		}
1446 		vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
1447 							   1, &vertFlushBarrier,
1448 							   0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1449 							   m_outputCount, &colorAttBarrier[0]);
1450 	}
1451 
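	// Begin the render pass, clearing all color attachments to a fixed non-white color.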
1452 	{
1453 		vk::VkClearValue			clearValue[4];
1454 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1455 		{
1456 			vk::VkClearValue value = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
1457 			clearValue[outNdx] = value;
1458 		}
1459 		beginRenderPass(vkd, *m_cmdBuffer, *m_renderPass, *m_framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), m_outputCount, clearValue);
1460 	}
1461 
1462 	vkd.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
1463 
1464 	if (!m_spec.values.uniforms.empty() || !m_spec.values.outputs.empty())
1465 		vkd.cmdBindDescriptorSets(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &*m_descriptorSet, 0u, DE_NULL);
1466 
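	// Bind the position buffer and, when the case has per-vertex inputs, the input value buffer as a second vertex buffer.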
1467 	{
1468 		const vk::VkBuffer		buffers[]	= { *m_posNdxBuffer, *m_inputBuffer };
1469 		const vk::VkDeviceSize	offsets[]	= { POSITIONS_OFFSET, 0u };
1470 		const deUint32			numBuffers	= buffers[1] != 0 ? 2u : 1u;
1471 		vkd.cmdBindVertexBuffers(*m_cmdBuffer, 0u, numBuffers, buffers, offsets);
1472 	}
1473 
1474 	vkd.cmdBindIndexBuffer	(*m_cmdBuffer, *m_posNdxBuffer, (vk::VkDeviceSize)INDICES_OFFSET, vk::VK_INDEX_TYPE_UINT16);
1475 	vkd.cmdDrawIndexed		(*m_cmdBuffer, 6u, 1u, 0u, 0u, 0u);
1476 	endRenderPass			(vkd, *m_cmdBuffer);
1477 
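	// Transition the color attachments from COLOR_ATTACHMENT_OPTIMAL to TRANSFER_SRC_OPTIMAL so they can be read back.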
1478 	{
1479 		vk::VkImageMemoryBarrier	renderFinishBarrier[4];
1480 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1481 		{
1482 			vk::VkImageMemoryBarrier	barrier =
1483 			{
1484 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
1485 				DE_NULL,										// pNext
1486 				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// srcAccessMask
1487 				vk::VK_ACCESS_TRANSFER_READ_BIT,				// dstAccessMask
1488 				vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// oldLayout
1489 				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// newLayout
1490 				queueFamilyIndex,								// srcQueueFamilyIndex
1491 				queueFamilyIndex,								// destQueueFamilyIndex
1492 				*m_rtImage[outNdx],								// image
1493 				{
1494 					vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
1495 					0u,											// baseMipLevel
1496 					1u,											// mipLevels
1497 					0u,											// baseArraySlice
1498 					1u,											// arraySize
1499 				}												// subresourceRange
1500 			};
1501 			renderFinishBarrier[outNdx] = barrier;
1502 		}
1503 
1504 		vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
1505 							   0, (const vk::VkMemoryBarrier*)DE_NULL,
1506 							   0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1507 							   m_outputCount, &renderFinishBarrier[0]);
1508 	}
1509 
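	// Copy each color attachment into its host-visible readback buffer.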
1510 	{
1511 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1512 		{
1513 			const vk::VkBufferImageCopy	copyParams	=
1514 			{
1515 				(vk::VkDeviceSize)0u,					// bufferOffset
1516 				(deUint32)RENDER_WIDTH,					// bufferRowLength
1517 				(deUint32)RENDER_HEIGHT,				// bufferImageHeight
1518 				{
1519 					vk::VK_IMAGE_ASPECT_COLOR_BIT,			// aspect
1520 					0u,										// mipLevel
1521 					0u,										// arrayLayer
1522 					1u,										// arraySize
1523 				},										// imageSubresource
1524 				{ 0u, 0u, 0u },							// imageOffset
1525 				{ RENDER_WIDTH, RENDER_HEIGHT, 1u }		// imageExtent
1526 			};
1527 
1528 			vkd.cmdCopyImageToBuffer(*m_cmdBuffer, *m_rtImage[outNdx], vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *m_readImageBuffer[outNdx], 1u, &copyParams);
1529 		}
1530 	}
1531 
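	// Make the transfer writes to the readback buffers visible to host reads.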
1532 	{
1533 		const vk::VkDeviceSize			size				= (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat)));
1534 		vk::VkBufferMemoryBarrier	copyFinishBarrier[4];
1535 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1536 		{
1537 			vk::VkBufferMemoryBarrier barrier =
1538 			{
1539 				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,		// sType
1540 				DE_NULL,											// pNext
1541 				vk::VK_ACCESS_TRANSFER_WRITE_BIT,					// srcAccessMask
1542 				vk::VK_ACCESS_HOST_READ_BIT,						// dstAccessMask
1543 				queueFamilyIndex,									// srcQueueFamilyIndex
1544 				queueFamilyIndex,									// destQueueFamilyIndex
1545 				*m_readImageBuffer[outNdx],							// buffer
1546 				0u,													// offset
1547 				size												// size
1548 			};
1549 			copyFinishBarrier[outNdx] = barrier;
1550 		}
1551 		vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
1552 							   0, (const vk::VkMemoryBarrier*)DE_NULL,
1553 							   m_outputCount, &copyFinishBarrier[0],
1554 							   0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1555 	}
1556 
1557 	endCommandBuffer(vkd, *m_cmdBuffer);
1558 }
1559 
1560 ShaderCaseInstance::~ShaderCaseInstance (void)
1561 {
1562 }
1563 
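// One sub-case per tuple of output values: the sub-case count is the element count of the first output divided by its scalar size.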
1564 int getNumSubCases (const ValueBlock& values)
1565 {
1566 	if (!values.outputs.empty())
1567 		return int(values.outputs[0].elements.size() / values.outputs[0].type.getScalarSize());
1568 	else
1569 		return 1; // Always run at least one iteration even if no output values are specified
1570 }
1571 
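// Used for OUTPUT_RESULT cases: the fragment shader encodes a passing pixel as pure white, so any non-white pixel counts as a failure.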
1572 bool checkResultImage (const ConstPixelBufferAccess& result)
1573 {
1574 	const tcu::IVec4	refPix	(255, 255, 255, 255);
1575 
1576 	for (int y = 0; y < result.getHeight(); y++)
1577 	{
1578 		for (int x = 0; x < result.getWidth(); x++)
1579 		{
1580 			const tcu::IVec4	resPix	= result.getPixelInt(x, y);
1581 
1582 			if (boolAny(notEqual(resPix, refPix)))
1583 				return false;
1584 		}
1585 	}
1586 
1587 	return true;
1588 }
1589 
1590 bool checkResultImageWithReference (const ConstPixelBufferAccess& result, tcu::IVec4 refPix)
1591 {
1592 	for (int y = 0; y < result.getHeight(); y++)
1593 	{
1594 		for (int x = 0; x < result.getWidth(); x++)
1595 		{
1596 			const tcu::IVec4	resPix	= result.getPixelInt(x, y);
1597 
1598 			if (boolAny(notEqual(resPix, refPix)))
1599 				return false;
1600 		}
1601 	}
1602 
1603 	return true;
1604 }
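
// Runs one sub-case per iteration: uploads the sub-case values, submits the prerecorded command buffer and verifies the read-back image(s).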
1605 TestStatus ShaderCaseInstance::iterate (void)
1606 {
1607 	const vk::DeviceInterface&	vkd		= m_context.getDeviceInterface();
1608 	const vk::VkDevice			device	= m_context.getDevice();
1609 	const vk::VkQueue			queue	= m_context.getUniversalQueue();
1610 
1611 	if (!m_spec.values.inputs.empty())
1612 		writeValuesToMem(m_context, *m_inputMem, m_inputLayout, m_spec.values.inputs, m_subCaseNdx);
1613 
1614 	if (!m_spec.values.outputs.empty())
1615 		writeValuesToMem(m_context, *m_referenceMem, m_referenceLayout, m_spec.values.outputs, m_subCaseNdx);
1616 
1617 	if (!m_spec.values.uniforms.empty())
1618 		writeValuesToMem(m_context, *m_uniformMem, m_uniformLayout, m_spec.values.uniforms, m_subCaseNdx);
1619 
1620 	submitCommandsAndWait(vkd, device, queue, m_cmdBuffer.get());
1621 
1622 	// Result was checked in fragment shader
1623 	if (m_spec.outputType == glu::sl::OUTPUT_RESULT)
1624 	{
1625 		const ConstPixelBufferAccess	imgAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), RENDER_WIDTH, RENDER_HEIGHT, 1, m_readImageMem[0]->getHostPtr());
1626 
1627 		invalidateMappedMemoryRange(vkd, device, m_readImageMem[0]->getMemory(), m_readImageMem[0]->getOffset(), (vk::VkDeviceSize)(RENDER_WIDTH*RENDER_HEIGHT*4));
1628 
1629 		if (!checkResultImage(imgAccess))
1630 		{
1631 			TestLog&	log		= m_context.getTestContext().getLog();
1632 
1633 			log << TestLog::Message << "ERROR: Got non-white pixels on sub-case " << m_subCaseNdx << TestLog::EndMessage
1634 				<< TestLog::Image("Result", "Result", imgAccess);
1635 
1636 			dumpValues(log, m_spec.values, m_subCaseNdx);
1637 
1638 			return TestStatus::fail(string("Got invalid pixels at sub-case ") + de::toString(m_subCaseNdx));
1639 		}
1640 	}
1641 	// Result was written to color buffer
1642 	else
1643 	{
1644 		for (deUint32 outNdx = 0; outNdx < m_outputCount; outNdx++)
1645 		{
1646 			const ConstPixelBufferAccess	imgAccess		(vk::mapVkFormat(m_rtFormat), RENDER_WIDTH, RENDER_HEIGHT, 1, m_readImageMem[outNdx]->getHostPtr());
1647 			const DataType					dataType		= m_spec.values.outputs[outNdx].type.getBasicType();
1648 			const int						numComponents	= getDataTypeScalarSize(dataType);
1649 			tcu::IVec4						reference		(0, 0, 0, 1);
1650 
1651 			for (int refNdx = 0; refNdx < numComponents; refNdx++)
1652 			{
1653 				if (isDataTypeFloatOrVec(dataType))
1654 					reference[refNdx] = (int)m_spec.values.outputs[outNdx].elements[m_subCaseNdx * numComponents + refNdx].float32;
1655 				else if (isDataTypeIntOrIVec(dataType))
1656 					reference[refNdx] = m_spec.values.outputs[outNdx].elements[m_subCaseNdx * numComponents + refNdx].int32;
1657 				else
1658 					DE_FATAL("Unknown data type");
1659 			}
1660 
1661 			invalidateMappedMemoryRange(vkd, device, m_readImageMem[outNdx]->getMemory(), m_readImageMem[outNdx]->getOffset(), (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat))));
1662 
1663 			if (!checkResultImageWithReference(imgAccess, reference))
1664 			{
1665 				TestLog&	log		= m_context.getTestContext().getLog();
1666 
1667 				log << TestLog::Message << "ERROR: Got non-matching pixels on sub-case " << m_subCaseNdx << " output " << outNdx << TestLog::EndMessage
1668 					<< TestLog::Image("Result", "Result", imgAccess);
1669 
1670 				dumpValues(log, m_spec.values, m_subCaseNdx);
1671 
1672 				return TestStatus::fail(string("Got invalid pixels at sub-case ") + de::toString(m_subCaseNdx));
1673 			}
1674 		}
1675 	}
1676 
1677 	if (++m_subCaseNdx < getNumSubCases(m_spec.values))
1678 		return TestStatus::incomplete();
1679 	else
1680 		return TestStatus::pass("All sub-cases passed");
1681 }
1682 
1683 class ShaderCase : public TestCase
1684 {
1685 public:
1686 									ShaderCase		(tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec);
1687 
1688 
1689 	void							initPrograms	(SourceCollections& programCollection) const;
1690 	TestInstance*					createInstance	(Context& context) const;
1691 
1692 private:
1693 	const ShaderCaseSpecification	m_spec;
1694 };
1695 
1696 ShaderCase::ShaderCase (tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec)
1697 	: TestCase	(testCtx, name, description)
1698 	, m_spec	(spec)
1699 {
1700 }
1701 
1702 void ShaderCase::initPrograms (SourceCollections& sourceCollection) const
1703 {
1704 	vector<ProgramSources>	specializedSources	(m_spec.programs.size());
1705 
1706 	DE_ASSERT(isValid(m_spec));
1707 
1708 	if (m_spec.expectResult != glu::sl::EXPECT_PASS)
1709 		TCU_THROW(InternalError, "Only EXPECT_PASS is supported");
1710 
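	// Vertex-only cases pair the specialized vertex shader with a generated fragment shader; fragment-only cases pair a generated vertex shader with the specialized fragment shader.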
1711 	if (m_spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY)
1712 	{
1713 		DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX].size() == 1);
1714 		specializedSources[0] << glu::VertexSource(specializeVertexShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX][0]))
1715 							  << glu::FragmentSource(genFragmentShader(m_spec));
1716 	}
1717 	else if (m_spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY)
1718 	{
1719 		DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT].size() == 1);
1720 		specializedSources[0] << glu::VertexSource(genVertexShader(m_spec))
1721 							  << glu::FragmentSource(specializeFragmentShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT][0]));
1722 	}
1723 	else
1724 	{
1725 		DE_ASSERT(m_spec.caseType == glu::sl::CASETYPE_COMPLETE);
1726 
1727 		const int	maxPatchVertices	= 4; // \todo [2015-08-05 pyry] Query
1728 
1729 		for (size_t progNdx = 0; progNdx < m_spec.programs.size(); progNdx++)
1730 		{
1731 			const ProgramSpecializationParams	progSpecParams	(m_spec, m_spec.programs[progNdx].requiredExtensions, maxPatchVertices);
1732 
1733 			specializeProgramSources(specializedSources[progNdx], m_spec.programs[progNdx].sources, progSpecParams);
1734 		}
1735 	}
1736 
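	// Register every non-empty specialized shader stage in the GLSL source collection under a per-program, per-stage name.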
1737 	for (size_t progNdx = 0; progNdx < specializedSources.size(); progNdx++)
1738 	{
1739 		for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
1740 		{
1741 			if (!specializedSources[progNdx].sources[shaderType].empty())
1742 			{
1743 				vk::GlslSource& curSrc	= sourceCollection.glslSources.add(getShaderName((glu::ShaderType)shaderType, progNdx));
1744 				curSrc.sources[shaderType] = specializedSources[progNdx].sources[shaderType];
1745 			}
1746 		}
1747 	}
1748 }
1749 
1750 TestInstance* ShaderCase::createInstance (Context& context) const
1751 {
1752 	return new ShaderCaseInstance(context, m_spec);
1753 }
1754 
1755 class ShaderCaseFactory : public glu::sl::ShaderCaseFactory
1756 {
1757 public:
1758 	ShaderCaseFactory (tcu::TestContext& testCtx)
1759 		: m_testCtx(testCtx)
1760 	{
1761 	}
1762 
1763 	tcu::TestCaseGroup* createGroup (const string& name, const string& description, const vector<tcu::TestNode*>& children)
1764 	{
1765 		return new tcu::TestCaseGroup(m_testCtx, name.c_str(), description.c_str(), children);
1766 	}
1767 
1768 	tcu::TestCase* createCase (const string& name, const string& description, const ShaderCaseSpecification& spec)
1769 	{
1770 		return new ShaderCase(m_testCtx, name, description, spec);
1771 	}
1772 
1773 private:
1774 	tcu::TestContext&	m_testCtx;
1775 };
1776 
1777 class ShaderLibraryGroup : public tcu::TestCaseGroup
1778 {
1779 public:
1780 	ShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
1781 		 : tcu::TestCaseGroup	(testCtx, name.c_str(), description.c_str())
1782 		 , m_filename			(filename)
1783 	{
1784 	}
1785 
1786 	void init (void)
1787 	{
1788 		ShaderCaseFactory				caseFactory	(m_testCtx);
1789 		const vector<tcu::TestNode*>	children	= glu::sl::parseFile(m_testCtx.getArchive(), m_filename, &caseFactory);
1790 
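		// Transfer ownership of the parsed cases to this group; if adding a child fails, delete the remaining children before rethrowing.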
1791 		for (size_t ndx = 0; ndx < children.size(); ndx++)
1792 		{
1793 			try
1794 			{
1795 				addChild(children[ndx]);
1796 			}
1797 			catch (...)
1798 			{
1799 				for (; ndx < children.size(); ndx++)
1800 					delete children[ndx];
1801 				throw;
1802 			}
1803 		}
1804 	}
1805 
1806 private:
1807 	const string	m_filename;
1808 };
1809 
1810 } // anonymous
1811 
1812 MovePtr<tcu::TestCaseGroup> createShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
1813 {
1814 	return MovePtr<tcu::TestCaseGroup>(new ShaderLibraryGroup(testCtx, name, description, filename));
1815 }
1816 
1817 } // vkt
1818