1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2017 The Khronos Group Inc.
6  * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Protected memory image access tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktProtectedMemShaderImageAccessTests.hpp"
26 
27 #include "vktProtectedMemContext.hpp"
28 #include "vktProtectedMemUtils.hpp"
29 #include "vktProtectedMemImageValidator.hpp"
30 #include "vktTestCase.hpp"
31 #include "vktTestGroupUtil.hpp"
32 
33 #include "vkPrograms.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkImageUtil.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkObjUtil.hpp"
39 
40 #include "tcuTestLog.hpp"
41 #include "tcuVector.hpp"
42 #include "tcuTextureUtil.hpp"
43 #include "tcuStringTemplate.hpp"
44 
45 #include "gluTextureTestUtil.hpp"
46 
47 #include "deRandom.hpp"
48 
49 namespace vkt
50 {
51 namespace ProtectedMem
52 {
53 
54 namespace
55 {
56 
57 enum
58 {
59 	RENDER_WIDTH	= 128,
60 	RENDER_HEIGHT	= 128,
61 	IMAGE_WIDTH		= 128,
62 	IMAGE_HEIGHT	= 128,
63 };
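// Both the render target and the test image are 128x128, so each fragment (graphics path)
// or compute invocation maps to exactly one texel of the test image.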
64 
65 enum AccessType
66 {
67 	ACCESS_TYPE_SAMPLING = 0,
68 	ACCESS_TYPE_TEXEL_FETCH,
69 	ACCESS_TYPE_IMAGE_LOAD,
70 	ACCESS_TYPE_IMAGE_STORE,
71 	ACCESS_TYPE_IMAGE_ATOMICS,
72 
73 	ACCESS_TYPE_LAST
74 };
75 
76 enum AtomicOperation
77 {
78 	ATOMIC_OPERATION_ADD = 0,
79 	ATOMIC_OPERATION_MIN,
80 	ATOMIC_OPERATION_MAX,
81 	ATOMIC_OPERATION_AND,
82 	ATOMIC_OPERATION_OR,
83 	ATOMIC_OPERATION_XOR,
84 	ATOMIC_OPERATION_EXCHANGE,
85 
86 	ATOMIC_OPERATION_LAST
87 };
88 
89 struct Params
90 {
91 	glu::ShaderType				shaderType;
92 	AccessType					accessType;
93 	vk::VkFormat				imageFormat;
94 	AtomicOperation				atomicOperation;
95 	bool						pipelineProtectedAccess;
96 	bool						useMaintenance5;
97 	vk::VkPipelineCreateFlags	flags;
98 	ProtectionMode				protectionMode;
99 
100 	Params (void)
101 		: shaderType				(glu::SHADERTYPE_LAST)
102 		, accessType				(ACCESS_TYPE_LAST)
103 		, imageFormat				(vk::VK_FORMAT_UNDEFINED)
104 		, atomicOperation			(ATOMIC_OPERATION_LAST)
105 		, pipelineProtectedAccess	(false)
106 		, useMaintenance5			(false)
107 		, flags						((vk::VkPipelineCreateFlags)0u)
108 		, protectionMode			(PROTECTION_ENABLED)
109 	{}
110 
111 	Params (const glu::ShaderType			shaderType_,
112 			const AccessType				accessType_,
113 			const vk::VkFormat				imageFormat_,
114 			const AtomicOperation			atomicOperation_,
115 			const bool						pipelineProtectedAccess_,
116 			const vk::VkPipelineCreateFlags flags_)
117 		: shaderType				(shaderType_)
118 		, accessType				(accessType_)
119 		, imageFormat				(imageFormat_)
120 		, atomicOperation			(atomicOperation_)
121 		, pipelineProtectedAccess	(pipelineProtectedAccess_)
122 		, flags						(flags_)
123 		, protectionMode			(PROTECTION_ENABLED)
124 	{
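		// VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT guarantees the pipeline never accesses
		// protected memory, so the rest of the test (command pool, images, queue submission)
		// switches to PROTECTION_DISABLED.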
125 #ifndef CTS_USES_VULKANSC
126 		if ((flags_ & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0) {
127 			protectionMode = PROTECTION_DISABLED;
128 		}
129 #endif
130 	}
131 };
132 
133 static deUint32 getSeedValue (const Params& params)
134 {
135 	return deInt32Hash(params.shaderType) ^ deInt32Hash(params.accessType) ^ deInt32Hash(params.imageFormat) ^ deInt32Hash(params.atomicOperation);
136 }
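// The seed mixes all relevant test parameters so every case fills its source texture with a
// different, but reproducible, random pattern (see createTestTexture2D and validateResult).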
137 
138 static std::string getAtomicOperationCaseName (const AtomicOperation op)
139 {
140 	switch (op)
141 	{
142 		case ATOMIC_OPERATION_ADD:			return "add";
143 		case ATOMIC_OPERATION_MIN:			return "min";
144 		case ATOMIC_OPERATION_MAX:			return "max";
145 		case ATOMIC_OPERATION_AND:			return "and";
146 		case ATOMIC_OPERATION_OR:			return "or";
147 		case ATOMIC_OPERATION_XOR:			return "xor";
148 		case ATOMIC_OPERATION_EXCHANGE:		return "exchange";
149 		default:
150 			DE_FATAL("Impossible");
151 			return "";
152 	}
153 }
154 
155 static std::string getAtomicOperationShaderFuncName (const AtomicOperation op)
156 {
157 	switch (op)
158 	{
159 		case ATOMIC_OPERATION_ADD:			return "imageAtomicAdd";
160 		case ATOMIC_OPERATION_MIN:			return "imageAtomicMin";
161 		case ATOMIC_OPERATION_MAX:			return "imageAtomicMax";
162 		case ATOMIC_OPERATION_AND:			return "imageAtomicAnd";
163 		case ATOMIC_OPERATION_OR:			return "imageAtomicOr";
164 		case ATOMIC_OPERATION_XOR:			return "imageAtomicXor";
165 		case ATOMIC_OPERATION_EXCHANGE:		return "imageAtomicExchange";
166 		default:
167 			DE_FATAL("Impossible");
168 			return "";
169 	}
170 }
171 
172 //! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
173 static deInt32 computeBinaryAtomicOperationResult (const AtomicOperation op, const deInt32 a, const deInt32 b)
174 {
175 	switch (op)
176 	{
177 		case ATOMIC_OPERATION_ADD:			return a + b;
178 		case ATOMIC_OPERATION_MIN:			return de::min(a, b);
179 		case ATOMIC_OPERATION_MAX:			return de::max(a, b);
180 		case ATOMIC_OPERATION_AND:			return a & b;
181 		case ATOMIC_OPERATION_OR:			return a | b;
182 		case ATOMIC_OPERATION_XOR:			return a ^ b;
183 		case ATOMIC_OPERATION_EXCHANGE:		return b;
184 		default:
185 			DE_FATAL("Impossible");
186 			return -1;
187 	}
188 }
189 
190 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
191 {
192 	const char* orderPart;
193 	const char* typePart;
194 
195 	switch (format.order)
196 	{
197 		case tcu::TextureFormat::R:		orderPart = "r";	break;
198 		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
199 		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
200 		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;
201 
202 		default:
203 			DE_FATAL("Impossible");
204 			orderPart = DE_NULL;
205 	}
206 
207 	switch (format.type)
208 	{
209 		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
210 		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;
211 
212 		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
213 		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
214 		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;
215 
216 		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
217 		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
218 		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;
219 
220 		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
221 		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;
222 
223 		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
224 		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;
225 
226 		default:
227 			DE_FATAL("Impossible");
228 			typePart = DE_NULL;
229 	}
230 
231 	return std::string() + orderPart + typePart;
232 }
233 
234 static std::string getShaderSamplerOrImageType (const tcu::TextureFormat& format, bool isSampler)
235 {
236 	const std::string formatPart = tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
237 								   tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER   ? "i" : "";
238 
239 	return formatPart + (isSampler ? "sampler2D" : "image2D");
240 }
241 
242 class ImageAccessTestInstance : public ProtectedTestInstance
243 {
244 public:
245 								ImageAccessTestInstance	(Context&				ctx,
246 														 const ImageValidator&	validator,
247 														 const Params&			params);
248 	virtual tcu::TestStatus		iterate					(void);
249 
250 private:
251 	de::MovePtr<tcu::Texture2D>	createTestTexture2D		(void);
252 	void						calculateAtomicRef		(tcu::Texture2D&		texture2D);
253 	tcu::TestStatus				validateResult			(vk::VkImage			image,
254 														 vk::VkImageLayout		imageLayout,
255 														 const tcu::Texture2D&	texture2D,
256 														 const tcu::Sampler&	refSampler);
257 
258 	tcu::TestStatus				executeFragmentTest		(void);
259 	tcu::TestStatus				executeComputeTest		(void);
260 
261 	const ImageValidator&		m_validator;
262 	const Params				m_params;
263 };
264 
265 class ImageAccessTestCase : public TestCase
266 {
267 public:
268 								ImageAccessTestCase		(tcu::TestContext&		testCtx,
269 														 const std::string&		name,
270 														 const Params&			params)
271 									: TestCase		(testCtx, name)
272 									, m_validator	(params.imageFormat)
273 									, m_params		(params)
274 								{
275 								}
276 
277 	virtual						~ImageAccessTestCase	(void) {}
278 	virtual TestInstance*		createInstance			(Context& ctx) const
279 								{
280 									return new ImageAccessTestInstance(ctx, m_validator, m_params);
281 								}
282 	virtual void				initPrograms			(vk::SourceCollections& programCollection) const;
283 	virtual void				checkSupport			(Context& context) const
284 								{
285 									checkProtectedQueueSupport(context);
286 									if (m_params.useMaintenance5)
287 										context.requireDeviceFunctionality("VK_KHR_maintenance5");
288 								}
289 
290 private:
291 	ImageValidator				m_validator;
292 	Params						m_params;
293 };
294 
295 void ImageAccessTestCase::initPrograms (vk::SourceCollections& programCollection) const
296 {
297 	const tcu::TextureFormat&	texFormat		= mapVkFormat(m_params.imageFormat);
298 	const std::string			imageFormat		= getShaderImageFormatQualifier(texFormat);
299 	const std::string			imageType		= getShaderSamplerOrImageType(texFormat, false);
300 	const std::string			samplerType		= getShaderSamplerOrImageType(texFormat, true);
301 	const std::string			colorVecType	= isIntFormat(m_params.imageFormat)		? "ivec4" :
302 												  isUintFormat(m_params.imageFormat)	? "uvec4" : "vec4";
303 
304 	m_validator.initPrograms(programCollection);
305 
306 	if (m_params.shaderType == glu::SHADERTYPE_FRAGMENT)
307 	{
308 		{
309 			// Vertex shader
310 			const char* vert = "#version 450\n"
311 							   "layout(location = 0) in mediump vec2 a_position;\n"
312 							   "layout(location = 1) in mediump vec2 a_texCoord;\n"
313 							   "layout(location = 0) out mediump vec2 v_texCoord;\n"
314 							   "\n"
315 							   "void main() {\n"
316 							   "    gl_Position = vec4(a_position, 0.0, 1.0);\n"
317 							   "    v_texCoord = a_texCoord;\n"
318 							   "}\n";
319 
320 			programCollection.glslSources.add("vert") << glu::VertexSource(vert);
321 		}
322 
323 		{
324 			// Fragment shader
325 			std::ostringstream frag;
326 			frag << "#version 450\n"
327 					"layout(location = 0) in mediump vec2 v_texCoord;\n"
328 					"layout(location = 0) out highp ${COLOR_VEC_TYPE} o_color;\n";
329 
330 			switch (m_params.accessType)
331 			{
332 				case ACCESS_TYPE_SAMPLING:
333 				case ACCESS_TYPE_TEXEL_FETCH:
334 					frag << "layout(set = 0, binding = 0) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
335 					break;
336 				case ACCESS_TYPE_IMAGE_LOAD:
337 					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_image;\n";
338 					break;
339 				case ACCESS_TYPE_IMAGE_STORE:
340 					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_imageA;\n";
341 					frag << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) writeonly uniform highp ${IMAGE_TYPE} u_imageB;\n";
342 					break;
343 				case ACCESS_TYPE_IMAGE_ATOMICS:
344 					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) coherent uniform highp ${IMAGE_TYPE} u_image;\n";
345 					break;
346 				default:
347 					DE_FATAL("Impossible");
348 					break;
349 			}
350 
351 			frag << "\n"
352 					"void main() {\n";
353 
354 			switch (m_params.accessType)
355 			{
356 				case ACCESS_TYPE_SAMPLING:
357 					frag << "    o_color = texture(u_sampler, v_texCoord);\n";
358 					break;
359 				case ACCESS_TYPE_TEXEL_FETCH:
360 					frag << "    const highp int lod = 0;\n";
361 					frag << "    o_color = texelFetch(u_sampler, ivec2(v_texCoord), lod);\n";
362 					break;
363 				case ACCESS_TYPE_IMAGE_LOAD:
364 					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
365 					break;
366 				case ACCESS_TYPE_IMAGE_STORE:
367 					frag << "    o_color = imageLoad(u_imageA, ivec2(v_texCoord));\n";
368 					frag << "    imageStore(u_imageB, ivec2(v_texCoord), o_color);\n";
369 					break;
370 				case ACCESS_TYPE_IMAGE_ATOMICS:
371 					frag << "    int gx = int(v_texCoord.x);\n";
372 					frag << "    int gy = int(v_texCoord.y);\n";
373 					frag << "    "
374 						 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
375 						 << "(u_image, ivec2(v_texCoord), "
376 						 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
377 						 << "(gx*gx + gy*gy));\n";
378 					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
379 					break;
380 				default:
381 					DE_FATAL("Impossible");
382 					break;
383 			}
384 
385 			frag << "}\n";
386 
387 			std::map<std::string, std::string> fragParams;
388 
389 			fragParams["IMAGE_FORMAT"]		= imageFormat;
390 			fragParams["IMAGE_TYPE"]		= imageType;
391 			fragParams["SAMPLER_TYPE"]		= samplerType;
392 			fragParams["COLOR_VEC_TYPE"]	= colorVecType;
393 
394 			programCollection.glslSources.add("frag") << glu::FragmentSource(tcu::StringTemplate(frag.str()).specialize(fragParams));
395 		}
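		// For illustration only: with ACCESS_TYPE_IMAGE_LOAD and VK_FORMAT_R32_UINT the template
		// above specializes to roughly the following fragment shader:
		//
		//     #version 450
		//     layout(location = 0) in mediump vec2 v_texCoord;
		//     layout(location = 0) out highp uvec4 o_color;
		//     layout(set = 0, binding = 0, r32ui) readonly uniform highp uimage2D u_image;
		//
		//     void main() {
		//         o_color = imageLoad(u_image, ivec2(v_texCoord));
		//     }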
396 	}
397 	else if (m_params.shaderType == glu::SHADERTYPE_COMPUTE)
398 	{
399 		// Compute shader
400 		std::ostringstream comp;
401 		comp << "#version 450\n"
402 				"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
403 				"layout(set = 0, binding = 0, ${IMAGE_FORMAT}) ${RES_MEM_QUALIFIER} uniform highp ${IMAGE_TYPE} u_resultImage;\n";
404 
405 		switch (m_params.accessType)
406 		{
407 			case ACCESS_TYPE_SAMPLING:
408 			case ACCESS_TYPE_TEXEL_FETCH:
409 				comp << "layout(set = 0, binding = 1) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
410 				break;
411 			case ACCESS_TYPE_IMAGE_LOAD:
412 			case ACCESS_TYPE_IMAGE_STORE:
413 				comp << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_srcImage;\n";
414 				break;
415 			case ACCESS_TYPE_IMAGE_ATOMICS:
416 				break;
417 			default:
418 				DE_FATAL("Impossible");
419 				break;
420 		}
421 
422 		comp << "\n"
423 				"void main() {\n"
424 				"    int gx = int(gl_GlobalInvocationID.x);\n"
425 				"    int gy = int(gl_GlobalInvocationID.y);\n";
426 
427 		switch (m_params.accessType)
428 		{
429 			case ACCESS_TYPE_SAMPLING:
430 				comp << "    ${COLOR_VEC_TYPE} color = texture(u_sampler, vec2(float(gx)/" << de::toString((int)IMAGE_WIDTH) << ", float(gy)/" << de::toString((int)IMAGE_HEIGHT) << "));\n";
431 				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
432 				break;
433 			case ACCESS_TYPE_TEXEL_FETCH:
434 				comp << "    const highp int lod = 0;\n";
435 				comp << "    ${COLOR_VEC_TYPE} color = texelFetch(u_sampler, ivec2(gx, gy), lod);\n";
436 				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
437 				break;
438 			case ACCESS_TYPE_IMAGE_LOAD:
439 			case ACCESS_TYPE_IMAGE_STORE:
440 				comp << "    ${COLOR_VEC_TYPE} color = imageLoad(u_srcImage, ivec2(gx, gy));\n";
441 				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
442 				break;
443 			case ACCESS_TYPE_IMAGE_ATOMICS:
444 				comp << "    "
445 					 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
446 					 << "(u_resultImage, ivec2(gx, gy), "
447 					 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
448 					 << "(gx*gx + gy*gy));\n";
449 				break;
450 			default:
451 				DE_FATAL("Impossible");
452 				break;
453 		}
454 
455 		comp << "}\n";
456 
457 		std::map<std::string, std::string> compParams;
458 
459 		compParams["IMAGE_FORMAT"]		= imageFormat;
460 		compParams["IMAGE_TYPE"]		= imageType;
461 		compParams["SAMPLER_TYPE"]		= samplerType;
462 		compParams["COLOR_VEC_TYPE"]	= colorVecType;
463 		compParams["RES_MEM_QUALIFIER"]	= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? "coherent" : "writeonly";
464 
465 		programCollection.glslSources.add("comp") << glu::ComputeSource(tcu::StringTemplate(comp.str()).specialize(compParams));
466 	}
467 	else
468 		DE_FATAL("Impossible");
469 }
470 
471 ImageAccessTestInstance::ImageAccessTestInstance (Context&					ctx,
472 												  const ImageValidator&		validator,
473 												  const Params&				params)
474 	: ProtectedTestInstance(ctx, params.pipelineProtectedAccess ? std::vector<std::string>({ "VK_EXT_pipeline_protected_access" }) : std::vector<std::string>())
475 	, m_validator			(validator)
476 	, m_params				(params)
477 {
478 }
479 
480 de::MovePtr<tcu::Texture2D> ImageAccessTestInstance::createTestTexture2D (void)
481 {
482 	const tcu::TextureFormat		texFmt		= mapVkFormat(m_params.imageFormat);
483 	const tcu::TextureFormatInfo	fmtInfo		= tcu::getTextureFormatInfo(texFmt);
484 	de::MovePtr<tcu::Texture2D>		texture2D	(new tcu::Texture2D(texFmt, IMAGE_WIDTH, IMAGE_HEIGHT));
485 
486 	// \note generate only the base level
487 	texture2D->allocLevel(0);
488 
489 	const tcu::PixelBufferAccess&	level		= texture2D->getLevel(0);
490 
491 	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
492 	{
493 		// use a smaller range than the format would allow
494 		const float		cMin	= isIntFormat(m_params.imageFormat) ? -1000.0f : 0.0f;
495 		const float		cMax	= +1000.0f;
496 
497 		fillWithRandomColorTiles(level, tcu::Vec4(cMin, 0, 0, 0), tcu::Vec4(cMax, 0, 0, 0), getSeedValue(m_params));
498 	}
499 	else
500 		fillWithRandomColorTiles(level, fmtInfo.valueMin, fmtInfo.valueMax, getSeedValue(m_params));
501 
502 	return texture2D;
503 }
504 
505 tcu::TestStatus ImageAccessTestInstance::iterate (void)
506 {
507 	switch (m_params.shaderType)
508 	{
509 		case glu::SHADERTYPE_FRAGMENT:	return executeFragmentTest();
510 		case glu::SHADERTYPE_COMPUTE:	return executeComputeTest();
511 		default:
512 			DE_FATAL("Impossible");
513 			return tcu::TestStatus::fail("");
514 	}
515 }
516 
517 tcu::TestStatus ImageAccessTestInstance::executeComputeTest (void)
518 {
519 	ProtectedContext&					ctx					(m_protectedContext);
520 	const vk::DeviceInterface&			vk					= ctx.getDeviceInterface();
521 	const vk::VkDevice					device				= ctx.getDevice();
522 	const vk::VkQueue					queue				= ctx.getQueue();
523 	const deUint32						queueFamilyIndex	= ctx.getQueueFamilyIndex();
524 
525 	vk::Unique<vk::VkCommandPool>		cmdPool				(makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));
526 
527 	de::MovePtr<tcu::Texture2D>			texture2D			= createTestTexture2D();
528 	const tcu::Sampler					refSampler			= tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
529 																		   tcu::Sampler::NEAREST, tcu::Sampler::NEAREST,
530 																		   0.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
531 																		   0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);
532 
533 	vk::Unique<vk::VkShaderModule>		computeShader		(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("comp"), 0));
534 
535 	de::MovePtr<vk::ImageWithMemory>	imageSrc;
536 	de::MovePtr<vk::ImageWithMemory>	imageDst;
537 	vk::Move<vk::VkSampler>				sampler;
538 	vk::Move<vk::VkImageView>			imageViewSrc;
539 	vk::Move<vk::VkImageView>			imageViewDst;
540 
541 	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
542 	vk::Move<vk::VkDescriptorPool>		descriptorPool;
543 	vk::Move<vk::VkDescriptorSet>		descriptorSet;
544 
545 	// Create src and dst images
546 	{
547 		vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT	|
548 												vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT	|
549 												vk::VK_IMAGE_USAGE_SAMPLED_BIT		|
550 												vk::VK_IMAGE_USAGE_STORAGE_BIT;
551 
552 		imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
553 								 IMAGE_WIDTH, IMAGE_HEIGHT,
554 								 m_params.imageFormat,
555 								 imageUsageFlags);
556 
557 		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
558 		{
559 			imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
560 									 IMAGE_WIDTH, IMAGE_HEIGHT,
561 									 m_params.imageFormat,
562 									 imageUsageFlags);
563 		}
564 	}
565 
566 	// Upload source image
567 	{
568 		de::MovePtr<vk::ImageWithMemory>	unprotectedImage	= createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
569 																				IMAGE_WIDTH, IMAGE_HEIGHT,
570 																				m_params.imageFormat,
571 																				vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
572 
573 		// Upload data to an unprotected image
574 		uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
575 
576 		// Select vkImageLayout based upon accessType
577 		vk::VkImageLayout imageSrcLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
578 
579 		switch (m_params.accessType)
580 		{
581 			case ACCESS_TYPE_SAMPLING:
582 			case ACCESS_TYPE_TEXEL_FETCH:
583 			{
584 				imageSrcLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
585 				break;
586 			}
587 			case ACCESS_TYPE_IMAGE_LOAD:
588 			case ACCESS_TYPE_IMAGE_STORE:
589 			case ACCESS_TYPE_IMAGE_ATOMICS:
590 			{
591 				imageSrcLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
592 				break;
593 			}
594 			default:
595 				DE_FATAL("Impossible");
596 				break;
597 		}
598 
599 		// Copy unprotected image to protected image
600 		copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageSrcLayout, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.protectionMode);
601 	}
602 
603 	// Clear dst image
604 	if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS && m_params.protectionMode == PROTECTION_ENABLED)
605 		clearImage(m_protectedContext, **imageDst);
606 
607 	// Create descriptors
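	// Binding 0 is always the storage image the compute shader writes its result to; binding 1
	// (when present) is the sampled or storage source image. For atomics the single image at
	// binding 0 is read-modify-written in place.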
608 	{
609 		vk::DescriptorSetLayoutBuilder	layoutBuilder;
610 		vk::DescriptorPoolBuilder		poolBuilder;
611 
612 		switch (m_params.accessType)
613 		{
614 			case ACCESS_TYPE_SAMPLING:
615 			case ACCESS_TYPE_TEXEL_FETCH:
616 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
617 				layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_COMPUTE_BIT, DE_NULL);
618 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
619 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
620 				break;
621 			case ACCESS_TYPE_IMAGE_LOAD:
622 			case ACCESS_TYPE_IMAGE_STORE:
623 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
624 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
625 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
626 				break;
627 			case ACCESS_TYPE_IMAGE_ATOMICS:
628 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
629 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
630 				break;
631 			default:
632 				DE_FATAL("Impossible");
633 				break;
634 		}
635 
636 		descriptorSetLayout		= layoutBuilder.build(vk, device);
637 		descriptorPool			= poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
638 		descriptorSet			= makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
639 	}
640 
641 	// Create pipeline layout
642 	vk::Unique<vk::VkPipelineLayout>	pipelineLayout		(makePipelineLayout(vk, device, *descriptorSetLayout));
643 
644 	// Create sampler and image views
645 	{
646 		if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
647 		{
648 			const tcu::TextureFormat		texFormat		= mapVkFormat(m_params.imageFormat);
649 			const vk::VkSamplerCreateInfo	samplerParams	= vk::mapSampler(refSampler, texFormat);
650 
651 			sampler = createSampler(vk, device, &samplerParams);
652 		}
653 
654 		imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
655 
656 		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
657 			imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
658 	}
659 
660 	// Update descriptor set information
661 	{
662 		vk::DescriptorSetUpdateBuilder		updateBuilder;
663 
664 		switch (m_params.accessType)
665 		{
666 			case ACCESS_TYPE_SAMPLING:
667 			case ACCESS_TYPE_TEXEL_FETCH:
668 			{
669 				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
670 				vk::VkDescriptorImageInfo	descSampledImgSrc	= makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
671 
672 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
673 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImgSrc);
674 				break;
675 			}
676 			case ACCESS_TYPE_IMAGE_LOAD:
677 			case ACCESS_TYPE_IMAGE_STORE:
678 			{
679 				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
680 				vk::VkDescriptorImageInfo	descStorageImgSrc	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
681 
682 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
683 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
684 				break;
685 			}
686 			case ACCESS_TYPE_IMAGE_ATOMICS:
687 			{
688 				vk::VkDescriptorImageInfo	descStorageImg		= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
689 
690 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
691 				break;
692 			}
693 			default:
694 				DE_FATAL("Impossible");
695 				break;
696 		}
697 
698 		updateBuilder.update(vk, device);
699 	}
700 
701 	// Create validation compute commands & submit
702 	{
703 		const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams
704 		{
705 			vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType						sType;
706 			nullptr,													// const void*							pNext;
707 			0u,															// VkPipelineShaderStageCreateFlags		flags;
708 			vk::VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits				stage;
709 			*computeShader,												// VkShaderModule						module;
710 			"main",														// const char*							pName;
711 			DE_NULL,													// const VkSpecializationInfo*			pSpecializationInfo;
712 		};
713 
714 		vk::VkComputePipelineCreateInfo pipelineCreateInfo
715 		{
716 			vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,			// VkStructureType					sType;
717 			nullptr,													// const void*						pNext;
718 			m_params.flags,												// VkPipelineCreateFlags			flags;
719 			pipelineShaderStageParams,									// VkPipelineShaderStageCreateInfo	stage;
720 			*pipelineLayout,											// VkPipelineLayout					layout;
721 			DE_NULL,													// VkPipeline						basePipelineHandle;
722 			0,															// deInt32							basePipelineIndex;
723 		};
724 
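		// With VK_KHR_maintenance5 the create flags are supplied through
		// VkPipelineCreateFlags2CreateInfoKHR in pNext instead of the legacy flags field,
		// which is then left as zero.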
725 #ifndef CTS_USES_VULKANSC
726 		vk::VkPipelineCreateFlags2CreateInfoKHR pipelineFlags2CreateInfo = vk::initVulkanStructure();
727 		if (m_params.useMaintenance5)
728 		{
729 			pipelineFlags2CreateInfo.flags = (vk::VkPipelineCreateFlagBits2KHR)m_params.flags;
730 			pipelineCreateInfo.pNext = &pipelineFlags2CreateInfo;
731 			pipelineCreateInfo.flags = 0;
732 		}
733 #endif // CTS_USES_VULKANSC
734 
735 		vk::Unique<vk::VkPipeline>			pipeline(createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));
736 
737 		const vk::Unique<vk::VkFence>		fence		(vk::createFence(vk, device));
738 		vk::Unique<vk::VkCommandBuffer>		cmdBuffer	(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
739 
740 		beginCommandBuffer(vk, *cmdBuffer);
741 
742 		vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
743 		vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
744 		vk.cmdDispatch(*cmdBuffer, (deUint32)IMAGE_WIDTH, (deUint32)IMAGE_HEIGHT, 1u);
745 		endCommandBuffer(vk, *cmdBuffer);
746 
747 		VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
748 	}
749 
750 	// Calculate reference image
751 	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
752 		calculateAtomicRef(*texture2D);
753 
754 	// Validate result
755 	{
756 		const vk::VkImage	resultImage		= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc : **imageDst;
757 
758 		return validateResult(resultImage, vk::VK_IMAGE_LAYOUT_GENERAL, *texture2D, refSampler);
759 	}
760 }
761 
762 tcu::TestStatus ImageAccessTestInstance::executeFragmentTest (void)
763 {
764 	ProtectedContext&					ctx					(m_protectedContext);
765 	const vk::DeviceInterface&			vk					= ctx.getDeviceInterface();
766 	const vk::VkDevice					device				= ctx.getDevice();
767 	const vk::VkQueue					queue				= ctx.getQueue();
768 	const deUint32						queueFamilyIndex	= ctx.getQueueFamilyIndex();
769 
770 	// Create output image
771 	de::MovePtr<vk::ImageWithMemory>	colorImage			(createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
772 																		   RENDER_WIDTH, RENDER_HEIGHT,
773 																		   m_params.imageFormat,
774 																		   vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
775 	vk::Unique<vk::VkImageView>			colorImageView		(createImageView(ctx, **colorImage, m_params.imageFormat));
776 
777 	vk::Unique<vk::VkRenderPass>		renderPass			(createRenderPass(ctx, m_params.imageFormat));
778 	vk::Unique<vk::VkFramebuffer>		framebuffer			(createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));
779 
780 	vk::Unique<vk::VkCommandPool>		cmdPool				(makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));
781 	vk::Unique<vk::VkCommandBuffer>		cmdBuffer			(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
782 
783 	de::MovePtr<tcu::Texture2D>			texture2D			= createTestTexture2D();
784 	const tcu::Sampler					refSampler			= tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
785 																		   tcu::Sampler::NEAREST, tcu::Sampler::NEAREST,
786 																		   0.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
787 																		   0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);
788 
789 	vk::Move<vk::VkShaderModule>		vertexShader		= createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0);
790 	vk::Move<vk::VkShaderModule>		fragmentShader		= createShaderModule(vk, device, ctx.getBinaryCollection().get("frag"), 0);
791 
792 	de::MovePtr<vk::ImageWithMemory>	imageSrc;
793 	de::MovePtr<vk::ImageWithMemory>	imageDst;
794 	vk::Move<vk::VkSampler>				sampler;
795 	vk::Move<vk::VkImageView>			imageViewSrc;
796 	vk::Move<vk::VkImageView>			imageViewDst;
797 
798 	vk::Move<vk::VkPipeline>			graphicsPipeline;
799 	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
800 	vk::Move<vk::VkDescriptorPool>		descriptorPool;
801 	vk::Move<vk::VkDescriptorSet>		descriptorSet;
802 
803 	// Create src and dst images
804 	{
805 		vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT	|
806 												vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT	|
807 												vk::VK_IMAGE_USAGE_SAMPLED_BIT;
808 
809 		switch (m_params.accessType)
810 		{
811 			case ACCESS_TYPE_IMAGE_LOAD:
812 			case ACCESS_TYPE_IMAGE_STORE:
813 			case ACCESS_TYPE_IMAGE_ATOMICS:
814 				imageUsageFlags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
815 				break;
816 			default:
817 				break;
818 		}
819 
820 		imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
821 								 IMAGE_WIDTH, IMAGE_HEIGHT,
822 								 m_params.imageFormat,
823 								 imageUsageFlags);
824 
825 		if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
826 		{
827 			imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
828 									 IMAGE_WIDTH, IMAGE_HEIGHT,
829 									 m_params.imageFormat,
830 									 imageUsageFlags);
831 		}
832 	}
833 
834 	// Select vkImageLayout based upon accessType
835 	vk::VkImageLayout imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
836 
837 	switch (m_params.accessType)
838 	{
839 		case ACCESS_TYPE_SAMPLING:
840 		case ACCESS_TYPE_TEXEL_FETCH:
841 		{
842 			imageLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
843 			break;
844 		}
845 		case ACCESS_TYPE_IMAGE_LOAD:
846 		case ACCESS_TYPE_IMAGE_STORE:
847 		case ACCESS_TYPE_IMAGE_ATOMICS:
848 		{
849 			imageLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
850 			break;
851 		}
852 		default:
853 			DE_FATAL("Impossible");
854 			break;
855 	}
856 
857 	// Upload source image
858 	{
859 		de::MovePtr<vk::ImageWithMemory>	unprotectedImage	= createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
860 																				IMAGE_WIDTH, IMAGE_HEIGHT,
861 																				m_params.imageFormat,
862 																				vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
863 
864 		// Upload data to an unprotected image
865 		uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
866 
867 		// Copy unprotected image to protected image
868 		copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageLayout, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.protectionMode);
869 	}
870 
871 	// Clear dst image
872 	if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE && m_params.protectionMode == PROTECTION_ENABLED)
873 		clearImage(m_protectedContext, **imageDst);
874 
875 	// Create descriptors
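	// Fragment bindings mirror the shader: sampling/texel fetch use one combined image sampler,
	// image load and atomics use one storage image, and image store uses two storage images
	// (binding 0 = source, binding 1 = destination).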
876 	{
877 		vk::DescriptorSetLayoutBuilder	layoutBuilder;
878 		vk::DescriptorPoolBuilder		poolBuilder;
879 
880 		switch (m_params.accessType)
881 		{
882 			case ACCESS_TYPE_SAMPLING:
883 			case ACCESS_TYPE_TEXEL_FETCH:
884 				layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
885 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
886 				break;
887 			case ACCESS_TYPE_IMAGE_LOAD:
888 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
889 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
890 				break;
891 			case ACCESS_TYPE_IMAGE_STORE:
892 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
893 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
894 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
895 				break;
896 			case ACCESS_TYPE_IMAGE_ATOMICS:
897 				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
898 				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
899 				break;
900 			default:
901 				DE_FATAL("Impossible");
902 				break;
903 		}
904 
905 		descriptorSetLayout		= layoutBuilder.build(vk, device);
906 		descriptorPool			= poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
907 		descriptorSet			= makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
908 	}
909 
910 	// Create pipeline layout
911 	vk::Unique<vk::VkPipelineLayout>	pipelineLayout		(makePipelineLayout(vk, device, *descriptorSetLayout));
912 
913 	// Create sampler and image views
914 	{
915 		if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
916 		{
917 			const tcu::TextureFormat		texFormat		= mapVkFormat(m_params.imageFormat);
918 			const vk::VkSamplerCreateInfo	samplerParams	= vk::mapSampler(refSampler, texFormat);
919 
920 			sampler = createSampler(vk, device, &samplerParams);
921 		}
922 
923 		imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
924 
925 		if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
926 			imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
927 	}
928 
929 	// Update descriptor set information
930 	{
931 		vk::DescriptorSetUpdateBuilder		updateBuilder;
932 
933 		switch (m_params.accessType)
934 		{
935 			case ACCESS_TYPE_SAMPLING:
936 			case ACCESS_TYPE_TEXEL_FETCH:
937 			{
938 				vk::VkDescriptorImageInfo	descSampledImg		= makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
939 
940 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImg);
941 				break;
942 			}
943 			case ACCESS_TYPE_IMAGE_LOAD:
944 			{
945 				vk::VkDescriptorImageInfo	descStorageImg		= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
946 
947 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
948 				break;
949 			}
950 			case ACCESS_TYPE_IMAGE_STORE:
951 			{
952 				vk::VkDescriptorImageInfo	descStorageImgSrc	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
953 				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
954 
955 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
956 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
957 				break;
958 			}
959 			case ACCESS_TYPE_IMAGE_ATOMICS:
960 			{
961 				vk::VkDescriptorImageInfo	descStorageImg		= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
962 
963 				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
964 				break;
965 			}
966 			default:
967 				DE_FATAL("Impossible");
968 				break;
969 		}
970 
971 		updateBuilder.update(vk, device);
972 	}
973 
974 	// Create vertex buffer and vertex input descriptors
975 	VertexBindings						vertexBindings;
976 	VertexAttribs						vertexAttribs;
977 	de::MovePtr<vk::BufferWithMemory>	vertexBuffer;
978 	{
979 		const float			positions[]		=
980 		{
981 			-1.0f,	-1.0f,
982 			-1.0f,	+1.0f,
983 			+1.0f,	-1.0f,
984 			+1.0f,	+1.0f,
985 		};
986 
987 		std::vector<float>	texCoord;
988 
989 		{
990 			const tcu::Vec2		minCoords		(0.0f, 0.0f);
991 			const tcu::Vec2		maxCoords		= m_params.accessType == ACCESS_TYPE_SAMPLING ?
992 												  tcu::Vec2(1.0f, 1.0f) :
993 												  tcu::Vec2((float)IMAGE_WIDTH - 0.1f, (float)IMAGE_HEIGHT - 0.1f);
994 
995 			glu::TextureTestUtil::computeQuadTexCoord2D(texCoord, minCoords, maxCoords);
996 		}
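		// For sampling the quad is textured with normalized coordinates; for all other access
		// types the coordinates span whole texel indices (0 .. IMAGE_WIDTH-0.1), so the shader's
		// ivec2(v_texCoord) addresses one texel per fragment.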
997 
998 		const deUint32		vertexPositionStrideSize	= (deUint32)sizeof(tcu::Vec2);
999 		const deUint32		vertexTextureStrideSize		= (deUint32)sizeof(tcu::Vec2);
1000 		const deUint32		positionDataSize			= 4 * vertexPositionStrideSize;
1001 		const deUint32		textureCoordDataSize		= 4 * vertexTextureStrideSize;
1002 		const deUint32		vertexBufferSize			= positionDataSize + textureCoordDataSize;
1003 
1004 		{
1005 			const vk::VkVertexInputBindingDescription	vertexInputBindingDescriptions[2]	=
1006 			{
1007 				{
1008 					0u,									// deUint32					binding;
1009 					vertexPositionStrideSize,			// deUint32					strideInBytes;
1010 					vk::VK_VERTEX_INPUT_RATE_VERTEX		// VkVertexInputStepRate	inputRate;
1011 				},
1012 				{
1013 					1u,									// deUint32					binding;
1014 					vertexTextureStrideSize,			// deUint32					strideInBytes;
1015 					vk::VK_VERTEX_INPUT_RATE_VERTEX		// VkVertexInputStepRate	inputRate;
1016 				}
1017 			};
1018 			vertexBindings.push_back(vertexInputBindingDescriptions[0]);
1019 			vertexBindings.push_back(vertexInputBindingDescriptions[1]);
1020 
1021 			const vk::VkVertexInputAttributeDescription	vertexInputAttributeDescriptions[2]	=
1022 			{
1023 				{
1024 					0u,									// deUint32	location;
1025 					0u,									// deUint32	binding;
1026 					vk::VK_FORMAT_R32G32_SFLOAT,		// VkFormat	format;
1027 					0u									// deUint32	offsetInBytes;
1028 				},
1029 				{
1030 					1u,									// deUint32	location;
1031 					1u,									// deUint32	binding;
1032 					vk::VK_FORMAT_R32G32_SFLOAT,		// VkFormat	format;
1033 					positionDataSize					// deUint32	offsetInBytes;
1034 				}
1035 			};
1036 			vertexAttribs.push_back(vertexInputAttributeDescriptions[0]);
1037 			vertexAttribs.push_back(vertexInputAttributeDescriptions[1]);
1038 		}
1039 
1040 		vertexBuffer = makeBuffer(ctx,
1041 								  PROTECTION_DISABLED,
1042 								  queueFamilyIndex,
1043 								  vertexBufferSize,
1044 								  vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
1045 								  vk::MemoryRequirement::HostVisible);
1046 
1047 		deMemcpy(vertexBuffer->getAllocation().getHostPtr(), positions, positionDataSize);
1048 		deMemcpy(reinterpret_cast<deUint8*>(vertexBuffer->getAllocation().getHostPtr()) +  positionDataSize, texCoord.data(), textureCoordDataSize);
1049 		vk::flushAlloc(vk, device, vertexBuffer->getAllocation());
1050 	}
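	// Note: positions and texture coordinates are packed back to back in a single host-visible
	// buffer; both vertex bindings reference the same buffer and the second attribute uses
	// positionDataSize as its offset.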
1051 
1052 	// Create pipeline
1053 	graphicsPipeline = makeGraphicsPipeline(vk,
1054 											device,
1055 											*pipelineLayout,
1056 											*renderPass,
1057 											*vertexShader,
1058 											*fragmentShader,
1059 											vertexBindings,
1060 											vertexAttribs,
1061 											tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
1062 											vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1063 											m_params.flags);
1064 
1065 	// Begin cmd buffer
1066 	beginCommandBuffer(vk, *cmdBuffer);
1067 
1068 	// Start image barrier
1069 	{
1070 		const vk::VkImageMemoryBarrier	startImgBarrier		=
1071 		{
1072 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,			// sType
1073 			DE_NULL,											// pNext
1074 			0,													// srcAccessMask
1075 			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,			// dstAccessMask
1076 			vk::VK_IMAGE_LAYOUT_UNDEFINED,						// oldLayout
1077 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// newLayout
1078 			queueFamilyIndex,									// srcQueueFamilyIndex
1079 			queueFamilyIndex,									// dstQueueFamilyIndex
1080 			**colorImage,										// image
1081 			{
1082 				vk::VK_IMAGE_ASPECT_COLOR_BIT,					// aspectMask
1083 				0u,												// baseMipLevel
1084 				1u,												// mipLevels
1085 				0u,												// baseArraySlice
1086 				1u,												// layerCount
1087 			}
1088 		};
1089 
1090 		vk.cmdPipelineBarrier(*cmdBuffer,
1091 							  vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,				// srcStageMask
1092 							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,	// dstStageMask
1093 							  (vk::VkDependencyFlags)0,
1094 							  0, (const vk::VkMemoryBarrier*)DE_NULL,
1095 							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1096 							  1, &startImgBarrier);
1097 	}
1098 
1099 	beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.0f));
1100 
1101 	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
1102 	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1103 
1104 	{
1105 		const vk::VkDeviceSize vertexBufferOffset = 0;
1106 
1107 		vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1108 		vk.cmdBindVertexBuffers(*cmdBuffer, 1u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1109 	}
1110 
1111 	vk.cmdDraw(*cmdBuffer, /*vertexCount*/ 4u, 1u, 0u, 1u);
1112 
1113 	endRenderPass(vk, *cmdBuffer);
1114 
1115 	{
1116 		const vk::VkImageMemoryBarrier	endImgBarrier		=
1117 		{
1118 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,			// sType
1119 			DE_NULL,											// pNext
1120 			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,			// srcAccessMask
1121 			vk::VK_ACCESS_SHADER_READ_BIT,						// dstAccessMask
1122 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// oldLayout
1123 			imageLayout,										// newLayout
1124 			queueFamilyIndex,									// srcQueueFamilyIndex
1125 			queueFamilyIndex,									// dstQueueFamilyIndex
1126 			**colorImage,										// image
1127 			{
1128 				vk::VK_IMAGE_ASPECT_COLOR_BIT,					// aspectMask
1129 				0u,												// baseMipLevel
1130 				1u,												// mipLevels
1131 				0u,												// baseArraySlice
1132 				1u,												// layerCount
1133 			}
1134 		};
1135 		vk.cmdPipelineBarrier(*cmdBuffer,
1136 							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,	// srcStageMask
1137 							  vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,				// dstStageMask
1138 							  (vk::VkDependencyFlags)0,
1139 							  0, (const vk::VkMemoryBarrier*)DE_NULL,
1140 							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1141 							  1, &endImgBarrier);
1142 	}
1143 
1144 	endCommandBuffer(vk, *cmdBuffer);
1145 
1146 	// Submit command buffer
1147 	{
1148 		const vk::Unique<vk::VkFence>	fence		(vk::createFence(vk, device));
1149 		VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
1150 	}
1151 
1152 	// Calculate reference image
1153 	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1154 		calculateAtomicRef(*texture2D);
1155 
1156 	// Validate result
1157 	{
1158 		const vk::VkImage	resultImage		= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS	?	**imageSrc	:
1159 											  m_params.accessType == ACCESS_TYPE_IMAGE_STORE	?	**imageDst	: **colorImage;
1160 
1161 		return validateResult(resultImage, imageLayout, *texture2D, refSampler);
1162 	}
1163 }
1164 
1165 void ImageAccessTestInstance::calculateAtomicRef (tcu::Texture2D& texture2D)
1166 {
1167 	DE_ASSERT(m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS);
1168 
1169 	const tcu::PixelBufferAccess&	reference	= texture2D.getLevel(0);
1170 
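	// The shaders perform exactly one atomic per texel with argument gx*gx + gy*gy, so the
	// reference is simply the binary operation applied once to the original texel value.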
1171 	for (int x = 0; x < reference.getWidth(); ++x)
1172 	for (int y = 0; y < reference.getHeight(); ++y)
1173 	{
1174 		const deInt32	oldX		= reference.getPixelInt(x, y).x();
1175 		const deInt32	atomicArg	= x*x + y*y;
1176 		const deInt32	newX		= computeBinaryAtomicOperationResult(m_params.atomicOperation, oldX, atomicArg);
1177 
1178 		reference.setPixel(tcu::IVec4(newX, 0, 0, 0), x, y);
1179 	}
1180 }
1181 
1182 tcu::TestStatus ImageAccessTestInstance::validateResult (vk::VkImage image, vk::VkImageLayout imageLayout, const tcu::Texture2D& texture2D, const tcu::Sampler& refSampler)
1183 {
1184 	de::Random			rnd			(getSeedValue(m_params));
1185 	ValidationData		refData;
1186 
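	// Pick four random reference points; the validator samples the protected result image at the
	// same coordinates and compares against these CPU-side reference values.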
1187 	for (int ndx = 0; ndx < 4; ++ndx)
1188 	{
1189 		const float		lod		= 0.0f;
1190 		const float		cx		= rnd.getFloat(0.0f, 1.0f);
1191 		const float		cy		= rnd.getFloat(0.0f, 1.0f);
1192 
1193 		refData.coords[ndx] = tcu::Vec4(cx, cy, 0.0f, 0.0f);
1194 		refData.values[ndx] = texture2D.sample(refSampler, cx, cy, lod);
1195 	}
1196 
1197 	if (!m_validator.validateImage(m_protectedContext, refData, image, m_params.imageFormat, imageLayout))
1198 		return tcu::TestStatus::fail("Something went really wrong");
1199 	else
1200 		return tcu::TestStatus::pass("Everything went OK");
1201 }
1202 
1203 } // anonymous
1204 
1205 tcu::TestCaseGroup*	createShaderImageAccessTests (tcu::TestContext& testCtx)
1206 {
1207 	de::MovePtr<tcu::TestCaseGroup> accessGroup (new tcu::TestCaseGroup(testCtx, "access"));
1208 
1209 	static const struct
1210 	{
1211 		glu::ShaderType	type;
1212 		const char*		name;
1213 	} shaderTypes[] =
1214 	{
1215 		// Image access from fragment shader
1216 		{ glu::SHADERTYPE_FRAGMENT,		"fragment"},
1217 		// Image access from compute shader
1218 		{ glu::SHADERTYPE_COMPUTE,		"compute"},
1219 	};
1220 
1221 	static const struct
1222 	{
1223 		AccessType		type;
1224 		const char*		name;
1225 	} accessTypes[] =
1226 	{
1227 		// Sampling test
1228 		{ ACCESS_TYPE_SAMPLING,			"sampling"},
1229 		// Texel fetch test
1230 		{ ACCESS_TYPE_TEXEL_FETCH,		"texelfetch"},
1231 		// Image load test
1232 		{ ACCESS_TYPE_IMAGE_LOAD,		"imageload"},
1233 		// Image store test
1234 		{ ACCESS_TYPE_IMAGE_STORE,		"imagestore"},
1235 		// Image atomics test
1236 		{ ACCESS_TYPE_IMAGE_ATOMICS,	"imageatomics"},
1237 	};
1238 
1239 	static const struct
1240 	{
1241 		vk::VkFormat	format;
1242 		const char*		name;
1243 	} formats[] =
1244 	{
1245 		{ vk::VK_FORMAT_R8G8B8A8_UNORM,	"rgba8"	},
1246 		{ vk::VK_FORMAT_R32_SINT,		"r32i"	},
1247 		{ vk::VK_FORMAT_R32_UINT,		"r32ui"	},
1248 	};
1249 
1250 	static const struct
1251 	{
1252 		bool			pipelineProtectedAccess;
1253 		const char*		name;
1254 	} protectedAccess[] =
1255 	{
1256 		{ false, "default"},
1257 #ifndef CTS_USES_VULKANSC
1258 		{ true, "protected_access"},
1259 #endif
1260 	};
1261 	static const struct
1262 	{
1263 		vk::VkPipelineCreateFlags	flags;
1264 		const char*					name;
1265 	} flags[] =
1266 	{
1267 		{ (vk::VkPipelineCreateFlagBits)0u,						"none"},
1268 #ifndef CTS_USES_VULKANSC
1269 		{ vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
1270 		{ vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT,	"no_protected_access"},
1271 #endif
1272 	};
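	// Resulting group hierarchy: access / <shader stage> / <protected access> / <pipeline flags> /
	// <access type> [/ <atomic op>] / <format>.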
1273 
1274 	for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderTypeNdx)
1275 	{
1276 		const glu::ShaderType				shaderType = shaderTypes[shaderTypeNdx].type;
1277 		de::MovePtr<tcu::TestCaseGroup>		shaderGroup(new tcu::TestCaseGroup(testCtx, shaderTypes[shaderTypeNdx].name));
1278 
1279 		for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
1280 			de::MovePtr<tcu::TestCaseGroup>		protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
1281 			for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
1282 				de::MovePtr<tcu::TestCaseGroup>		flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
1283 				if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].flags != 0u) continue;
1284 				for (int accessNdx = 0; accessNdx < DE_LENGTH_OF_ARRAY(accessTypes); ++accessNdx)
1285 				{
1286 					const AccessType					accessType = accessTypes[accessNdx].type;
1287 
1288 					if (shaderType == glu::SHADERTYPE_COMPUTE && accessType == ACCESS_TYPE_IMAGE_STORE) // \note already tested in other tests
1289 						continue;
1290 
1291 					de::MovePtr<tcu::TestCaseGroup>		accessTypeGroup(new tcu::TestCaseGroup(testCtx, accessTypes[accessNdx].name));
1292 
1293 					if (accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1294 					{
1295 						for (deUint32 atomicOpI = 0; atomicOpI < ATOMIC_OPERATION_LAST; ++atomicOpI)
1296 						{
1297 							const AtomicOperation				atomicOp = (AtomicOperation)atomicOpI;
1298 							de::MovePtr<tcu::TestCaseGroup>		operationGroup(new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(atomicOp).c_str()));
1299 
1300 							for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1301 							{
1302 								const vk::VkFormat		format = formats[formatNdx].format;
1303 
1304 								if (format != vk::VK_FORMAT_R32_UINT && format != vk::VK_FORMAT_R32_SINT)
1305 									continue;
1306 
1307 								operationGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, Params(shaderType, accessType, format, atomicOp, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].flags)));
1308 							}
1309 
1310 							accessTypeGroup->addChild(operationGroup.release());
1311 						}
1312 					}
1313 					else
1314 					{
1315 						for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1316 						{
1317 							const vk::VkFormat		format = formats[formatNdx].format;
1318 
1319 							accessTypeGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, Params(shaderType, accessType, format, ATOMIC_OPERATION_LAST, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].flags)));
1320 						}
1321 					}
1322 
1323 					flagsGroup->addChild(accessTypeGroup.release());
1324 				}
1325 				protectedAccessGroup->addChild(flagsGroup.release());
1326 			}
1327 			shaderGroup->addChild(protectedAccessGroup.release());
1328 		}
1329 
1330 		accessGroup->addChild(shaderGroup.release());
1331 	}
1332 
1333 #ifndef CTS_USES_VULKANSC
1334 	{
1335 		Params params(glu::SHADERTYPE_COMPUTE, ACCESS_TYPE_IMAGE_LOAD, vk::VK_FORMAT_R8G8B8A8_UNORM, ATOMIC_OPERATION_LAST, false, vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT);
1336 		params.useMaintenance5 = true;
1337 		de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(testCtx, "misc"));
1338 		miscGroup->addChild(new ImageAccessTestCase(testCtx, "maintenance5_protected_access", params));
1339 		params.flags = vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT;
1340 		miscGroup->addChild(new ImageAccessTestCase(testCtx, "maintenance5_no_protected_access", params));
1341 		accessGroup->addChild(miscGroup.release());
1342 	}
1343 #endif // CTS_USES_VULKANSC
1344 
1345 	return accessGroup.release();
1346 }
1347 
1348 } // ProtectedMem
1349 } // vkt
1350