/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Multisampled image load/store Tests
 *//*--------------------------------------------------------------------*/

#include "vktImageMultisampleLoadStoreTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktImageTestsUtil.hpp"
#include "vktImageLoadStoreUtil.hpp"
#include "vktImageTexture.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBufferWithMemory.hpp"

#include "deUniquePtr.hpp"

#include "tcuTextureUtil.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <vector>

namespace vkt
{
namespace image
{
namespace
{
using namespace vk;
using de::MovePtr;
using de::UniquePtr;
using tcu::IVec3;

static const VkFormat CHECKSUM_IMAGE_FORMAT = VK_FORMAT_R32_SINT;

struct CaseDef
{
	Texture					texture;
	VkFormat				format;
	VkSampleCountFlagBits	numSamples;
	bool					singleLayerBind;
};

//  Multisampled storage image test.
//
//  Pass 1: Write a slightly different color pattern per-sample to the whole image.
//  Pass 2: Read samples of the same image and check if color values are in the expected range.
//          Write back results as a checksum image and verify them on the host.
//  Each checksum image pixel should contain an integer equal to the number of samples.

void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
{
	const int			dimension			= (caseDef.singleLayerBind ? caseDef.texture.layerDimension() : caseDef.texture.dimension());
	const std::string	texelCoordStr		= (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType		usedImageType		= (caseDef.singleLayerBind ? getImageTypeForSingleLayer(caseDef.texture.type()) : caseDef.texture.type());
	const std::string	formatQualifierStr	= getShaderImageFormatQualifier(mapVkFormat(caseDef.format));
	const std::string	msImageTypeStr		= getShaderImageType(mapVkFormat(caseDef.format), usedImageType, (caseDef.texture.numSamples() > 1));

	const std::string	xMax				= de::toString(caseDef.texture.size().x() - 1);
	const std::string	yMax				= de::toString(caseDef.texture.size().y() - 1);
	const std::string	signednessPrefix	= isUintFormat(caseDef.format) ? "u" : isIntFormat(caseDef.format) ? "i" : "";
	const std::string	gvec4Expr			= signednessPrefix + "vec4";
	const int			numColorComponents	= tcu::getNumUsedChannels(mapVkFormat(caseDef.format).order);

	const float			storeColorScale		= computeStoreColorScale(caseDef.format, caseDef.texture.size());
	const float			storeColorBias		= computeStoreColorBias(caseDef.format);
	DE_ASSERT(colorScaleAndBiasAreValid(caseDef.format, storeColorScale, storeColorBias));

	const std::string	colorScaleExpr		= (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
											+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
	const std::string	colorExpr			=
		gvec4Expr + "("
		+                           "gx^gy^gz^(sampleNdx >> 5)^(sampleNdx & 31), "		// sampleNdx is split so the value stays in the [0, 31] range for the numSamples = 64 case
		+ (numColorComponents > 1 ? "(" + xMax + "-gx)^gy^gz, "              : "0, ")
		+ (numColorComponents > 2 ? "gx^(" + yMax + "-gy)^gz, "              : "0, ")
		+ (numColorComponents > 3 ? "(" + xMax + "-gx)^(" + yMax + "-gy)^gz" : "1")
		+ ")" + colorScaleExpr;

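	// For illustration only: assuming a hypothetical 2D rgba32f case with 4 samples and no
	// single-layer bind, the store shader assembled below would read roughly as follows
	// (the texel coordinate and color expressions vary with dimensionality, format and sample count):
	//
	//     #version 450
	//
	//     layout(local_size_x = 1) in;
	//     layout(set = 0, binding = 1, rgba32f) writeonly uniform image2DMS u_msImage;
	//
	//     void main (void)
	//     {
	//         int gx = int(gl_GlobalInvocationID.x);
	//         int gy = int(gl_GlobalInvocationID.y);
	//         int gz = int(gl_GlobalInvocationID.z);
	//
	//         for (int sampleNdx = 0; sampleNdx < 4; ++sampleNdx) {
	//             imageStore(u_msImage, ivec2(gx, gy), sampleNdx,
	//                        vec4(gx^gy^gz^(sampleNdx >> 5)^(sampleNdx & 31), (31-gx)^gy^gz, gx^(31-gy)^gz, (31-gx)^(31-gy)^gz));
	//         }
	//     }
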
	// Store shader
	{
		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
			<< "\n"
			<< "layout(local_size_x = 1) in;\n"
			<< "layout(set = 0, binding = 1, " << formatQualifierStr << ") writeonly uniform " << msImageTypeStr << " u_msImage;\n";

		if (caseDef.singleLayerBind)
			src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
				<< "    int u_layerNdx;\n"
				<< "};\n";

		src << "\n"
			<< "void main (void)\n"
			<< "{\n"
			<< "    int gx = int(gl_GlobalInvocationID.x);\n"
			<< "    int gy = int(gl_GlobalInvocationID.y);\n"
			<< "    int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
			<< "\n"
			<< "    for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() << "; ++sampleNdx) {\n"
			<< "        imageStore(u_msImage, " << texelCoordStr << ", sampleNdx, " << colorExpr << ");\n"
			<< "    }\n"
			<< "}\n";

		programCollection.glslSources.add("comp_store") << glu::ComputeSource(src.str());
	}

	// Load shader
	{
		const tcu::TextureFormat	checksumFormat			= mapVkFormat(CHECKSUM_IMAGE_FORMAT);
		const std::string			checksumImageTypeStr	= getShaderImageType(checksumFormat, usedImageType);
		const bool					useExactCompare			= isIntegerFormat(caseDef.format);

		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
			<< "\n"
			<< "layout(local_size_x = 1) in;\n"
			<< "layout(set = 0, binding = 1, " << formatQualifierStr << ") readonly  uniform " << msImageTypeStr << " u_msImage;\n"
			<< "layout(set = 0, binding = 2, " << getShaderImageFormatQualifier(checksumFormat) << ") writeonly uniform " << checksumImageTypeStr << " u_checksumImage;\n";

		if (caseDef.singleLayerBind)
			src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
				<< "    int u_layerNdx;\n"
				<< "};\n";

		src << "\n"
			<< "void main (void)\n"
			<< "{\n"
			<< "    int gx = int(gl_GlobalInvocationID.x);\n"
			<< "    int gy = int(gl_GlobalInvocationID.y);\n"
			<< "    int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
			<< "\n"
			<< "    int checksum = 0;\n"
			<< "    for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() << "; ++sampleNdx) {\n"
			<< "        " << gvec4Expr << " color = imageLoad(u_msImage, " << texelCoordStr << ", sampleNdx);\n";

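		// Integer formats are compared exactly; float and normalized formats are allowed a small
		// per-component tolerance (0.02) to absorb store/load rounding.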
		if (useExactCompare)
			src << "        if (color == " << colorExpr << ")\n"
				<< "            ++checksum;\n";
		else
			src << "        " << gvec4Expr << " diff  = abs(abs(color) - abs(" << colorExpr << "));\n"
				<< "        if (all(lessThan(diff, " << gvec4Expr << "(0.02))))\n"
				<< "            ++checksum;\n";

		src << "    }\n"
			<< "\n"
			<< "    imageStore(u_checksumImage, " << texelCoordStr << ", ivec4(checksum));\n"
			<< "}\n";

		programCollection.glslSources.add("comp_load") << glu::ComputeSource(src.str());
	}
}

void checkSupport (Context& context, const CaseDef caseDef)
{
	context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_MULTISAMPLE);

	VkImageFormatProperties		imageFormatProperties;
	const VkResult				imageFormatResult		= context.getInstanceInterface().getPhysicalDeviceImageFormatProperties(
		context.getPhysicalDevice(), caseDef.format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_STORAGE_BIT, (VkImageCreateFlags)0, &imageFormatProperties);

	if (imageFormatResult == VK_ERROR_FORMAT_NOT_SUPPORTED)
		TCU_THROW(NotSupportedError, "Format is not supported");

	if ((imageFormatProperties.sampleCounts & caseDef.numSamples) != caseDef.numSamples)
		TCU_THROW(NotSupportedError, "Requested sample count is not supported");
}

//! Helper function to deal with per-layer resources.
void insertImageViews (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkFormat format, const VkImage image, std::vector<SharedVkImageView>* const pOutImageViews)
{
	if (caseDef.singleLayerBind)
	{
		pOutImageViews->clear();
		pOutImageViews->resize(caseDef.texture.numLayers());
		for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
		{
			(*pOutImageViews)[layerNdx] = makeVkSharedPtr(makeImageView(
				vk, device, image, mapImageViewType(getImageTypeForSingleLayer(caseDef.texture.type())), format,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
		}
	}
	else // bind all layers at once
	{
		pOutImageViews->clear();
		pOutImageViews->resize(1);
		(*pOutImageViews)[0] = makeVkSharedPtr(makeImageView(
			vk, device, image, mapImageViewType(caseDef.texture.type()), format,
			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers())));
	}
}

//! Helper function to deal with per-layer resources.
void insertDescriptorSets (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkDescriptorPool descriptorPool, const VkDescriptorSetLayout descriptorSetLayout, std::vector<SharedVkDescriptorSet>* const pOutDescriptorSets)
{
	if (caseDef.singleLayerBind)
	{
		pOutDescriptorSets->clear();
		pOutDescriptorSets->resize(caseDef.texture.numLayers());
		for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
			(*pOutDescriptorSets)[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
	}
	else // bind all layers at once
	{
		pOutDescriptorSets->clear();
		pOutDescriptorSets->resize(1);
		(*pOutDescriptorSets)[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
	}
}

tcu::TestStatus test (Context& context, const CaseDef caseDef)
{
	const InstanceInterface&	vki					= context.getInstanceInterface();
	const VkPhysicalDevice		physDevice			= context.getPhysicalDevice();
	const DeviceInterface&		vk					= context.getDeviceInterface();
	const VkDevice				device				= context.getDevice();
	const VkQueue				queue				= context.getUniversalQueue();
	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	Allocator&					allocator			= context.getDefaultAllocator();

	// Images

	const UniquePtr<Image> msImage(new Image(
		vk, device, allocator, makeImageCreateInfo(caseDef.texture, caseDef.format, VK_IMAGE_USAGE_STORAGE_BIT, 0u), MemoryRequirement::Any));

	const UniquePtr<Image> checksumImage(new Image(
		vk, device, allocator,
		makeImageCreateInfo(Texture(caseDef.texture, 1), CHECKSUM_IMAGE_FORMAT, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
		MemoryRequirement::Any));

	// Buffer used to pass constants to the shader.

	const int					numLayers					= caseDef.texture.numLayers();
	const VkDeviceSize			bufferChunkSize				= getOptimalUniformBufferChunkSize(vki, physDevice, sizeof(deInt32));
	const VkDeviceSize			constantsBufferSizeBytes	= numLayers * bufferChunkSize;
	UniquePtr<BufferWithMemory>	constantsBuffer				(new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
															 MemoryRequirement::HostVisible));

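	// Each layer's index is written at its own chunk offset; the chunk size is expected to account
	// for the device's uniform buffer offset alignment, so the per-layer descriptor ranges used
	// below do not overlap.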
	{
		const Allocation&	alloc	= constantsBuffer->getAllocation();
		deUint8* const		basePtr = static_cast<deUint8*>(alloc.getHostPtr());

		deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			deInt32* const valuePtr = reinterpret_cast<deInt32*>(basePtr + layerNdx * bufferChunkSize);
			*valuePtr = layerNdx;
		}

		flushAlloc(vk, device, alloc);
	}

	const VkDeviceSize			resultBufferSizeBytes	= getImageSizeBytes(caseDef.texture.size(), CHECKSUM_IMAGE_FORMAT);
	UniquePtr<BufferWithMemory>	resultBuffer			(new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(resultBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
														 MemoryRequirement::HostVisible));

	{
		const Allocation& alloc = resultBuffer->getAllocation();
		deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(resultBufferSizeBytes));
		flushAlloc(vk, device, alloc);
	}

	// Descriptors

	Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device));

	Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers));

	std::vector<SharedVkDescriptorSet>	allDescriptorSets;
	std::vector<SharedVkImageView>		allMultisampledImageViews;
	std::vector<SharedVkImageView>		allChecksumImageViews;

	insertDescriptorSets(vk, device, caseDef, *descriptorPool, *descriptorSetLayout, &allDescriptorSets);
	insertImageViews	(vk, device, caseDef, caseDef.format, **msImage, &allMultisampledImageViews);
	insertImageViews	(vk, device, caseDef, CHECKSUM_IMAGE_FORMAT, **checksumImage, &allChecksumImageViews);

	// Prepare commands

	const Unique<VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));
	const Unique<VkCommandPool>		cmdPool			(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
	const Unique<VkCommandBuffer>	cmdBuffer		(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

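	// With singleLayerBind the shaders are dispatched once per layer over a single layer's extent;
	// otherwise one dispatch covers all layers via gl_GlobalInvocationID.z.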
	const tcu::IVec3				workSize				= (caseDef.singleLayerBind ? caseDef.texture.layerSize() : caseDef.texture.size());
	const int						loopNumLayers			= (caseDef.singleLayerBind ? numLayers : 1);
	const VkImageSubresourceRange	subresourceAllLayers	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers());

	// Pass 1: Write MS image
	{
		const Unique<VkShaderModule>	shaderModule	(createShaderModule	(vk, device, context.getBinaryCollection().get("comp_store"), 0));
		const Unique<VkPipeline>		pipeline		(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

		beginCommandBuffer(vk, *cmdBuffer);
		vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

		{
			const VkImageMemoryBarrier barriers[] =
			{
				makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
				makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **checksumImage, subresourceAllLayers),
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
		}

		for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
		{
			const VkDescriptorSet			descriptorSet					= **allDescriptorSets[layerNdx];
			const VkDescriptorImageInfo		descriptorMultiImageInfo		= makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
			const VkDescriptorBufferInfo	descriptorConstantsBufferInfo	= makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);

			DescriptorSetUpdateBuilder()
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
				.update(vk, device);

			vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
			vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
		}

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
		context.resetCommandPoolForVKSC(device, *cmdPool);
	}

	// Pass 2: "Resolve" MS image in compute shader
	{
		const Unique<VkShaderModule>	shaderModule	(createShaderModule	(vk, device, context.getBinaryCollection().get("comp_load"), 0));
		const Unique<VkPipeline>		pipeline		(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

		beginCommandBuffer(vk, *cmdBuffer);
		vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

		{
			const VkImageMemoryBarrier barriers[] =
			{
				makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
		}

		for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
		{
			const VkDescriptorSet			descriptorSet					= **allDescriptorSets[layerNdx];
			const VkDescriptorImageInfo		descriptorMultiImageInfo		= makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
			const VkDescriptorImageInfo		descriptorChecksumImageInfo		= makeDescriptorImageInfo(DE_NULL, **allChecksumImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
			const VkDescriptorBufferInfo	descriptorConstantsBufferInfo	= makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);

			DescriptorSetUpdateBuilder()
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorChecksumImageInfo)
				.update(vk, device);

			vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
			vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
		}

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
		context.resetCommandPoolForVKSC(device, *cmdPool);
	}

	// Retrieve result
	{
		beginCommandBuffer(vk, *cmdBuffer);

		{
			const VkImageMemoryBarrier barriers[] =
			{
				makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **checksumImage, subresourceAllLayers),
			};
			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
		}
		{
			const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(caseDef.texture.layerSize()), caseDef.texture.numLayers());
			vk.cmdCopyImageToBuffer(*cmdBuffer, **checksumImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **resultBuffer, 1u, &copyRegion);
		}
		{
			const VkBufferMemoryBarrier barriers[] =
			{
				makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **resultBuffer, 0ull, resultBufferSizeBytes),
			};
			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, 0u, DE_NULL);
		}

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}

	// Verify
	{
		const Allocation& alloc = resultBuffer->getAllocation();
		invalidateAlloc(vk, device, alloc);

		const IVec3		imageSize			= caseDef.texture.size();
		const deInt32*	pDataPtr			= static_cast<deInt32*>(alloc.getHostPtr());
		const deInt32	expectedChecksum	= caseDef.texture.numSamples();

		for (int layer = 0; layer < imageSize.z(); ++layer)
		for (int y = 0; y < imageSize.y(); ++y)
		for (int x = 0; x < imageSize.x(); ++x)
		{
			if (*pDataPtr != expectedChecksum)
			{
				context.getTestContext().getLog()
					<< tcu::TestLog::Message << "Some sample colors were incorrect at (x, y, layer) = (" << x << ", " << y << ", " << layer << ")" << tcu::TestLog::EndMessage
					<< tcu::TestLog::Message << "Checksum value is " << *pDataPtr << " but expected " << expectedChecksum << tcu::TestLog::EndMessage;

				return tcu::TestStatus::fail("Some sample colors were incorrect");
			}
			++pDataPtr;
		}

		return tcu::TestStatus::pass("OK");
	}
}

} // anonymous ns

tcu::TestCaseGroup* createImageMultisampleLoadStoreTests (tcu::TestContext& testCtx)
{
	const Texture textures[] =
	{
		// \note Shader code is tweaked to work with an image size of 32; revisit the shaders if this size changes.
		Texture(IMAGE_TYPE_2D,			tcu::IVec3(32,	32,	1),		1),
		Texture(IMAGE_TYPE_2D_ARRAY,	tcu::IVec3(32,	32,	1),		4),
	};

	static const VkFormat formats[] =
	{
		VK_FORMAT_R32G32B32A32_SFLOAT,
		VK_FORMAT_R16G16B16A16_SFLOAT,
		VK_FORMAT_R32_SFLOAT,

		VK_FORMAT_R32G32B32A32_UINT,
		VK_FORMAT_R16G16B16A16_UINT,
		VK_FORMAT_R8G8B8A8_UINT,
		VK_FORMAT_R32_UINT,

		VK_FORMAT_R32G32B32A32_SINT,
		VK_FORMAT_R16G16B16A16_SINT,
		VK_FORMAT_R8G8B8A8_SINT,
		VK_FORMAT_R32_SINT,

		VK_FORMAT_R8G8B8A8_UNORM,

		VK_FORMAT_R8G8B8A8_SNORM,
	};

	static const VkSampleCountFlagBits samples[] =
	{
		VK_SAMPLE_COUNT_2_BIT,
		VK_SAMPLE_COUNT_4_BIT,
		VK_SAMPLE_COUNT_8_BIT,
		VK_SAMPLE_COUNT_16_BIT,
		VK_SAMPLE_COUNT_32_BIT,
		VK_SAMPLE_COUNT_64_BIT,
	};

	MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_multisample", "Multisampled image store and load"));

	for (int baseTextureNdx = 0; baseTextureNdx < DE_LENGTH_OF_ARRAY(textures); ++baseTextureNdx)
	{
		const Texture&				baseTexture			= textures[baseTextureNdx];
		MovePtr<tcu::TestCaseGroup>	imageViewGroup		(new tcu::TestCaseGroup(testCtx, getImageTypeName(baseTexture.type()).c_str(), ""));
		const int					numLayerBindModes	= (baseTexture.numLayers() == 1 ? 1 : 2);

		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
		for (int layerBindMode = 0; layerBindMode < numLayerBindModes; ++layerBindMode)
		{
			const bool					singleLayerBind	= (layerBindMode != 0);
			const std::string			formatGroupName	= getFormatShortString(formats[formatNdx]) + (singleLayerBind ? "_single_layer" : "");
			MovePtr<tcu::TestCaseGroup>	formatGroup		(new tcu::TestCaseGroup(testCtx, formatGroupName.c_str(), ""));

			for (int samplesNdx = 0; samplesNdx < DE_LENGTH_OF_ARRAY(samples); ++samplesNdx)
			{
				const std::string	samplesCaseName = "samples_" + de::toString(samples[samplesNdx]);

				const CaseDef		caseDef =
				{
					Texture(baseTexture, samples[samplesNdx]),
					formats[formatNdx],
					samples[samplesNdx],
					singleLayerBind,
				};

				addFunctionCaseWithPrograms(formatGroup.get(), samplesCaseName, "", checkSupport, initPrograms, test, caseDef);
			}
			imageViewGroup->addChild(formatGroup.release());
		}
		testGroup->addChild(imageViewGroup.release());
	}

	return testGroup.release();
}

} // image
} // vkt