/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*
 * \file  vktSparseResourcesShaderIntrinsicsBase.cpp
 * \brief Sparse Resources Shader Intrinsics Base Classes
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesShaderIntrinsicsBase.hpp"
#include "vkCmdUtil.hpp"
#include "vkBarrierUtil.hpp"

using namespace vk;

namespace vkt
{
namespace sparse
{

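// The helpers below translate tcu/Vulkan formats into the SPIR-V type and image
// declarations that the derived test cases splice into their assembled shaders.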
std::string getOpTypeImageComponent (const tcu::TextureFormat& format)
{
	switch (tcu::getTextureChannelClass(format.type))
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return "OpTypeInt 32 0";
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return "OpTypeInt 32 1";
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "OpTypeFloat 32";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getOpTypeImageComponent (const vk::PlanarFormatDescription& description)
{
	switch (description.channels[0].type)
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return "OpTypeInt 32 0";
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return "OpTypeInt 32 1";
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "OpTypeFloat 32";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getImageComponentTypeName (const tcu::TextureFormat& format)
{
	switch (tcu::getTextureChannelClass(format.type))
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return "%type_uint";
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return "%type_int";
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "%type_float";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getImageComponentTypeName (const vk::PlanarFormatDescription& description)
{
	switch (description.channels[0].type)
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_uint64" : "%type_uint");
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_int64" : "%type_int");
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "%type_float";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getImageComponentVec4TypeName (const tcu::TextureFormat& format)
{
	switch (tcu::getTextureChannelClass(format.type))
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return "%type_uvec4";
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return "%type_ivec4";
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "%type_vec4";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getImageComponentVec4TypeName (const vk::PlanarFormatDescription& description)
{

	switch (description.channels[0].type)
	{
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_u64vec4" : "%type_uvec4");
		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_i64vec4" : "%type_ivec4");
		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
			return "%type_vec4";
		default:
			DE_FATAL("Unexpected channel type");
			return "";
	}
}

std::string getOpTypeImageSparse (const ImageType			imageType,
								  const tcu::TextureFormat&	format,
								  const std::string&		componentType,
								  const bool				requiresSampler)
{
	std::ostringstream	src;

	src << "OpTypeImage " << componentType << " ";

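	// Remaining OpTypeImage operands: Dim, Depth (0 = non-depth), Arrayed and MS,
	// followed by the Sampled operand and the image format appended further below.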
	switch (imageType)
	{
		case IMAGE_TYPE_1D :
			src << "1D 0 0 0 ";
		break;
		case IMAGE_TYPE_1D_ARRAY :
			src << "1D 0 1 0 ";
		break;
		case IMAGE_TYPE_2D :
			src << "2D 0 0 0 ";
		break;
		case IMAGE_TYPE_2D_ARRAY :
			src << "2D 0 1 0 ";
		break;
		case IMAGE_TYPE_3D :
			src << "3D 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE :
			src << "Cube 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE_ARRAY :
			src << "Cube 0 1 0 ";
		break;
		default :
			DE_FATAL("Unexpected image type");
		break;
	}

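	// Sampled operand: 1 = image used with a sampler, 2 = storage image accessed without a sampler.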
	if (requiresSampler)
		src << "1 ";
	else
		src << "2 ";

	switch (format.order)
	{
		case tcu::TextureFormat::R:
			src << "R";
		break;
		case tcu::TextureFormat::RG:
			src << "Rg";
			break;
		case tcu::TextureFormat::RGB:
			src << "Rgb";
			break;
		case tcu::TextureFormat::RGBA:
			src << "Rgba";
		break;
		default:
			DE_FATAL("Unexpected channel order");
		break;
	}

	switch (format.type)
	{
		case tcu::TextureFormat::SIGNED_INT8:
			src << "8i";
		break;
		case tcu::TextureFormat::SIGNED_INT16:
			src << "16i";
		break;
		case tcu::TextureFormat::SIGNED_INT32:
			src << "32i";
		break;
		case tcu::TextureFormat::UNSIGNED_INT8:
			src << "8ui";
		break;
		case tcu::TextureFormat::UNSIGNED_INT16:
			src << "16ui";
		break;
		case tcu::TextureFormat::UNSIGNED_INT32:
			src << "32ui";
		break;
		case tcu::TextureFormat::SNORM_INT8:
			src << "8Snorm";
		break;
		case tcu::TextureFormat::SNORM_INT16:
			src << "16Snorm";
		break;
		case tcu::TextureFormat::SNORM_INT32:
			src << "32Snorm";
		break;
		case tcu::TextureFormat::UNORM_INT8:
			src << "8";
		break;
		case tcu::TextureFormat::UNORM_INT16:
			src << "16";
		break;
		case tcu::TextureFormat::UNORM_INT32:
			src << "32";
		break;
		default:
			DE_FATAL("Unexpected channel type");
		break;
	}

	return src.str();
}

std::string getOpTypeImageSparse (const ImageType		imageType,
								  const VkFormat		format,
								  const std::string&	componentType,
								  const bool			requiresSampler)
{
	std::ostringstream	src;

	src << "OpTypeImage " << componentType << " ";

	switch (imageType)
	{
		case IMAGE_TYPE_1D :
			src << "1D 0 0 0 ";
		break;
		case IMAGE_TYPE_1D_ARRAY :
			src << "1D 0 1 0 ";
		break;
		case IMAGE_TYPE_2D :
			src << "2D 0 0 0 ";
		break;
		case IMAGE_TYPE_2D_ARRAY :
			src << "2D 0 1 0 ";
		break;
		case IMAGE_TYPE_3D :
			src << "3D 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE :
			src << "Cube 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE_ARRAY :
			src << "Cube 0 1 0 ";
		break;
		default :
			DE_FATAL("Unexpected image type");
		break;
	}

	if (requiresSampler)
		src << "1 ";
	else
		src << "2 ";

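	// Multi-planar and packed 10/12-bit formats have no dedicated SPIR-V image format,
	// so they are expressed with the core format of matching component width (Rgba8, Rgba16, ...).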
	switch (format)
	{
		case VK_FORMAT_R8_SINT:										src <<	"R8i";			break;
		case VK_FORMAT_R16_SINT:									src <<	"R16i";			break;
		case VK_FORMAT_R32_SINT:									src <<	"R32i";			break;
		case VK_FORMAT_R64_SINT:									src <<	"R64i";			break;
		case VK_FORMAT_R8_UINT:										src <<	"R8ui";			break;
		case VK_FORMAT_R16_UINT:									src <<	"R16ui";		break;
		case VK_FORMAT_R32_UINT:									src <<	"R32ui";		break;
		case VK_FORMAT_R64_UINT:									src <<	"R64ui";		break;
		case VK_FORMAT_R8_SNORM:									src <<	"R8Snorm";		break;
		case VK_FORMAT_R16_SNORM:									src <<	"R16Snorm";		break;
		case VK_FORMAT_R8_UNORM:									src <<	"R8";			break;
		case VK_FORMAT_R16_UNORM:									src <<	"R16";			break;

		case VK_FORMAT_R8G8_SINT:									src <<	"Rg8i";			break;
		case VK_FORMAT_R16G16_SINT:									src <<	"Rg16i";		break;
		case VK_FORMAT_R32G32_SINT:									src <<	"Rg32i";		break;
		case VK_FORMAT_R8G8_UINT:									src <<	"Rg8ui";		break;
		case VK_FORMAT_R16G16_UINT:									src <<	"Rg16ui";		break;
		case VK_FORMAT_R32G32_UINT:									src <<	"Rg32ui";		break;
		case VK_FORMAT_R8G8_SNORM:									src <<	"Rg8Snorm";		break;
		case VK_FORMAT_R16G16_SNORM:								src <<	"Rg16Snorm";	break;
		case VK_FORMAT_R8G8_UNORM:									src <<	"Rg8";			break;
		case VK_FORMAT_R16G16_UNORM:								src <<	"Rg16";			break;

		case VK_FORMAT_R8G8B8A8_SINT:								src <<	"Rgba8i";		break;
		case VK_FORMAT_R16G16B16A16_SINT:							src <<	"Rgba16i";		break;
		case VK_FORMAT_R32G32B32A32_SINT:							src <<	"Rgba32i";		break;
		case VK_FORMAT_R8G8B8A8_UINT:								src <<	"Rgba8ui";		break;
		case VK_FORMAT_R16G16B16A16_UINT:							src <<	"Rgba16ui";		break;
		case VK_FORMAT_R32G32B32A32_UINT:							src <<	"Rgba32ui";		break;
		case VK_FORMAT_R8G8B8A8_SNORM:								src <<	"Rgba8Snorm";	break;
		case VK_FORMAT_R16G16B16A16_SNORM:							src <<	"Rgba16Snorm";	break;
		case VK_FORMAT_R8G8B8A8_UNORM:								src <<	"Rgba8";		break;
		case VK_FORMAT_R16G16B16A16_UNORM:							src <<	"Rgba16";		break;

		case VK_FORMAT_G8B8G8R8_422_UNORM:							src <<	"Rgba8";		break;
		case VK_FORMAT_B8G8R8G8_422_UNORM:							src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_R10X6_UNORM_PACK16:							src <<	"R16";			break;
		case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:					src <<	"Rg16";			break;
		case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:			src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_R12X4_UNORM_PACK16:							src <<	"R16";			break;
		case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:					src <<	"Rg16";			break;
		case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:			src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G16B16G16R16_422_UNORM:						src <<	"Rgba16";		break;
		case VK_FORMAT_B16G16R16G16_422_UNORM:						src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:					src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:					src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT:				src <<	"Rgba8";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT:	src <<	"Rgba16";	break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT:	src <<	"Rgba16";	break;
		case VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT:				src <<	"Rgba16";		break;

		default:
			DE_FATAL("Unexpected texture format");
			break;
	}
	return src.str();
}


std::string getOpTypeImageResidency (const ImageType imageType)
{
	std::ostringstream	src;

	src << "OpTypeImage %type_uint ";

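	// The residency image is always a storage (non-sampled) R32ui image that matches
	// the dimensionality of the sparse image under test.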
	switch (imageType)
	{
		case IMAGE_TYPE_1D :
			src << "1D 0 0 0 2 R32ui";
		break;
		case IMAGE_TYPE_1D_ARRAY :
			src << "1D 0 1 0 2 R32ui";
		break;
		case IMAGE_TYPE_2D :
			src << "2D 0 0 0 2 R32ui";
		break;
		case IMAGE_TYPE_2D_ARRAY :
			src << "2D 0 1 0 2 R32ui";
		break;
		case IMAGE_TYPE_3D :
			src << "3D 0 0 0 2 R32ui";
		break;
		case IMAGE_TYPE_CUBE :
			src << "Cube 0 0 0 2 R32ui";
		break;
		case IMAGE_TYPE_CUBE_ARRAY :
			src << "Cube 0 1 0 2 R32ui";
		break;
		default :
			DE_FATAL("Unexpected image type");
		break;
	}

	return src.str();
}

void SparseShaderIntrinsicsInstanceBase::checkSupport(VkImageCreateInfo imageSparseInfo) const
{
	const InstanceInterface&			instance				= m_context.getInstanceInterface();
	const VkPhysicalDevice				physicalDevice			= m_context.getPhysicalDevice();

	if (formatIsR64(m_format))
	{
		m_context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

		if (m_context.getShaderImageAtomicInt64FeaturesEXT().shaderImageInt64Atomics == VK_FALSE)
		{
			TCU_THROW(NotSupportedError, "shaderImageInt64Atomics is not supported");
		}

		if (m_context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics == VK_FALSE)
		{
			TCU_THROW(NotSupportedError, "sparseImageInt64Atomics is not supported for device");
		}
	}

	// Check if device supports sparse operations for image format
	if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
		TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

}

tcu::TestStatus SparseShaderIntrinsicsInstanceBase::iterate (void)
{
	const InstanceInterface&			instance				= m_context.getInstanceInterface();
	const VkPhysicalDevice				physicalDevice			= m_context.getPhysicalDevice();
	VkImageCreateInfo					imageSparseInfo;
	VkImageCreateInfo					imageTexelsInfo;
	VkImageCreateInfo					imageResidencyInfo;
	std::vector <deUint32>				residencyReferenceData;
	std::vector<DeviceMemorySp>			deviceMemUniquePtrVec;
	const PlanarFormatDescription		formatDescription		= getPlanarFormatDescription(m_format);

	imageSparseInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
	imageSparseInfo.pNext					= DE_NULL;
	imageSparseInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
	imageSparseInfo.imageType				= mapImageType(m_imageType);
	imageSparseInfo.format					= m_format;
	imageSparseInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));
	imageSparseInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);
	imageSparseInfo.samples					= VK_SAMPLE_COUNT_1_BIT;
	imageSparseInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
	imageSparseInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
	imageSparseInfo.usage					= VK_IMAGE_USAGE_TRANSFER_DST_BIT | imageSparseUsageFlags();
	imageSparseInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
	imageSparseInfo.queueFamilyIndexCount	= 0u;
	imageSparseInfo.pQueueFamilyIndices		= DE_NULL;

	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
	{
		imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
	}

	checkSupport(imageSparseInfo);

	{
		// Assign maximum allowed mipmap levels to image
		VkImageFormatProperties imageFormatProperties;
		if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
			imageSparseInfo.format,
			imageSparseInfo.imageType,
			imageSparseInfo.tiling,
			imageSparseInfo.usage,
			imageSparseInfo.flags,
			&imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			TCU_THROW(NotSupportedError, "Image format does not support sparse operations");
		}

		imageSparseInfo.mipLevels = getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
	}

	{
		// Create logical device supporting both sparse and compute/graphics queues
		QueueRequirementsVec queueRequirements;
		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
		queueRequirements.push_back(QueueRequirements(getQueueFlags(), 1u));

		createDeviceSupportingQueues(queueRequirements);
	}

	// Create queues supporting sparse binding operations and compute/graphics operations
	const DeviceInterface&			deviceInterface		= getDeviceInterface();
	const Queue&					sparseQueue			= getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
	const Queue&					extractQueue		= getQueue(getQueueFlags(), 0);

	// Create sparse image
	const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

	// Create sparse image memory bind semaphore
	const Unique<VkSemaphore> memoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

	std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;

	deUint32	imageSparseSizeInBytes	= 0;
	deUint32	imageSizeInPixels		= 0;

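	// Accumulate the total byte size (for the staging buffers) and texel count
	// (for the residency reference data) over all planes and mip levels.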
	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			imageSparseSizeInBytes	+= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			imageSizeInPixels		+= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;
		}
	}

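	// Mark every texel as not bound initially; texels of bound mip levels are flipped below.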
	residencyReferenceData.assign(imageSizeInPixels, MEMORY_BLOCK_NOT_BOUND_VALUE);

	{
		// Get sparse image general memory requirements
		const VkMemoryRequirements				imageMemoryRequirements	= getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

		// Check if required image memory size does not exceed device limits
		if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

		DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

		const deUint32							memoryType				= findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);

		if (memoryType == NO_MATCH_FOUND)
			return tcu::TestStatus::fail("No matching memory type found");

		// Get sparse image sparse memory requirements
		sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

		DE_ASSERT(sparseMemoryRequirements.size() != 0);

		const deUint32							metadataAspectIndex		= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
		deUint32								pixelOffset				= 0u;
		std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
		std::vector<VkSparseMemoryBind>			imageMipTailBinds;

		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags		aspect				= (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
			const deUint32					aspectIndex			= getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

			if (aspectIndex == NO_MATCH_FOUND)
				TCU_THROW(NotSupportedError, "Not supported image aspect");

			VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[aspectIndex];

			DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

			VkExtent3D						imageGranularity	= aspectRequirements.formatProperties.imageGranularity;

			// Bind memory for each mipmap level
			for (deUint32 mipmapNdx = 0; mipmapNdx < aspectRequirements.imageMipTailFirstLod; ++mipmapNdx)
			{
				const deUint32 mipLevelSizeInPixels = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;

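				// Alternate between leaving a mip level unbound and binding it, so that both
				// resident and non-resident accesses are exercised.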
				if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_NOT_BOUND)
				{
					pixelOffset += mipLevelSizeInPixels;
					continue;
				}

				for (deUint32 pixelNdx = 0u; pixelNdx < mipLevelSizeInPixels; ++pixelNdx)
				{
					residencyReferenceData[pixelOffset + pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
				}

				pixelOffset += mipLevelSizeInPixels;

				for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
				{
					const VkExtent3D			mipExtent		= getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx);
					const tcu::UVec3			sparseBlocks	= alignedDivide(mipExtent, imageGranularity);
					const deUint32				numSparseBlocks	= sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
					const VkImageSubresource	subresource		= { aspect, mipmapNdx, layerNdx };

					const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
						imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageResidencyMemoryBinds.push_back(imageMemoryBind);
				}
			}

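			// Mip levels at or beyond imageMipTailFirstLod belong to the mip tail; they are
			// bound with opaque binds (one bind, or one per layer without SINGLE_MIPTAIL)
			// and are treated as fully resident in the reference data.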
			if (aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
			{
				if (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
				{
					const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
						aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageMipTailBinds.push_back(imageMipTailMemoryBind);
				}
				else
				{
					for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
					{
						const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
							aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

						deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

						imageMipTailBinds.push_back(imageMipTailMemoryBind);
					}
				}

				for (deUint32 pixelNdx = pixelOffset; pixelNdx < residencyReferenceData.size(); ++pixelNdx)
				{
					residencyReferenceData[pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
				}
			}
		}

		// Metadata
		if (metadataAspectIndex != NO_MATCH_FOUND)
		{
			const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];

			const deUint32 metadataBindCount = (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT ? 1u : imageSparseInfo.arrayLayers);
			for (deUint32 bindNdx = 0u; bindNdx < metadataBindCount; ++bindNdx)
			{
				const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
					metadataAspectRequirements.imageMipTailSize, memoryType,
					metadataAspectRequirements.imageMipTailOffset + bindNdx * metadataAspectRequirements.imageMipTailStride,
					VK_SPARSE_MEMORY_BIND_METADATA_BIT);

				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

				imageMipTailBinds.push_back(imageMipTailMemoryBind);
			}
		}

		VkBindSparseInfo bindSparseInfo =
		{
			VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,	//VkStructureType							sType;
			DE_NULL,							//const void*								pNext;
			0u,									//deUint32									waitSemaphoreCount;
			DE_NULL,							//const VkSemaphore*						pWaitSemaphores;
			0u,									//deUint32									bufferBindCount;
			DE_NULL,							//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
			0u,									//deUint32									imageOpaqueBindCount;
			DE_NULL,							//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
			0u,									//deUint32									imageBindCount;
			DE_NULL,							//const VkSparseImageMemoryBindInfo*		pImageBinds;
			1u,									//deUint32									signalSemaphoreCount;
			&memoryBindSemaphore.get()			//const VkSemaphore*						pSignalSemaphores;
		};

		VkSparseImageMemoryBindInfo			imageResidencyBindInfo;
		VkSparseImageOpaqueMemoryBindInfo	imageMipTailBindInfo;

		if (imageResidencyMemoryBinds.size() > 0)
		{
			imageResidencyBindInfo.image		= *imageSparse;
			imageResidencyBindInfo.bindCount	= static_cast<deUint32>(imageResidencyMemoryBinds.size());
			imageResidencyBindInfo.pBinds		= imageResidencyMemoryBinds.data();

			bindSparseInfo.imageBindCount		= 1u;
			bindSparseInfo.pImageBinds			= &imageResidencyBindInfo;
		}

		if (imageMipTailBinds.size() > 0)
		{
			imageMipTailBindInfo.image			= *imageSparse;
			imageMipTailBindInfo.bindCount		= static_cast<deUint32>(imageMipTailBinds.size());
			imageMipTailBindInfo.pBinds			= imageMipTailBinds.data();

			bindSparseInfo.imageOpaqueBindCount = 1u;
			bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
		}

		// Submit sparse bind commands for execution
		VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
	}

	// Create image to store texels copied from sparse image
	imageTexelsInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
	imageTexelsInfo.pNext					= DE_NULL;
	imageTexelsInfo.flags					= 0u;
	imageTexelsInfo.imageType				= imageSparseInfo.imageType;
	imageTexelsInfo.format					= imageSparseInfo.format;
	imageTexelsInfo.extent					= imageSparseInfo.extent;
	imageTexelsInfo.arrayLayers				= imageSparseInfo.arrayLayers;
	imageTexelsInfo.mipLevels				= imageSparseInfo.mipLevels;
	imageTexelsInfo.samples					= imageSparseInfo.samples;
	imageTexelsInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
	imageTexelsInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
	imageTexelsInfo.usage					= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | imageOutputUsageFlags();
	imageTexelsInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
	imageTexelsInfo.queueFamilyIndexCount	= 0u;
	imageTexelsInfo.pQueueFamilyIndices		= DE_NULL;

	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
	{
		imageTexelsInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
	}

	{
		VkImageFormatProperties imageFormatProperties;
		if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
			imageTexelsInfo.format,
			imageTexelsInfo.imageType,
			imageTexelsInfo.tiling,
			imageTexelsInfo.usage,
			imageTexelsInfo.flags,
			&imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			TCU_THROW(NotSupportedError, "Image format not supported for its usage ");
		}
	}

	const Unique<VkImage>			imageTexels			(createImage(deviceInterface, getDevice(), &imageTexelsInfo));
	const de::UniquePtr<Allocation>	imageTexelsAlloc	(bindImage(deviceInterface, getDevice(), getAllocator(), *imageTexels, MemoryRequirement::Any));

	// Create image to store residency info copied from sparse image
	imageResidencyInfo			= imageTexelsInfo;
	imageResidencyInfo.format	= mapTextureFormat(m_residencyFormat);

	{
		VkImageFormatProperties imageFormatProperties;
		if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
			imageResidencyInfo.format,
			imageResidencyInfo.imageType,
			imageResidencyInfo.tiling,
			imageResidencyInfo.usage,
			imageResidencyInfo.flags,
			&imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			TCU_THROW(NotSupportedError, "Image format not supported for its usage ");
		}
	}

	const Unique<VkImage>			imageResidency		(createImage(deviceInterface, getDevice(), &imageResidencyInfo));
	const de::UniquePtr<Allocation>	imageResidencyAlloc	(bindImage(deviceInterface, getDevice(), getAllocator(), *imageResidency, MemoryRequirement::Any));

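	// One buffer-image copy region per (plane, mip level); regions are packed
	// consecutively in the staging buffer at the required offset granularity.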
	std::vector <VkBufferImageCopy> bufferImageSparseCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

	{
		deUint32 bufferOffset = 0u;
		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
			{
				bufferImageSparseCopy[planeNdx*imageSparseInfo.mipLevels + mipmapNdx] =
				{
					bufferOffset,																		//	VkDeviceSize				bufferOffset;
					0u,																					//	deUint32					bufferRowLength;
					0u,																					//	deUint32					bufferImageHeight;
					makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),		//	VkImageSubresourceLayers	imageSubresource;
					makeOffset3D(0, 0, 0),																//	VkOffset3D					imageOffset;
					vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)	//	VkExtent3D					imageExtent;
				};
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}
	}

	// Create command buffer for compute and transfer operations
	const Unique<VkCommandPool>		commandPool(makeCommandPool(deviceInterface, getDevice(), extractQueue.queueFamilyIndex));
	const Unique<VkCommandBuffer>	commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Start recording commands
	beginCommandBuffer(deviceInterface, *commandBuffer);

	// Create input buffer
	const VkBufferCreateInfo		inputBufferCreateInfo	= makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
	const Unique<VkBuffer>			inputBuffer				(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
	const de::UniquePtr<Allocation>	inputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

	// Fill input buffer with reference data
	std::vector<deUint8> referenceData(imageSparseSizeInBytes);

	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeinBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageSparseCopy[mipmapNdx].bufferOffset);

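			// 64-bit formats used with the sample/gather functions get 64-bit reference
			// values; all other cases are filled byte by byte.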
			if (formatIsR64(m_format) &&
				(m_function == SPARSE_SAMPLE_EXPLICIT_LOD || m_function == SPARSE_SAMPLE_IMPLICIT_LOD || m_function == SPARSE_GATHER))
			{
				for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeinBytes/8; byteNdx += 8)
				{
					void* prtData = &referenceData[bufferOffset + byteNdx];
					*(static_cast<deUint64*>(prtData)) = (deUint64)((mipmapNdx + byteNdx) % 0x0FFFFFFF);
				}
			}
			else
			{
				for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeinBytes; ++byteNdx)
				{
					referenceData[bufferOffset + byteNdx] = (deUint8)( (mipmapNdx + byteNdx) % 127u );
				}
			}
		}
	}

	deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSparseSizeInBytes);
	flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

	{
		// Prepare input buffer for data transfer operation
		const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
		(
			VK_ACCESS_HOST_WRITE_BIT,
			VK_ACCESS_TRANSFER_READ_BIT,
			*inputBuffer,
			0u,
			imageSparseSizeInBytes
		);

		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
	}

	{
		// Prepare sparse image for data transfer operation
		std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;
		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			imageSparseTransferDstBarriers.emplace_back(makeImageMemoryBarrier
			(
				0u,
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				*imageSparse,
				makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
				sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
				sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? extractQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED
			));
		}
		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, static_cast<deUint32>(imageSparseTransferDstBarriers.size()), imageSparseTransferDstBarriers.data());
	}

	// Copy reference data from input buffer to sparse image
	deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

	recordCommands(*commandBuffer, imageSparseInfo, *imageSparse, *imageTexels, *imageResidency);

	const VkBufferCreateInfo		bufferTexelsCreateInfo	= makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
	const Unique<VkBuffer>			bufferTexels			(createBuffer(deviceInterface, getDevice(), &bufferTexelsCreateInfo));
	const de::UniquePtr<Allocation>	bufferTexelsAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferTexels, MemoryRequirement::HostVisible));

	// Copy data from texels image to buffer
	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageTexels, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferTexels, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

	const deUint32				imageResidencySizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);

	const VkBufferCreateInfo		bufferResidencyCreateInfo	= makeBufferCreateInfo(imageResidencySizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
	const Unique<VkBuffer>			bufferResidency				(createBuffer(deviceInterface, getDevice(), &bufferResidencyCreateInfo));
	const de::UniquePtr<Allocation>	bufferResidencyAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferResidency, MemoryRequirement::HostVisible));

	// Copy data from residency image to buffer
	std::vector <VkBufferImageCopy> bufferImageResidencyCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

	{
		deUint32 bufferOffset = 0u;
		for (deUint32 planeNdx = 0u; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
			{
				bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] =
				{
					bufferOffset,																		//	VkDeviceSize				bufferOffset;
					0u,																					//	deUint32					bufferRowLength;
					0u,																					//	deUint32					bufferImageHeight;
					makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),		//	VkImageSubresourceLayers	imageSubresource;
					makeOffset3D(0, 0, 0),																//	VkOffset3D					imageOffset;
					vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)	//	VkExtent3D					imageExtent;
				};
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}
	}

	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageResidency, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferResidency, static_cast<deUint32>(bufferImageResidencyCopy.size()), bufferImageResidencyCopy.data());

	{
		VkBufferMemoryBarrier bufferOutputHostReadBarriers[2];

		bufferOutputHostReadBarriers[0] = makeBufferMemoryBarrier
		(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_HOST_READ_BIT,
			*bufferTexels,
			0u,
			imageSparseSizeInBytes
		);

		bufferOutputHostReadBarriers[1] = makeBufferMemoryBarrier
		(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_HOST_READ_BIT,
			*bufferResidency,
			0u,
			imageResidencySizeInBytes
		);

		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 2u, bufferOutputHostReadBarriers, 0u, DE_NULL);
	}

	// End recording commands
	endCommandBuffer(deviceInterface, *commandBuffer);

	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

	// Submit commands for execution and wait for completion
	submitCommandsAndWait(deviceInterface, getDevice(), extractQueue.queueHandle, *commandBuffer, 1u, &memoryBindSemaphore.get(), stageBits);

	// Wait for sparse queue to become idle
	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

	// Retrieve data from residency buffer to host memory
	invalidateAlloc(deviceInterface, getDevice(), *bufferResidencyAlloc);

	const deUint32* bufferResidencyData = static_cast<const deUint32*>(bufferResidencyAlloc->getHostPtr());

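	// Residency values are tightly packed per mip level in the reference vector but placed
	// at aligned offsets in the output buffer, hence the separate aligned/unaligned offsets.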
	deUint32 pixelOffsetNotAligned = 0u;
	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx);
			const deUint32 pixelOffsetAligned	= static_cast<deUint32>(bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset) / tcu::getPixelSize(m_residencyFormat);

			if (deMemCmp(&bufferResidencyData[pixelOffsetAligned], &residencyReferenceData[pixelOffsetNotAligned], mipLevelSizeInBytes) != 0)
				return tcu::TestStatus::fail("Failed");

			pixelOffsetNotAligned += mipLevelSizeInBytes / tcu::getPixelSize(m_residencyFormat);
		}
	}

	// Retrieve data from texels buffer to host memory
	invalidateAlloc(deviceInterface, getDevice(), *bufferTexelsAlloc);

	const deUint8* bufferTexelsData = static_cast<const deUint8*>(bufferTexelsAlloc->getHostPtr());

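	// Bound mip levels must return the reference data, unbound levels must read as zero
	// when residencyNonResidentStrict is supported, and mip tail levels are always bound.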
	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		const VkImageAspectFlags	aspect		= (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
		const deUint32				aspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

		if (aspectIndex == NO_MATCH_FOUND)
			TCU_THROW(NotSupportedError, "Not supported image aspect");

		VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[aspectIndex];

		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

			if (mipmapNdx < aspectRequirements.imageMipTailFirstLod)
			{
				if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_BOUND)
				{
					if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
						return tcu::TestStatus::fail("Failed");
				}
				else if (getPhysicalDeviceProperties(instance, physicalDevice).sparseProperties.residencyNonResidentStrict)
				{
					std::vector<deUint8> zeroData;
					zeroData.assign(mipLevelSizeInBytes, 0u);

					if (deMemCmp(&bufferTexelsData[bufferOffset], zeroData.data(), mipLevelSizeInBytes) != 0)
						return tcu::TestStatus::fail("Failed");
				}
			}
			else
			{
				if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
					return tcu::TestStatus::fail("Failed");
			}
		}
	}

	return tcu::TestStatus::pass("Passed");
}

} // sparse
} // vkt