• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*
20  * \file  vktSparseResourcesShaderIntrinsicsBase.cpp
21  * \brief Sparse Resources Shader Intrinsics Base Classes
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSparseResourcesShaderIntrinsicsBase.hpp"
25 #include "vkCmdUtil.hpp"
26 #include "vkBarrierUtil.hpp"
27 
28 using namespace vk;
29 
30 namespace vkt
31 {
32 namespace sparse
33 {
34 
getOpTypeImageComponent(const tcu::TextureFormat & format)35 std::string getOpTypeImageComponent (const tcu::TextureFormat& format)
36 {
37 	switch (tcu::getTextureChannelClass(format.type))
38 	{
39 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
40 			return "OpTypeInt 32 0";
41 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
42 			return "OpTypeInt 32 1";
43 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
44 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
45 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
46 			return "OpTypeFloat 32";
47 		default:
48 			DE_FATAL("Unexpected channel type");
49 			return "";
50 	}
51 }
52 
getOpTypeImageComponent(const vk::PlanarFormatDescription & description)53 std::string getOpTypeImageComponent (const vk::PlanarFormatDescription& description)
54 {
55 	switch (description.channels[0].type)
56 	{
57 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
58 			return "OpTypeInt 32 0";
59 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
60 			return "OpTypeInt 32 1";
61 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
62 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
63 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
64 			return "OpTypeFloat 32";
65 		default:
66 			DE_FATAL("Unexpected channel type");
67 			return "";
68 	}
69 }
70 
getImageComponentTypeName(const tcu::TextureFormat & format)71 std::string getImageComponentTypeName (const tcu::TextureFormat& format)
72 {
73 	switch (tcu::getTextureChannelClass(format.type))
74 	{
75 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
76 			return "%type_uint";
77 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
78 			return "%type_int";
79 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
80 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
81 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
82 			return "%type_float";
83 		default:
84 			DE_FATAL("Unexpected channel type");
85 			return "";
86 	}
87 }
88 
getImageComponentTypeName(const vk::PlanarFormatDescription & description)89 std::string getImageComponentTypeName (const vk::PlanarFormatDescription& description)
90 {
91 	switch (description.channels[0].type)
92 	{
93 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
94 			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_uint64" : "%type_uint");
95 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
96 			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_int64" : "%type_int");
97 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
98 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
99 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
100 			return "%type_float";
101 		default:
102 			DE_FATAL("Unexpected channel type");
103 			return "";
104 	}
105 }
106 
getImageComponentVec4TypeName(const tcu::TextureFormat & format)107 std::string getImageComponentVec4TypeName (const tcu::TextureFormat& format)
108 {
109 	switch (tcu::getTextureChannelClass(format.type))
110 	{
111 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
112 			return "%type_uvec4";
113 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
114 			return "%type_ivec4";
115 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
116 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
117 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
118 			return "%type_vec4";
119 		default:
120 			DE_FATAL("Unexpected channel type");
121 			return "";
122 	}
123 }
124 
getImageComponentVec4TypeName(const vk::PlanarFormatDescription & description)125 std::string getImageComponentVec4TypeName (const vk::PlanarFormatDescription& description)
126 {
127 
128 	switch (description.channels[0].type)
129 	{
130 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
131 			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_u64vec4" : "%type_uvec4");
132 		case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
133 			return (formatIsR64(description.planes[0].planeCompatibleFormat) ? "%type_i64vec4" : "%type_ivec4");
134 		case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
135 		case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
136 		case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
137 			return "%type_vec4";
138 		default:
139 			DE_FATAL("Unexpected channel type");
140 			return "";
141 	}
142 }
143 
getOpTypeImageSparse(const ImageType imageType,const tcu::TextureFormat & format,const std::string & componentType,const bool requiresSampler)144 std::string getOpTypeImageSparse (const ImageType			imageType,
145 								  const tcu::TextureFormat&	format,
146 								  const std::string&		componentType,
147 								  const bool				requiresSampler)
148 {
149 	std::ostringstream	src;
150 
151 	src << "OpTypeImage " << componentType << " ";
152 
153 	switch (imageType)
154 	{
155 		case IMAGE_TYPE_1D :
156 			src << "1D 0 0 0 ";
157 		break;
158 		case IMAGE_TYPE_1D_ARRAY :
159 			src << "1D 0 1 0 ";
160 		break;
161 		case IMAGE_TYPE_2D :
162 			src << "2D 0 0 0 ";
163 		break;
164 		case IMAGE_TYPE_2D_ARRAY :
165 			src << "2D 0 1 0 ";
166 		break;
167 		case IMAGE_TYPE_3D :
168 			src << "3D 0 0 0 ";
169 		break;
170 		case IMAGE_TYPE_CUBE :
171 			src << "Cube 0 0 0 ";
172 		break;
173 		case IMAGE_TYPE_CUBE_ARRAY :
174 			src << "Cube 0 1 0 ";
175 		break;
176 		default :
177 			DE_FATAL("Unexpected image type");
178 		break;
179 	}
180 
181 	if (requiresSampler)
182 		src << "1 ";
183 	else
184 		src << "2 ";
185 
186 	switch (format.order)
187 	{
188 		case tcu::TextureFormat::R:
189 			src << "R";
190 		break;
191 		case tcu::TextureFormat::RG:
192 			src << "Rg";
193 			break;
194 		case tcu::TextureFormat::RGB:
195 			src << "Rgb";
196 			break;
197 		case tcu::TextureFormat::RGBA:
198 			src << "Rgba";
199 		break;
200 		default:
201 			DE_FATAL("Unexpected channel order");
202 		break;
203 	}
204 
205 	switch (format.type)
206 	{
207 		case tcu::TextureFormat::SIGNED_INT8:
208 			src << "8i";
209 		break;
210 		case tcu::TextureFormat::SIGNED_INT16:
211 			src << "16i";
212 		break;
213 		case tcu::TextureFormat::SIGNED_INT32:
214 			src << "32i";
215 		break;
216 		case tcu::TextureFormat::UNSIGNED_INT8:
217 			src << "8ui";
218 		break;
219 		case tcu::TextureFormat::UNSIGNED_INT16:
220 			src << "16ui";
221 		break;
222 		case tcu::TextureFormat::UNSIGNED_INT32:
223 			src << "32ui";
224 		break;
225 		case tcu::TextureFormat::SNORM_INT8:
226 			src << "8Snorm";
227 		break;
228 		case tcu::TextureFormat::SNORM_INT16:
229 			src << "16Snorm";
230 		break;
231 		case tcu::TextureFormat::SNORM_INT32:
232 			src << "32Snorm";
233 		break;
234 		case tcu::TextureFormat::UNORM_INT8:
235 			src << "8";
236 		break;
237 		case tcu::TextureFormat::UNORM_INT16:
238 			src << "16";
239 		break;
240 		case tcu::TextureFormat::UNORM_INT32:
241 			src << "32";
242 		break;
243 		default:
244 			DE_FATAL("Unexpected channel type");
245 		break;
246 	}
247 
248 	return src.str();
249 }
250 
// Builds a SPIR-V "OpTypeImage" operand string for a sparse image, selecting
// the image format token directly from the VkFormat (unlike the
// tcu::TextureFormat overload, this one also covers 64-bit and YCbCr/planar
// formats). requiresSampler selects the Sampled operand: 1 = used with a
// sampler, 2 = storage image.
std::string getOpTypeImageSparse (const ImageType		imageType,
								  const VkFormat		format,
								  const std::string&	componentType,
								  const bool			requiresSampler)
{
	std::ostringstream	src;

	src << "OpTypeImage " << componentType << " ";

	// Dim, Depth, Arrayed and MS operands.
	switch (imageType)
	{
		case IMAGE_TYPE_1D :
			src << "1D 0 0 0 ";
		break;
		case IMAGE_TYPE_1D_ARRAY :
			src << "1D 0 1 0 ";
		break;
		case IMAGE_TYPE_2D :
			src << "2D 0 0 0 ";
		break;
		case IMAGE_TYPE_2D_ARRAY :
			src << "2D 0 1 0 ";
		break;
		case IMAGE_TYPE_3D :
			src << "3D 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE :
			src << "Cube 0 0 0 ";
		break;
		case IMAGE_TYPE_CUBE_ARRAY :
			src << "Cube 0 1 0 ";
		break;
		default :
			DE_FATAL("Unexpected image type");
		break;
	}

	// Sampled operand.
	if (requiresSampler)
		src << "1 ";
	else
		src << "2 ";

	// Image format token. Multi-planar and packed YCbCr formats are mapped to
	// the closest single-plane RGBA token of matching channel width (e.g. the
	// 10X6/12X4 packed formats use the 16-bit tokens).
	switch (format)
	{
		case VK_FORMAT_R8_SINT:										src <<	"R8i";			break;
		case VK_FORMAT_R16_SINT:									src <<	"R16i";			break;
		case VK_FORMAT_R32_SINT:									src <<	"R32i";			break;
		case VK_FORMAT_R64_SINT:									src <<	"R64i";			break;
		case VK_FORMAT_R8_UINT:										src <<	"R8ui";			break;
		case VK_FORMAT_R16_UINT:									src <<	"R16ui";		break;
		case VK_FORMAT_R32_UINT:									src <<	"R32ui";		break;
		case VK_FORMAT_R64_UINT:									src <<	"R64ui";		break;
		case VK_FORMAT_R8_SNORM:									src <<	"R8Snorm";		break;
		case VK_FORMAT_R16_SNORM:									src <<	"R16Snorm";		break;
		case VK_FORMAT_R8_UNORM:									src <<	"R8";			break;
		case VK_FORMAT_R16_UNORM:									src <<	"R16";			break;

		case VK_FORMAT_R8G8_SINT:									src <<	"Rg8i";			break;
		case VK_FORMAT_R16G16_SINT:									src <<	"Rg16i";		break;
		case VK_FORMAT_R32G32_SINT:									src <<	"Rg32i";		break;
		case VK_FORMAT_R8G8_UINT:									src <<	"Rg8ui";		break;
		case VK_FORMAT_R16G16_UINT:									src <<	"Rg16ui";		break;
		case VK_FORMAT_R32G32_UINT:									src <<	"Rg32ui";		break;
		case VK_FORMAT_R8G8_SNORM:									src <<	"Rg8Snorm";		break;
		case VK_FORMAT_R16G16_SNORM:								src <<	"Rg16Snorm";	break;
		case VK_FORMAT_R8G8_UNORM:									src <<	"Rg8";			break;
		case VK_FORMAT_R16G16_UNORM:								src <<	"Rg16";			break;

		case VK_FORMAT_R8G8B8A8_SINT:								src <<	"Rgba8i";		break;
		case VK_FORMAT_R16G16B16A16_SINT:							src <<	"Rgba16i";		break;
		case VK_FORMAT_R32G32B32A32_SINT:							src <<	"Rgba32i";		break;
		case VK_FORMAT_R8G8B8A8_UINT:								src <<	"Rgba8ui";		break;
		case VK_FORMAT_R16G16B16A16_UINT:							src <<	"Rgba16ui";		break;
		case VK_FORMAT_R32G32B32A32_UINT:							src <<	"Rgba32ui";		break;
		case VK_FORMAT_R8G8B8A8_SNORM:								src <<	"Rgba8Snorm";	break;
		case VK_FORMAT_R16G16B16A16_SNORM:							src <<	"Rgba16Snorm";	break;
		case VK_FORMAT_R8G8B8A8_UNORM:								src <<	"Rgba8";		break;
		case VK_FORMAT_R16G16B16A16_UNORM:							src <<	"Rgba16";		break;

		case VK_FORMAT_G8B8G8R8_422_UNORM:							src <<	"Rgba8";		break;
		case VK_FORMAT_B8G8R8G8_422_UNORM:							src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:					src <<	"Rgba8";		break;
		case VK_FORMAT_R10X6_UNORM_PACK16:							src <<	"R16";			break;
		case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:					src <<	"Rg16";			break;
		case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:			src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_R12X4_UNORM_PACK16:							src <<	"R16";			break;
		case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:					src <<	"Rg16";			break;
		case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:			src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:		src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:	src <<	"Rgba16";		break;
		case VK_FORMAT_G16B16G16R16_422_UNORM:						src <<	"Rgba16";		break;
		case VK_FORMAT_B16G16R16G16_422_UNORM:						src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:					src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:					src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:				src <<	"Rgba16";		break;
		case VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT:				src <<	"Rgba8";		break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT:src <<	"Rgba16";		break;
		case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT:src <<	"Rgba16";		break;
		case VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT:				src <<	"Rgba16";		break;

		default:
			DE_FATAL("Unexpected texture format");
			break;
	}
	return src.str();
}
375 
376 
getOpTypeImageResidency(const ImageType imageType)377 std::string getOpTypeImageResidency (const ImageType imageType)
378 {
379 	std::ostringstream	src;
380 
381 	src << "OpTypeImage %type_uint ";
382 
383 	switch (imageType)
384 	{
385 		case IMAGE_TYPE_1D :
386 			src << "1D 0 0 0 2 R32ui";
387 		break;
388 		case IMAGE_TYPE_1D_ARRAY :
389 			src << "1D 0 1 0 2 R32ui";
390 		break;
391 		case IMAGE_TYPE_2D :
392 			src << "2D 0 0 0 2 R32ui";
393 		break;
394 		case IMAGE_TYPE_2D_ARRAY :
395 			src << "2D 0 1 0 2 R32ui";
396 		break;
397 		case IMAGE_TYPE_3D :
398 			src << "3D 0 0 0 2 R32ui";
399 		break;
400 		case IMAGE_TYPE_CUBE :
401 			src << "Cube 0 0 0 2 R32ui";
402 		break;
403 		case IMAGE_TYPE_CUBE_ARRAY :
404 			src << "Cube 0 1 0 2 R32ui";
405 		break;
406 		default :
407 			DE_FATAL("Unexpected image type");
408 		break;
409 	}
410 
411 	return src.str();
412 }
413 
checkSupport(VkImageCreateInfo imageSparseInfo) const414 void SparseShaderIntrinsicsInstanceBase::checkSupport(VkImageCreateInfo imageSparseInfo) const
415 {
416 	const InstanceInterface&			instance				= m_context.getInstanceInterface();
417 	const VkPhysicalDevice				physicalDevice			= m_context.getPhysicalDevice();
418 
419 	if (formatIsR64(m_format))
420 	{
421 		m_context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
422 
423 		if (m_context.getShaderImageAtomicInt64FeaturesEXT().shaderImageInt64Atomics == VK_FALSE)
424 		{
425 			TCU_THROW(NotSupportedError, "shaderImageInt64Atomics is not supported");
426 		}
427 
428 		if (m_context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics == VK_FALSE)
429 		{
430 			TCU_THROW(NotSupportedError, "sparseImageInt64Atomics is not supported for device");
431 		}
432 	}
433 
434 	// Check if device supports sparse operations for image format
435 	if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
436 		TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
437 }
438 
iterate(void)439 tcu::TestStatus SparseShaderIntrinsicsInstanceBase::iterate (void)
440 {
441 	const InstanceInterface&			instance				= m_context.getInstanceInterface();
442 	const VkPhysicalDevice				physicalDevice			= m_context.getPhysicalDevice();
443 	VkImageCreateInfo					imageSparseInfo;
444 	VkImageCreateInfo					imageTexelsInfo;
445 	VkImageCreateInfo					imageResidencyInfo;
446 	std::vector <deUint32>				residencyReferenceData;
447 	std::vector<DeviceMemorySp>			deviceMemUniquePtrVec;
448 	const PlanarFormatDescription		formatDescription		= getPlanarFormatDescription(m_format);
449 
450 	imageSparseInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
451 	imageSparseInfo.pNext					= DE_NULL;
452 	imageSparseInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
453 	imageSparseInfo.imageType				= mapImageType(m_imageType);
454 	imageSparseInfo.format					= m_format;
455 	imageSparseInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));
456 	imageSparseInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);
457 	imageSparseInfo.samples					= VK_SAMPLE_COUNT_1_BIT;
458 	imageSparseInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
459 	imageSparseInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
460 	imageSparseInfo.usage					= VK_IMAGE_USAGE_TRANSFER_DST_BIT | imageSparseUsageFlags();
461 	imageSparseInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
462 	imageSparseInfo.queueFamilyIndexCount	= 0u;
463 	imageSparseInfo.pQueueFamilyIndices		= DE_NULL;
464 
465 	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
466 	{
467 		imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
468 	}
469 
470 	checkSupport(imageSparseInfo);
471 
472 	{
473 		// Assign maximum allowed mipmap levels to image
474 		VkImageFormatProperties imageFormatProperties;
475 		if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
476 			imageSparseInfo.format,
477 			imageSparseInfo.imageType,
478 			imageSparseInfo.tiling,
479 			imageSparseInfo.usage,
480 			imageSparseInfo.flags,
481 			&imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
482 		{
483 			TCU_THROW(NotSupportedError, "Image format does not support sparse operations");
484 		}
485 
486 		imageSparseInfo.mipLevels = getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
487 	}
488 
489 	// Create image to store texels copied from sparse image
490 	imageTexelsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
491 	imageTexelsInfo.pNext = DE_NULL;
492 	imageTexelsInfo.flags = 0u;
493 	imageTexelsInfo.imageType = imageSparseInfo.imageType;
494 	imageTexelsInfo.format = imageSparseInfo.format;
495 	imageTexelsInfo.extent = imageSparseInfo.extent;
496 	imageTexelsInfo.arrayLayers = imageSparseInfo.arrayLayers;
497 	imageTexelsInfo.mipLevels = imageSparseInfo.mipLevels;
498 	imageTexelsInfo.samples = imageSparseInfo.samples;
499 	imageTexelsInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
500 	imageTexelsInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
501 	imageTexelsInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | imageOutputUsageFlags();
502 	imageTexelsInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
503 	imageTexelsInfo.queueFamilyIndexCount = 0u;
504 	imageTexelsInfo.pQueueFamilyIndices = DE_NULL;
505 
506 	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
507 	{
508 		imageTexelsInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
509 	}
510 
511 	checkImageSupport(instance, physicalDevice, imageTexelsInfo);
512 
513 	{
514 		// Create logical device supporting both sparse and compute/graphics queues
515 		QueueRequirementsVec queueRequirements;
516 		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
517 		queueRequirements.push_back(QueueRequirements(getQueueFlags(), 1u));
518 
519 		createDeviceSupportingQueues(queueRequirements, formatIsR64(m_format));
520 	}
521 
522 	// Create queues supporting sparse binding operations and compute/graphics operations
523 	const DeviceInterface&			deviceInterface		= getDeviceInterface();
524 	const Queue&					sparseQueue			= getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
525 	const Queue&					extractQueue		= getQueue(getQueueFlags(), 0);
526 
527 	// Create sparse image
528 	const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
529 
530 	// Create sparse image memory bind semaphore
531 	const Unique<VkSemaphore> memoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
532 
533 	std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;
534 
535 	deUint32	imageSparseSizeInBytes	= 0;
536 	deUint32	imageSizeInPixels		= 0;
537 
538 	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
539 	{
540 		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
541 		{
542 			imageSparseSizeInBytes	+= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
543 			imageSizeInPixels		+= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;
544 		}
545 	}
546 
547 	residencyReferenceData.assign(imageSizeInPixels, MEMORY_BLOCK_NOT_BOUND_VALUE);
548 
549 	{
550 		// Get sparse image general memory requirements
551 		const VkMemoryRequirements				imageMemoryRequirements	= getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
552 
553 		// Check if required image memory size does not exceed device limits
554 		if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
555 			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
556 
557 		DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
558 
559 		const deUint32							memoryType				= findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
560 
561 		if (memoryType == NO_MATCH_FOUND)
562 			return tcu::TestStatus::fail("No matching memory type found");
563 
564 		// Get sparse image sparse memory requirements
565 		sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
566 
567 		DE_ASSERT(sparseMemoryRequirements.size() != 0);
568 
569 		const deUint32							metadataAspectIndex		= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
570 		deUint32								pixelOffset				= 0u;
571 		std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
572 		std::vector<VkSparseMemoryBind>			imageMipTailBinds;
573 
574 		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
575 		{
576 			const VkImageAspectFlags		aspect				= (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
577 			const deUint32					aspectIndex			= getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);
578 
579 			if (aspectIndex == NO_MATCH_FOUND)
580 				TCU_THROW(NotSupportedError, "Not supported image aspect");
581 
582 			VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[aspectIndex];
583 
584 			DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
585 
586 			VkExtent3D						imageGranularity	= aspectRequirements.formatProperties.imageGranularity;
587 
588 			// Bind memory for each mipmap level
589 			for (deUint32 mipmapNdx = 0; mipmapNdx < aspectRequirements.imageMipTailFirstLod; ++mipmapNdx)
590 			{
591 				const deUint32 mipLevelSizeInPixels = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;
592 
593 				if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_NOT_BOUND)
594 				{
595 					pixelOffset += mipLevelSizeInPixels;
596 					continue;
597 				}
598 
599 				for (deUint32 pixelNdx = 0u; pixelNdx < mipLevelSizeInPixels; ++pixelNdx)
600 				{
601 					residencyReferenceData[pixelOffset + pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
602 				}
603 
604 				pixelOffset += mipLevelSizeInPixels;
605 
606 				for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
607 				{
608 					const VkExtent3D			mipExtent		= getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx);
609 					const tcu::UVec3			sparseBlocks	= alignedDivide(mipExtent, imageGranularity);
610 					const deUint32				numSparseBlocks	= sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
611 					const VkImageSubresource	subresource		= { aspect, mipmapNdx, layerNdx };
612 
613 					const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
614 						imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
615 
616 					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
617 
618 					imageResidencyMemoryBinds.push_back(imageMemoryBind);
619 				}
620 			}
621 
622 			if (aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
623 			{
624 				if (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
625 				{
626 					const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
627 						aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
628 
629 					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
630 
631 					imageMipTailBinds.push_back(imageMipTailMemoryBind);
632 				}
633 				else
634 				{
635 					for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
636 					{
637 						const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
638 							aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
639 
640 						deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
641 
642 						imageMipTailBinds.push_back(imageMipTailMemoryBind);
643 					}
644 				}
645 
646 				for (deUint32 pixelNdx = pixelOffset; pixelNdx < residencyReferenceData.size(); ++pixelNdx)
647 				{
648 					residencyReferenceData[pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
649 				}
650 			}
651 		}
652 
653 		// Metadata
654 		if (metadataAspectIndex != NO_MATCH_FOUND)
655 		{
656 			const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
657 
658 			const deUint32 metadataBindCount = (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT ? 1u : imageSparseInfo.arrayLayers);
659 			for (deUint32 bindNdx = 0u; bindNdx < metadataBindCount; ++bindNdx)
660 			{
661 				const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
662 					metadataAspectRequirements.imageMipTailSize, memoryType,
663 					metadataAspectRequirements.imageMipTailOffset + bindNdx * metadataAspectRequirements.imageMipTailStride,
664 					VK_SPARSE_MEMORY_BIND_METADATA_BIT);
665 
666 				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
667 
668 				imageMipTailBinds.push_back(imageMipTailMemoryBind);
669 			}
670 		}
671 
672 		VkBindSparseInfo bindSparseInfo =
673 		{
674 			VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,	//VkStructureType							sType;
675 			DE_NULL,							//const void*								pNext;
676 			0u,									//deUint32									waitSemaphoreCount;
677 			DE_NULL,							//const VkSemaphore*						pWaitSemaphores;
678 			0u,									//deUint32									bufferBindCount;
679 			DE_NULL,							//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
680 			0u,									//deUint32									imageOpaqueBindCount;
681 			DE_NULL,							//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
682 			0u,									//deUint32									imageBindCount;
683 			DE_NULL,							//const VkSparseImageMemoryBindInfo*		pImageBinds;
684 			1u,									//deUint32									signalSemaphoreCount;
685 			&memoryBindSemaphore.get()			//const VkSemaphore*						pSignalSemaphores;
686 		};
687 
688 		VkSparseImageMemoryBindInfo			imageResidencyBindInfo;
689 		VkSparseImageOpaqueMemoryBindInfo	imageMipTailBindInfo;
690 
691 		if (imageResidencyMemoryBinds.size() > 0)
692 		{
693 			imageResidencyBindInfo.image		= *imageSparse;
694 			imageResidencyBindInfo.bindCount	= static_cast<deUint32>(imageResidencyMemoryBinds.size());
695 			imageResidencyBindInfo.pBinds		= imageResidencyMemoryBinds.data();

			bindSparseInfo.imageBindCount		= 1u;
			bindSparseInfo.pImageBinds			= &imageResidencyBindInfo;
		}

		// Opaque (mip tail) binds are submitted separately from the per-block residency binds above
		if (imageMipTailBinds.size() > 0)
		{
			imageMipTailBindInfo.image			= *imageSparse;
			imageMipTailBindInfo.bindCount		= static_cast<deUint32>(imageMipTailBinds.size());
			imageMipTailBindInfo.pBinds			= imageMipTailBinds.data();

			bindSparseInfo.imageOpaqueBindCount = 1u;
			bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
		}

		// Submit sparse bind commands for execution
		VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
	}

	// Non-sparse image that will receive the texel data extracted from the sparse image
	const Unique<VkImage>			imageTexels			(createImage(deviceInterface, getDevice(), &imageTexelsInfo));
	const de::UniquePtr<Allocation>	imageTexelsAlloc	(bindImage(deviceInterface, getDevice(), getAllocator(), *imageTexels, MemoryRequirement::Any));

	// Create image to store residency info copied from sparse image
	imageResidencyInfo			= imageTexelsInfo;
	imageResidencyInfo.format	= mapTextureFormat(m_residencyFormat);

	{
		// Residency image reuses the texels image parameters with a different format,
		// so format support must be re-checked for that format
		VkImageFormatProperties imageFormatProperties;
		if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
			imageResidencyInfo.format,
			imageResidencyInfo.imageType,
			imageResidencyInfo.tiling,
			imageResidencyInfo.usage,
			imageResidencyInfo.flags,
			&imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			TCU_THROW(NotSupportedError, "Image format not supported for its usage ");
		}
	}

	const Unique<VkImage>			imageResidency		(createImage(deviceInterface, getDevice(), &imageResidencyInfo));
	const de::UniquePtr<Allocation>	imageResidencyAlloc	(bindImage(deviceInterface, getDevice(), getAllocator(), *imageResidency, MemoryRequirement::Any));

	// One buffer<->image copy region per (plane, mip level); regions are laid out
	// plane-major in the buffer, i.e. region index = planeNdx * mipLevels + mipmapNdx
	std::vector <VkBufferImageCopy> bufferImageSparseCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

	{
		deUint32 bufferOffset = 0u;
		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
			{
				bufferImageSparseCopy[planeNdx*imageSparseInfo.mipLevels + mipmapNdx] =
				{
					bufferOffset,																		//	VkDeviceSize				bufferOffset;
					0u,																					//	deUint32					bufferRowLength;
					0u,																					//	deUint32					bufferImageHeight;
					makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),		//	VkImageSubresourceLayers	imageSubresource;
					makeOffset3D(0, 0, 0),																//	VkOffset3D					imageOffset;
					vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)	//	VkExtent3D					imageExtent;
				};
				// Offsets are aligned to the copy granularity so every region start is legal for vkCmdCopy*
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}
	}

	// Create command buffer for compute and transfer operations
	const Unique<VkCommandPool>		commandPool(makeCommandPool(deviceInterface, getDevice(), extractQueue.queueFamilyIndex));
	const Unique<VkCommandBuffer>	commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Start recording commands
	beginCommandBuffer(deviceInterface, *commandBuffer);

	// Create input buffer
	const VkBufferCreateInfo		inputBufferCreateInfo	= makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
	const Unique<VkBuffer>			inputBuffer				(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
	const de::UniquePtr<Allocation>	inputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

	// Fill input buffer with reference data
	std::vector<deUint8> referenceData(imageSparseSizeInBytes);

	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeinBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
			// NOTE(review): region lookup uses [mipmapNdx] only, whereas the regions above and
			// the readback comparison below use [planeNdx * mipLevels + mipmapNdx]. For
			// single-plane formats (planeNdx == 0) these coincide, but for multi-planar
			// formats this reuses plane-0 offsets -- confirm and align the indexing.
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageSparseCopy[mipmapNdx].bufferOffset);

			// R64 sampled/gathered formats get 64-bit reference texels, written 8 bytes at a time
			if (formatIsR64(m_format) &&
				(m_function == SPARSE_SAMPLE_EXPLICIT_LOD || m_function == SPARSE_SAMPLE_IMPLICIT_LOD || m_function == SPARSE_GATHER))
			{
				// NOTE(review): the bound is mipLevelSizeinBytes/8 while the step is already 8,
				// so only the first 1/8th of each level receives a non-zero pattern. The test
				// stays self-consistent (the same referenceData is uploaded and compared), but
				// coverage is reduced -- verify whether the intended bound is mipLevelSizeinBytes.
				for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeinBytes/8; byteNdx += 8)
				{
					void* prtData = &referenceData[bufferOffset + byteNdx];
					*(static_cast<deUint64*>(prtData)) = (deUint64)((mipmapNdx + byteNdx) % 0x0FFFFFFF);
				}
			}
			else
			{
				// Byte-wise pseudo-pattern; modulo 127 keeps values representable in any 8-bit channel
				for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeinBytes; ++byteNdx)
				{
					referenceData[bufferOffset + byteNdx] = (deUint8)( (mipmapNdx + byteNdx) % 127u );
				}
			}
		}
	}

	deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSparseSizeInBytes);
	flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

	{
		// Prepare input buffer for data transfer operation
		const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
		(
			VK_ACCESS_HOST_WRITE_BIT,
			VK_ACCESS_TRANSFER_READ_BIT,
			*inputBuffer,
			0u,
			imageSparseSizeInBytes
		);

		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
	}

	{
		// Prepare sparse image for data transfer operation
		std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;
		for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			// Layout transition UNDEFINED -> TRANSFER_DST; also performs a queue family
			// ownership transfer when the sparse-binding and transfer queues differ
			imageSparseTransferDstBarriers.emplace_back(makeImageMemoryBarrier
			(
				0u,
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				*imageSparse,
				makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
				sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
				sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? extractQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED
			));
		}
		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, static_cast<deUint32>(imageSparseTransferDstBarriers.size()), imageSparseTransferDstBarriers.data());
	}

	// Copy reference data from input buffer to sparse image
	deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

	// Derived class records the shader-intrinsic work that fills imageTexels/imageResidency
	// and is expected to leave both in TRANSFER_SRC layout for the copies below
	recordCommands(*commandBuffer, imageSparseInfo, *imageSparse, *imageTexels, *imageResidency);

	const VkBufferCreateInfo		bufferTexelsCreateInfo	= makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
	const Unique<VkBuffer>			bufferTexels			(createBuffer(deviceInterface, getDevice(), &bufferTexelsCreateInfo));
	const de::UniquePtr<Allocation>	bufferTexelsAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferTexels, MemoryRequirement::HostVisible));

	// Copy data from texels image to buffer
	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageTexels, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferTexels, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

	const deUint32				imageResidencySizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);

	const VkBufferCreateInfo		bufferResidencyCreateInfo	= makeBufferCreateInfo(imageResidencySizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
	const Unique<VkBuffer>			bufferResidency				(createBuffer(deviceInterface, getDevice(), &bufferResidencyCreateInfo));
	const de::UniquePtr<Allocation>	bufferResidencyAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferResidency, MemoryRequirement::HostVisible));

	// Copy data from residency image to buffer
	std::vector <VkBufferImageCopy> bufferImageResidencyCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

	{
		// Same plane-major region layout as bufferImageSparseCopy, but sized for m_residencyFormat
		deUint32 bufferOffset = 0u;
		for (deUint32 planeNdx = 0u; planeNdx < formatDescription.numPlanes; ++planeNdx)
		{
			const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

			for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
			{
				bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] =
				{
					bufferOffset,																		//	VkDeviceSize				bufferOffset;
					0u,																					//	deUint32					bufferRowLength;
					0u,																					//	deUint32					bufferImageHeight;
					makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),		//	VkImageSubresourceLayers	imageSubresource;
					makeOffset3D(0, 0, 0),																//	VkOffset3D					imageOffset;
					vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)	//	VkExtent3D					imageExtent;
				};
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}
	}

	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageResidency, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferResidency, static_cast<deUint32>(bufferImageResidencyCopy.size()), bufferImageResidencyCopy.data());

	{
		// Make both output buffers visible to host reads before the host maps them
		VkBufferMemoryBarrier bufferOutputHostReadBarriers[2];

		bufferOutputHostReadBarriers[0] = makeBufferMemoryBarrier
		(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_HOST_READ_BIT,
			*bufferTexels,
			0u,
			imageSparseSizeInBytes
		);

		bufferOutputHostReadBarriers[1] = makeBufferMemoryBarrier
		(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_HOST_READ_BIT,
			*bufferResidency,
			0u,
			imageResidencySizeInBytes
		);

		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 2u, bufferOutputHostReadBarriers, 0u, DE_NULL);
	}

	// End recording commands
	endCommandBuffer(deviceInterface, *commandBuffer);

	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

	// Submit commands for execution and wait for completion;
	// waits on memoryBindSemaphore so transfers start only after sparse binding completed
	submitCommandsAndWait(deviceInterface, getDevice(), extractQueue.queueHandle, *commandBuffer, 1u, &memoryBindSemaphore.get(), stageBits);

	// Wait for sparse queue to become idle
	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

	// Retrieve data from residency buffer to host memory
	invalidateAlloc(deviceInterface, getDevice(), *bufferResidencyAlloc);

	const deUint32* bufferResidencyData = static_cast<const deUint32*>(bufferResidencyAlloc->getHostPtr());

	// Compare residency output against the precomputed reference, level by level.
	// The GPU copy is granularity-aligned ("aligned" offsets) while the reference
	// array is densely packed ("not aligned" offsets), hence the two cursors.
	deUint32 pixelOffsetNotAligned = 0u;
	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx);
			const deUint32 pixelOffsetAligned	= static_cast<deUint32>(bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset) / tcu::getPixelSize(m_residencyFormat);

			if (deMemCmp(&bufferResidencyData[pixelOffsetAligned], &residencyReferenceData[pixelOffsetNotAligned], mipLevelSizeInBytes) != 0)
				return tcu::TestStatus::fail("Failed");

			pixelOffsetNotAligned += mipLevelSizeInBytes / tcu::getPixelSize(m_residencyFormat);
		}
}
	// NOTE(review): the closing brace above is misindented (column 0); it closes the
	// planeNdx loop and should carry one tab of indentation.
	// Retrieve data from texels buffer to host memory
	invalidateAlloc(deviceInterface, getDevice(), *bufferTexelsAlloc);

	const deUint8* bufferTexelsData = static_cast<const deUint8*>(bufferTexelsAlloc->getHostPtr());

	for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
	{
		const VkImageAspectFlags	aspect		= (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
		const deUint32				aspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

		if (aspectIndex == NO_MATCH_FOUND)
			TCU_THROW(NotSupportedError, "Not supported image aspect");

		VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[aspectIndex];

		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,planeNdx, mipmapNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

			// Levels below the mip tail alternate bound/unbound memory blocks;
			// bound levels must match the reference exactly
			if (mipmapNdx < aspectRequirements.imageMipTailFirstLod)
			{
				if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_BOUND)
				{
					if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
						return tcu::TestStatus::fail("Failed");
				}
				else if (getPhysicalDeviceProperties(instance, physicalDevice).sparseProperties.residencyNonResidentStrict)
				{
					// With strict residency, reads from unbound regions must return zero
					std::vector<deUint8> zeroData;
					zeroData.assign(mipLevelSizeInBytes, 0u);

					if (deMemCmp(&bufferTexelsData[bufferOffset], zeroData.data(), mipLevelSizeInBytes) != 0)
						return tcu::TestStatus::fail("Failed");
				}
			}
			else
			{
				// Mip tail levels are always backed by the opaque binds, so they must match
				if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
					return tcu::TestStatus::fail("Failed");
			}
		}
	}

	return tcu::TestStatus::pass("Passed");
}
988 
989 } // sparse
990 } // vkt
991