/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*
 * \file  vktSparseResourcesShaderIntrinsicsBase.cpp
 * \brief Sparse Resources Shader Intrinsics Base Classes
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesShaderIntrinsicsBase.hpp"
#include "vkCmdUtil.hpp"
#include "vkBarrierUtil.hpp"

using namespace vk;

namespace vkt
{
namespace sparse
{
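
// The helper functions below return SPIR-V assembly fragments (scalar/vector type
// declarations and OpTypeImage strings) that are spliced into the SPIR-V shader
// sources assembled by the derived sparse shader intrinsics test cases.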
std::string getOpTypeImageComponent (const tcu::TextureFormat& format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "OpTypeInt 32 0";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "OpTypeInt 32 1";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "OpTypeFloat 32";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}

std::string getOpTypeImageComponent (const vk::PlanarFormatDescription& description)
{
    switch (description.channels[0].type)
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "OpTypeInt 32 0";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "OpTypeInt 32 1";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "OpTypeFloat 32";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}
std::string getImageComponentTypeName (const tcu::TextureFormat& format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "%type_uint";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "%type_int";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "%type_float";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}

std::string getImageComponentTypeName (const vk::PlanarFormatDescription& description)
{
    switch (description.channels[0].type)
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "%type_uint";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "%type_int";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "%type_float";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}

std::string getImageComponentVec4TypeName (const tcu::TextureFormat& format)
{
    switch (tcu::getTextureChannelClass(format.type))
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "%type_uvec4";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "%type_ivec4";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "%type_vec4";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}

std::string getImageComponentVec4TypeName (const vk::PlanarFormatDescription& description)
{
    switch (description.channels[0].type)
    {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            return "%type_uvec4";
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            return "%type_ivec4";
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            return "%type_vec4";
        default:
            DE_FATAL("Unexpected channel type");
            return "";
    }
}
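
// getOpTypeImageSparse() builds an "OpTypeImage" declaration for the tested image.
// The operands following the sampled type are, in order: Dim, Depth, Arrayed, MS,
// Sampled (1 = used with a sampler, 2 = storage image) and the image format.
// For example, a sampled 2D array image with componentType "%type_uint" and a
// single-channel 32-bit unsigned integer format yields:
//     "OpTypeImage %type_uint 2D 0 1 0 1 R32ui"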
std::string getOpTypeImageSparse (const ImageType imageType,
                                  const tcu::TextureFormat& format,
                                  const std::string& componentType,
                                  const bool requiresSampler)
{
    std::ostringstream src;

    src << "OpTypeImage " << componentType << " ";

    switch (imageType)
    {
        case IMAGE_TYPE_1D:
            src << "1D 0 0 0 ";
            break;
        case IMAGE_TYPE_1D_ARRAY:
            src << "1D 0 1 0 ";
            break;
        case IMAGE_TYPE_2D:
            src << "2D 0 0 0 ";
            break;
        case IMAGE_TYPE_2D_ARRAY:
            src << "2D 0 1 0 ";
            break;
        case IMAGE_TYPE_3D:
            src << "3D 0 0 0 ";
            break;
        case IMAGE_TYPE_CUBE:
            src << "Cube 0 0 0 ";
            break;
        case IMAGE_TYPE_CUBE_ARRAY:
            src << "Cube 0 1 0 ";
            break;
        default:
            DE_FATAL("Unexpected image type");
            break;
    }

    if (requiresSampler)
        src << "1 ";
    else
        src << "2 ";

    switch (format.order)
    {
        case tcu::TextureFormat::R:
            src << "R";
            break;
        case tcu::TextureFormat::RG:
            src << "Rg";
            break;
        case tcu::TextureFormat::RGB:
            src << "Rgb";
            break;
        case tcu::TextureFormat::RGBA:
            src << "Rgba";
            break;
        default:
            DE_FATAL("Unexpected channel order");
            break;
    }

    switch (format.type)
    {
        case tcu::TextureFormat::SIGNED_INT8:
            src << "8i";
            break;
        case tcu::TextureFormat::SIGNED_INT16:
            src << "16i";
            break;
        case tcu::TextureFormat::SIGNED_INT32:
            src << "32i";
            break;
        case tcu::TextureFormat::UNSIGNED_INT8:
            src << "8ui";
            break;
        case tcu::TextureFormat::UNSIGNED_INT16:
            src << "16ui";
            break;
        case tcu::TextureFormat::UNSIGNED_INT32:
            src << "32ui";
            break;
        case tcu::TextureFormat::SNORM_INT8:
            src << "8Snorm";
            break;
        case tcu::TextureFormat::SNORM_INT16:
            src << "16Snorm";
            break;
        case tcu::TextureFormat::SNORM_INT32:
            src << "32Snorm";
            break;
        case tcu::TextureFormat::UNORM_INT8:
            src << "8";
            break;
        case tcu::TextureFormat::UNORM_INT16:
            src << "16";
            break;
        case tcu::TextureFormat::UNORM_INT32:
            src << "32";
            break;
        default:
            DE_FATAL("Unexpected channel type");
            break;
    }

    return src.str();
}
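
// The VkFormat overload below additionally maps multi-planar and YCbCr formats onto a
// representative single-plane SPIR-V image format of matching channel width,
// e.g. VK_FORMAT_G8_B8R8_2PLANE_420_UNORM maps to Rgba8.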
std::string getOpTypeImageSparse (const ImageType imageType,
                                  const VkFormat format,
                                  const std::string& componentType,
                                  const bool requiresSampler)
{
    std::ostringstream src;

    src << "OpTypeImage " << componentType << " ";

    switch (imageType)
    {
        case IMAGE_TYPE_1D:
            src << "1D 0 0 0 ";
            break;
        case IMAGE_TYPE_1D_ARRAY:
            src << "1D 0 1 0 ";
            break;
        case IMAGE_TYPE_2D:
            src << "2D 0 0 0 ";
            break;
        case IMAGE_TYPE_2D_ARRAY:
            src << "2D 0 1 0 ";
            break;
        case IMAGE_TYPE_3D:
            src << "3D 0 0 0 ";
            break;
        case IMAGE_TYPE_CUBE:
            src << "Cube 0 0 0 ";
            break;
        case IMAGE_TYPE_CUBE_ARRAY:
            src << "Cube 0 1 0 ";
            break;
        default:
            DE_FATAL("Unexpected image type");
            break;
    }

    if (requiresSampler)
        src << "1 ";
    else
        src << "2 ";

    switch (format)
    {
        case VK_FORMAT_R8_SINT: src << "R8i"; break;
        case VK_FORMAT_R16_SINT: src << "R16i"; break;
        case VK_FORMAT_R32_SINT: src << "R32i"; break;
        case VK_FORMAT_R8_UINT: src << "R8ui"; break;
        case VK_FORMAT_R16_UINT: src << "R16ui"; break;
        case VK_FORMAT_R32_UINT: src << "R32ui"; break;
        case VK_FORMAT_R8_SNORM: src << "R8Snorm"; break;
        case VK_FORMAT_R16_SNORM: src << "R16Snorm"; break;
        case VK_FORMAT_R8_UNORM: src << "R8"; break;
        case VK_FORMAT_R16_UNORM: src << "R16"; break;

        case VK_FORMAT_R8G8_SINT: src << "Rg8i"; break;
        case VK_FORMAT_R16G16_SINT: src << "Rg16i"; break;
        case VK_FORMAT_R32G32_SINT: src << "Rg32i"; break;
        case VK_FORMAT_R8G8_UINT: src << "Rg8ui"; break;
        case VK_FORMAT_R16G16_UINT: src << "Rg16ui"; break;
        case VK_FORMAT_R32G32_UINT: src << "Rg32ui"; break;
        case VK_FORMAT_R8G8_SNORM: src << "Rg8Snorm"; break;
        case VK_FORMAT_R16G16_SNORM: src << "Rg16Snorm"; break;
        case VK_FORMAT_R8G8_UNORM: src << "Rg8"; break;
        case VK_FORMAT_R16G16_UNORM: src << "Rg16"; break;

        case VK_FORMAT_R8G8B8A8_SINT: src << "Rgba8i"; break;
        case VK_FORMAT_R16G16B16A16_SINT: src << "Rgba16i"; break;
        case VK_FORMAT_R32G32B32A32_SINT: src << "Rgba32i"; break;
        case VK_FORMAT_R8G8B8A8_UINT: src << "Rgba8ui"; break;
        case VK_FORMAT_R16G16B16A16_UINT: src << "Rgba16ui"; break;
        case VK_FORMAT_R32G32B32A32_UINT: src << "Rgba32ui"; break;
        case VK_FORMAT_R8G8B8A8_SNORM: src << "Rgba8Snorm"; break;
        case VK_FORMAT_R16G16B16A16_SNORM: src << "Rgba16Snorm"; break;
        case VK_FORMAT_R8G8B8A8_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_R16G16B16A16_UNORM: src << "Rgba16"; break;

        case VK_FORMAT_G8B8G8R8_422_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_B8G8R8G8_422_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM: src << "Rgba8"; break;
        case VK_FORMAT_R10X6_UNORM_PACK16: src << "R16"; break;
        case VK_FORMAT_R10X6G10X6_UNORM_2PACK16: src << "Rg16"; break;
        case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_R12X4_UNORM_PACK16: src << "R16"; break;
        case VK_FORMAT_R12X4G12X4_UNORM_2PACK16: src << "Rg16"; break;
        case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: src << "Rgba16"; break;
        case VK_FORMAT_G16B16G16R16_422_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_B16G16R16G16_422_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM: src << "Rgba16"; break;
        case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM: src << "Rgba16"; break;

        default:
            DE_FATAL("Unexpected texture format");
            break;
    }
    return src.str();
}
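
// The residency image, which the derived test cases use to store per-texel residency
// information, is always declared as a non-sampled (storage) R32ui image of the same
// dimensionality as the sparse image, e.g. for IMAGE_TYPE_2D_ARRAY:
//     "OpTypeImage %type_uint 2D 0 1 0 2 R32ui"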
std::string getOpTypeImageResidency (const ImageType imageType)
{
    std::ostringstream src;

    src << "OpTypeImage %type_uint ";

    switch (imageType)
    {
        case IMAGE_TYPE_1D:
            src << "1D 0 0 0 2 R32ui";
            break;
        case IMAGE_TYPE_1D_ARRAY:
            src << "1D 0 1 0 2 R32ui";
            break;
        case IMAGE_TYPE_2D:
            src << "2D 0 0 0 2 R32ui";
            break;
        case IMAGE_TYPE_2D_ARRAY:
            src << "2D 0 1 0 2 R32ui";
            break;
        case IMAGE_TYPE_3D:
            src << "3D 0 0 0 2 R32ui";
            break;
        case IMAGE_TYPE_CUBE:
            src << "Cube 0 0 0 2 R32ui";
            break;
        case IMAGE_TYPE_CUBE_ARRAY:
            src << "Cube 0 1 0 2 R32ui";
            break;
        default:
            DE_FATAL("Unexpected image type");
            break;
    }

    return src.str();
}
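
// SparseShaderIntrinsicsInstanceBase::iterate() drives the common test flow:
//   1. Create a sparse residency image and bind device memory to only some of its mip
//      levels (plus the mip tail and, if present, metadata), recording the expected
//      per-texel residency in residencyReferenceData.
//   2. Upload a reference pattern into the sparse image through a staging buffer.
//   3. Let the derived class record its shader work via recordCommands(), which is
//      expected to write texel data into imageTexels and residency results into
//      imageResidency.
//   4. Read both output images back and compare them against the reference data.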
tcu::TestStatus SparseShaderIntrinsicsInstanceBase::iterate (void)
{
    const InstanceInterface& instance = m_context.getInstanceInterface();
    const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
    VkImageCreateInfo imageSparseInfo;
    VkImageCreateInfo imageTexelsInfo;
    VkImageCreateInfo imageResidencyInfo;
    std::vector<deUint32> residencyReferenceData;
    std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
    const PlanarFormatDescription formatDescription = getPlanarFormatDescription(m_format);

    imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageSparseInfo.pNext = DE_NULL;
    imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
    imageSparseInfo.imageType = mapImageType(m_imageType);
    imageSparseInfo.format = m_format;
    imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
    imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
    imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | imageSparseUsageFlags();
    imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageSparseInfo.queueFamilyIndexCount = 0u;
    imageSparseInfo.pQueueFamilyIndices = DE_NULL;

    if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
    {
        imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    }

    // Check if device supports sparse operations for image format
    if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
        TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

    {
        // Assign maximum allowed mipmap levels to image
        VkImageFormatProperties imageFormatProperties;
        if (instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
                                                            imageSparseInfo.format,
                                                            imageSparseInfo.imageType,
                                                            imageSparseInfo.tiling,
                                                            imageSparseInfo.usage,
                                                            imageSparseInfo.flags,
                                                            &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
        {
            TCU_THROW(NotSupportedError, "Image format does not support sparse operations");
        }

        imageSparseInfo.mipLevels = getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
    }

    {
        // Create logical device supporting both sparse and compute/graphics queues
        QueueRequirementsVec queueRequirements;
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
        queueRequirements.push_back(QueueRequirements(getQueueFlags(), 1u));

        createDeviceSupportingQueues(queueRequirements);
    }

    // Create queues supporting sparse binding operations and compute/graphics operations
    const DeviceInterface& deviceInterface = getDeviceInterface();
    const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
    const Queue& extractQueue = getQueue(getQueueFlags(), 0);

    // Create sparse image
    const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

    // Create sparse image memory bind semaphore
    const Unique<VkSemaphore> memoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

    std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;

    deUint32 imageSparseSizeInBytes = 0;
    deUint32 imageSizeInPixels = 0;

    // Compute the total sparse image size in bytes and in texels over all planes and mip levels
    for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            imageSparseSizeInBytes += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            imageSizeInPixels += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;
        }
    }

    // Initially mark every texel as non-resident; texels covered by memory binds are updated below
    residencyReferenceData.assign(imageSizeInPixels, MEMORY_BLOCK_NOT_BOUND_VALUE);
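
    // Bind device memory behind the sparse image: regular image memory binds for the mip
    // levels below the mip tail, and opaque binds for the mip tail and (if present) the
    // metadata aspect. The expected residency of each texel is recorded along the way.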
    {
        // Get sparse image general memory requirements
        const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        // Check if required image memory size does not exceed device limits
        if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
            TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

        DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

        const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);

        if (memoryType == NO_MATCH_FOUND)
            return tcu::TestStatus::fail("No matching memory type found");

        // Get sparse image sparse memory requirements
        sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        DE_ASSERT(sparseMemoryRequirements.size() != 0);

        const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
        deUint32 pixelOffset = 0u;
        std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
        std::vector<VkSparseMemoryBind> imageMipTailBinds;

        for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
            const deUint32 aspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

            if (aspectIndex == NO_MATCH_FOUND)
                TCU_THROW(NotSupportedError, "Not supported image aspect");

            VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[aspectIndex];

            DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

            VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;

            // Bind memory for each mipmap level
            for (deUint32 mipmapNdx = 0; mipmapNdx < aspectRequirements.imageMipTailFirstLod; ++mipmapNdx)
            {
                const deUint32 mipLevelSizeInPixels = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx) / formatDescription.planes[planeNdx].elementSizeBytes;

                // Leave some mip levels unbound so that non-resident access is exercised
                if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_NOT_BOUND)
                {
                    pixelOffset += mipLevelSizeInPixels;
                    continue;
                }

                // Mark all texels of this mip level as resident in the reference data
                for (deUint32 pixelNdx = 0u; pixelNdx < mipLevelSizeInPixels; ++pixelNdx)
                {
                    residencyReferenceData[pixelOffset + pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
                }

                pixelOffset += mipLevelSizeInPixels;

                for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
                {
                    const VkExtent3D mipExtent = getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx);
                    const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
                    const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
                    const VkImageSubresource subresource = { aspect, mipmapNdx, layerNdx };

                    const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
                        imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageResidencyMemoryBinds.push_back(imageMemoryBind);
                }
            }

            if (aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
            {
                if (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
                {
                    const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
                        aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);

                    deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                    imageMipTailBinds.push_back(imageMipTailMemoryBind);
                }
                else
                {
                    for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
                    {
                        const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
                            aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

                        deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                        imageMipTailBinds.push_back(imageMipTailMemoryBind);
                    }
                }

                // The mip tail is bound, so mark the remaining texels as resident
                for (deUint32 pixelNdx = pixelOffset; pixelNdx < residencyReferenceData.size(); ++pixelNdx)
                {
                    residencyReferenceData[pixelNdx] = MEMORY_BLOCK_BOUND_VALUE;
                }
            }
        }

        // Metadata
        if (metadataAspectIndex != NO_MATCH_FOUND)
        {
            const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];

            const deUint32 metadataBindCount = (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT ? 1u : imageSparseInfo.arrayLayers);
            for (deUint32 bindNdx = 0u; bindNdx < metadataBindCount; ++bindNdx)
            {
                const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
                    metadataAspectRequirements.imageMipTailSize, memoryType,
                    metadataAspectRequirements.imageMipTailOffset + bindNdx * metadataAspectRequirements.imageMipTailStride,
                    VK_SPARSE_MEMORY_BIND_METADATA_BIT);

                deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                imageMipTailBinds.push_back(imageMipTailMemoryBind);
            }
        }

        VkBindSparseInfo bindSparseInfo =
        {
            VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,   // VkStructureType sType;
            DE_NULL,                              // const void* pNext;
            0u,                                   // deUint32 waitSemaphoreCount;
            DE_NULL,                              // const VkSemaphore* pWaitSemaphores;
            0u,                                   // deUint32 bufferBindCount;
            DE_NULL,                              // const VkSparseBufferMemoryBindInfo* pBufferBinds;
            0u,                                   // deUint32 imageOpaqueBindCount;
            DE_NULL,                              // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
            0u,                                   // deUint32 imageBindCount;
            DE_NULL,                              // const VkSparseImageMemoryBindInfo* pImageBinds;
            1u,                                   // deUint32 signalSemaphoreCount;
            &memoryBindSemaphore.get()            // const VkSemaphore* pSignalSemaphores;
        };

        VkSparseImageMemoryBindInfo imageResidencyBindInfo;
        VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;

        if (imageResidencyMemoryBinds.size() > 0)
        {
            imageResidencyBindInfo.image = *imageSparse;
            imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
            imageResidencyBindInfo.pBinds = imageResidencyMemoryBinds.data();

            bindSparseInfo.imageBindCount = 1u;
            bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
        }

        if (imageMipTailBinds.size() > 0)
        {
            imageMipTailBindInfo.image = *imageSparse;
            imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailBinds.size());
            imageMipTailBindInfo.pBinds = imageMipTailBinds.data();

            bindSparseInfo.imageOpaqueBindCount = 1u;
            bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
        }

        // Submit sparse bind commands for execution
        VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
    }

    // Create image to store texels copied from sparse image
    imageTexelsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageTexelsInfo.pNext = DE_NULL;
    imageTexelsInfo.flags = 0u;
    imageTexelsInfo.imageType = imageSparseInfo.imageType;
    imageTexelsInfo.format = imageSparseInfo.format;
    imageTexelsInfo.extent = imageSparseInfo.extent;
    imageTexelsInfo.arrayLayers = imageSparseInfo.arrayLayers;
    imageTexelsInfo.mipLevels = imageSparseInfo.mipLevels;
    imageTexelsInfo.samples = imageSparseInfo.samples;
    imageTexelsInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageTexelsInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageTexelsInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | imageOutputUsageFlags();
    imageTexelsInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageTexelsInfo.queueFamilyIndexCount = 0u;
    imageTexelsInfo.pQueueFamilyIndices = DE_NULL;

    if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
    {
        imageTexelsInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    }

    const Unique<VkImage> imageTexels (createImage(deviceInterface, getDevice(), &imageTexelsInfo));
    const de::UniquePtr<Allocation> imageTexelsAlloc (bindImage(deviceInterface, getDevice(), getAllocator(), *imageTexels, MemoryRequirement::Any));

    // Create image to store residency info copied from sparse image
    imageResidencyInfo = imageTexelsInfo;
    imageResidencyInfo.format = mapTextureFormat(m_residencyFormat);

    const Unique<VkImage> imageResidency (createImage(deviceInterface, getDevice(), &imageResidencyInfo));
    const de::UniquePtr<Allocation> imageResidencyAlloc (bindImage(deviceInterface, getDevice(), getAllocator(), *imageResidency, MemoryRequirement::Any));

    std::vector<VkBufferImageCopy> bufferImageSparseCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

    // Set up buffer <-> image copy regions: one region per plane and mip level, laid out consecutively in the buffer
    {
        deUint32 bufferOffset = 0u;
        for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] =
                {
                    bufferOffset,                                                                       // VkDeviceSize bufferOffset;
                    0u,                                                                                 // deUint32 bufferRowLength;
                    0u,                                                                                 // deUint32 bufferImageHeight;
                    makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),     // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0),                                                              // VkOffset3D imageOffset;
                    vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)  // VkExtent3D imageExtent;
                };
                bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            }
        }
    }

    // Create command buffer for compute and transfer operations
    const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), extractQueue.queueFamilyIndex));
    const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Start recording commands
    beginCommandBuffer(deviceInterface, *commandBuffer);

    // Create input buffer
    const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
    const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

    // Fill input buffer with reference data
    std::vector<deUint8> referenceData(imageSparseSizeInBytes);

    for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
            const deUint32 bufferOffset = static_cast<deUint32>(bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

            for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeInBytes; ++byteNdx)
            {
                referenceData[bufferOffset + byteNdx] = (deUint8)((mipmapNdx + byteNdx) % 127u);
            }
        }
    }

    deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSparseSizeInBytes);
    flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

    {
        // Prepare input buffer for data transfer operation
        const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
        (
            VK_ACCESS_HOST_WRITE_BIT,
            VK_ACCESS_TRANSFER_READ_BIT,
            *inputBuffer,
            0u,
            imageSparseSizeInBytes
        );

        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
    }

    {
        // Prepare sparse image for data transfer operation
        std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;
        for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            imageSparseTransferDstBarriers.emplace_back(makeImageMemoryBarrier
            (
                0u,
                VK_ACCESS_TRANSFER_WRITE_BIT,
                VK_IMAGE_LAYOUT_UNDEFINED,
                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                *imageSparse,
                makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
                sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
                sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? extractQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED
            ));
        }
        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, static_cast<deUint32>(imageSparseTransferDstBarriers.size()), imageSparseTransferDstBarriers.data());
    }

    // Copy reference data from input buffer to sparse image
    deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

    // Record the test-specific commands that read the sparse image and write the texels and residency output images
    recordCommands(*commandBuffer, imageSparseInfo, *imageSparse, *imageTexels, *imageResidency);

    const VkBufferCreateInfo bufferTexelsCreateInfo = makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    const Unique<VkBuffer> bufferTexels (createBuffer(deviceInterface, getDevice(), &bufferTexelsCreateInfo));
    const de::UniquePtr<Allocation> bufferTexelsAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferTexels, MemoryRequirement::HostVisible));

    // Copy data from texels image to buffer
    deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageTexels, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferTexels, static_cast<deUint32>(bufferImageSparseCopy.size()), bufferImageSparseCopy.data());

    const deUint32 imageResidencySizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);

    const VkBufferCreateInfo bufferResidencyCreateInfo = makeBufferCreateInfo(imageResidencySizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    const Unique<VkBuffer> bufferResidency (createBuffer(deviceInterface, getDevice(), &bufferResidencyCreateInfo));
    const de::UniquePtr<Allocation> bufferResidencyAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *bufferResidency, MemoryRequirement::HostVisible));

    // Copy data from residency image to buffer
    std::vector<VkBufferImageCopy> bufferImageResidencyCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);

    {
        deUint32 bufferOffset = 0u;
        for (deUint32 planeNdx = 0u; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

            for (deUint32 mipmapNdx = 0u; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] =
                {
                    bufferOffset,                                                                       // VkDeviceSize bufferOffset;
                    0u,                                                                                 // deUint32 bufferRowLength;
                    0u,                                                                                 // deUint32 bufferImageHeight;
                    makeImageSubresourceLayers(aspect, mipmapNdx, 0u, imageSparseInfo.arrayLayers),     // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0),                                                              // VkOffset3D imageOffset;
                    vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx, mipmapNdx)  // VkExtent3D imageExtent;
                };
                bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
            }
        }
    }

    deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageResidency, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *bufferResidency, static_cast<deUint32>(bufferImageResidencyCopy.size()), bufferImageResidencyCopy.data());

    {
        VkBufferMemoryBarrier bufferOutputHostReadBarriers[2];

        bufferOutputHostReadBarriers[0] = makeBufferMemoryBarrier
        (
            VK_ACCESS_TRANSFER_WRITE_BIT,
            VK_ACCESS_HOST_READ_BIT,
            *bufferTexels,
            0u,
            imageSparseSizeInBytes
        );

        bufferOutputHostReadBarriers[1] = makeBufferMemoryBarrier
        (
            VK_ACCESS_TRANSFER_WRITE_BIT,
            VK_ACCESS_HOST_READ_BIT,
            *bufferResidency,
            0u,
            imageResidencySizeInBytes
        );

        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 2u, bufferOutputHostReadBarriers, 0u, DE_NULL);
    }

    // End recording commands
    endCommandBuffer(deviceInterface, *commandBuffer);

    const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

    // Submit commands for execution and wait for completion
    submitCommandsAndWait(deviceInterface, getDevice(), extractQueue.queueHandle, *commandBuffer, 1u, &memoryBindSemaphore.get(), stageBits);

    // Wait for sparse queue to become idle
    deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

    // Retrieve data from residency buffer to host memory
    invalidateAlloc(deviceInterface, getDevice(), *bufferResidencyAlloc);

    const deUint32* bufferResidencyData = static_cast<const deUint32*>(bufferResidencyAlloc->getHostPtr());

    // Compare the residency output of each mip level against the reference values recorded at bind time
    deUint32 pixelOffsetNotAligned = 0u;
    for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipmapNdx);
            const deUint32 pixelOffsetAligned = static_cast<deUint32>(bufferImageResidencyCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset) / tcu::getPixelSize(m_residencyFormat);

            if (deMemCmp(&bufferResidencyData[pixelOffsetAligned], &residencyReferenceData[pixelOffsetNotAligned], mipLevelSizeInBytes) != 0)
                return tcu::TestStatus::fail("Failed");

            pixelOffsetNotAligned += mipLevelSizeInBytes / tcu::getPixelSize(m_residencyFormat);
        }
    }

    // Retrieve data from texels buffer to host memory
    invalidateAlloc(deviceInterface, getDevice(), *bufferTexelsAlloc);

    const deUint8* bufferTexelsData = static_cast<const deUint8*>(bufferTexelsAlloc->getHostPtr());

    for (deUint32 planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
    {
        const VkImageAspectFlags aspect = (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;
        const deUint32 aspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, aspect);

        if (aspectIndex == NO_MATCH_FOUND)
            TCU_THROW(NotSupportedError, "Not supported image aspect");

        VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[aspectIndex];

        for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
        {
            const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
            const deUint32 bufferOffset = static_cast<deUint32>(bufferImageSparseCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

            if (mipmapNdx < aspectRequirements.imageMipTailFirstLod)
            {
                if (mipmapNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_BOUND)
                {
                    // Bound mip levels must return the reference data written earlier
                    if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
                        return tcu::TestStatus::fail("Failed");
                }
                else if (getPhysicalDeviceProperties(instance, physicalDevice).sparseProperties.residencyNonResidentStrict)
                {
                    // Unbound mip levels must read as zero when residencyNonResidentStrict is supported
                    std::vector<deUint8> zeroData;
                    zeroData.assign(mipLevelSizeInBytes, 0u);

                    if (deMemCmp(&bufferTexelsData[bufferOffset], zeroData.data(), mipLevelSizeInBytes) != 0)
                        return tcu::TestStatus::fail("Failed");
                }
            }
            else
            {
                // Mip tail levels are bound and must match the reference data
                if (deMemCmp(&bufferTexelsData[bufferOffset], &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
                    return tcu::TestStatus::fail("Failed");
            }
        }
    }

    return tcu::TestStatus::pass("Passed");
}

} // sparse
} // vkt