1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktImageLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
30
31 #include "vkDefs.hpp"
32 #include "vkRef.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkImageUtil.hpp"
40
41 #include "deUniquePtr.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
44
45 #include "tcuImageCompare.hpp"
46 #include "tcuTexture.hpp"
47 #include "tcuTextureUtil.hpp"
48 #include "tcuFloat.hpp"
49
50 #include <string>
51 #include <vector>
52
53 using namespace vk;
54
55 namespace vkt
56 {
57 namespace image
58 {
59 namespace
60 {
61
makeBufferImageCopy(const Texture & texture)62 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
63 {
64 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
65 }
66
//! Get a single-layer (or single 3D slice) view into a multi-layer pixel buffer access.
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
{
	const ImageType type = texture.type();

	if (type == IMAGE_TYPE_1D || type == IMAGE_TYPE_2D || type == IMAGE_TYPE_BUFFER)
	{
		// Not layered: only layer 0 is valid and the whole access is returned.
		DE_ASSERT(layer == 0);
		return access;
	}

	if (type == IMAGE_TYPE_1D_ARRAY)
		return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

	if (type == IMAGE_TYPE_2D_ARRAY || type == IMAGE_TYPE_CUBE || type == IMAGE_TYPE_CUBE_ARRAY || type == IMAGE_TYPE_3D)
	{
		// A 3d texture is treated as if depth was the layers.
		return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);
	}

	DE_FATAL("Internal test error");
	return tcu::ConstPixelBufferAccess();
}
92
93 //! \return true if all layers match in both pixel buffers
comparePixelBuffers(tcu::TestLog & log,const Texture & texture,const VkFormat format,const tcu::ConstPixelBufferAccess reference,const tcu::ConstPixelBufferAccess result)94 bool comparePixelBuffers (tcu::TestLog& log,
95 const Texture& texture,
96 const VkFormat format,
97 const tcu::ConstPixelBufferAccess reference,
98 const tcu::ConstPixelBufferAccess result)
99 {
100 DE_ASSERT(reference.getFormat() == result.getFormat());
101 DE_ASSERT(reference.getSize() == result.getSize());
102
103 const bool intFormat = isIntegerFormat(format);
104 const bool is3d = (texture.type() == IMAGE_TYPE_3D);
105 const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
106 const int numCubeFaces = 6;
107
108 int passedLayers = 0;
109 for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
110 {
111 const std::string comparisonName = "Comparison" + de::toString(layerNdx);
112 const std::string comparisonDesc = "Image Comparison, " +
113 (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
114 is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));
115
116 const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
117 const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);
118
119 bool ok = false;
120 if (intFormat)
121 ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
122 else
123 ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
124
125 if (ok)
126 ++passedLayers;
127 }
128 return passedLayers == numLayersOrSlices;
129 }
130
131 //!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
replaceBadFloatReinterpretValues(const tcu::PixelBufferAccess access)132 void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
133 {
134 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
135
136 for (int z = 0; z < access.getDepth(); ++z)
137 for (int y = 0; y < access.getHeight(); ++y)
138 for (int x = 0; x < access.getWidth(); ++x)
139 {
140 const tcu::Vec4 color(access.getPixel(x, y, z));
141 tcu::Vec4 newColor = color;
142
143 for (int i = 0; i < 4; ++i)
144 {
145 if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
146 {
147 const tcu::Float16 f(color[i]);
148 if (f.isDenorm() || f.isInf() || f.isNaN())
149 newColor[i] = 0.0f;
150 }
151 else
152 {
153 const tcu::Float32 f(color[i]);
154 if (f.isDenorm() || f.isInf() || f.isNaN())
155 newColor[i] = 0.0f;
156 }
157 }
158
159 if (newColor != color)
160 access.setPixel(newColor, x, y, z);
161 }
162 }
163
164 //!< replace invalid pixels in the image (-128)
replaceSnormReinterpretValues(const tcu::PixelBufferAccess access)165 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
166 {
167 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
168
169 for (int z = 0; z < access.getDepth(); ++z)
170 for (int y = 0; y < access.getHeight(); ++y)
171 for (int x = 0; x < access.getWidth(); ++x)
172 {
173 const tcu::IVec4 color(access.getPixelInt(x, y, z));
174 tcu::IVec4 newColor = color;
175
176 for (int i = 0; i < 4; ++i)
177 {
178 const deInt32 oldColor(color[i]);
179 if (oldColor == -128) newColor[i] = -127;
180 }
181
182 if (newColor != color)
183 access.setPixel(newColor, x, y, z);
184 }
185 }
186
//! Generate the reference pattern in the storage format, then sanitize values that
//! would be invalid when the data is reinterpreted in readFormat.
tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
{
	tcu::TextureLevel				level	(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
	const tcu::PixelBufferAccess	access	= level.getAccess();

	const float	colorScale	= computeStoreColorScale(imageFormat, imageSize);
	const float	colorBias	= computeStoreColorBias(imageFormat);
	const bool	storeAsInt	= isIntegerFormat(imageFormat);
	const int	maxX		= imageSize.x() - 1;
	const int	maxY		= imageSize.y() - 1;

	for (int z = 0; z < imageSize.z(); ++z)
	for (int y = 0; y < imageSize.y(); ++y)
	for (int x = 0; x < imageSize.x(); ++x)
	{
		// XOR pattern; matches the expression emitted into the store shader.
		const tcu::IVec4 texel (x^y^z, (maxX - x)^y^z, x^(maxY - y)^z, (maxX - x)^(maxY - y)^z);

		if (storeAsInt)
			access.setPixel(texel, x, y, z);
		else
			access.setPixel(texel.asFloat()*colorScale + colorBias, x, y, z);
	}

	// If the image is to be accessed as a float texture, get rid of invalid values
	if (imageFormat != readFormat)
	{
		if (isFloatFormat(readFormat))
			replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
		if (isSnormFormat(readFormat))
			replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
	}

	return level;
}
222
//! Convenience overload: the image is read back in the same format it was stored in,
//! so no reinterpretation cleanup is needed.
inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
{
	return generateReferenceImage(imageSize, imageFormat, imageFormat);
}
227
flipHorizontally(const tcu::PixelBufferAccess access)228 void flipHorizontally (const tcu::PixelBufferAccess access)
229 {
230 const int xMax = access.getWidth() - 1;
231 const int halfWidth = access.getWidth() / 2;
232
233 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
234 for (int z = 0; z < access.getDepth(); z++)
235 for (int y = 0; y < access.getHeight(); y++)
236 for (int x = 0; x < halfWidth; x++)
237 {
238 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
239 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
240 access.setPixel(temp, x, y, z);
241 }
242 else
243 for (int z = 0; z < access.getDepth(); z++)
244 for (int y = 0; y < access.getHeight(); y++)
245 for (int x = 0; x < halfWidth; x++)
246 {
247 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
248 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
249 access.setPixel(temp, x, y, z);
250 }
251 }
252
formatsAreCompatible(const VkFormat format0,const VkFormat format1)253 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
254 {
255 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
256 }
257
commandImageWriteBarrierBetweenShaderInvocations(Context & context,const VkCommandBuffer cmdBuffer,const VkImage image,const Texture & texture)258 void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
259 {
260 const DeviceInterface& vk = context.getDeviceInterface();
261
262 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
263 const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
264 VK_ACCESS_SHADER_WRITE_BIT, 0u,
265 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
266 image, fullImageSubresourceRange);
267
268 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
269 }
270
commandBufferWriteBarrierBeforeHostRead(Context & context,const VkCommandBuffer cmdBuffer,const VkBuffer buffer,const VkDeviceSize bufferSizeBytes)271 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
272 {
273 const DeviceInterface& vk = context.getDeviceInterface();
274
275 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
276 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
277 buffer, 0ull, bufferSizeBytes);
278
279 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
280 }
281
282 //! Copy all layers of an image to a buffer.
commandCopyImageToBuffer(Context & context,const VkCommandBuffer cmdBuffer,const VkImage image,const VkBuffer buffer,const VkDeviceSize bufferSizeBytes,const Texture & texture)283 void commandCopyImageToBuffer (Context& context,
284 const VkCommandBuffer cmdBuffer,
285 const VkImage image,
286 const VkBuffer buffer,
287 const VkDeviceSize bufferSizeBytes,
288 const Texture& texture)
289 {
290 const DeviceInterface& vk = context.getDeviceInterface();
291
292 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
293 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
294 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
295 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
296 image, fullImageSubresourceRange);
297
298 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
299
300 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
301 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
302 buffer, 0ull, bufferSizeBytes);
303
304 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
305 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
306 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
307 }
308
//! Test case that writes a computed pattern with imageStore() from a compute shader.
class StoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.
	};

	// flags defaults to 0: all layers are bound at once and written in one dispatch.
	StoreTest (tcu::TestContext& testCtx,
			   const std::string& name,
			   const std::string& description,
			   const Texture& texture,
			   const VkFormat format,
			   const TestFlags flags = static_cast<TestFlags>(0));

	//! Generates the "comp" compute shader source.
	void initPrograms (SourceCollections& programCollection) const;

	TestInstance* createInstance (Context& context) const;

private:
	const Texture m_texture;
	const VkFormat m_format;
	const bool m_singleLayerBind;	//!< Derived from FLAG_SINGLE_LAYER_BIND.
};
333
StoreTest::StoreTest (tcu::TestContext& testCtx,
					  const std::string& name,
					  const std::string& description,
					  const Texture& texture,
					  const VkFormat format,
					  const TestFlags flags)
	: TestCase (testCtx, name, description)
	, m_texture (texture)
	, m_format (format)
	, m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
{
	// Binding one layer at a time only makes sense for images with multiple layers.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);
}
348
//! Build the compute shader that stores the XOR test pattern with imageStore().
//! The pattern expression must stay in sync with generateReferenceImage().
void StoreTest::initPrograms (SourceCollections& programCollection) const
{
	// Scale/bias map the integer pattern into the store format's representable range.
	const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
	const float storeColorBias = computeStoreColorBias(m_format);
	DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

	const std::string xMax = de::toString(m_texture.size().x() - 1);
	const std::string yMax = de::toString(m_texture.size().y() - 1);
	// Integer formats need an i/u prefix on the vec4 constructor in GLSL.
	const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
	const std::string colorBaseExpr = signednessPrefix + "vec4("
		+ "gx^gy^gz, "
		+ "(" + xMax + "-gx)^gy^gz, "
		+ "gx^(" + yMax + "-gy)^gz, "
		+ "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";

	// Scale/bias terms are only emitted when they are not the identity.
	const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
		+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

	// In single-layer-bind mode the shader addresses a single layer, and the layer
	// index arrives through a uniform instead of gl_GlobalInvocationID.z.
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
		<< "\n"
		<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
		<< "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";

	if (m_singleLayerBind)
		src << "layout (binding = 1) readonly uniform Constants {\n"
			<< " int u_layerNdx;\n"
			<< "};\n";

	src << "\n"
		<< "void main (void)\n"
		<< "{\n"
		<< " int gx = int(gl_GlobalInvocationID.x);\n"
		<< " int gy = int(gl_GlobalInvocationID.y);\n"
		<< " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
		<< " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
		<< "}\n";

	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
}
396
397 //! Generic test iteration algorithm for image tests
class BaseTestInstance : public TestInstance
{
public:
	BaseTestInstance (Context& context,
					  const Texture& texture,
					  const VkFormat format,
					  const bool singleLayerBind);

	//! Template method: records, submits and waits for the compute work,
	//! then delegates verification to the derived class.
	tcu::TestStatus iterate (void);

	virtual ~BaseTestInstance (void) {}

protected:
	//! Create descriptor objects; the returned handle is non-owning (see callers).
	virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
	//! Compare the device-produced data against the expected reference.
	virtual tcu::TestStatus verifyResult (void) = 0;

	// Hooks recorded into the command buffer around the dispatch loop.
	virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
	virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
	virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;

	//! Bind descriptors for one dispatch; layerNdx > 0 only in single-layer-bind mode.
	virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
												 const VkPipelineLayout pipelineLayout,
												 const int layerNdx) = 0;

	const Texture m_texture;
	const VkFormat m_format;
	const bool m_singleLayerBind;
};
426
//! Only stores the test parameters; Vulkan objects are created later in iterate()
//! and in the derived classes.
BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
	: TestInstance (context)
	, m_texture (texture)
	, m_format (format)
	, m_singleLayerBind (singleLayerBind)
{
}
434
//! Record and submit the compute work, then verify the result.
tcu::TestStatus BaseTestInstance::iterate (void)
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	const VkQueue queue = m_context.getUniversalQueue();
	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

	const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

	// The derived class keeps ownership of the layout (see prepareDescriptors()).
	const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
	const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
	const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

	const Unique<VkCommandPool> cmdPool(createCommandPool(vk, device, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT, queueFamilyIndex));
	const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	beginCommandBuffer(vk, *cmdBuffer);

	vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	commandBeforeCompute(*cmdBuffer);

	// Single-layer mode: one layer-sized dispatch per layer.
	// Otherwise: a single dispatch covering the whole image.
	const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
	const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
	for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
	{
		commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

		// Synchronize successive per-layer dispatches.
		if (layerNdx > 0)
			commandBetweenShaderInvocations(*cmdBuffer);

		vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
	}

	commandAfterCompute(*cmdBuffer);

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, *cmdBuffer);

	return verifyResult();
}
476
477 //! Base store test implementation
class StoreTestInstance : public BaseTestInstance
{
public:
	StoreTestInstance (Context& context,
					   const Texture& texture,
					   const VkFormat format,
					   const bool singleLayerBind);

protected:
	//! Compares m_imageBuffer contents against a generated reference image.
	tcu::TestStatus verifyResult (void);

	// Add empty implementations for functions that might be not needed
	void commandBeforeCompute (const VkCommandBuffer) {}
	void commandBetweenShaderInvocations (const VkCommandBuffer) {}
	void commandAfterCompute (const VkCommandBuffer) {}

	de::MovePtr<Buffer> m_imageBuffer;		//!< Host-visible buffer holding the whole image for readback.
	const VkDeviceSize m_imageSizeBytes;	//!< Size of the full image in bytes.
};
497
StoreTestInstance(Context & context,const Texture & texture,const VkFormat format,const bool singleLayerBind)498 StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
499 : BaseTestInstance (context, texture, format, singleLayerBind)
500 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
501 {
502 const DeviceInterface& vk = m_context.getDeviceInterface();
503 const VkDevice device = m_context.getDevice();
504 Allocator& allocator = m_context.getDefaultAllocator();
505
506 // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
507
508 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
509 vk, device, allocator,
510 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
511 MemoryRequirement::HostVisible));
512 }
513
verifyResult(void)514 tcu::TestStatus StoreTestInstance::verifyResult (void)
515 {
516 const DeviceInterface& vk = m_context.getDeviceInterface();
517 const VkDevice device = m_context.getDevice();
518
519 const tcu::IVec3 imageSize = m_texture.size();
520 const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
521
522 const Allocation& alloc = m_imageBuffer->getAllocation();
523 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
524 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());
525
526 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
527 return tcu::TestStatus::pass("Passed");
528 else
529 return tcu::TestStatus::fail("Image comparison failed");
530 }
531
532 //! Store test for images
class ImageStoreTestInstance : public StoreTestInstance
{
public:
	ImageStoreTestInstance (Context& context,
							const Texture& texture,
							const VkFormat format,
							const bool singleLayerBind);

protected:
	VkDescriptorSetLayout prepareDescriptors (void);
	void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
	void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
	void commandAfterCompute (const VkCommandBuffer cmdBuffer);

	void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
										 const VkPipelineLayout pipelineLayout,
										 const int layerNdx);

	de::MovePtr<Image> m_image;								//!< Storage image written by the shader.
	de::MovePtr<Buffer> m_constantsBuffer;					//!< Uniform buffer passing the layer index to the shader.
	const VkDeviceSize m_constantsBufferChunkSizeBytes;		//!< Per-layer chunk size within m_constantsBuffer.
	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorPool> m_descriptorPool;
	// In single-layer-bind mode one entry per layer is used; otherwise only entry 0.
	std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
	std::vector<SharedVkImageView> m_allImageViews;
};
559
ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
												const Texture& texture,
												const VkFormat format,
												const bool singleLayerBind)
	: StoreTestInstance (context, texture, format, singleLayerBind)
	, m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
	, m_allDescriptorSets (texture.numLayers())
	, m_allImageViews (texture.numLayers())
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	Allocator& allocator = m_context.getDefaultAllocator();

	// Storage image for shader writes; TRANSFER_SRC so it can be copied out for verification.
	m_image = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
		MemoryRequirement::Any));

	// This buffer will be used to pass constants to the shader.
	// One chunk per layer; each chunk begins with the layer index.

	const int numLayers = m_texture.numLayers();
	const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
	m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
		MemoryRequirement::HostVisible));

	{
		const Allocation& alloc = m_constantsBuffer->getAllocation();
		deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

		// Zero the whole buffer first, then write the layer index at the start of each chunk.
		deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
			*valuePtr = static_cast<deUint32>(layerNdx);
		}

		// Flush so the host writes become visible to the device.
		flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
	}
}
602
prepareDescriptors(void)603 VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
604 {
605 const DeviceInterface& vk = m_context.getDeviceInterface();
606 const VkDevice device = m_context.getDevice();
607
608 const int numLayers = m_texture.numLayers();
609 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
610 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
611 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
612 .build(vk, device);
613
614 m_descriptorPool = DescriptorPoolBuilder()
615 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
616 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
617 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
618
619 if (m_singleLayerBind)
620 {
621 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
622 {
623 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
624 m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
625 vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
626 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
627 }
628 }
629 else // bind all layers at once
630 {
631 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
632 m_allImageViews[0] = makeVkSharedPtr(makeImageView(
633 vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
634 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
635 }
636
637 return *m_descriptorSetLayout; // not passing the ownership
638 }
639
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)640 void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
641 {
642 const DeviceInterface& vk = m_context.getDeviceInterface();
643 const VkDevice device = m_context.getDevice();
644
645 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
646 const VkImageView imageView = **m_allImageViews[layerNdx];
647
648 const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
649
650 // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
651 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
652 m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
653
654 DescriptorSetUpdateBuilder()
655 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
656 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
657 .update(vk, device);
658 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
659 }
660
commandBeforeCompute(const VkCommandBuffer cmdBuffer)661 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
662 {
663 const DeviceInterface& vk = m_context.getDeviceInterface();
664
665 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
666 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
667 0u, 0u,
668 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
669 m_image->get(), fullImageSubresourceRange);
670
671 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
672 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
673 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
674 m_constantsBuffer->get(), 0ull, constantsBufferSize);
675
676 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
677 }
678
//! Insert an image barrier between successive per-layer compute dispatches.
void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
}
683
//! Copy the whole image into the host-visible readback buffer after all dispatches.
void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
688
689 //! Store test for buffers
class BufferStoreTestInstance : public StoreTestInstance
{
public:
	BufferStoreTestInstance (Context& context,
							 const Texture& texture,
							 const VkFormat format);

protected:
	VkDescriptorSetLayout prepareDescriptors (void);
	void commandAfterCompute (const VkCommandBuffer cmdBuffer);

	//! Buffers are never layered, so only layerNdx == 0 is valid here.
	void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
										 const VkPipelineLayout pipelineLayout,
										 const int layerNdx);

	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorPool> m_descriptorPool;
	Move<VkDescriptorSet> m_descriptorSet;
	Move<VkBufferView> m_bufferView;	//!< Texel-buffer view over the inherited m_imageBuffer.
};
710
//! Buffer images have no layers, so single-layer binding is always disabled.
BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
												  const Texture& texture,
												  const VkFormat format)
	: StoreTestInstance(context, texture, format, false)
{
}
717
prepareDescriptors(void)718 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
719 {
720 const DeviceInterface& vk = m_context.getDeviceInterface();
721 const VkDevice device = m_context.getDevice();
722
723 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
724 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
725 .build(vk, device);
726
727 m_descriptorPool = DescriptorPoolBuilder()
728 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
729 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
730
731 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
732 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
733
734 return *m_descriptorSetLayout; // not passing the ownership
735 }
736
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)737 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
738 {
739 DE_ASSERT(layerNdx == 0);
740 DE_UNREF(layerNdx);
741
742 const VkDevice device = m_context.getDevice();
743 const DeviceInterface& vk = m_context.getDeviceInterface();
744
745 DescriptorSetUpdateBuilder()
746 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
747 .update(vk, device);
748 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
749 }
750
void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	// Make the shader's texel buffer writes visible to the host before readback.
	commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
}
755
//! Test case whose compute shader copies texels with imageLoad() + imageStore(),
//! mirroring the image along the x axis (see initPrograms()).
class LoadStoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND	= 1 << 0,	//!< Run the shader multiple times, each time binding a different layer.
		FLAG_RESTRICT_IMAGES	= 1 << 1,	//!< If given, images in the shader will be qualified with "restrict".
	};

						LoadStoreTest	(tcu::TestContext&	testCtx,
										 const std::string&	name,
										 const std::string&	description,
										 const Texture&		texture,
										 const VkFormat		format,
										 const VkFormat		imageFormat,
										 const TestFlags	flags = static_cast<TestFlags>(0));

	void				initPrograms	(SourceCollections&	programCollection) const;
	TestInstance*		createInstance	(Context&			context) const;

private:
	const Texture		m_texture;
	const VkFormat		m_format;			//!< Format as accessed in the shader
	const VkFormat		m_imageFormat;		//!< Storage format
	const bool			m_singleLayerBind;
	const bool			m_restrictImages;
};
783
LoadStoreTest::LoadStoreTest (tcu::TestContext&		testCtx,
							  const std::string&	name,
							  const std::string&	description,
							  const Texture&		texture,
							  const VkFormat		format,
							  const VkFormat		imageFormat,
							  const TestFlags		flags)
	: TestCase			(testCtx, name, description)
	, m_texture			(texture)
	, m_format			(format)
	, m_imageFormat		(imageFormat)
	, m_singleLayerBind	((flags & FLAG_SINGLE_LAYER_BIND) != 0)
	, m_restrictImages	((flags & FLAG_RESTRICT_IMAGES) != 0)
{
	// Binding one layer at a time only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);

	// Shader format and storage format must be reinterpret-compatible.
	DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
}
803
initPrograms(SourceCollections & programCollection) const804 void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
805 {
806 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
807 const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
808 const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
809 const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
810 const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
811 const std::string xMax = de::toString(m_texture.size().x() - 1);
812
813 std::ostringstream src;
814 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
815 << "\n"
816 << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
817 << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n"
818 << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n"
819 << "\n"
820 << "void main (void)\n"
821 << "{\n"
822 << (dimension == 1 ?
823 " int pos = int(gl_GlobalInvocationID.x);\n"
824 " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
825 : dimension == 2 ?
826 " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
827 " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
828 : dimension == 3 ?
829 " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
830 " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"
831 : "")
832 << "}\n";
833
834 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
835 }
836
//! Load/store test base implementation
class LoadStoreTestInstance : public BaseTestInstance
{
public:
							LoadStoreTestInstance			(Context&			context,
															 const Texture&		texture,
															 const VkFormat		format,
															 const VkFormat		imageFormat,
															 const bool			singleLayerBind);

protected:
	virtual Buffer*			getResultBuffer					(void) const = 0;	//!< Get the buffer that contains the result image

	// Flips the reference image horizontally (matching the shader) and compares
	// it against the contents of getResultBuffer().
	tcu::TestStatus			verifyResult					(void);

	// Add empty implementations for functions that might be not needed
	void					commandBeforeCompute			(const VkCommandBuffer) {}
	void					commandBetweenShaderInvocations	(const VkCommandBuffer) {}
	void					commandAfterCompute				(const VkCommandBuffer) {}

	de::MovePtr<Buffer>		m_imageBuffer;		//!< Source data and helper buffer
	const VkDeviceSize		m_imageSizeBytes;
	const VkFormat			m_imageFormat;		//!< Image format (for storage, may be different than texture format)
	tcu::TextureLevel		m_referenceImage;	//!< Used as input data and later to verify result image
};
862
LoadStoreTestInstance::LoadStoreTestInstance (Context&			context,
											  const Texture&	texture,
											  const VkFormat	format,
											  const VkFormat	imageFormat,
											  const bool		singleLayerBind)
	: BaseTestInstance	(context, texture, format, singleLayerBind)
	, m_imageSizeBytes	(getImageSizeBytes(texture.size(), format))
	, m_imageFormat		(imageFormat)
	, m_referenceImage	(generateReferenceImage(texture.size(), imageFormat, format))
{
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// A helper buffer with enough space to hold the whole image.

	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
		MemoryRequirement::HostVisible));

	// Copy reference data to buffer for subsequent upload to image.

	const Allocation& alloc = m_imageBuffer->getAllocation();
	deMemcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
	// Flush so the host write is visible to the device (memory may be non-coherent).
	flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
}
890
verifyResult(void)891 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
892 {
893 const DeviceInterface& vk = m_context.getDeviceInterface();
894 const VkDevice device = m_context.getDevice();
895
896 // Apply the same transformation as done in the shader
897 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
898 flipHorizontally(reference);
899
900 const Allocation& alloc = getResultBuffer()->getAllocation();
901 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
902 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
903
904 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
905 return tcu::TestStatus::pass("Passed");
906 else
907 return tcu::TestStatus::fail("Image comparison failed");
908 }
909
//! Load/store test for images
class ImageLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
										ImageLoadStoreTestInstance		(Context&				context,
																		 const Texture&			texture,
																		 const VkFormat			format,
																		 const VkFormat			imageFormat,
																		 const bool				singleLayerBind);

protected:
	// Creates per-layer (or whole-image) descriptor sets and src/dst image views.
	VkDescriptorSetLayout				prepareDescriptors				(void);
	// Uploads reference data into the source image and transitions layouts.
	void								commandBeforeCompute			(const VkCommandBuffer	cmdBuffer);
	void								commandBetweenShaderInvocations	(const VkCommandBuffer	cmdBuffer);
	// Copies the destination image back into the host-visible buffer.
	void								commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	void								commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																		 const VkPipelineLayout	pipelineLayout,
																		 const int				layerNdx);

	Buffer*								getResultBuffer					(void) const { return m_imageBuffer.get(); }

	de::MovePtr<Image>					m_imageSrc;
	de::MovePtr<Image>					m_imageDst;
	Move<VkDescriptorSetLayout>			m_descriptorSetLayout;
	Move<VkDescriptorPool>				m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;	// one entry per layer in single-layer-bind mode; only [0] otherwise
	std::vector<SharedVkImageView>		m_allSrcImageViews;
	std::vector<SharedVkImageView>		m_allDstImageViews;
};
940
ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context&		context,
														const Texture&	texture,
														const VkFormat	format,
														const VkFormat	imageFormat,
														const bool		singleLayerBind)
	: LoadStoreTestInstance	(context, texture, format, imageFormat, singleLayerBind)
	, m_allDescriptorSets	(texture.numLayers())
	, m_allSrcImageViews	(texture.numLayers())
	, m_allDstImageViews	(texture.numLayers())
{
	const DeviceInterface&		vk			= m_context.getDeviceInterface();
	const VkDevice				device		= m_context.getDevice();
	Allocator&					allocator	= m_context.getDefaultAllocator();
	// Mutable format is required when the views reinterpret the image in a different format.
	const VkImageCreateFlags	imageFlags	= (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);

	// Source image: written by a transfer (upload), read by the shader.
	m_imageSrc = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
		MemoryRequirement::Any));

	// Destination image: written by the shader, read by a transfer (readback).
	m_imageDst = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
		MemoryRequirement::Any));
}
966
prepareDescriptors(void)967 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
968 {
969 const VkDevice device = m_context.getDevice();
970 const DeviceInterface& vk = m_context.getDeviceInterface();
971
972 const int numLayers = m_texture.numLayers();
973 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
974 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
975 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
976 .build(vk, device);
977
978 m_descriptorPool = DescriptorPoolBuilder()
979 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
980 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
981 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
982
983 if (m_singleLayerBind)
984 {
985 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
986 {
987 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
988 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
989
990 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
991 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
992 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
993 }
994 }
995 else // bind all layers at once
996 {
997 const VkImageViewType viewType = mapImageViewType(m_texture.type());
998 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
999
1000 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1001 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1002 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1003 }
1004
1005 return *m_descriptorSetLayout; // not passing the ownership
1006 }
1007
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)1008 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1009 {
1010 const VkDevice device = m_context.getDevice();
1011 const DeviceInterface& vk = m_context.getDeviceInterface();
1012
1013 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1014 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1015 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1016
1017 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1018 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1019
1020 DescriptorSetUpdateBuilder()
1021 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1022 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1023 .update(vk, device);
1024 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1025 }
1026
//! Record commands that upload the reference data into the source image and put
//! both images into the layouts the compute shader expects.
void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
{
	const DeviceInterface& vk = m_context.getDeviceInterface();

	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
	{
		// Transition the source image to TRANSFER_DST for the upcoming upload and the
		// destination image straight to GENERAL for shader stores.
		const VkImageMemoryBarrier preCopyImageBarriers[] =
		{
			makeImageMemoryBarrier(
				0u, VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				m_imageSrc->get(), fullImageSubresourceRange),
			makeImageMemoryBarrier(
				0u, VK_ACCESS_SHADER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
				m_imageDst->get(), fullImageSubresourceRange)
		};

		// Also make the host-written reference data available to the transfer read.
		const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
			VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
			m_imageBuffer->get(), 0ull, m_imageSizeBytes);

		vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
			(VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
	}
	{
		// After the copy, move the source image to GENERAL and make the transfer
		// write visible to the compute shader's imageLoad().
		const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
			VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
			m_imageSrc->get(), fullImageSubresourceRange);

		const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);

		vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
		vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
	}
}
1064
void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	// Make the destination image writes of one dispatch available to the next
	// (used when layers are dispatched one at a time).
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
}
1069
void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	// Copy the shader-written destination image into the host-visible buffer for verification.
	commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
1074
//! Load/store test for buffers
class BufferLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
								BufferLoadStoreTestInstance		(Context&				context,
																 const Texture&			texture,
																 const VkFormat			format,
																 const VkFormat			imageFormat);

protected:
	// Creates two storage texel buffer bindings: source (m_imageBuffer) and destination (m_imageBufferDst).
	VkDescriptorSetLayout		prepareDescriptors				(void);
	// Inserts a barrier making the shader's destination-buffer writes visible to host reads.
	void						commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	// Buffer variants are never layered; layerNdx must be 0.
	void						commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																 const VkPipelineLayout	pipelineLayout,
																 const int				layerNdx);

	// The result lives in the separate destination buffer, not the source helper buffer.
	Buffer*						getResultBuffer					(void) const { return m_imageBufferDst.get(); }

	de::MovePtr<Buffer>			m_imageBufferDst;
	Move<VkDescriptorSetLayout>	m_descriptorSetLayout;
	Move<VkDescriptorPool>		m_descriptorPool;
	Move<VkDescriptorSet>		m_descriptorSet;
	Move<VkBufferView>			m_bufferViewSrc;
	Move<VkBufferView>			m_bufferViewDst;
};
1101
BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context&			context,
														  const Texture&	texture,
														  const VkFormat	format,
														  const VkFormat	imageFormat)
	: LoadStoreTestInstance(context, texture, format, imageFormat, false)	// singleLayerBind = false: buffers have no layers
{
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// Create a destination buffer.

	m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
		MemoryRequirement::HostVisible));
}
1119
prepareDescriptors(void)1120 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1121 {
1122 const DeviceInterface& vk = m_context.getDeviceInterface();
1123 const VkDevice device = m_context.getDevice();
1124
1125 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1126 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1127 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1128 .build(vk, device);
1129
1130 m_descriptorPool = DescriptorPoolBuilder()
1131 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1132 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1133 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1134
1135 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1136 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
1137 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);
1138
1139 return *m_descriptorSetLayout; // not passing the ownership
1140 }
1141
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)1142 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1143 {
1144 DE_ASSERT(layerNdx == 0);
1145 DE_UNREF(layerNdx);
1146
1147 const VkDevice device = m_context.getDevice();
1148 const DeviceInterface& vk = m_context.getDeviceInterface();
1149
1150 DescriptorSetUpdateBuilder()
1151 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
1152 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1153 .update(vk, device);
1154 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1155 }
1156
void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	// Make the shader's writes to the destination buffer visible to the host before readback.
	commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
}
1161
createInstance(Context & context) const1162 TestInstance* StoreTest::createInstance (Context& context) const
1163 {
1164 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1165 return new BufferStoreTestInstance(context, m_texture, m_format);
1166 else
1167 return new ImageStoreTestInstance(context, m_texture, m_format, m_singleLayerBind);
1168 }
1169
createInstance(Context & context) const1170 TestInstance* LoadStoreTest::createInstance (Context& context) const
1171 {
1172 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1173 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat);
1174 else
1175 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_singleLayerBind);
1176 }
1177
//! One texture configuration per tested image type (size, then layer count).
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1),
	Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8),
	Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1),
	Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8),
	Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1),
	Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6),		// 6 faces as layers
	Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6),	// 2 cubes
	Texture(IMAGE_TYPE_BUFFER, tcu::IVec3(64, 1, 1), 1),
};
1189
getTestTexture(const ImageType imageType)1190 const Texture& getTestTexture (const ImageType imageType)
1191 {
1192 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1193 if (s_textures[textureNdx].type() == imageType)
1194 return s_textures[textureNdx];
1195
1196 DE_FATAL("Internal error");
1197 return s_textures[0];
1198 }
1199
//! Shader/storage formats exercised by the store and load/store cases.
static const VkFormat s_formats[] =
{
	// Floating-point formats
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	// Unsigned integer formats
	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R32_UINT,

	// Signed integer formats
	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_R32_SINT,

	// Normalized formats
	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_R8G8B8A8_SNORM,
};
1220
1221 } // anonymous ns
1222
createImageStoreTests(tcu::TestContext & testCtx)1223 tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
1224 {
1225 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
1226
1227 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1228 {
1229 const Texture& texture = s_textures[textureNdx];
1230 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1231 const bool isLayered = (texture.numLayers() > 1);
1232
1233 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1234 {
1235 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
1236
1237 if (isLayered)
1238 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
1239 texture, s_formats[formatNdx], StoreTest::FLAG_SINGLE_LAYER_BIND));
1240 }
1241 testGroup->addChild(groupByImageViewType.release());
1242 }
1243
1244 return testGroup.release();
1245 }
1246
//! Build the "load_store" test group: one sub-group per image type, one case
//! per format, plus a single-layer-bind variant for layered textures.
tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
	{
		const Texture&					texture					= s_textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup>	groupByImageViewType	(new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		const bool						isLayered				= (texture.numLayers() > 1);

		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
			// Shader format and storage format are identical here (no reinterpretation).
			groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "",
															 texture, s_formats[formatNdx], s_formats[formatNdx]));

			// Layered textures additionally get a variant binding one layer at a time.
			if (isLayered)
				groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
																 texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_SINGLE_LAYER_BIND));
		}
		testGroup->addChild(groupByImageViewType.release());
	}

	return testGroup.release();
}
1271
//! Build the "format_reinterpret" test group: load/store cases where the shader
//! accesses the image in a different (but compatible) format than it is stored in.
tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
	{
		const Texture&					texture					= s_textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup>	groupByImageViewType	(new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));

		// One case per ordered pair of distinct formats that formatsAreCompatible() accepts.
		for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
			const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
			if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
				groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
		}
		testGroup->addChild(groupByImageViewType.release());
	}

	return testGroup.release();
}
1293
createImageQualifierRestrictCase(tcu::TestContext & testCtx,const ImageType imageType,const std::string & name)1294 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
1295 {
1296 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
1297 const Texture& texture = getTestTexture(imageType);
1298 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES));
1299 }
1300
1301 } // image
1302 } // vkt
1303