/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Image load/store Tests
 *//*--------------------------------------------------------------------*/

#include "vktImageLoadStoreTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktImageTestsUtil.hpp"
#include "vktImageLoadStoreUtil.hpp"
#include "vktImageTexture.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"

#include "deMath.h"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"

#include "tcuImageCompare.hpp"
#include "tcuTexture.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuFloat.hpp"
#include "tcuStringTemplate.hpp"

#include <string>
#include <vector>
#include <map>

using namespace vk;

namespace vkt
{
namespace image
{
namespace
{

// Check for three-component (non-packed) format, i.e. pixel size is a multiple of 3.
bool formatHasThreeComponents (VkFormat format)
{
    const tcu::TextureFormat texFormat = mapVkFormat(format);
    return (getPixelSize(texFormat) % 3) == 0;
}

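//! Map a format to the single-component (R) format with the same channel type.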
VkFormat getSingleComponentFormat (VkFormat format)
{
    tcu::TextureFormat texFormat = mapVkFormat(format);
    texFormat = tcu::TextureFormat(tcu::TextureFormat::R, texFormat.type);
    return mapTextureFormat(texFormat);
}

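//! Make a copy region covering the base mip level of all array layers.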
inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
{
    return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
}

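//! Access a single layer (or slice, for 3D images) of the given pixel buffer.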
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
{
    switch (texture.type())
    {
        case IMAGE_TYPE_1D:
        case IMAGE_TYPE_2D:
        case IMAGE_TYPE_BUFFER:
            // Not layered
            DE_ASSERT(layer == 0);
            return access;

        case IMAGE_TYPE_1D_ARRAY:
            return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

        case IMAGE_TYPE_2D_ARRAY:
        case IMAGE_TYPE_CUBE:
        case IMAGE_TYPE_CUBE_ARRAY:
        case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
            return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

        default:
            DE_FATAL("Internal test error");
            return tcu::ConstPixelBufferAccess();
    }
}

//! \return the size in bytes of a given level of a mipmap image, including array layers.
vk::VkDeviceSize getMipmapLevelImageSizeBytes (const Texture& texture, const vk::VkFormat format, const deUint32 mipmapLevel)
{
    tcu::IVec3 size = texture.size(mipmapLevel);
    return tcu::getPixelSize(vk::mapVkFormat(format)) * size.x() * size.y() * size.z();
}

//! \return the size in bytes of the whole mipmap image, including all mipmap levels and array layers
vk::VkDeviceSize getMipmapImageTotalSizeBytes (const Texture& texture, const vk::VkFormat format)
{
    vk::VkDeviceSize size = 0u;
    deInt32 levelCount = 0;

    do
    {
        size += getMipmapLevelImageSizeBytes(texture, format, levelCount);
        levelCount++;
    } while (levelCount < texture.numMipmapLevels());
    return size;
}

//! \return true if all layers match in both pixel buffers
bool comparePixelBuffers (tcu::TestLog& log,
                          const Texture& texture,
                          const VkFormat format,
                          const tcu::ConstPixelBufferAccess reference,
                          const tcu::ConstPixelBufferAccess result,
                          const deUint32 mipmapLevel = 0u)
{
    DE_ASSERT(reference.getFormat() == result.getFormat());
    DE_ASSERT(reference.getSize() == result.getSize());

    const bool is3d = (texture.type() == IMAGE_TYPE_3D);
    const int numLayersOrSlices = (is3d ? texture.size(mipmapLevel).z() : texture.numLayers());
    const int numCubeFaces = 6;

    int passedLayers = 0;
    for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
    {
        const std::string comparisonName = "Comparison" + de::toString(layerNdx);
        const std::string comparisonDesc = "Image Comparison, " +
            (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
             is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx) + ", level " + de::toString(mipmapLevel));

        const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
        const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);

        bool ok = false;

        switch (tcu::getTextureChannelClass(mapVkFormat(format).type))
        {
            case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            {
                ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
                break;
            }

            case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
            {
                // Allow error of minimum representable difference
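                // (e.g. 1/255 for an 8-bit UNORM channel)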
                const tcu::Vec4 threshold (1.0f / ((tcu::UVec4(1u) << tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()) - 1u).cast<float>());

                ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
                break;
            }

            case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
            {
                // Allow error of minimum representable difference
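                // (e.g. 1/127 for an 8-bit SNORM channel)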
                const tcu::Vec4 threshold (1.0f / ((tcu::UVec4(1u) << (tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>() - 1u)) - 1u).cast<float>());

                ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
                break;
            }

            case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
            {
                // Convert target format ulps to float ulps and allow 1 ulp difference
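                // (e.g. for 16-bit floats with a 10-bit mantissa this allows 2^(23-10) float32 ULPs)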
                const tcu::UVec4 threshold (tcu::UVec4(1u) << (tcu::UVec4(23) - tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()));

                ok = tcu::floatUlpThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
                break;
            }

            default:
                DE_FATAL("Unknown channel class");
        }

        if (ok)
            ++passedLayers;
    }

    return passedLayers == numLayersOrSlices;
}

//! Zero out invalid pixels in the image (denormalized, infinite, NaN values)
void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
{
    DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);

    for (int z = 0; z < access.getDepth(); ++z)
    for (int y = 0; y < access.getHeight(); ++y)
    for (int x = 0; x < access.getWidth(); ++x)
    {
        const tcu::Vec4 color(access.getPixel(x, y, z));
        tcu::Vec4 newColor = color;

        for (int i = 0; i < 4; ++i)
        {
            if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
            {
                const tcu::Float16 f(color[i]);
                if (f.isDenorm() || f.isInf() || f.isNaN())
                    newColor[i] = 0.0f;
            }
            else
            {
                const tcu::Float32 f(color[i]);
                if (f.isDenorm() || f.isInf() || f.isNaN())
                    newColor[i] = 0.0f;
            }
        }

        if (newColor != color)
            access.setPixel(newColor, x, y, z);
    }
}

//! Replace invalid SNORM pixel values: -128 and -127 both map to -1.0, so replace -128 with -127 to keep the comparison well-defined
void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
{
    DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);

    for (int z = 0; z < access.getDepth(); ++z)
    for (int y = 0; y < access.getHeight(); ++y)
    for (int x = 0; x < access.getWidth(); ++x)
    {
        const tcu::IVec4 color(access.getPixelInt(x, y, z));
        tcu::IVec4 newColor = color;

        for (int i = 0; i < 4; ++i)
        {
            const deInt32 oldColor(color[i]);
            if (oldColor == -128) newColor[i] = -127;
        }

        if (newColor != color)
            access.setPixel(newColor, x, y, z);
    }
}

tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
{
    // Generate a reference image data using the storage format

    tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
    const tcu::PixelBufferAccess access = reference.getAccess();

    const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
    const float storeColorBias = computeStoreColorBias(imageFormat);

    const bool intFormat = isIntegerFormat(imageFormat);
    const bool storeNegativeValues = isSignedFormat(imageFormat) && (storeColorBias == 0);
    const int xMax = imageSize.x() - 1;
    const int yMax = imageSize.y() - 1;

    for (int z = 0; z < imageSize.z(); ++z)
    for (int y = 0; y < imageSize.y(); ++y)
    for (int x = 0; x < imageSize.x(); ++x)
    {
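        // Fill with an XOR pattern of the texel coordinates (the same pattern the store shaders compute)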
        tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);

        if (storeNegativeValues)
            color -= tcu::IVec4(deRoundFloatToInt32((float)de::max(xMax, yMax) / 2.0f));

        if (intFormat)
            access.setPixel(color, x, y, z);
        else
            access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);
    }

    // If the image is to be accessed as a float texture, get rid of invalid values

    if (isFloatFormat(readFormat) && imageFormat != readFormat)
        replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
    if (isSnormFormat(readFormat) && imageFormat != readFormat)
        replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));

    return reference;
}

inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
{
    return generateReferenceImage(imageSize, imageFormat, imageFormat);
}

void flipHorizontally (const tcu::PixelBufferAccess access)
{
    const int xMax = access.getWidth() - 1;
    const int halfWidth = access.getWidth() / 2;

    if (isIntegerFormat(mapTextureFormat(access.getFormat())))
        for (int z = 0; z < access.getDepth(); z++)
        for (int y = 0; y < access.getHeight(); y++)
        for (int x = 0; x < halfWidth; x++)
        {
            const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
            access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
            access.setPixel(temp, x, y, z);
        }
    else
        for (int z = 0; z < access.getDepth(); z++)
        for (int y = 0; y < access.getHeight(); y++)
        for (int x = 0; x < halfWidth; x++)
        {
            const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
            access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
            access.setPixel(temp, x, y, z);
        }
}

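//! Two formats are treated as compatible when they are identical or share the same pixel (texel block) size.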
inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
{
    return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
}

void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
{
    const DeviceInterface& vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
    const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
        VK_ACCESS_SHADER_WRITE_BIT, 0u,
        VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
        image, fullImageSubresourceRange);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
}

void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
{
    const DeviceInterface& vk = context.getDeviceInterface();

    const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
        buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
}

//! Copy all layers of an image to a buffer.
void commandCopyImageToBuffer (Context& context,
                               const VkCommandBuffer cmdBuffer,
                               const VkImage image,
                               const VkBuffer buffer,
                               const VkDeviceSize bufferSizeBytes,
                               const Texture& texture)
{
    const DeviceInterface& vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
    const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
        VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
        VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        image, fullImageSubresourceRange);

    const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);

    const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
        buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
    vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, &copyRegion);
    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &copyBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
}

//! Copy all layers of a mipmap image to a buffer.
void commandCopyMipmapImageToBuffer (Context& context,
                                     const VkCommandBuffer cmdBuffer,
                                     const VkImage image,
                                     const VkFormat imageFormat,
                                     const VkBuffer buffer,
                                     const VkDeviceSize bufferSizeBytes,
                                     const Texture& texture)
{
    const DeviceInterface& vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
    const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
        VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
        VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        image, fullImageSubresourceRange);

    std::vector<VkBufferImageCopy> copyRegions;
    VkDeviceSize bufferOffset = 0u;
    for (deInt32 levelNdx = 0; levelNdx < texture.numMipmapLevels(); levelNdx++)
    {
        const VkBufferImageCopy copyParams =
        {
            bufferOffset,                                                                              // VkDeviceSize             bufferOffset;
            0u,                                                                                        // deUint32                 bufferRowLength;
            0u,                                                                                        // deUint32                 bufferImageHeight;
            makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u, texture.numLayers()),  // VkImageSubresourceLayers imageSubresource;
            makeOffset3D(0, 0, 0),                                                                     // VkOffset3D               imageOffset;
            makeExtent3D(texture.layerSize(levelNdx)),                                                 // VkExtent3D               imageExtent;
        };
        copyRegions.push_back(copyParams);
        bufferOffset += getMipmapLevelImageSizeBytes(texture, imageFormat, levelNdx);
    }

    const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
        buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
    vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, (deUint32)copyRegions.size(), copyRegions.data());
    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &copyBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
}

class StoreTest : public TestCase
{
public:
    enum TestFlags
    {
        FLAG_SINGLE_LAYER_BIND              = 0x1, //!< Run the shader multiple times, each time binding a different layer.
        FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 0x2, //!< Declare the format of the images in the shader code
        FLAG_MINALIGN                       = 0x4, //!< Use a buffer view offset that matches the advertised minimum alignment
    };

    StoreTest (tcu::TestContext& testCtx,
               const std::string& name,
               const std::string& description,
               const Texture& texture,
               const VkFormat format,
               const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);

    virtual void checkSupport (Context& context) const;
    void initPrograms (SourceCollections& programCollection) const;
    TestInstance* createInstance (Context& context) const;

private:
    const Texture  m_texture;
    const VkFormat m_format;
    const bool     m_declareImageFormatInShader;
    const bool     m_singleLayerBind;
    const bool     m_minalign;
};

StoreTest::StoreTest (tcu::TestContext& testCtx,
                      const std::string& name,
                      const std::string& description,
                      const Texture& texture,
                      const VkFormat format,
                      const deUint32 flags)
    : TestCase                     (testCtx, name, description)
    , m_texture                    (texture)
    , m_format                     (format)
    , m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
    , m_singleLayerBind            ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
    , m_minalign                   ((flags & FLAG_MINALIGN) != 0)
{
    if (m_singleLayerBind)
        DE_ASSERT(m_texture.numLayers() > 1);
}

void StoreTest::checkSupport (Context& context) const
{
    const VkFormatProperties3KHR formatProperties (context.getFormatProperties(m_format));

    if (!m_declareImageFormatInShader && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage images");

    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
}

void StoreTest::initPrograms (SourceCollections& programCollection) const
{
    const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
    const float storeColorBias = computeStoreColorBias(m_format);
    DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

    const deUint32 xMax = m_texture.size().x() - 1;
    const deUint32 yMax = m_texture.size().y() - 1;
    const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
    const bool storeNegativeValues = isSignedFormat(m_format) && (storeColorBias == 0);
    bool useClamp = false;
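    // The shader writes the same XOR pattern that generateReferenceImage() computes on the host.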
    std::string colorBaseExpr = signednessPrefix + "vec4("
        + "gx^gy^gz, "
        + "(" + de::toString(xMax) + "-gx)^gy^gz, "
        + "gx^(" + de::toString(yMax) + "-gy)^gz, "
        + "(" + de::toString(xMax) + "-gx)^(" + de::toString(yMax) + "-gy)^gz)";

    // Large integer values may not be represented with formats with low bit depths
    if (isIntegerFormat(m_format))
    {
        const deInt64 minStoreValue = storeNegativeValues ? 0 - deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : 0;
        const deInt64 maxStoreValue = storeNegativeValues ? deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : de::max(xMax, yMax);

        useClamp = !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(minStoreValue), mapVkFormat(m_format)) ||
                   !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(maxStoreValue), mapVkFormat(m_format));
    }

    // Clamp if integer value cannot be represented with the current format
    if (useClamp)
    {
        const tcu::IVec4 bitDepths = tcu::getTextureFormatBitDepth(mapVkFormat(m_format));
        tcu::IVec4 minRepresentableValue;
        tcu::IVec4 maxRepresentableValue;

        switch (tcu::getTextureChannelClass(mapVkFormat(m_format).type))
        {
            case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            {
                minRepresentableValue = tcu::IVec4(0);
                maxRepresentableValue = (tcu::IVec4(1) << bitDepths) - tcu::IVec4(1);
                break;
            }

            case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            {
                minRepresentableValue = -(tcu::IVec4(1) << (bitDepths - tcu::IVec4(1)));
                maxRepresentableValue = (tcu::IVec4(1) << (bitDepths - tcu::IVec4(1))) - tcu::IVec4(1);
                break;
            }

            default:
                DE_ASSERT(isIntegerFormat(m_format));
        }

        colorBaseExpr = "clamp(" + colorBaseExpr + ", "
            + signednessPrefix + "vec4" + de::toString(minRepresentableValue) + ", "
            + signednessPrefix + "vec4" + de::toString(maxRepresentableValue) + ")";
    }

    std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
        + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

    if (storeNegativeValues)
        colorExpr += "-" + de::toString(deRoundFloatToInt32((float)deMax32(xMax, yMax) / 2.0f));

    const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
    const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

    const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
    const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
        << "\n"
        << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
    if (m_declareImageFormatInShader)
    {
        const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
        src << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";
    }
    else
        src << "layout (binding = 0) writeonly uniform " << imageTypeStr << " u_image;\n";

    if (m_singleLayerBind)
        src << "layout (binding = 1) readonly uniform Constants {\n"
            << "    int u_layerNdx;\n"
            << "};\n";

    src << "\n"
        << "void main (void)\n"
        << "{\n"
        << "    int gx = int(gl_GlobalInvocationID.x);\n"
        << "    int gy = int(gl_GlobalInvocationID.y);\n"
        << "    int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
        << "    imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
        << "}\n";

    programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
}

//! Generic test iteration algorithm for image tests
class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance (Context& context,
                      const Texture& texture,
                      const VkFormat format,
                      const bool declareImageFormatInShader,
                      const bool singleLayerBind,
                      const bool minalign,
                      const bool bufferLoadUniform);

    tcu::TestStatus iterate (void);

    virtual ~BaseTestInstance (void) {}

protected:
    virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
    virtual tcu::TestStatus verifyResult (void) = 0;

    virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
    virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
    virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;

    virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
                                                 const VkPipelineLayout pipelineLayout,
                                                 const int layerNdx) = 0;
    virtual deUint32 getViewOffset (Context& context,
                                    const VkFormat format,
                                    bool uniform);

    const Texture  m_texture;
    const VkFormat m_format;
    const bool     m_declareImageFormatInShader;
    const bool     m_singleLayerBind;
    const bool     m_minalign;
    const bool     m_bufferLoadUniform;
    const deUint32 m_srcViewOffset;
    const deUint32 m_dstViewOffset;
};

BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign, const bool bufferLoadUniform)
    : TestInstance                 (context)
    , m_texture                    (texture)
    , m_format                     (format)
    , m_declareImageFormatInShader (declareImageFormatInShader)
    , m_singleLayerBind            (singleLayerBind)
    , m_minalign                   (minalign)
    , m_bufferLoadUniform          (bufferLoadUniform)
    , m_srcViewOffset              (getViewOffset(context, format, m_bufferLoadUniform))
    , m_dstViewOffset              (getViewOffset(context, formatHasThreeComponents(format) ? getSingleComponentFormat(format) : format, false))
{
}

tcu::TestStatus BaseTestInstance::iterate (void)
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

    const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

    const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
    const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

    const Unique<VkCommandPool> cmdPool(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex));
    const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer);

    vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
    commandBeforeCompute(*cmdBuffer);

    const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
    const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
    for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
    {
        commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

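        // Insert a barrier between dispatches that write to the same image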
        if (layerNdx > 0)
            commandBetweenShaderInvocations(*cmdBuffer);

        vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
    }

    commandAfterCompute(*cmdBuffer);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, *cmdBuffer);

    return verifyResult();
}

//! Base store test implementation
class StoreTestInstance : public BaseTestInstance
{
public:
    StoreTestInstance (Context& context,
                       const Texture& texture,
                       const VkFormat format,
                       const bool declareImageFormatInShader,
                       const bool singleLayerBind,
                       const bool minalign);

protected:
    virtual tcu::TestStatus verifyResult (void);

    // Add empty implementations for functions that might not be needed
    void commandBeforeCompute (const VkCommandBuffer) {}
    void commandBetweenShaderInvocations (const VkCommandBuffer) {}
    void commandAfterCompute (const VkCommandBuffer) {}

    de::MovePtr<Buffer> m_imageBuffer;
    const VkDeviceSize  m_imageSizeBytes;
};

deUint32 BaseTestInstance::getViewOffset (Context& context,
                                          const VkFormat format,
                                          bool uniform)
{
    if (m_minalign)
    {
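        // Without VK_EXT_texel_buffer_alignment the global minTexelBufferOffsetAlignment limit applies;
        // with it, the offset alignment is queried per descriptor type (uniform vs. storage texel buffer).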
        if (!context.getTexelBufferAlignmentFeaturesEXT().texelBufferAlignment)
            return (deUint32)context.getDeviceProperties().limits.minTexelBufferOffsetAlignment;

        VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT alignmentProperties;
        deMemset(&alignmentProperties, 0, sizeof(alignmentProperties));
        alignmentProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT;

        VkPhysicalDeviceProperties2 properties2;
        deMemset(&properties2, 0, sizeof(properties2));
        properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
        properties2.pNext = &alignmentProperties;

        context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);

        VkBool32 singleTexelAlignment = uniform ? alignmentProperties.uniformTexelBufferOffsetSingleTexelAlignment :
                                                  alignmentProperties.storageTexelBufferOffsetSingleTexelAlignment;
        VkDeviceSize align = uniform ? alignmentProperties.uniformTexelBufferOffsetAlignmentBytes :
                                       alignmentProperties.storageTexelBufferOffsetAlignmentBytes;

        VkDeviceSize texelSize = formatHasThreeComponents(format) ? tcu::getChannelSize(vk::mapVkFormat(format).type) : tcu::getPixelSize(vk::mapVkFormat(format));

        if (singleTexelAlignment)
            align = de::min(align, texelSize);

        return (deUint32)align;
    }

    return 0;
}

StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign)
    : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, false)
    , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator& allocator = m_context.getDefaultAllocator();

    // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.

    m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
        vk, device, allocator,
        makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
        MemoryRequirement::HostVisible));
}

tcu::TestStatus StoreTestInstance::verifyResult (void)
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const tcu::IVec3 imageSize = m_texture.size();
    const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);

    const Allocation& alloc = m_imageBuffer->getAllocation();
    invalidateAlloc(vk, device, alloc);
    const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, (const char*)alloc.getHostPtr() + m_dstViewOffset);

    if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
        return tcu::TestStatus::pass("Passed");
    else
        return tcu::TestStatus::fail("Image comparison failed");
}

//! Store test for images
class ImageStoreTestInstance : public StoreTestInstance
{
public:
    ImageStoreTestInstance (Context& context,
                            const Texture& texture,
                            const VkFormat format,
                            const bool declareImageFormatInShader,
                            const bool singleLayerBind,
                            const bool minalign);

protected:
    VkDescriptorSetLayout prepareDescriptors (void);
    void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
    void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
    void commandAfterCompute (const VkCommandBuffer cmdBuffer);

    void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
                                         const VkPipelineLayout pipelineLayout,
                                         const int layerNdx);

    de::MovePtr<Image>                 m_image;
    de::MovePtr<Buffer>                m_constantsBuffer;
    const VkDeviceSize                 m_constantsBufferChunkSizeBytes;
    Move<VkDescriptorSetLayout>        m_descriptorSetLayout;
    Move<VkDescriptorPool>             m_descriptorPool;
    std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
    std::vector<SharedVkImageView>     m_allImageViews;
};

ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
                                                const Texture& texture,
                                                const VkFormat format,
                                                const bool declareImageFormatInShader,
                                                const bool singleLayerBind,
                                                const bool minalign)
    : StoreTestInstance               (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign)
    , m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
    , m_allDescriptorSets             (texture.numLayers())
    , m_allImageViews                 (texture.numLayers())
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator& allocator = m_context.getDefaultAllocator();

    m_image = de::MovePtr<Image>(new Image(
        vk, device, allocator,
        makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
        MemoryRequirement::Any));

    // This buffer will be used to pass constants to the shader

    const int numLayers = m_texture.numLayers();
    const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
    m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
        vk, device, allocator,
        makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
        MemoryRequirement::HostVisible));

    {
        const Allocation& alloc = m_constantsBuffer->getAllocation();
        deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

        deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        {
            deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
            *valuePtr = static_cast<deUint32>(layerNdx);
        }

        flushAlloc(vk, device, alloc);
    }
}

VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const int numLayers = m_texture.numLayers();
    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
        .build(vk, device);

    m_descriptorPool = DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

    if (m_singleLayerBind)
    {
        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        {
            m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
            m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
                vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
        }
    }
    else // bind all layers at once
    {
        m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
        m_allImageViews[0] = makeVkSharedPtr(makeImageView(
            vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
            makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
    }

    return *m_descriptorSetLayout; // not passing the ownership
}

void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
{
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
    const VkImageView imageView = **m_allImageViews[layerNdx];

    const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);

    // Set the next chunk of the constants buffer. Each chunk begins with the layer index that we set earlier.
909 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
910 m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
911
912 DescriptorSetUpdateBuilder()
913 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
914 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
915 .update(vk, device);
916 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
917 }
918
commandBeforeCompute(const VkCommandBuffer cmdBuffer)919 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
920 {
921 const DeviceInterface& vk = m_context.getDeviceInterface();
922
923 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
924 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
925 0u, VK_ACCESS_SHADER_WRITE_BIT,
926 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
927 m_image->get(), fullImageSubresourceRange);
928
929 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
930 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
931 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
932 m_constantsBuffer->get(), 0ull, constantsBufferSize);
933
934 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
935 }
936
commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)937 void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
938 {
939 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
940 }
941
commandAfterCompute(const VkCommandBuffer cmdBuffer)942 void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
943 {
944 commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
945 }
946
947 //! Store test for buffers
948 class BufferStoreTestInstance : public StoreTestInstance
949 {
950 public:
951 BufferStoreTestInstance (Context& context,
952 const Texture& texture,
953 const VkFormat format,
954 const bool declareImageFormatInShader,
955 const bool minalign);
956
957 protected:
958 VkDescriptorSetLayout prepareDescriptors (void);
959 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
960
961 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
962 const VkPipelineLayout pipelineLayout,
963 const int layerNdx);
964
965 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
966 Move<VkDescriptorPool> m_descriptorPool;
967 Move<VkDescriptorSet> m_descriptorSet;
968 Move<VkBufferView> m_bufferView;
969 };
970
BufferStoreTestInstance(Context & context,const Texture & texture,const VkFormat format,const bool declareImageFormatInShader,const bool minalign)971 BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
972 const Texture& texture,
973 const VkFormat format,
974 const bool declareImageFormatInShader,
975 const bool minalign)
976 : StoreTestInstance(context, texture, format, declareImageFormatInShader, false, minalign)
977 {
978 }
979
prepareDescriptors(void)980 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
981 {
982 const DeviceInterface& vk = m_context.getDeviceInterface();
983 const VkDevice device = m_context.getDevice();
984
985 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
986 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
987 .build(vk, device);
988
989 m_descriptorPool = DescriptorPoolBuilder()
990 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
991 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
992
993 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
994 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_dstViewOffset, m_imageSizeBytes);
995
996 return *m_descriptorSetLayout; // not passing the ownership
997 }
998
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)999 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1000 {
1001 DE_ASSERT(layerNdx == 0);
1002 DE_UNREF(layerNdx);
1003
1004 const VkDevice device = m_context.getDevice();
1005 const DeviceInterface& vk = m_context.getDeviceInterface();
1006
1007 DescriptorSetUpdateBuilder()
1008 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
1009 .update(vk, device);
1010 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1011 }
1012
commandAfterCompute(const VkCommandBuffer cmdBuffer)1013 void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1014 {
1015 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes + m_dstViewOffset);
1016 }
1017
1018 class LoadStoreTest : public TestCase
1019 {
1020 public:
1021 enum TestFlags
1022 {
1023 FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
1024 FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".
1025 FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 1 << 2, //!< Declare the format of the images in the shader code
1026 FLAG_MINALIGN = 1 << 3, //!< Use bufferview offset that matches the advertised minimum alignment
1027 FLAG_UNIFORM_TEXEL_BUFFER = 1 << 4, //!< Load from a uniform texel buffer rather than a storage texel buffer
1028 };
1029
1030 LoadStoreTest (tcu::TestContext& testCtx,
1031 const std::string& name,
1032 const std::string& description,
1033 const Texture& texture,
1034 const VkFormat format,
1035 const VkFormat imageFormat,
1036 const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER,
1037 const deBool imageLoadStoreLodAMD = DE_FALSE);
1038
1039 virtual void checkSupport (Context& context) const;
1040 void initPrograms (SourceCollections& programCollection) const;
1041 TestInstance* createInstance (Context& context) const;
1042
1043 private:
1044 const Texture m_texture;
1045 const VkFormat m_format; //!< Format as accessed in the shader
1046 const VkFormat m_imageFormat; //!< Storage format
1047 const bool m_declareImageFormatInShader; //!< Whether the shader will specify the format layout qualifier of the images
1048 const bool m_singleLayerBind;
1049 const bool m_restrictImages;
1050 const bool m_minalign;
1051 bool m_bufferLoadUniform;
1052 const deBool m_imageLoadStoreLodAMD;
1053 };
1054
LoadStoreTest(tcu::TestContext & testCtx,const std::string & name,const std::string & description,const Texture & texture,const VkFormat format,const VkFormat imageFormat,const deUint32 flags,const deBool imageLoadStoreLodAMD)1055 LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
1056 const std::string& name,
1057 const std::string& description,
1058 const Texture& texture,
1059 const VkFormat format,
1060 const VkFormat imageFormat,
1061 const deUint32 flags,
1062 const deBool imageLoadStoreLodAMD)
1063 : TestCase (testCtx, name, description)
1064 , m_texture (texture)
1065 , m_format (format)
1066 , m_imageFormat (imageFormat)
1067 , m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
1068 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
1069 , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
1070 , m_minalign ((flags & FLAG_MINALIGN) != 0)
1071 , m_bufferLoadUniform ((flags & FLAG_UNIFORM_TEXEL_BUFFER) != 0)
1072 , m_imageLoadStoreLodAMD (imageLoadStoreLodAMD)
1073 {
1074 if (m_singleLayerBind)
1075 DE_ASSERT(m_texture.numLayers() > 1);
1076
1077 DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
1078 }
1079
checkSupport(Context & context) const1080 void LoadStoreTest::checkSupport (Context& context) const
1081 {
1082 const VkFormatProperties3KHR formatProperties (context.getFormatProperties(m_format));
1083 const VkFormatProperties3KHR imageFormatProperties (context.getFormatProperties(m_imageFormat));
1084
1085 if (m_imageLoadStoreLodAMD)
1086 context.requireDeviceFunctionality("VK_AMD_shader_image_load_store_lod");
1087
1088 if (!m_bufferLoadUniform && !m_declareImageFormatInShader && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
1089 TCU_THROW(NotSupportedError, "Format not supported for unformatted loads via storage images");
1090
1091 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
1092 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1093
1094 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
1095 TCU_THROW(NotSupportedError, "Format not supported for storage images");
1096
1097 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1098 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1099
1100 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures))
1101 TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");
1102
1103 if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
1104 TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");
1105
1106 if (formatHasThreeComponents(m_format))
1107 {
1108 // When the source buffer is three-component, the destination buffer is single-component.
1109 VkFormat dstFormat = getSingleComponentFormat(m_format);
1110 const VkFormatProperties3KHR dstFormatProperties (context.getFormatProperties(dstFormat));
1111
1112 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1113 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1114 }
1115 else
1116 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1117 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1118
1119 if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
1120 TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
1121 }
1122
initPrograms(SourceCollections & programCollection) const1123 void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
1124 {
1125 const tcu::TextureFormat texFormat = mapVkFormat(m_format);
1126 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
1127 const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
1128 const std::string formatQualifierStr = getShaderImageFormatQualifier(texFormat);
1129 const std::string uniformTypeStr = getFormatPrefix(texFormat) + "textureBuffer";
1130 const std::string imageTypeStr = getShaderImageType(texFormat, usedImageType);
1131 const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
1132 const std::string xMax = de::toString(m_texture.size().x() - 1);
1133
1134 std::ostringstream src;
1135 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1136 << "\n";
1137 if (!m_declareImageFormatInShader)
1138 {
1139 src << "#extension GL_EXT_shader_image_load_formatted : require\n";
1140 }
1141
1142 if (m_imageLoadStoreLodAMD)
1143 {
1144 src << "#extension GL_AMD_shader_image_load_store_lod : require\n";
1145 }
1146
1147 src << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
1148 if (m_bufferLoadUniform)
1149 src << "layout (binding = 0) uniform " << uniformTypeStr << " u_image0;\n";
1150 else if (m_declareImageFormatInShader)
1151 src << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
1152 else
1153 src << "layout (binding = 0) " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
1154
1155 if (formatHasThreeComponents(m_format))
1156 src << "layout (binding = 1) " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
1157 else
1158 src << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
1159
1160 src << "\n"
1161 << "void main (void)\n"
1162 << "{\n";
1163 switch (dimension)
1164 {
1165 default: DE_ASSERT(0); // fallthrough
1166 case 1:
1167 if (m_bufferLoadUniform)
1168 {
1169 // for three-component formats, the dst buffer is single-component and the shader
1170 // expands the store into 3 component-wise stores.
1171 std::string type = getFormatPrefix(texFormat) + "vec4";
1172 src << " int pos = int(gl_GlobalInvocationID.x);\n"
1173 " " << type << " t = texelFetch(u_image0, " + xMax + "-pos);\n";
1174 if (formatHasThreeComponents(m_format))
1175 {
1176 src << " imageStore(u_image1, 3*pos+0, " << type << "(t.x));\n";
1177 src << " imageStore(u_image1, 3*pos+1, " << type << "(t.y));\n";
1178 src << " imageStore(u_image1, 3*pos+2, " << type << "(t.z));\n";
1179 }
1180 else
1181 src << " imageStore(u_image1, pos, t);\n";
1182 }
1183 else if (m_imageLoadStoreLodAMD)
1184 {
1185 src <<
1186 " int pos = int(gl_GlobalInvocationID.x);\n";
1187
1188 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1189 {
1190 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1191 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, " + xMaxSize + "-pos, " + de::toString(levelNdx) + "));\n";
1192 }
1193 }
1194 else
1195 {
1196 src <<
1197 " int pos = int(gl_GlobalInvocationID.x);\n"
1198 " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n";
1199 }
1200 break;
1201 case 2:
1202 if (m_imageLoadStoreLodAMD)
1203 {
1204 src << " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n";
1205
1206 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1207 {
1208 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1209 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, ivec2(" + xMaxSize + "-pos.x, pos.y), " + de::toString(levelNdx) + "));\n";
1210 }
1211
1212 }
1213 else
1214 {
1215 src <<
1216 " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
1217 " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n";
1218 }
1219 break;
1220 case 3:
1221 if (m_imageLoadStoreLodAMD)
1222 {
1223 src << " ivec3 pos = ivec3(gl_GlobalInvocationID);\n";
1224
1225 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1226 {
1227 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1228 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, ivec3(" + xMaxSize + "-pos.x, pos.y, pos.z), " + de::toString(levelNdx) + "));\n";
1229 }
1230 }
1231 else
1232 {
1233 src <<
1234 " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
1235 " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n";
1236 }
1237 break;
1238 }
1239 src << "}\n";
1240
1241 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
1242 }
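// For reference, a sketch of the shader generated above for the simple case of
// a 2D rgba8 image (non-LOD, non-buffer), assuming imageTypeStr expands to
// "image2D", maybeRestrictStr is empty and xMax is "63"; illustrative only, as
// the version/local-size header is emitted earlier in this function:
//
//   layout (binding = 0, rgba8) readonly uniform image2D u_image0;
//   layout (binding = 1, rgba8) writeonly uniform image2D u_image1;
//
//   void main (void)
//   {
//       ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
//       imageStore(u_image1, pos, imageLoad(u_image0, ivec2(63-pos.x, pos.y)));
//   }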
1243
1244 //! Load/store test base implementation
1245 class LoadStoreTestInstance : public BaseTestInstance
1246 {
1247 public:
1248 LoadStoreTestInstance (Context& context,
1249 const Texture& texture,
1250 const VkFormat format,
1251 const VkFormat imageFormat,
1252 const bool declareImageFormatInShader,
1253 const bool singleLayerBind,
1254 const bool minalign,
1255 const bool bufferLoadUniform);
1256
1257 protected:
1258 virtual Buffer* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image
1259
1260 tcu::TestStatus verifyResult (void);
1261
1262 // Add empty implementations for functions that might not be needed
1263 void commandBeforeCompute (const VkCommandBuffer) {}
1264 void commandBetweenShaderInvocations (const VkCommandBuffer) {}
1265 void commandAfterCompute (const VkCommandBuffer) {}
1266
1267 de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
1268 const VkDeviceSize m_imageSizeBytes;
1269 const VkFormat m_imageFormat; //!< Image format (for storage, may differ from the texture format)
1270 tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
1271
1272 bool m_bufferLoadUniform;
1273 VkDescriptorType m_bufferLoadDescriptorType;
1274 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1275 };
1276
1277 LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
1278 const Texture& texture,
1279 const VkFormat format,
1280 const VkFormat imageFormat,
1281 const bool declareImageFormatInShader,
1282 const bool singleLayerBind,
1283 const bool minalign,
1284 const bool bufferLoadUniform)
1285 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1286 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
1287 , m_imageFormat (imageFormat)
1288 , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
1289 , m_bufferLoadUniform (bufferLoadUniform)
1290 {
1291 const DeviceInterface& vk = m_context.getDeviceInterface();
1292 const VkDevice device = m_context.getDevice();
1293 Allocator& allocator = m_context.getDefaultAllocator();
1294
1295 m_bufferLoadDescriptorType = m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1296 m_bufferLoadUsageBit = m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1297
1298 // A helper buffer with enough space to hold the whole image.
1299
1300 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
1301 vk, device, allocator,
1302 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset, m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1303 MemoryRequirement::HostVisible));
1304
1305 // Copy reference data to buffer for subsequent upload to image.
1306
1307 const Allocation& alloc = m_imageBuffer->getAllocation();
1308 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset, m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
1309 flushAlloc(vk, device, alloc);
1310 }
1311
1312 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
1313 {
1314 const DeviceInterface& vk = m_context.getDeviceInterface();
1315 const VkDevice device = m_context.getDevice();
1316
1317 // Apply the same transformation as done in the shader
1318 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
1319 flipHorizontally(reference);
1320
1321 const Allocation& alloc = getResultBuffer()->getAllocation();
1322 invalidateAlloc(vk, device, alloc);
1323 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), (const char *)alloc.getHostPtr() + m_dstViewOffset);
1324
1325 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
1326 return tcu::TestStatus::pass("Passed");
1327 else
1328 return tcu::TestStatus::fail("Image comparison failed");
1329 }
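// Worked example of the verification above for a 1D image with 4 texels: the
// shader writes output[pos] = input[xMax - pos], so input [a, b, c, d] becomes
// [d, c, b, a]; flipHorizontally() applies the same reversal to the reference,
// so a conforming implementation matches it texel for texel.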
1330
1331 //! Load/store test for images
1332 class ImageLoadStoreTestInstance : public LoadStoreTestInstance
1333 {
1334 public:
1335 ImageLoadStoreTestInstance (Context& context,
1336 const Texture& texture,
1337 const VkFormat format,
1338 const VkFormat imageFormat,
1339 const bool declareImageFormatInShader,
1340 const bool singleLayerBind,
1341 const bool minalign,
1342 const bool bufferLoadUniform);
1343
1344 protected:
1345 VkDescriptorSetLayout prepareDescriptors (void);
1346 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1347 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1348 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1349
1350 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1351 const VkPipelineLayout pipelineLayout,
1352 const int layerNdx);
1353
1354 Buffer* getResultBuffer (void) const { return m_imageBuffer.get(); }
1355
1356 de::MovePtr<Image> m_imageSrc;
1357 de::MovePtr<Image> m_imageDst;
1358 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1359 Move<VkDescriptorPool> m_descriptorPool;
1360 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1361 std::vector<SharedVkImageView> m_allSrcImageViews;
1362 std::vector<SharedVkImageView> m_allDstImageViews;
1363 };
1364
1365 ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
1366 const Texture& texture,
1367 const VkFormat format,
1368 const VkFormat imageFormat,
1369 const bool declareImageFormatInShader,
1370 const bool singleLayerBind,
1371 const bool minalign,
1372 const bool bufferLoadUniform)
1373 : LoadStoreTestInstance (context, texture, format, imageFormat, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1374 , m_allDescriptorSets (texture.numLayers())
1375 , m_allSrcImageViews (texture.numLayers())
1376 , m_allDstImageViews (texture.numLayers())
1377 {
1378 const DeviceInterface& vk = m_context.getDeviceInterface();
1379 const VkDevice device = m_context.getDevice();
1380 Allocator& allocator = m_context.getDefaultAllocator();
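// When the storage format differs from the view format, the image must be
// created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT so that views of a different
// (compatible) format may be created on it.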
1381 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1382
1383 m_imageSrc = de::MovePtr<Image>(new Image(
1384 vk, device, allocator,
1385 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
1386 MemoryRequirement::Any));
1387
1388 m_imageDst = de::MovePtr<Image>(new Image(
1389 vk, device, allocator,
1390 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
1391 MemoryRequirement::Any));
1392 }
1393
1394 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
1395 {
1396 const VkDevice device = m_context.getDevice();
1397 const DeviceInterface& vk = m_context.getDeviceInterface();
1398
1399 const int numLayers = m_texture.numLayers();
1400 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1401 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1402 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1403 .build(vk, device);
1404
1405 m_descriptorPool = DescriptorPoolBuilder()
1406 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1407 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1408 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1409
1410 if (m_singleLayerBind)
1411 {
1412 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1413 {
1414 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1415 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1416
1417 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1418 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1419 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1420 }
1421 }
1422 else // bind all layers at once
1423 {
1424 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1425 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1426
1427 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1428 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1429 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1430 }
1431
1432 return *m_descriptorSetLayout; // not passing the ownership
1433 }
1434
1435 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1436 {
1437 const VkDevice device = m_context.getDevice();
1438 const DeviceInterface& vk = m_context.getDeviceInterface();
1439
1440 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1441 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1442 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1443
1444 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1445 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1446
1447 DescriptorSetUpdateBuilder()
1448 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1449 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1450 .update(vk, device);
1451 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1452 }
1453
1454 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1455 {
1456 const DeviceInterface& vk = m_context.getDeviceInterface();
1457
1458 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1459 {
1460 const VkImageMemoryBarrier preCopyImageBarriers[] =
1461 {
1462 makeImageMemoryBarrier(
1463 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1464 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1465 m_imageSrc->get(), fullImageSubresourceRange),
1466 makeImageMemoryBarrier(
1467 0u, VK_ACCESS_SHADER_WRITE_BIT,
1468 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1469 m_imageDst->get(), fullImageSubresourceRange)
1470 };
1471
1472 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1473 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1474 m_imageBuffer->get(), 0ull, m_imageSizeBytes + m_srcViewOffset);
1475
1476 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1477 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1478 }
1479 {
1480 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1481 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1482 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1483 m_imageSrc->get(), fullImageSubresourceRange);
1484
1485 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1486
1487 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1488 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1489 }
1490 }
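// Summary of the ordering established above (illustrative):
//   1. host write of reference data -> transfer read (buffer barrier)
//   2. m_imageSrc: UNDEFINED -> TRANSFER_DST_OPTIMAL, m_imageDst: UNDEFINED -> GENERAL
//   3. copy buffer -> m_imageSrc
//   4. m_imageSrc: transfer write -> shader read, layout TRANSFER_DST_OPTIMAL -> GENERAL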
1491
1492 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1493 {
1494 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1495 }
1496
1497 void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1498 {
1499 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1500 }
1501
1502 //! Load/store Lod AMD test for images
1503 class ImageLoadStoreLodAMDTestInstance : public BaseTestInstance
1504 {
1505 public:
1506 ImageLoadStoreLodAMDTestInstance (Context& context,
1507 const Texture& texture,
1508 const VkFormat format,
1509 const VkFormat imageFormat,
1510 const bool declareImageFormatInShader,
1511 const bool singleLayerBind,
1512 const bool minalign,
1513 const bool bufferLoadUniform);
1514
1515 protected:
1516 VkDescriptorSetLayout prepareDescriptors (void);
1517 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1518 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1519 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1520
1521 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1522 const VkPipelineLayout pipelineLayout,
1523 const int layerNdx);
1524
1525 Buffer* getResultBuffer (void) const { return m_imageBuffer.get(); }
1526 tcu::TestStatus verifyResult (void);
1527
1528 de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
1529 const VkDeviceSize m_imageSizeBytes;
1530 const VkFormat m_imageFormat; //!< Image format (for storage, may differ from the texture format)
1531 std::vector<tcu::TextureLevel> m_referenceImages; //!< Used as input data and later to verify result image
1532
1533 bool m_bufferLoadUniform;
1534 VkDescriptorType m_bufferLoadDescriptorType;
1535 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1536
1537 de::MovePtr<Image> m_imageSrc;
1538 de::MovePtr<Image> m_imageDst;
1539 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1540 Move<VkDescriptorPool> m_descriptorPool;
1541 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1542 std::vector<SharedVkImageView> m_allSrcImageViews;
1543 std::vector<SharedVkImageView> m_allDstImageViews;
1544
1545 };
1546
1547 ImageLoadStoreLodAMDTestInstance::ImageLoadStoreLodAMDTestInstance (Context& context,
1548 const Texture& texture,
1549 const VkFormat format,
1550 const VkFormat imageFormat,
1551 const bool declareImageFormatInShader,
1552 const bool singleLayerBind,
1553 const bool minalign,
1554 const bool bufferLoadUniform)
1555 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1556 , m_imageSizeBytes (getMipmapImageTotalSizeBytes(texture, format))
1557 , m_imageFormat (imageFormat)
1558 , m_bufferLoadUniform (bufferLoadUniform)
1559 , m_allDescriptorSets (texture.numLayers())
1560 , m_allSrcImageViews (texture.numLayers())
1561 , m_allDstImageViews (texture.numLayers())
1562 {
1563 const DeviceInterface& vk = m_context.getDeviceInterface();
1564 const VkDevice device = m_context.getDevice();
1565 Allocator& allocator = m_context.getDefaultAllocator();
1566 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1567
1568 const VkSampleCountFlagBits samples = static_cast<VkSampleCountFlagBits>(m_texture.numSamples()); // VkSampleCountFlagBits values match the integer sample counts, so the cast is valid
1569
1570 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1571 {
1572 tcu::TextureLevel referenceImage = generateReferenceImage(texture.size(levelNdx), imageFormat, format);
1573 m_referenceImages.push_back(referenceImage);
1574 }
1575
1576 m_bufferLoadDescriptorType = m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1577 m_bufferLoadUsageBit = m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1578
1579 // A helper buffer with enough space to hold the whole image.
1580 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
1581 vk, device, allocator,
1582 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset, m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1583 MemoryRequirement::HostVisible));
1584
1585 // Copy reference data to buffer for subsequent upload to image.
1586 {
1587 const Allocation& alloc = m_imageBuffer->getAllocation();
1588 VkDeviceSize bufferOffset = 0u;
1589 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1590 {
1591 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset + bufferOffset, m_referenceImages[levelNdx].getAccess().getDataPtr(), static_cast<size_t>(getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx)));
1592 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1593 }
1594 flushAlloc(vk, device, alloc);
1595 }
1596
1597 {
1598 const VkImageCreateInfo imageParamsSrc =
1599 {
1600 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1601 DE_NULL, // const void* pNext;
1602 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | imageFlags, // VkImageCreateFlags flags;
1603 mapImageType(m_texture.type()), // VkImageType imageType;
1604 m_imageFormat, // VkFormat format;
1605 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1606 (deUint32)m_texture.numMipmapLevels(), // deUint32 mipLevels;
1607 (deUint32)m_texture.numLayers(), // deUint32 arrayLayers;
1608 samples, // VkSampleCountFlagBits samples;
1609 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1610 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1611 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1612 0u, // deUint32 queueFamilyIndexCount;
1613 DE_NULL, // const deUint32* pQueueFamilyIndices;
1614 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1615 };
1616
1617 m_imageSrc = de::MovePtr<Image>(new Image(
1618 vk, device, allocator,
1619 imageParamsSrc,
1620 MemoryRequirement::Any));
1621 }
1622
1623 {
1624 const VkImageCreateInfo imageParamsDst =
1625 {
1626 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1627 DE_NULL, // const void* pNext;
1628 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | imageFlags, // VkImageCreateFlags flags;
1629 mapImageType(m_texture.type()), // VkImageType imageType;
1630 m_imageFormat, // VkFormat format;
1631 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1632 (deUint32)m_texture.numMipmapLevels(), // deUint32 mipLevels;
1633 (deUint32)m_texture.numLayers(), // deUint32 arrayLayers;
1634 samples, // VkSampleCountFlagBits samples;
1635 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1636 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
1637 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1638 0u, // deUint32 queueFamilyIndexCount;
1639 DE_NULL, // const deUint32* pQueueFamilyIndices;
1640 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1641 };
1642
1643 m_imageDst = de::MovePtr<Image>(new Image(
1644 vk, device, allocator,
1645 imageParamsDst,
1646 MemoryRequirement::Any));
1647 }
1648 }
1649
1650 tcu::TestStatus ImageLoadStoreLodAMDTestInstance::verifyResult (void)
1651 {
1652 const DeviceInterface& vk = m_context.getDeviceInterface();
1653 const VkDevice device = m_context.getDevice();
1654
1655 const Allocation& alloc = getResultBuffer()->getAllocation();
1656 invalidateAlloc(vk, device, alloc);
1657
1658 VkDeviceSize bufferOffset = 0;
1659 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1660 {
1661 // Apply the same transformation as done in the shader
1662 const tcu::PixelBufferAccess reference = m_referenceImages[levelNdx].getAccess();
1663 flipHorizontally(reference);
1664
1665 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(levelNdx), (const char *)alloc.getHostPtr() + m_dstViewOffset + bufferOffset);
1666
1667 if (!comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result, levelNdx))
1668 {
1669 std::ostringstream errorMessage;
1670 errorMessage << "Image Level " << levelNdx << " comparison failed";
1671 return tcu::TestStatus::fail(errorMessage.str());
1672 }
1673 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1674 }
1675
1676 return tcu::TestStatus::pass("Passed");
1677 }
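// The result buffer packs the mip levels back to back, in the same order as the
// upload in the constructor. A sketch with hypothetical numbers, assuming a
// 64-texel 1D rgba8 image with 3 levels:
//   level 0: offset 0,   256 bytes (64 texels)
//   level 1: offset 256, 128 bytes (32 texels)
//   level 2: offset 384,  64 bytes (16 texels)
// getMipmapLevelImageSizeBytes() supplies each level's size, and bufferOffset
// accumulates it above.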
1678
1679 VkDescriptorSetLayout ImageLoadStoreLodAMDTestInstance::prepareDescriptors (void)
1680 {
1681 const VkDevice device = m_context.getDevice();
1682 const DeviceInterface& vk = m_context.getDeviceInterface();
1683
1684 const int numLayers = m_texture.numLayers();
1685 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1686 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1687 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1688 .build(vk, device);
1689
1690 m_descriptorPool = DescriptorPoolBuilder()
1691 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1692 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1693 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1694
1695 if (m_singleLayerBind)
1696 {
1697 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1698 {
1699 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1700 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), layerNdx, 1u);
1701
1702 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1703 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1704 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1705 }
1706 }
1707 else // bind all layers at once
1708 {
1709 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1710 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, numLayers);
1711
1712 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1713 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1714 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1715 }
1716
1717 return *m_descriptorSetLayout; // not passing the ownership
1718 }
1719
1720 void ImageLoadStoreLodAMDTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1721 {
1722 const VkDevice device = m_context.getDevice();
1723 const DeviceInterface& vk = m_context.getDeviceInterface();
1724
1725 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1726 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1727 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1728
1729 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1730 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1731
1732 DescriptorSetUpdateBuilder()
1733 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1734 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1735 .update(vk, device);
1736 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1737 }
1738
1739 void ImageLoadStoreLodAMDTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1740 {
1741 const DeviceInterface& vk = m_context.getDeviceInterface();
1742 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, m_texture.numLayers());
1743 {
1744 const VkImageMemoryBarrier preCopyImageBarriers[] =
1745 {
1746 makeImageMemoryBarrier(
1747 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1748 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1749 m_imageSrc->get(), fullImageSubresourceRange),
1750 makeImageMemoryBarrier(
1751 0u, VK_ACCESS_SHADER_WRITE_BIT,
1752 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1753 m_imageDst->get(), fullImageSubresourceRange)
1754 };
1755
1756 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1757 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1758 m_imageBuffer->get(), 0ull, m_imageSizeBytes + m_srcViewOffset);
1759
1760 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1761 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1762 }
1763 {
1764 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1765 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1766 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1767 m_imageSrc->get(), fullImageSubresourceRange);
1768
1769 std::vector<VkBufferImageCopy> copyRegions;
1770 VkDeviceSize bufferOffset = 0u;
1771 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1772 {
1773 const VkBufferImageCopy copyParams =
1774 {
1775 bufferOffset, // VkDeviceSize bufferOffset;
1776 0u, // deUint32 bufferRowLength;
1777 0u, // deUint32 bufferImageHeight;
1778 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u, m_texture.numLayers()), // VkImageSubresourceLayers imageSubresource;
1779 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1780 makeExtent3D(m_texture.layerSize(levelNdx)), // VkExtent3D imageExtent;
1781 };
1782 copyRegions.push_back(copyParams);
1783 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1784 }
1785
1786 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32) copyRegions.size(), copyRegions.data());
1787 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1788 }
1789 }
1790
1791 void ImageLoadStoreLodAMDTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1792 {
1793 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1794 }
1795
1796 void ImageLoadStoreLodAMDTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1797 {
1798 commandCopyMipmapImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageFormat, m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1799 }
1800
1801 //! Load/store test for buffers
1802 class BufferLoadStoreTestInstance : public LoadStoreTestInstance
1803 {
1804 public:
1805 BufferLoadStoreTestInstance (Context& context,
1806 const Texture& texture,
1807 const VkFormat format,
1808 const VkFormat imageFormat,
1809 const bool declareImageFormatInShader,
1810 const bool minalign,
1811 const bool bufferLoadUniform);
1812
1813 protected:
1814 VkDescriptorSetLayout prepareDescriptors (void);
1815 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1816
1817 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1818 const VkPipelineLayout pipelineLayout,
1819 const int layerNdx);
1820
1821 Buffer* getResultBuffer (void) const { return m_imageBufferDst.get(); }
1822
1823 de::MovePtr<Buffer> m_imageBufferDst;
1824 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1825 Move<VkDescriptorPool> m_descriptorPool;
1826 Move<VkDescriptorSet> m_descriptorSet;
1827 Move<VkBufferView> m_bufferViewSrc;
1828 Move<VkBufferView> m_bufferViewDst;
1829 };
1830
1831 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
1832 const Texture& texture,
1833 const VkFormat format,
1834 const VkFormat imageFormat,
1835 const bool declareImageFormatInShader,
1836 const bool minalign,
1837 const bool bufferLoadUniform)
1838 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, false, minalign, bufferLoadUniform)
1839 {
1840 const DeviceInterface& vk = m_context.getDeviceInterface();
1841 const VkDevice device = m_context.getDevice();
1842 Allocator& allocator = m_context.getDefaultAllocator();
1843
1844 // Create a destination buffer.
1845
1846 m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
1847 vk, device, allocator,
1848 makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1849 MemoryRequirement::HostVisible));
1850 }
1851
1852 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1853 {
1854 const DeviceInterface& vk = m_context.getDeviceInterface();
1855 const VkDevice device = m_context.getDevice();
1856
1857 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1858 .addSingleBinding(m_bufferLoadDescriptorType, VK_SHADER_STAGE_COMPUTE_BIT)
1859 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1860 .build(vk, device);
1861
1862 m_descriptorPool = DescriptorPoolBuilder()
1863 .addType(m_bufferLoadDescriptorType)
1864 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1865 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1866
1867 VkFormat dstFormat = formatHasThreeComponents(m_format) ? getSingleComponentFormat(m_format) : m_format;
1868
1869 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1870 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_srcViewOffset, m_imageSizeBytes);
1871 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), dstFormat, m_dstViewOffset, m_imageSizeBytes);
1872
1873 return *m_descriptorSetLayout; // not passing the ownership
1874 }
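// A sketch of the three-component special case above, assuming m_format is
// VK_FORMAT_R16G16B16_UINT: storage texel buffer support for three-component
// formats is not generally available, so the destination view is created with
// the single-component format VK_FORMAT_R16_UINT and the shader compensates by
// issuing three component-wise imageStore() calls per source texel (see the
// formatHasThreeComponents() branch in LoadStoreTest::initPrograms()).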
1875
1876 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1877 {
1878 DE_ASSERT(layerNdx == 0);
1879 DE_UNREF(layerNdx);
1880
1881 const VkDevice device = m_context.getDevice();
1882 const DeviceInterface& vk = m_context.getDeviceInterface();
1883
1884 DescriptorSetUpdateBuilder()
1885 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), m_bufferLoadDescriptorType, &m_bufferViewSrc.get())
1886 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1887 .update(vk, device);
1888 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1889 }
1890
1891 void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1892 {
1893 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes + m_dstViewOffset);
1894 }
1895
1896 TestInstance* StoreTest::createInstance (Context& context) const
1897 {
1898 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1899 return new BufferStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_minalign);
1900 else
1901 return new ImageStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_singleLayerBind, m_minalign);
1902 }
1903
1904 TestInstance* LoadStoreTest::createInstance (Context& context) const
1905 {
1906 if (m_imageLoadStoreLodAMD)
1907 return new ImageLoadStoreLodAMDTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_bufferLoadUniform);
1908
1909 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1910 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_minalign, m_bufferLoadUniform);
1911 else
1912 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_bufferLoadUniform);
1913 }
1914
1915 class ImageExtendOperandTestInstance : public BaseTestInstance
1916 {
1917 public:
1918 ImageExtendOperandTestInstance (Context& context,
1919 const Texture& texture,
1920 const VkFormat readFormat,
1921 const VkFormat writeFormat,
1922 bool relaxedPrecision);
1923
1924 virtual ~ImageExtendOperandTestInstance (void) {}
1925
1926 protected:
1927
1928 VkDescriptorSetLayout prepareDescriptors (void);
1929 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1930 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1931 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1932
1933 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1934 const VkPipelineLayout pipelineLayout,
1935 const int layerNdx);
1936
1937 tcu::TestStatus verifyResult (void);
1938
1939 protected:
1940
1941 bool m_isSigned;
1942 tcu::TextureLevel m_inputImageData;
1943
1944 de::MovePtr<Image> m_imageSrc; // source image
1945 SharedVkImageView m_imageSrcView;
1946 VkDeviceSize m_imageSrcSize;
1947
1948 de::MovePtr<Image> m_imageDst; // dest image
1949 SharedVkImageView m_imageDstView;
1950 VkFormat m_imageDstFormat;
1951 VkDeviceSize m_imageDstSize;
1952
1953 de::MovePtr<Buffer> m_buffer; // result buffer
1954
1955 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1956 Move<VkDescriptorPool> m_descriptorPool;
1957 SharedVkDescriptorSet m_descriptorSet;
1958
1959 bool m_relaxedPrecision;
1960 };
1961
1962 ImageExtendOperandTestInstance::ImageExtendOperandTestInstance (Context& context,
1963 const Texture& texture,
1964 const VkFormat readFormat,
1965 const VkFormat writeFormat,
1966 bool relaxedPrecision)
1967 : BaseTestInstance (context, texture, readFormat, true, true, false, false)
1968 , m_imageDstFormat (writeFormat)
1969 , m_relaxedPrecision (relaxedPrecision)
1970 {
1971 const DeviceInterface& vk = m_context.getDeviceInterface();
1972 const VkDevice device = m_context.getDevice();
1973 Allocator& allocator = m_context.getDefaultAllocator();
1974 const deInt32 width = texture.size().x();
1975 const deInt32 height = texture.size().y();
1976 const tcu::TextureFormat textureFormat = mapVkFormat(m_format);
1977
1978 // Generate reference image
1979 m_isSigned = (getTextureChannelClass(textureFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
1980 m_inputImageData.setStorage(textureFormat, width, height, 1);
1981
1982 const tcu::PixelBufferAccess access = m_inputImageData.getAccess();
1983 const int valueStart = (m_isSigned ? (-width / 2) : 0);
1984
1985 for (int x = 0; x < width; ++x)
1986 for (int y = 0; y < height; ++y)
1987 {
1988 const tcu::IVec4 color(valueStart + x, valueStart + y, valueStart, valueStart);
1989 access.setPixel(color, x, y);
1990 }
1991
1992 // Create source image
1993 m_imageSrc = de::MovePtr<Image>(new Image(
1994 vk, device, allocator,
1995 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0u),
1996 MemoryRequirement::Any));
1997
1998 // Create destination image
1999 m_imageDst = de::MovePtr<Image>(new Image(
2000 vk, device, allocator,
2001 makeImageCreateInfo(m_texture, m_imageDstFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
2002 MemoryRequirement::Any));
2003
2004 // Compute image and buffer sizes
2005 m_imageSrcSize = width * height * tcu::getPixelSize(textureFormat);
2006 m_imageDstSize = width * height * tcu::getPixelSize(mapVkFormat(m_imageDstFormat));
2007 VkDeviceSize bufferSizeBytes = de::max(m_imageSrcSize, m_imageDstSize);
2008
2009 // Create helper buffer able to store input data and image write result
2010 m_buffer = de::MovePtr<Buffer>(new Buffer(
2011 vk, device, allocator,
2012 makeBufferCreateInfo(bufferSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2013 MemoryRequirement::HostVisible));
2014
2015 const Allocation& alloc = m_buffer->getAllocation();
2016 deMemcpy(alloc.getHostPtr(), m_inputImageData.getAccess().getDataPtr(), static_cast<size_t>(m_imageSrcSize));
2017 flushAlloc(vk, device, alloc);
2018 }
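// Worked example of the input generated above, assuming a signed 4x4 format:
// valueStart = -2, so pixel (x, y) = (-2+x, -2+y, -2, -2). The data thus
// contains negative components whose sign bits exercise the SignExtend/
// ZeroExtend behaviour under test; for unsigned formats valueStart is 0.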
2019
2020 VkDescriptorSetLayout ImageExtendOperandTestInstance::prepareDescriptors (void)
2021 {
2022 const DeviceInterface& vk = m_context.getDeviceInterface();
2023 const VkDevice device = m_context.getDevice();
2024
2025 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2026 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2027 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2028 .build(vk, device);
2029
2030 m_descriptorPool = DescriptorPoolBuilder()
2031 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2032 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2033 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1);
2034
2035 const VkImageViewType viewType = mapImageViewType(m_texture.type());
2036 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2037
2038 m_descriptorSet = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
2039 m_imageSrcView = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
2040 m_imageDstView = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_imageDstFormat, subresourceRange));
2041
2042 return *m_descriptorSetLayout; // not passing the ownership
2043 }
2044
2045 void ImageExtendOperandTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
2046 {
2047 DE_UNREF(layerNdx);
2048
2049 const DeviceInterface& vk = m_context.getDeviceInterface();
2050 const VkDevice device = m_context.getDevice();
2051 const VkDescriptorSet descriptorSet = **m_descriptorSet;
2052
2053 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageSrcView, VK_IMAGE_LAYOUT_GENERAL);
2054 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageDstView, VK_IMAGE_LAYOUT_GENERAL);
2055
2056 typedef DescriptorSetUpdateBuilder::Location DSUBL;
2057 DescriptorSetUpdateBuilder()
2058 .writeSingle(descriptorSet, DSUBL::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
2059 .writeSingle(descriptorSet, DSUBL::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
2060 .update(vk, device);
2061 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2062 }
2063
2064 void ImageExtendOperandTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
2065 {
2066 const DeviceInterface& vk = m_context.getDeviceInterface();
2067
2068 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
2069 {
2070 const VkImageMemoryBarrier preCopyImageBarriers[] =
2071 {
2072 makeImageMemoryBarrier(
2073 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
2074 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2075 m_imageSrc->get(), fullImageSubresourceRange),
2076 makeImageMemoryBarrier(
2077 0u, VK_ACCESS_SHADER_WRITE_BIT,
2078 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
2079 m_imageDst->get(), fullImageSubresourceRange)
2080 };
2081
2082 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
2083 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
2084 m_buffer->get(), 0ull, m_imageSrcSize);
2085
2086 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
2087 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
2088 }
2089 {
2090 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
2091 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
2092 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
2093 m_imageSrc->get(), fullImageSubresourceRange);
2094
2095 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
2096
2097 vk.cmdCopyBufferToImage(cmdBuffer, m_buffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
2098 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
2099 }
2100 }
2101
2102 void ImageExtendOperandTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
2103 {
2104 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
2105 }
2106
2107 void ImageExtendOperandTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
2108 {
2109 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_buffer->get(), m_imageDstSize, m_texture);
2110 }
2111
2112 // Clears the high bits of every pixel in the pixel buffer, leaving only the lowest 16 bits of each component.
2113 void clearHighBits (const tcu::PixelBufferAccess& pixels, int width, int height)
2114 {
2115 for (int y = 0; y < height; ++y)
2116 for (int x = 0; x < width; ++x)
2117 {
2118 auto color = pixels.getPixelUint(x, y);
2119 for (int c = 0; c < decltype(color)::SIZE; ++c)
2120 color[c] &= 0xFFFFull;
2121 pixels.setPixel(color, x, y);
2122 }
2123 }
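// Usage sketch: with relaxed precision only the low 16 bits of each component
// are required to match, so e.g. a component value of 0x12345678 is masked down
// to 0x5678 in both the reference and the result before comparison in
// verifyResult() below.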
2124
2125 tcu::TestStatus ImageExtendOperandTestInstance::verifyResult (void)
2126 {
2127 const DeviceInterface& vk = m_context.getDeviceInterface();
2128 const VkDevice device = m_context.getDevice();
2129 const tcu::IVec3 imageSize = m_texture.size();
2130 const tcu::PixelBufferAccess inputAccess = m_inputImageData.getAccess();
2131 const deInt32 width = inputAccess.getWidth();
2132 const deInt32 height = inputAccess.getHeight();
2133 tcu::TextureLevel refImage (mapVkFormat(m_imageDstFormat), width, height);
2134 tcu::PixelBufferAccess refAccess = refImage.getAccess();
2135
2136 for (int x = 0; x < width; ++x)
2137 for (int y = 0; y < height; ++y)
2138 {
2139 tcu::IVec4 color = inputAccess.getPixelInt(x, y);
2140 refAccess.setPixel(color, x, y);
2141 }
2142
2143 const Allocation& alloc = m_buffer->getAllocation();
2144 invalidateAlloc(vk, device, alloc);
2145 const tcu::PixelBufferAccess result(mapVkFormat(m_imageDstFormat), imageSize, alloc.getHostPtr());
2146
2147 if (m_relaxedPrecision)
2148 {
2149 // Preserve the lowest 16 bits of the reference and result pixels only.
2150 clearHighBits(refAccess, width, height);
2151 clearHighBits(result, width, height);
2152 }
2153
2154 if (tcu::intThresholdCompare (m_context.getTestContext().getLog(), "Comparison", "Comparison", refAccess, result, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT, true/*use64Bits*/))
2155 return tcu::TestStatus::pass("Passed");
2156 else
2157 return tcu::TestStatus::fail("Image comparison failed");
2158 }
2159
2160 enum class ExtendTestType
2161 {
2162 READ = 0,
2163 WRITE = 1,
2164 };
2165
2166 enum class ExtendOperand
2167 {
2168 SIGN_EXTEND = 0,
2169 ZERO_EXTEND = 1
2170 };
2171
2172 class ImageExtendOperandTest : public TestCase
2173 {
2174 public:
2175 ImageExtendOperandTest (tcu::TestContext& testCtx,
2176 const std::string& name,
2177 const Texture texture,
2178 const VkFormat readFormat,
2179 const VkFormat writeFormat,
2180 const bool operandForce,
2181 const bool relaxedPrecision,
2182 ExtendTestType extendTestType);
2183
2184 void checkSupport (Context& context) const;
2185 void initPrograms (SourceCollections& programCollection) const;
2186 TestInstance* createInstance (Context& context) const;
2187
2188 private:
2189 bool isWriteTest () const { return (m_extendTestType == ExtendTestType::WRITE); }
2190
2191 const Texture m_texture;
2192 VkFormat m_readFormat;
2193 VkFormat m_writeFormat;
2194 bool m_operandForce; // Use an operand that doesn't match SampledType?
2195 bool m_relaxedPrecision;
2196 ExtendTestType m_extendTestType;
2197 };
2198
2199 ImageExtendOperandTest::ImageExtendOperandTest (tcu::TestContext& testCtx,
2200 const std::string& name,
2201 const Texture texture,
2202 const VkFormat readFormat,
2203 const VkFormat writeFormat,
2204 const bool operandForce,
2205 const bool relaxedPrecision,
2206 ExtendTestType extendTestType)
2207 : TestCase (testCtx, name, "")
2208 , m_texture (texture)
2209 , m_readFormat (readFormat)
2210 , m_writeFormat (writeFormat)
2211 , m_operandForce (operandForce)
2212 , m_relaxedPrecision (relaxedPrecision)
2213 , m_extendTestType (extendTestType)
2214 {
2215 }
2216
2217 void checkFormatProperties (Context& context, VkFormat format)
2218 {
2219 const VkFormatProperties3KHR formatProperties (context.getFormatProperties(format));
2220
2221 if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
2222 TCU_THROW(NotSupportedError, "Format not supported for storage images");
2223 }
2224
2225 void check64BitSupportIfNeeded (Context& context, VkFormat readFormat, VkFormat writeFormat)
2226 {
2227 if (is64BitIntegerFormat(readFormat) || is64BitIntegerFormat(writeFormat))
2228 {
2229 const auto& features = context.getDeviceFeatures();
2230 if (!features.shaderInt64)
2231 TCU_THROW(NotSupportedError, "64-bit integers not supported in shaders");
2232 }
2233 }
2234
2235 void ImageExtendOperandTest::checkSupport (Context& context) const
2236 {
2237 context.requireDeviceFunctionality("VK_KHR_spirv_1_4");
2239
2240 check64BitSupportIfNeeded(context, m_readFormat, m_writeFormat);
2241
2242 checkFormatProperties(context, m_readFormat);
2243 checkFormatProperties(context, m_writeFormat);
2244 }
2245
2246 void ImageExtendOperandTest::initPrograms (SourceCollections& programCollection) const
2247 {
2248 tcu::StringTemplate shaderTemplate(
2249 "OpCapability Shader\n"
2250 "OpCapability StorageImageExtendedFormats\n"
2251
2252 "${capability}"
2253 "${extension}"
2254
2255 "%std450 = OpExtInstImport \"GLSL.std.450\"\n"
2256 "OpMemoryModel Logical GLSL450\n"
2257 "OpEntryPoint GLCompute %main \"main\" %id %src_image_ptr %dst_image_ptr\n"
2258 "OpExecutionMode %main LocalSize 1 1 1\n"
2259
2260 // decorations
2261 "OpDecorate %id BuiltIn GlobalInvocationId\n"
2262
2263 "OpDecorate %src_image_ptr DescriptorSet 0\n"
2264 "OpDecorate %src_image_ptr Binding 0\n"
2265 "OpDecorate %src_image_ptr NonWritable\n"
2266
2267 "${relaxed_precision}"
2268
2269 "OpDecorate %dst_image_ptr DescriptorSet 0\n"
2270 "OpDecorate %dst_image_ptr Binding 1\n"
2271 "OpDecorate %dst_image_ptr NonReadable\n"
2272
2273 // types
2274 "%type_void = OpTypeVoid\n"
2275 "%type_i32 = OpTypeInt 32 1\n"
2276 "%type_u32 = OpTypeInt 32 0\n"
2277 "%type_vec2_i32 = OpTypeVector %type_i32 2\n"
2278 "%type_vec2_u32 = OpTypeVector %type_u32 2\n"
2279 "%type_vec3_i32 = OpTypeVector %type_i32 3\n"
2280 "%type_vec3_u32 = OpTypeVector %type_u32 3\n"
2281 "%type_vec4_i32 = OpTypeVector %type_i32 4\n"
2282 "%type_vec4_u32 = OpTypeVector %type_u32 4\n"
2283 "${extra_types}"
2284
2285 "%type_fun_void = OpTypeFunction %type_void\n"
2286
2287 "${image_types}"
2288
2289 "%type_ptr_in_vec3_u32 = OpTypePointer Input %type_vec3_u32\n"
2290 "%type_ptr_in_u32 = OpTypePointer Input %type_u32\n"
2291
2292 "${image_uniforms}"
2293
2294 // variables
2295 "%id = OpVariable %type_ptr_in_vec3_u32 Input\n"
2296
2297 "${image_variables}"
2298
2299 // main function
2300 "%main = OpFunction %type_void None %type_fun_void\n"
2301 "%label = OpLabel\n"
2302
2303 "${image_load}"
2304
2305 "%idvec = OpLoad %type_vec3_u32 %id\n"
2306 "%id_xy = OpVectorShuffle %type_vec2_u32 %idvec %idvec 0 1\n"
2307 "%coord = OpBitcast %type_vec2_i32 %id_xy\n"
2308 "%value = OpImageRead ${sampled_type_vec4} %src_image %coord ${read_extend_operand}\n"
2309 " OpImageWrite %dst_image %coord %value ${write_extend_operand}\n"
2310 " OpReturn\n"
2311 " OpFunctionEnd\n");
2312
2313 const auto testedFormat = mapVkFormat(isWriteTest() ? m_writeFormat : m_readFormat);
2314 const bool isSigned = (getTextureChannelClass(testedFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
2315
2316 const auto isRead64 = is64BitIntegerFormat(m_readFormat);
2317 const auto isWrite64 = is64BitIntegerFormat(m_writeFormat);
2318 DE_ASSERT(isRead64 == isWrite64);
2319
2320 const bool using64Bits = (isRead64 || isWrite64);
2321
2322 // Additional capabilities when needed.
2323 std::string capability;
2324 std::string extension;
2325 std::string extraTypes;
2326
2327 if (using64Bits)
2328 {
2329 extension += "OpExtension \"SPV_EXT_shader_image_int64\"\n";
2330 capability +=
2331 "OpCapability Int64\n"
2332 "OpCapability Int64ImageEXT\n"
2333 ;
2334 extraTypes +=
2335 "%type_i64 = OpTypeInt 64 1\n"
2336 "%type_u64 = OpTypeInt 64 0\n"
2337 "%type_vec3_i64 = OpTypeVector %type_i64 3\n"
2338 "%type_vec3_u64 = OpTypeVector %type_u64 3\n"
2339 "%type_vec4_i64 = OpTypeVector %type_i64 4\n"
2340 "%type_vec4_u64 = OpTypeVector %type_u64 4\n"
2341 ;
2342 }
2343
2344 std::string relaxed = "";
2345 if (m_relaxedPrecision)
2346 relaxed += "OpDecorate %src_image_ptr RelaxedPrecision\n";
2347
2348 // Sampled type depends on the format sign and mismatch force flag.
2349 const bool signedSampleType = ((isSigned && !m_operandForce) || (!isSigned && m_operandForce));
2350 const std::string bits = (using64Bits ? "64" : "32");
2351 const std::string sampledTypePostfix = (signedSampleType ? "i" : "u") + bits;
2352 const std::string extendOperandStr = (isSigned ? "SignExtend" : "ZeroExtend");
2353
2354 std::map<std::string, std::string> specializations =
2355 {
2356 { "image_type_id", "%type_image" },
2357 { "image_uni_ptr_type_id", "%type_ptr_uniform_const_image" },
2358 { "image_var_id", "%src_image_ptr" },
2359 { "image_id", "%src_image" },
2360 { "capability", capability },
2361 { "extension", extension },
2362 { "extra_types", extraTypes },
2363 { "relaxed_precision", relaxed },
2364 { "image_format", getSpirvFormat(m_readFormat) },
2365 { "sampled_type", (std::string("%type_") + sampledTypePostfix) },
2366 { "sampled_type_vec4", (std::string("%type_vec4_") + sampledTypePostfix) },
2367 { "read_extend_operand", (!isWriteTest() ? extendOperandStr : "") },
2368 { "write_extend_operand", (isWriteTest() ? extendOperandStr : "") },
2369 };
2370
2371 // Additional parameterization is needed to handle the case when the source and destination textures have the same format
2372 tcu::StringTemplate imageTypeTemplate(
2373 "${image_type_id} = OpTypeImage ${sampled_type} 2D 0 0 0 2 ${image_format}\n");
2374 tcu::StringTemplate imageUniformTypeTemplate(
2375 "${image_uni_ptr_type_id} = OpTypePointer UniformConstant ${image_type_id}\n");
2376 tcu::StringTemplate imageVariablesTemplate(
2377 "${image_var_id} = OpVariable ${image_uni_ptr_type_id} UniformConstant\n");
2378 tcu::StringTemplate imageLoadTemplate(
2379 "${image_id} = OpLoad ${image_type_id} ${image_var_id}\n");
2380
2381 std::string imageTypes;
2382 std::string imageUniformTypes;
2383 std::string imageVariables;
2384 std::string imageLoad;
2385
2386 // If the input image format is the same as the output format, fewer SPIR-V definitions are needed
2387 if (m_readFormat == m_writeFormat)
2388 {
2389 imageTypes = imageTypeTemplate.specialize(specializations);
2390 imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
2391 imageVariables = imageVariablesTemplate.specialize(specializations);
2392 imageLoad = imageLoadTemplate.specialize(specializations);
2393
2394 specializations["image_var_id"] = "%dst_image_ptr";
2395 specializations["image_id"] = "%dst_image";
2396 imageVariables += imageVariablesTemplate.specialize(specializations);
2397 imageLoad += imageLoadTemplate.specialize(specializations);
2398 }
	else
	{
		specializations["image_type_id"] = "%type_src_image";
		specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_src_image";
		imageTypes = imageTypeTemplate.specialize(specializations);
		imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
		imageVariables = imageVariablesTemplate.specialize(specializations);
		imageLoad = imageLoadTemplate.specialize(specializations);

		specializations["image_format"] = getSpirvFormat(m_writeFormat);
		specializations["image_type_id"] = "%type_dst_image";
		specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_dst_image";
		specializations["image_var_id"] = "%dst_image_ptr";
		specializations["image_id"] = "%dst_image";
		imageTypes += imageTypeTemplate.specialize(specializations);
		imageUniformTypes += imageUniformTypeTemplate.specialize(specializations);
		imageVariables += imageVariablesTemplate.specialize(specializations);
		imageLoad += imageLoadTemplate.specialize(specializations);
	}

	specializations["image_types"] = imageTypes;
	specializations["image_uniforms"] = imageUniformTypes;
	specializations["image_variables"] = imageVariables;
	specializations["image_load"] = imageLoad;

	// Specialize the whole shader and add it to the program collection.
	programCollection.spirvAsmSources.add("comp") << shaderTemplate.specialize(specializations)
		<< vk::SpirVAsmBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, true);
}

TestInstance* ImageExtendOperandTest::createInstance (Context& context) const
{
	return new ImageExtendOperandTestInstance(context, m_texture, m_readFormat, m_writeFormat, m_relaxedPrecision);
}

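// Default test textures: Texture(type, layerSize, numLayers). Cube maps use six
// layers per cube, so the cube array variant below gets 2*6.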
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D,         tcu::IVec3(64,  1, 1), 1),
	Texture(IMAGE_TYPE_1D_ARRAY,   tcu::IVec3(64,  1, 1), 8),
	Texture(IMAGE_TYPE_2D,         tcu::IVec3(64, 64, 1), 1),
	Texture(IMAGE_TYPE_2D_ARRAY,   tcu::IVec3(64, 64, 1), 8),
	Texture(IMAGE_TYPE_3D,         tcu::IVec3(64, 64, 8), 1),
	Texture(IMAGE_TYPE_CUBE,       tcu::IVec3(64, 64, 1), 6),
	Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6),
	Texture(IMAGE_TYPE_BUFFER,     tcu::IVec3(64,  1, 1), 1),
};

const Texture& getTestTexture (const ImageType imageType)
{
	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
		if (s_textures[textureNdx].type() == imageType)
			return s_textures[textureNdx];

	DE_FATAL("Internal error");
	return s_textures[0];
}

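// Formats exercised by the store and load/store cases below; the list covers float,
// unsigned/signed integer, and normalized variants with one to four components.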
static const VkFormat s_formats[] =
{
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R32_UINT,

	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_R32_SINT,

	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_B8G8R8A8_UNORM,
	VK_FORMAT_B8G8R8A8_UINT,

	VK_FORMAT_R8G8B8A8_SNORM,

	VK_FORMAT_B10G11R11_UFLOAT_PACK32,

	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R16G16_SFLOAT,
	VK_FORMAT_R16_SFLOAT,

	VK_FORMAT_A2B10G10R10_UINT_PACK32,
	VK_FORMAT_R32G32_UINT,
	VK_FORMAT_R16G16_UINT,
	VK_FORMAT_R16_UINT,
	VK_FORMAT_R8G8_UINT,
	VK_FORMAT_R8_UINT,

	VK_FORMAT_R32G32_SINT,
	VK_FORMAT_R16G16_SINT,
	VK_FORMAT_R16_SINT,
	VK_FORMAT_R8G8_SINT,
	VK_FORMAT_R8_SINT,

	VK_FORMAT_A2B10G10R10_UNORM_PACK32,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16_UNORM,
	VK_FORMAT_R16_UNORM,
	VK_FORMAT_R8G8_UNORM,
	VK_FORMAT_R8_UNORM,

	VK_FORMAT_R16G16_SNORM,
	VK_FORMAT_R16_SNORM,
	VK_FORMAT_R8G8_SNORM,
	VK_FORMAT_R8_SNORM
};

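// Three-component (non-packed) formats have no SPIR-V image format equivalent; they are
// exercised only as uniform texel buffers in the without_format load/store group below.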
static const VkFormat s_formatsThreeComponent[] =
{
	VK_FORMAT_R8G8B8_UINT,
	VK_FORMAT_R8G8B8_SINT,
	VK_FORMAT_R8G8B8_UNORM,
	VK_FORMAT_R8G8B8_SNORM,
	VK_FORMAT_R16G16B16_UINT,
	VK_FORMAT_R16G16B16_SINT,
	VK_FORMAT_R16G16B16_UNORM,
	VK_FORMAT_R16G16B16_SNORM,
	VK_FORMAT_R16G16B16_SFLOAT,
	VK_FORMAT_R32G32B32_UINT,
	VK_FORMAT_R32G32B32_SINT,
	VK_FORMAT_R32G32B32_SFLOAT,
};

} // anonymous ns

tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for write images"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for write images"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
	{
		const Texture& texture = s_textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		const bool isLayered = (texture.numLayers() > 1);

		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
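			// Only formats with a SPIR-V equivalent can declare a format layout qualifier
			// in the shader; the without_format variant is added for every format.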
			const bool hasSpirvFmt = hasSpirvFormat(s_formats[formatNdx]);

			if (hasSpirvFmt)
				groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
			groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], 0));

			if (isLayered && hasSpirvFmt)
				groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
					texture, s_formats[formatNdx],
					StoreTest::FLAG_SINGLE_LAYER_BIND | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));

			if (texture.type() == IMAGE_TYPE_BUFFER)
			{
				if (hasSpirvFmt)
					groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
				groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN));
			}
		}

		testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
		testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
	}

	testGroup->addChild(testGroupWithFormat.release());
	testGroup->addChild(testGroupWithoutFormat.release());

	return testGroup.release();
}

tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
	{
		const Texture& texture = s_textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		const bool isLayered = (texture.numLayers() > 1);

		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
			// These tests always require a SPIR-V format for the write image, even if the read
			// image is being used without a format.
			if (!hasSpirvFormat(s_formats[formatNdx]))
				continue;

			groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx]));
			groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0));

			if (isLayered)
				groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
					texture, s_formats[formatNdx], s_formats[formatNdx],
					LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
			if (texture.type() == IMAGE_TYPE_BUFFER)
			{
				groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
				groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
				groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN));
				groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
			}
		}

		if (texture.type() == IMAGE_TYPE_BUFFER)
		{
			for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formatsThreeComponent); ++formatNdx)
			{
				groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
				groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_minalign_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
			}
		}

		testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
		testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
	}

	testGroup->addChild(testGroupWithFormat.release());
	testGroup->addChild(testGroupWithoutFormat.release());

	return testGroup.release();
}

tcu::TestCaseGroup* createImageLoadStoreLodAMDTests (tcu::TestContext& testCtx)
{
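	// Each texture is created with six mip levels (the final constructor argument here)
	// so the LOD-taking variants of imageLoad()/imageStore() have real levels to address.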
	static const Texture textures[] =
	{
		Texture(IMAGE_TYPE_1D_ARRAY,   tcu::IVec3(64,  1, 1), 8,   1, 6),
		Texture(IMAGE_TYPE_1D,         tcu::IVec3(64,  1, 1), 1,   1, 6),
		Texture(IMAGE_TYPE_2D,         tcu::IVec3(64, 64, 1), 1,   1, 6),
		Texture(IMAGE_TYPE_2D_ARRAY,   tcu::IVec3(64, 64, 1), 8,   1, 6),
		Texture(IMAGE_TYPE_3D,         tcu::IVec3(64, 64, 8), 1,   1, 6),
		Texture(IMAGE_TYPE_CUBE,       tcu::IVec3(64, 64, 1), 6,   1, 6),
		Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6, 1, 6),
	};

	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_lod", "Cases with imageLoad() followed by imageStore()"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
	de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(textures); ++textureNdx)
	{
		const Texture& texture = textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
		const bool isLayered = (texture.numLayers() > 1);

		if (texture.type() == IMAGE_TYPE_BUFFER)
			continue;

		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
			// These tests always require a SPIR-V format for the write image, even if the read
			// image is being used without a format.
			if (!hasSpirvFormat(s_formats[formatNdx]))
				continue;

			groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER, DE_TRUE));
			groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0, DE_TRUE));

			if (isLayered)
				groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
					texture, s_formats[formatNdx], s_formats[formatNdx],
					LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER, DE_TRUE));
		}

		testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
		testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
	}

	testGroup->addChild(testGroupWithFormat.release());
	testGroup->addChild(testGroupWithoutFormat.release());

	return testGroup.release();
}

tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));

	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
	{
		const Texture& texture = s_textures[textureNdx];
		de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));

		for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
		{
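			// The shader-declared format must have a SPIR-V equivalent; it is then paired
			// with every distinct, compatible view format.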
			if (!hasSpirvFormat(s_formats[formatNdx]))
				continue;

			const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
			if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
				groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
		}
		testGroup->addChild(groupByImageViewType.release());
	}

	return testGroup.release();
}

de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
{
	const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
	const Texture& texture = getTestTexture(imageType);
	return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
}

namespace
{

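// RelaxedPrecision is only meaningful when every channel of the format fits in 16 bits
// or fewer, so wider formats are skipped for the relaxed-precision variants.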
bool relaxedOK (VkFormat format)
{
	tcu::IVec4 bitDepth = tcu::getTextureFormatBitDepth(mapVkFormat(format));
	int maxBitDepth = deMax32(deMax32(bitDepth[0], bitDepth[1]), deMax32(bitDepth[2], bitDepth[3]));
	return maxBitDepth <= 16;
}

// Get a format used for reading or writing in extend operand tests. These formats can
// represent the shader sampled type, so results from read or write operations can be verified.
VkFormat getShaderExtensionOperandFormat (bool isSigned, bool is64Bit)
{
	const VkFormat formats[] =
	{
		VK_FORMAT_R32G32B32A32_UINT,
		VK_FORMAT_R32G32B32A32_SINT,
		VK_FORMAT_R64_UINT,
		VK_FORMAT_R64_SINT,
	};
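	// Indexed as 2*is64Bit + isSigned: 32-bit unsigned, 32-bit signed, 64-bit unsigned, 64-bit signed.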
	return formats[2u * (is64Bit ? 1u : 0u) + (isSigned ? 1u : 0u)];
}

// INT or UINT format?
bool isIntegralFormat (VkFormat format)
{
	return (isIntFormat(format) || isUintFormat(format));
}

// Return the list of formats used for the extend operand tests (SignExtend/ZeroExtend).
std::vector<VkFormat> getExtensionOperandFormatList (void)
{
	std::vector<VkFormat> formatList;

	for (auto format : s_formats)
	{
		if (isIntegralFormat(format))
			formatList.push_back(format);
	}

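	// 64-bit integer formats are not part of s_formats, so append them explicitly.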
	formatList.push_back(VK_FORMAT_R64_SINT);
	formatList.push_back(VK_FORMAT_R64_UINT);

	return formatList;
}

} // anonymous

tcu::TestCaseGroup* createImageExtendOperandsTests (tcu::TestContext& testCtx)
{
	using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;

	GroupPtr testGroup(new tcu::TestCaseGroup(testCtx, "extend_operands_spirv1p4", "Cases with SignExtend and ZeroExtend"));

	const struct
	{
		ExtendTestType testType;
		const char* name;
	} testTypes[] =
	{
		{ ExtendTestType::READ,  "read"  },
		{ ExtendTestType::WRITE, "write" },
	};

	const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);
	const auto formatList = getExtensionOperandFormatList();

	for (const auto format : formatList)
	{
		const auto isInt = isIntFormat(format);
		const auto isUint = isUintFormat(format);
		const auto use64Bits = is64BitIntegerFormat(format);

		DE_ASSERT(isInt || isUint);

		GroupPtr formatGroup (new tcu::TestCaseGroup(testCtx, getFormatShortString(format).c_str(), ""));

		for (const auto& testType : testTypes)
		{
			GroupPtr testTypeGroup (new tcu::TestCaseGroup(testCtx, testType.name, ""));

			for (int match = 0; match < 2; ++match)
			{
				const bool mismatched = (match == 1);
				const char* matchGroupName = (mismatched ? "mismatched_sign" : "matched_sign");

				// SPIR-V does not allow this kind of sampled type override.
				if (mismatched && isUint)
					continue;

				GroupPtr matchGroup (new tcu::TestCaseGroup(testCtx, matchGroupName, ""));

				for (int prec = 0; prec < 2; prec++)
				{
					const bool relaxedPrecision = (prec != 0);

					const char* precisionName = (relaxedPrecision ? "relaxed_precision" : "normal_precision");
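					// In matched-sign cases the second image keeps this format's signedness;
					// in mismatched cases (signed formats only, see above) it flips to unsigned.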
					const auto signedOther = ((isInt && !mismatched) || (isUint && mismatched));
					const auto otherFormat = getShaderExtensionOperandFormat(signedOther, use64Bits);
					const auto readFormat = (testType.testType == ExtendTestType::READ ? format : otherFormat);
					const auto writeFormat = (testType.testType == ExtendTestType::WRITE ? format : otherFormat);

					if (relaxedPrecision && !relaxedOK(readFormat))
						continue;

					if (!hasSpirvFormat(readFormat) || !hasSpirvFormat(writeFormat))
						continue;

					matchGroup->addChild(new ImageExtendOperandTest(testCtx, precisionName, texture, readFormat, writeFormat, mismatched, relaxedPrecision, testType.testType));
				}

				testTypeGroup->addChild(matchGroup.release());
			}

			formatGroup->addChild(testTypeGroup.release());
		}

		testGroup->addChild(formatGroup.release());
	}

	return testGroup.release();
}

} // image
} // vkt
