/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Multisampled image load/store Tests
 *//*--------------------------------------------------------------------*/

#include "vktImageMultisampleLoadStoreTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktImageTestsUtil.hpp"
#include "vktImageLoadStoreUtil.hpp"
#include "vktImageTexture.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBufferWithMemory.hpp"

#include "deUniquePtr.hpp"

#include "tcuTextureUtil.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <vector>

namespace vkt
{
namespace image
{
namespace
{
using namespace vk;
using de::MovePtr;
using de::UniquePtr;
using tcu::IVec3;

static const VkFormat CHECKSUM_IMAGE_FORMAT = VK_FORMAT_R32_SINT;

struct CaseDef
{
    Texture texture;
    VkFormat format;
    VkSampleCountFlagBits numSamples;
    bool singleLayerBind;
};

// Multisampled storage image test.
//
// Pass 1: Write a slightly different color pattern per-sample to the whole image.
// Pass 2: Read samples of the same image and check if color values are in the expected range.
//         Write back results as a checksum image and verify them on the host.
//         Each checksum image pixel should contain an integer equal to the number of samples.

void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
{
    const int dimension = (caseDef.singleLayerBind ? caseDef.texture.layerDimension() : caseDef.texture.dimension());
    const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

    const ImageType usedImageType = (caseDef.singleLayerBind ? getImageTypeForSingleLayer(caseDef.texture.type()) : caseDef.texture.type());
    const bool isAlphaOnly = isAlphaOnlyFormat(caseDef.format);
    const std::string formatQualifierStr = (isAlphaOnly ? "" : ", " + getShaderImageFormatQualifier(mapVkFormat(caseDef.format)));
    const std::string msImageTypeStr = getShaderImageType(mapVkFormat(caseDef.format), usedImageType, (caseDef.texture.numSamples() > 1));

    const std::string xMax = de::toString(caseDef.texture.size().x() - 1);
    const std::string yMax = de::toString(caseDef.texture.size().y() - 1);
    const std::string signednessPrefix = isUintFormat(caseDef.format) ? "u" : isIntFormat(caseDef.format) ? "i" : "";
    const std::string gvec4Expr = signednessPrefix + "vec4";
    const int numColorComponents = (isAlphaOnly ? 4 : tcu::getNumUsedChannels(mapVkFormat(caseDef.format).order)); // Force 4 for A8_UNORM as per the spec.

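    // Scale and bias keep the stored color values within the range the format can represent (relevant for normalized and low-precision formats).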
    const float storeColorScale = computeStoreColorScale(caseDef.format, caseDef.texture.size());
    const float storeColorBias = computeStoreColorBias(caseDef.format);
    DE_ASSERT(colorScaleAndBiasAreValid(caseDef.format, storeColorScale, storeColorBias));

    const std::string colorScaleExpr = (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
        + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

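    // Per-channel color expressions: XOR of the texel coordinates (and, for the first channel, the sample index) gives a distinct pattern per texel and per sample.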
    const std::string red = "gx^gy^gz^(sampleNdx >> 5)^(sampleNdx & 31)"; // sampleNdx is "split" to keep the value in the [0, 31] range for the numSamples = 64 case
    const std::string green = "(" + xMax + "-gx)^gy^gz";
    const std::string blue = "gx^(" + yMax + "-gy)^gz";
    const std::string alpha = "(" + xMax + "-gx)^(" + yMax + "-gy)^gz";
    const std::string colorExpr =
        gvec4Expr + "("
        + ((isAlphaOnly ? alpha : red) + ", ") // For A8_UNORM we switch the alpha and red values.
        + (numColorComponents > 1 ? green + ", " : "0, ")
        + (numColorComponents > 2 ? blue + ", " : "0, ")
        + (numColorComponents > 3 ? (isAlphaOnly ? red : alpha) : "1")
        + ")" + colorScaleExpr;

    const std::string expectedColorExpr =
        gvec4Expr + "("
        + ((isAlphaOnly ? "0" : red) + ", ") // A8_UNORM should result in RGB (0, 0, 0).
        + ((numColorComponents > 1 && !isAlphaOnly) ? green + ", " : "0, ")
        + ((numColorComponents > 2 && !isAlphaOnly) ? blue + ", " : "0, ")
        + ((numColorComponents > 3) ? (isAlphaOnly ? red : alpha) : "1")
        + ")" + colorScaleExpr;

    // Store shader
    {
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << (isAlphaOnly ? "#extension GL_EXT_shader_image_load_formatted : require\n" : "")
            << "\n"
            << "layout(local_size_x = 1) in;\n"
            << "layout(set = 0, binding = 1" << formatQualifierStr << ") writeonly uniform " << msImageTypeStr << " u_msImage;\n";

        if (caseDef.singleLayerBind)
            src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
                << "    int u_layerNdx;\n"
                << "};\n";

        src << "\n"
            << "void main (void)\n"
            << "{\n"
            << "    int gx = int(gl_GlobalInvocationID.x);\n"
            << "    int gy = int(gl_GlobalInvocationID.y);\n"
            << "    int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
            << "\n"
            << "    for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() << "; ++sampleNdx) {\n"
            << "        imageStore(u_msImage, " << texelCoordStr << ", sampleNdx, " << colorExpr << ");\n"
            << "    }\n"
            << "}\n";

        programCollection.glslSources.add("comp_store") << glu::ComputeSource(src.str());
    }

    // Load shader
    {
        const tcu::TextureFormat checksumFormat = mapVkFormat(CHECKSUM_IMAGE_FORMAT);
        const std::string checksumImageTypeStr = getShaderImageType(checksumFormat, usedImageType);
        const bool useExactCompare = isIntegerFormat(caseDef.format);

        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << (isAlphaOnly ? "#extension GL_EXT_shader_image_load_formatted : require\n" : "")
            << "\n"
            << "layout(local_size_x = 1) in;\n"
            << "layout(set = 0, binding = 1" << formatQualifierStr << ") readonly uniform " << msImageTypeStr << " u_msImage;\n"
            << "layout(set = 0, binding = 2, " << getShaderImageFormatQualifier(checksumFormat) << ") writeonly uniform " << checksumImageTypeStr << " u_checksumImage;\n";

        if (caseDef.singleLayerBind)
            src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
                << "    int u_layerNdx;\n"
                << "};\n";

        src << "\n"
            << "void main (void)\n"
            << "{\n"
            << "    int gx = int(gl_GlobalInvocationID.x);\n"
            << "    int gy = int(gl_GlobalInvocationID.y);\n"
            << "    int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
            << "\n"
            << "    int checksum = 0;\n"
            << "    for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() << "; ++sampleNdx) {\n"
            << "        " << gvec4Expr << " color = imageLoad(u_msImage, " << texelCoordStr << ", sampleNdx);\n";

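        // Integer formats are compared exactly; other formats allow a small per-channel tolerance.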
        if (useExactCompare)
            src << "        if (color == " << expectedColorExpr << ")\n"
                << "            ++checksum;\n";
        else
            src << "        " << gvec4Expr << " diff = abs(abs(color) - abs(" << expectedColorExpr << "));\n"
                << "        if (all(lessThan(diff, " << gvec4Expr << "(0.02))))\n"
                << "            ++checksum;\n";

        src << "    }\n"
            << "\n"
            << "    imageStore(u_checksumImage, " << texelCoordStr << ", ivec4(checksum));\n"
            << "}\n";

        programCollection.glslSources.add("comp_load") << glu::ComputeSource(src.str());
    }
}

void checkSupport (Context& context, const CaseDef caseDef)
{
    context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_MULTISAMPLE);

#ifndef CTS_USES_VULKANSC
    if (caseDef.format == VK_FORMAT_A8_UNORM_KHR)
        context.requireDeviceFunctionality("VK_KHR_maintenance5");
#endif // CTS_USES_VULKANSC

    VkImageFormatProperties imageFormatProperties;
    const auto& vki = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();
    const VkResult imageFormatResult = vki.getPhysicalDeviceImageFormatProperties(
        physicalDevice, caseDef.format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_STORAGE_BIT, (VkImageCreateFlags)0, &imageFormatProperties);

    if (imageFormatResult == VK_ERROR_FORMAT_NOT_SUPPORTED)
        TCU_THROW(NotSupportedError, "Format is not supported");

    if ((imageFormatProperties.sampleCounts & caseDef.numSamples) != caseDef.numSamples)
        TCU_THROW(NotSupportedError, "Requested sample count is not supported");

#ifndef CTS_USES_VULKANSC
    if (caseDef.format == VK_FORMAT_A8_UNORM_KHR)
    {
        const auto formatProperties = context.getFormatProperties(caseDef.format);

        if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT) == 0u)
            TCU_THROW(NotSupportedError, "Format does not support storage reads without format");

        if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT) == 0u)
            TCU_THROW(NotSupportedError, "Format does not support storage writes without format");
    }
#endif // CTS_USES_VULKANSC
}

//! Helper function to deal with per-layer resources.
void insertImageViews (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkFormat format, const VkImage image, std::vector<SharedVkImageView>* const pOutImageViews)
{
    if (caseDef.singleLayerBind)
    {
        pOutImageViews->clear();
        pOutImageViews->resize(caseDef.texture.numLayers());
        for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
        {
            (*pOutImageViews)[layerNdx] = makeVkSharedPtr(makeImageView(
                vk, device, image, mapImageViewType(getImageTypeForSingleLayer(caseDef.texture.type())), format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
        }
    }
    else // bind all layers at once
    {
        pOutImageViews->clear();
        pOutImageViews->resize(1);
        (*pOutImageViews)[0] = makeVkSharedPtr(makeImageView(
            vk, device, image, mapImageViewType(caseDef.texture.type()), format,
            makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers())));
    }
}

//! Helper function to deal with per-layer resources.
void insertDescriptorSets (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkDescriptorPool descriptorPool, const VkDescriptorSetLayout descriptorSetLayout, std::vector<SharedVkDescriptorSet>* const pOutDescriptorSets)
{
    if (caseDef.singleLayerBind)
    {
        pOutDescriptorSets->clear();
        pOutDescriptorSets->resize(caseDef.texture.numLayers());
        for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
            (*pOutDescriptorSets)[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
    }
    else // bind all layers at once
    {
        pOutDescriptorSets->clear();
        pOutDescriptorSets->resize(1);
        (*pOutDescriptorSets)[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
    }
}

tcu::TestStatus test (Context& context, const CaseDef caseDef)
{
    const InstanceInterface& vki = context.getInstanceInterface();
    const VkPhysicalDevice physDevice = context.getPhysicalDevice();
    const DeviceInterface& vk = context.getDeviceInterface();
    const VkDevice device = context.getDevice();
    const VkQueue queue = context.getUniversalQueue();
    const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
    Allocator& allocator = context.getDefaultAllocator();

    // Images

    const UniquePtr<Image> msImage(new Image(
        vk, device, allocator, makeImageCreateInfo(caseDef.texture, caseDef.format, VK_IMAGE_USAGE_STORAGE_BIT, 0u), MemoryRequirement::Any));

    const UniquePtr<Image> checksumImage(new Image(
        vk, device, allocator,
        makeImageCreateInfo(Texture(caseDef.texture, 1), CHECKSUM_IMAGE_FORMAT, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
        MemoryRequirement::Any));

    // Buffer used to pass constants to the shader.

    const int numLayers = caseDef.texture.numLayers();
    const VkDeviceSize bufferChunkSize = getOptimalUniformBufferChunkSize(vki, physDevice, sizeof(deInt32));
    const VkDeviceSize constantsBufferSizeBytes = numLayers * bufferChunkSize;
    UniquePtr<BufferWithMemory> constantsBuffer (new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
        MemoryRequirement::HostVisible));

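    // Fill the constants buffer: chunk N holds the layer index N, which the shaders read in single-layer bind mode.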
    {
        const Allocation& alloc = constantsBuffer->getAllocation();
        deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

        deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        {
            deInt32* const valuePtr = reinterpret_cast<deInt32*>(basePtr + layerNdx * bufferChunkSize);
            *valuePtr = layerNdx;
        }

        flushAlloc(vk, device, alloc);
    }

    const VkDeviceSize resultBufferSizeBytes = getImageSizeBytes(caseDef.texture.size(), CHECKSUM_IMAGE_FORMAT);
    UniquePtr<BufferWithMemory> resultBuffer (new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(resultBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
        MemoryRequirement::HostVisible));

    {
        const Allocation& alloc = resultBuffer->getAllocation();
        deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(resultBufferSizeBytes));
        flushAlloc(vk, device, alloc);
    }

    // Descriptors

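    // Binding 0: constants UBO (layer index), binding 1: multisampled storage image, binding 2: checksum storage image (written only in the load pass).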
    Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
        .build(vk, device));

    Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers));

    std::vector<SharedVkDescriptorSet> allDescriptorSets;
    std::vector<SharedVkImageView> allMultisampledImageViews;
    std::vector<SharedVkImageView> allChecksumImageViews;

    insertDescriptorSets(vk, device, caseDef, *descriptorPool, *descriptorSetLayout, &allDescriptorSets);
    insertImageViews (vk, device, caseDef, caseDef.format, **msImage, &allMultisampledImageViews);
    insertImageViews (vk, device, caseDef, CHECKSUM_IMAGE_FORMAT, **checksumImage, &allChecksumImageViews);

    // Prepare commands

    const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
    const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    const tcu::IVec3 workSize = (caseDef.singleLayerBind ? caseDef.texture.layerSize() : caseDef.texture.size());
    const int loopNumLayers = (caseDef.singleLayerBind ? numLayers : 1);
    const VkImageSubresourceRange subresourceAllLayers = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers());

    // Pass 1: Write MS image
    {
        const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_store"), 0));
        const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

        beginCommandBuffer(vk, *cmdBuffer);
        vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

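        // Both images start in UNDEFINED layout; transition them to GENERAL before the first shader writes.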
        {
            const VkImageMemoryBarrier barriers[] =
            {
                makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
                makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **checksumImage, subresourceAllLayers),
            };

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
                0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
        }

        for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
        {
            const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
            const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
            const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);

            DescriptorSetUpdateBuilder()
                .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
                .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
                .update(vk, device);

            vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
            vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
        }

        endCommandBuffer(vk, *cmdBuffer);
        submitCommandsAndWait(vk, device, queue, *cmdBuffer);
        context.resetCommandPoolForVKSC(device, *cmdPool);
    }

    // Pass 2: "Resolve" MS image in compute shader
    {
        const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_load"), 0));
        const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

        beginCommandBuffer(vk, *cmdBuffer);
        vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

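        // Make the pass-1 shader writes to the multisampled image visible to the shader reads below (layout stays GENERAL).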
        {
            const VkImageMemoryBarrier barriers[] =
            {
                makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
            };

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
                0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
        }

        for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
        {
            const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
            const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
            const VkDescriptorImageInfo descriptorChecksumImageInfo = makeDescriptorImageInfo(DE_NULL, **allChecksumImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
            const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);

            DescriptorSetUpdateBuilder()
                .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
                .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
                .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorChecksumImageInfo)
                .update(vk, device);

            vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
            vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
        }

        endCommandBuffer(vk, *cmdBuffer);
        submitCommandsAndWait(vk, device, queue, *cmdBuffer);
        context.resetCommandPoolForVKSC(device, *cmdPool);
    }

    // Retrieve result
    {
        beginCommandBuffer(vk, *cmdBuffer);

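        // Transition the checksum image for transfer, copy it into the host-visible result buffer, and make the copy visible to the host.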
        {
            const VkImageMemoryBarrier barriers[] =
            {
                makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **checksumImage, subresourceAllLayers),
            };
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
                0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
        }
        {
            const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(caseDef.texture.layerSize()), caseDef.texture.numLayers());
            vk.cmdCopyImageToBuffer(*cmdBuffer, **checksumImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **resultBuffer, 1u, &copyRegion);
        }
        {
            const VkBufferMemoryBarrier barriers[] =
            {
                makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **resultBuffer, 0ull, resultBufferSizeBytes),
            };
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, 0u, DE_NULL);
        }

        endCommandBuffer(vk, *cmdBuffer);
        submitCommandsAndWait(vk, device, queue, *cmdBuffer);
    }

    // Verify
    {
        const Allocation& alloc = resultBuffer->getAllocation();
        invalidateAlloc(vk, device, alloc);

        const IVec3 imageSize = caseDef.texture.size();
        const deInt32* pDataPtr = static_cast<deInt32*>(alloc.getHostPtr());
        const deInt32 expectedChecksum = caseDef.texture.numSamples();

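        // Every checksum texel must equal the sample count, i.e. every sample of that texel matched the expected color.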
        for (int layer = 0; layer < imageSize.z(); ++layer)
        for (int y = 0; y < imageSize.y(); ++y)
        for (int x = 0; x < imageSize.x(); ++x)
        {
            if (*pDataPtr != expectedChecksum)
            {
                context.getTestContext().getLog()
                    << tcu::TestLog::Message << "Some sample colors were incorrect at (x, y, layer) = (" << x << ", " << y << ", " << layer << ")" << tcu::TestLog::EndMessage
                    << tcu::TestLog::Message << "Checksum value is " << *pDataPtr << " but expected " << expectedChecksum << tcu::TestLog::EndMessage;

                return tcu::TestStatus::fail("Some sample colors were incorrect");
            }
            ++pDataPtr;
        }

        return tcu::TestStatus::pass("OK");
    }
}

} // anonymous ns

tcu::TestCaseGroup* createImageMultisampleLoadStoreTests (tcu::TestContext& testCtx)
{
    const Texture textures[] =
    {
        // \note Shader code is tweaked to work with image size of 32, take a look if this needs to be modified.
        Texture(IMAGE_TYPE_2D, tcu::IVec3(32, 32, 1), 1),
        Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(32, 32, 1), 4),
    };

    static const VkFormat formats[] =
    {
        VK_FORMAT_R32G32B32A32_SFLOAT,
        VK_FORMAT_R16G16B16A16_SFLOAT,
        VK_FORMAT_R32_SFLOAT,

        VK_FORMAT_R32G32B32A32_UINT,
        VK_FORMAT_R16G16B16A16_UINT,
        VK_FORMAT_R8G8B8A8_UINT,
        VK_FORMAT_R32_UINT,

        VK_FORMAT_R32G32B32A32_SINT,
        VK_FORMAT_R16G16B16A16_SINT,
        VK_FORMAT_R8G8B8A8_SINT,
        VK_FORMAT_R32_SINT,

        VK_FORMAT_R8G8B8A8_UNORM,

        VK_FORMAT_R8G8B8A8_SNORM,

#ifndef CTS_USES_VULKANSC
        VK_FORMAT_A8_UNORM_KHR,
#endif // CTS_USES_VULKANSC
    };

    static const VkSampleCountFlagBits samples[] =
    {
        VK_SAMPLE_COUNT_2_BIT,
        VK_SAMPLE_COUNT_4_BIT,
        VK_SAMPLE_COUNT_8_BIT,
        VK_SAMPLE_COUNT_16_BIT,
        VK_SAMPLE_COUNT_32_BIT,
        VK_SAMPLE_COUNT_64_BIT,
    };

    MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_multisample"));

    for (int baseTextureNdx = 0; baseTextureNdx < DE_LENGTH_OF_ARRAY(textures); ++baseTextureNdx)
    {
        const Texture& baseTexture = textures[baseTextureNdx];
        MovePtr<tcu::TestCaseGroup> imageViewGroup (new tcu::TestCaseGroup(testCtx, getImageTypeName(baseTexture.type()).c_str()));
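        // Non-arrayed textures are tested only in whole-image bind mode; arrayed textures add a per-layer ("_single_layer") variant.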
        const int numLayerBindModes = (baseTexture.numLayers() == 1 ? 1 : 2);

        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
        for (int layerBindMode = 0; layerBindMode < numLayerBindModes; ++layerBindMode)
        {
            const bool singleLayerBind = (layerBindMode != 0);
            const std::string formatGroupName = getFormatShortString(formats[formatNdx]) + (singleLayerBind ? "_single_layer" : "");
            MovePtr<tcu::TestCaseGroup> formatGroup (new tcu::TestCaseGroup(testCtx, formatGroupName.c_str()));

            for (int samplesNdx = 0; samplesNdx < DE_LENGTH_OF_ARRAY(samples); ++samplesNdx)
            {
                const std::string samplesCaseName = "samples_" + de::toString(samples[samplesNdx]);

                const CaseDef caseDef =
                {
                    Texture(baseTexture, samples[samplesNdx]),
                    formats[formatNdx],
                    samples[samplesNdx],
                    singleLayerBind,
                };

                addFunctionCaseWithPrograms(formatGroup.get(), samplesCaseName, checkSupport, initPrograms, test, caseDef);
            }
            imageViewGroup->addChild(formatGroup.release());
        }
        testGroup->addChild(imageViewGroup.release());
    }

    return testGroup.release();
}

} // image
} // vkt