1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2024 The Khronos Group Inc.
6 * Copyright (c) 2024 Valve Corporation.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Tests for VK_EXT_legacy_vertex_attributes
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktPipelineLegacyAttrTests.hpp"
26 #include "vkBarrierUtil.hpp"
27 #include "vkBuilderUtil.hpp"
28 #include "vkCmdUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkObjUtil.hpp"
31
32 #include "tcuImageCompare.hpp"
33 #include "tcuTextureUtil.hpp"
34
35 #include "deRandom.hpp"
36
37 #include <functional>
38 #include <vector>
39 #include <string>
40 #include <sstream>
41 #include <memory>
42 #include <algorithm>
43 #include <iterator>
44 #include <set>
45
46 namespace vkt
47 {
48 namespace pipeline
49 {
50
51 namespace
52 {
53
54 using namespace vk;
55
56 constexpr uint32_t k32BitsInBytes = 4u;
57
58 enum class ShaderFormat
59 {
60 FLOAT = 0,
61 SIGNED_INT = 1,
62 UNSIGNED_INT = 2,
63 INVALID = 3, // For assertions and default initializations.
64 };
65
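// Describes one vertex binding under test: the attribute format, how the shader declares the attribute, the binding
// stride and attribute offset (both of which may be smaller than the format size, producing overlapping or unaligned
// reads), and an extra offset applied to the backing memory allocation.
// For illustration (hypothetical values): BindingParams(VK_FORMAT_R8G8_UNORM, ShaderFormat::UNSIGNED_INT, 1u, 1u, 1u)
// is an r8g8_unorm attribute declared as uvec2 in the shaders, read with a 1-byte stride, a 1-byte attribute offset
// and a 1-byte memory offset.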
66 struct BindingParams
67 {
68 const VkFormat format;
69 const ShaderFormat shaderFormat;
70 const uint32_t bindingStride;
71 const uint32_t attributeOffset;
72 const uint32_t memoryOffset;
73
BindingParams(VkFormat format_, ShaderFormat shaderFormat_, uint32_t bindingStride_, uint32_t attributeOffset_,
uint32_t memoryOffset_)
76 : format(format_)
77 , shaderFormat(shaderFormat_)
78 , bindingStride(bindingStride_)
79 , attributeOffset(attributeOffset_)
80 , memoryOffset(memoryOffset_)
81 {
82 }
83
uint32_t getRandomSeed(void) const
85 {
86 // shaderFormat: 2 bits
87 // bindingStride: 5 bits
88 // attributeOffset: 5 bits
89 // memoryOffset: 5 bits
return ((format << 17) | (static_cast<uint32_t>(shaderFormat) << 15) | (bindingStride << 10) |
(attributeOffset << 5) | memoryOffset);
91 }
92
std::string getShaderType(void) const
94 {
95 const auto tcuFormat = mapVkFormat(format);
96 const auto channelCount = tcu::getNumUsedChannels(tcuFormat.order);
97
98 if (channelCount == 1)
99 {
100 std::string scalarType;
101
102 if (shaderFormat == ShaderFormat::SIGNED_INT)
103 scalarType = "int";
104 else if (shaderFormat == ShaderFormat::UNSIGNED_INT)
105 scalarType = "uint";
106 else if (shaderFormat == ShaderFormat::FLOAT)
107 scalarType = "float";
108 else
109 DE_ASSERT(false);
110
111 return scalarType;
112 }
113
114 std::string prefix;
115 if (shaderFormat == ShaderFormat::SIGNED_INT)
116 prefix = "i";
117 else if (shaderFormat == ShaderFormat::UNSIGNED_INT)
118 prefix = "u";
119
120 return prefix + "vec" + std::to_string(channelCount);
121 }
122
bool useScalarLayout(void) const
124 {
125 const auto tcuFormat = mapVkFormat(format);
126 const auto channelCount = tcu::getNumUsedChannels(tcuFormat.order);
127 const auto useScalarLayout = (channelCount == 3); // scalar allows us to avoid the padding bytes in vec3
128
129 return useScalarLayout;
130 }
131 };
132
133 using BindingParamsVec = std::vector<BindingParams>;
134
135 struct LegacyVertexAttributesParams
136 {
137 const PipelineConstructionType constructionType;
138 BindingParamsVec bindings;
139
LegacyVertexAttributesParams(PipelineConstructionType constructionType_, BindingParamsVec bindings_)
141 : constructionType(constructionType_)
142 , bindings()
143 {
144 bindings.swap(bindings_);
145 }
146
uint32_t getRandomSeed(void) const
148 {
149 DE_ASSERT(!bindings.empty());
150
151 uint32_t seed = bindings.at(0u).getRandomSeed();
152 for (size_t i = 1; i < bindings.size(); ++i)
153 seed = (seed ^ bindings.at(i).getRandomSeed());
154
155 return (0x80000000u | seed);
156 }
157
bool useScalarLayout(void) const
159 {
160 return std::any_of(begin(bindings), end(bindings), [](const BindingParams &b) { return b.useScalarLayout(); });
161 }
162 };
163
164 using BytesVector = std::vector<uint8_t>;
165
// Reinterprets an input vector, expanding its components to 32 bits as used in the shader, and returns the expected output data.
BytesVector getOutputData(const BytesVector &inputData, const BindingParams &params, uint32_t numPoints)
168 {
169 const auto tcuFormat = mapVkFormat(params.format);
170 const auto channelClass = tcu::getTextureChannelClass(tcuFormat.type);
171 const auto channelCount = tcu::getNumUsedChannels(tcuFormat.order);
172 const tcu::IVec3 size(static_cast<int>(numPoints), 1, 1);
173 const tcu::IVec3 pitch(static_cast<int>(params.bindingStride), 1, 1);
174
// We use a ConstPixelBufferAccess to easily interpret the input data according to the right format and extract values from
// it as we would from an image. We also take advantage of the pitch parameter, which is seldom used, to take the
// binding stride into account. The pitch is used by the ConstPixelBufferAccess to calculate the memory address of the pixel to
// read. Note the attribute offset is also used to calculate the start of each pixel.
179 tcu::ConstPixelBufferAccess memoryAccess(tcuFormat, size, pitch,
180 de::dataOrNull(inputData) + params.attributeOffset);
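// With this setup, point i is read from de::dataOrNull(inputData) + params.attributeOffset + i * params.bindingStride,
// so strides of 0 or 1 byte make consecutive points read overlapping (and potentially unaligned) bytes, which is
// exactly what VK_EXT_legacy_vertex_attributes allows.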
181
182 tcu::Vec4 floatPixel(0.0f);
183 tcu::IVec4 intPixel(0);
184 tcu::UVec4 uintPixel(0u);
185 uint8_t *pixelData = nullptr;
186
// We will read pixels using one of 3 different memoryAccess methods, storing the result in the corresponding variable above.
188 switch (channelClass)
189 {
190 case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
191 case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
192 case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
193 pixelData = reinterpret_cast<uint8_t *>(&floatPixel);
194 break;
195 case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
196 pixelData = reinterpret_cast<uint8_t *>(&intPixel);
197 break;
198 case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
199 pixelData = reinterpret_cast<uint8_t *>(&uintPixel);
200 break;
201 default:
202 DE_ASSERT(false);
203 break;
204 }
205
206 // Read pixels and store the component bytes (for the used components) in the output data vector.
207 // Note pixel component values in the output data vector are always stored as 32-bit values (float, int or uint).
208 // See the shader for more details.
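// For example, a two-channel format contributes 8 bytes (two 32-bit values) per point: floats for UNORM/SNORM/SFLOAT
// formats, and signed/unsigned integers for pure integer formats.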
209 BytesVector outputData;
210 outputData.reserve(numPoints * channelCount * k32BitsInBytes);
211
212 for (uint32_t i = 0u; i < numPoints; ++i)
213 {
214 switch (channelClass)
215 {
216 case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
217 case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
218 case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
219 floatPixel = memoryAccess.getPixel(static_cast<int>(i), 0);
220 break;
221 case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
222 intPixel = memoryAccess.getPixelInt(static_cast<int>(i), 0);
223 break;
224 case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
225 uintPixel = memoryAccess.getPixelUint(static_cast<int>(i), 0);
226 break;
227 default:
228 DE_ASSERT(false);
229 break;
230 }
231
232 for (int j = 0; j < channelCount; ++j)
233 {
234 for (uint32_t k = 0; k < k32BitsInBytes; ++k)
235 {
236 const uint8_t *bytePtr = pixelData + j * k32BitsInBytes + k;
237 outputData.push_back(*bytePtr);
238 }
239 }
240 }
241
242 return outputData;
243 }
244
BytesVector genInputData(const BindingParams &params, uint32_t numPoints, de::Random &rnd)
246 {
247 DE_ASSERT(numPoints > 0u);
248
249 const auto tcuFormat = mapVkFormat(params.format);
250 const auto channelClass = tcu::getTextureChannelClass(tcuFormat.type);
251 const bool floatsUsed =
252 (channelClass == tcu::TEXTURECHANNELCLASS_FLOATING_POINT || params.shaderFormat == ShaderFormat::FLOAT);
253 const int pixelSizeBytes = tcu::getPixelSize(tcuFormat);
254 const auto totalBytes = params.bindingStride * (numPoints - 1u) + params.attributeOffset + pixelSizeBytes;
255
256 BytesVector inputData;
257 inputData.reserve(totalBytes);
258
259 for (;;)
260 {
261 // Should we regenerate the pseudorandom input data vector?
262 bool badInputData = false;
263
264 inputData.clear();
265 for (uint32_t i = 0; i < totalBytes; ++i)
266 inputData.push_back(rnd.getUint8());
267
268 // Floats: we'd like to avoid infs, zeros, nans and denorms to make sure we get identical values back.
269 if (floatsUsed)
270 {
271 // Iterate over the output raw vector as if it was a float vector.
272 const auto outputData = getOutputData(inputData, params, numPoints);
273 for (size_t i = 0u; i < outputData.size(); i += k32BitsInBytes)
274 {
275 const auto floatPtr = reinterpret_cast<const float *>(&outputData.at(i));
276 tcu::Float32 value(*floatPtr);
277 if (value.isNaN() || value.isInf() || value.isDenorm() || value.isZero())
278 {
279 badInputData = true;
280 break;
281 }
282 }
283 }
284
285 if (badInputData)
286 continue;
287
288 break;
289 }
290
291 return inputData;
292 }
293
294 class LegacyVertexAttributesInstance : public vkt::TestInstance
295 {
296 public:
LegacyVertexAttributesInstance(Context &context, const LegacyVertexAttributesParams &params)
298 : vkt::TestInstance(context)
299 , m_params(params)
300 {
301 }
virtual ~LegacyVertexAttributesInstance(void)
303 {
304 }
305
306 tcu::TestStatus iterate(void) override;
307
308 protected:
309 const LegacyVertexAttributesParams m_params;
310 };
311
312 class LegacyVertexAttributesCase : public vkt::TestCase
313 {
314 public:
LegacyVertexAttributesCase(tcu::TestContext &testCtx, const std::string &name,
const LegacyVertexAttributesParams &params)
317 : vkt::TestCase(testCtx, name)
318 , m_params(params)
319 {
320 }
~LegacyVertexAttributesCase(void)
322 {
323 }
324
325 void checkSupport(Context &context) const override;
326 void initPrograms(vk::SourceCollections &programCollection) const override;
TestInstance *createInstance(Context &context) const override
328 {
329 return new LegacyVertexAttributesInstance(context, m_params);
330 }
331
332 protected:
333 const LegacyVertexAttributesParams m_params;
334 };
335
void LegacyVertexAttributesCase::checkSupport(Context &context) const
337 {
338 const auto ctx = context.getContextCommonData();
339
340 checkPipelineConstructionRequirements(ctx.vki, ctx.physicalDevice, m_params.constructionType);
341 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_FRAGMENT_STORES_AND_ATOMICS);
342 context.requireDeviceFunctionality("VK_EXT_vertex_input_dynamic_state");
343 context.requireDeviceFunctionality("VK_EXT_legacy_vertex_attributes");
344
345 // We want to use the scalar layout for *vec3 because that way we avoid the 4 bytes of padding introduced in the output with the
346 // std430 layout. The reasons to avoid the padding are varied:
347 // 1) Taking the padding into account when generating the expected output data means a bit more code in there, potentially
348 // confusing.
349 // 2) The typical padding bytes used are zeros, but we're making sure zeros are not involved (due to sign preservation concerns)
//    when generating input data (see the checks in genInputData). We'd need to make that check more complicated and
//    confusing.
352 // 3) Scalar is widely supported anyway, so the number of unsupported tests would still be low and they wouldn't be critical.
353 if (m_params.useScalarLayout())
354 context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
355
356 // Format feature support.
357 for (const auto &binding : m_params.bindings)
358 {
359 const auto formatProperties = getPhysicalDeviceFormatProperties(ctx.vki, ctx.physicalDevice, binding.format);
360 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0u)
361 TCU_THROW(NotSupportedError, "Format does not support VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT");
362 }
363 }
364
void LegacyVertexAttributesCase::initPrograms(SourceCollections &dst) const
366 {
367 const auto useScalarLayout = m_params.useScalarLayout();
368 const auto bufferLayout = (useScalarLayout ? "scalar" : "std430");
369
370 std::ostringstream inOutVert;
371 std::ostringstream copyVert;
372 std::ostringstream inSetFrag;
373 std::ostringstream copyFrag;
374
375 for (size_t i = 0; i < m_params.bindings.size(); ++i)
376 {
377 const auto &binding = m_params.bindings.at(i);
378 const auto shaderType = binding.getShaderType();
379
380 inOutVert << "layout (location=" << (i + 1) << ") in " << shaderType << " inData" << i << ";\n"
381 << "layout (location=" << i << ") out flat " << shaderType << " outData" << i << ";\n";
382 copyVert << " outData" << i << " = inData" << i << ";\n";
383 inSetFrag << "layout (location=" << i << ") in flat " << shaderType << " inData" << i << ";\n"
384 << "layout (set=0, binding=" << i << ", " << bufferLayout << ") buffer VerificationBlock" << i
385 << " {\n"
386 << " " << shaderType << " value[];\n"
387 << "} verificationBuffer" << i << ";\n";
388 copyFrag << " verificationBuffer" << i << ".value[index] = inData" << i << ";\n";
389 }
390
391 std::ostringstream vert;
392 vert << "#version 460\n"
393 << "layout (location=0) in vec4 inPos;\n"
394 << inOutVert.str() << "void main (void) {\n"
395 << " gl_Position = inPos;\n"
396 << copyVert.str() << "}\n";
397 dst.glslSources.add("vert") << glu::VertexSource(vert.str());
398
399 std::ostringstream frag;
400 frag << "#version 460\n"
401 << (useScalarLayout ? "#extension GL_EXT_scalar_block_layout : require\n" : "")
402 << "layout (location=0) out vec4 outColor;\n"
403 << inSetFrag.str() << "void main (void) {\n"
404 << " outColor = vec4(0.0, 0.0, 1.0, 1.0);\n"
405 << " const int index = int(gl_FragCoord.x);\n"
406 << copyFrag.str() << "}\n";
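// For reference, with a single binding declared as uvec2 (e.g. VK_FORMAT_R16G16_UINT read as ShaderFormat::UNSIGNED_INT,
// a hypothetical case), the generated sources look roughly like:
//
//   // vert
//   #version 460
//   layout (location=0) in vec4 inPos;
//   layout (location=1) in uvec2 inData0;
//   layout (location=0) out flat uvec2 outData0;
//   void main (void) {
//       gl_Position = inPos;
//       outData0 = inData0;
//   }
//
//   // frag
//   #version 460
//   layout (location=0) out vec4 outColor;
//   layout (location=0) in flat uvec2 inData0;
//   layout (set=0, binding=0, std430) buffer VerificationBlock0 {
//       uvec2 value[];
//   } verificationBuffer0;
//   void main (void) {
//       outColor = vec4(0.0, 0.0, 1.0, 1.0);
//       const int index = int(gl_FragCoord.x);
//       verificationBuffer0.value[index] = inData0;
//   }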
407
408 const auto allowScalars = static_cast<uint32_t>(ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
409 const auto buildOptionFlags = (useScalarLayout ? allowScalars : 0u);
410 const ShaderBuildOptions buildOptions(dst.usedVulkanVersion, SPIRV_VERSION_1_0, buildOptionFlags);
411
412 dst.glslSources.add("frag") << glu::FragmentSource(frag.str()) << buildOptions;
413 }
414
tcu::TestStatus LegacyVertexAttributesInstance::iterate(void)
416 {
417 const auto &ctx = m_context.getContextCommonData();
418 const int pixelCount = 16;
419 const auto pixelCountU = static_cast<uint32_t>(pixelCount);
420 const tcu::IVec3 fbExtent(pixelCount, 1, 1);
421 const auto vkExtent = makeExtent3D(fbExtent);
422 const auto fbFormat = VK_FORMAT_R8G8B8A8_UNORM;
423 const auto fbTcuFormat = mapVkFormat(fbFormat);
424 const auto fbUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
425 const tcu::Vec4 clearColor(0.0f, 0.0f, 0.0f, 1.0f);
426 const tcu::Vec4 geomColor(0.0f, 0.0f, 1.0f, 1.0f); // Must match fragment shader.
427 const tcu::Vec4 colorThres(0.0f, 0.0f, 0.0f, 0.0f); // When using 0 and 1 only, we expect exact results.
428 const auto descType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
429 const auto bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
430 const auto dataStages = VK_SHADER_STAGE_FRAGMENT_BIT;
431
432 // Color buffer with verification buffer.
433 ImageWithBuffer colorBuffer(ctx.vkd, ctx.device, ctx.allocator, vkExtent, fbFormat, fbUsage, VK_IMAGE_TYPE_2D);
434
435 // Vertices.
436 std::vector<tcu::Vec4> vertices;
437
438 for (int i = 0; i < pixelCount; ++i)
439 {
440 const float xCoord = ((static_cast<float>(i) + 0.5f) / static_cast<float>(pixelCount) * 2.0f) - 1.0f;
441 const tcu::Vec4 position(xCoord, 0.0f, 0.0f, 1.0f);
442
443 vertices.push_back(position);
444 }
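// With pixelCount == 16 this yields x = -0.9375, -0.8125, ..., 0.9375: one point per pixel center of the 16x1
// framebuffer, so int(gl_FragCoord.x) in the fragment shader gives each point its own verification index.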
445
446 const auto seed = m_params.getRandomSeed();
447 de::Random rnd(seed);
448
449 std::vector<BytesVector> byteInputs;
450 byteInputs.reserve(m_params.bindings.size());
451 for (const auto &binding : m_params.bindings)
452 byteInputs.emplace_back(genInputData(binding, pixelCountU, rnd));
453
454 // Vertex buffers
455 using BufferWithMemoryPtr = std::unique_ptr<BufferWithMemory>;
456 using BufferWithMemoryVec = std::vector<BufferWithMemoryPtr>;
457
458 BufferWithMemoryVec vertexBuffers;
459 vertexBuffers.reserve(m_params.bindings.size() + 1); // Extra buffer for the positions.
460
461 // Positions.
462 {
463 const auto vbSize = static_cast<VkDeviceSize>(de::dataSize(vertices));
464 const auto vbInfo = makeBufferCreateInfo(vbSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
465
466 vertexBuffers.emplace_back(
467 new BufferWithMemory(ctx.vkd, ctx.device, ctx.allocator, vbInfo, MemoryRequirement::HostVisible));
468
469 const auto vbAlloc = vertexBuffers.back()->getAllocation();
470 void *vbData = vbAlloc.getHostPtr();
471
472 deMemcpy(vbData, de::dataOrNull(vertices), de::dataSize(vertices));
473 flushAlloc(ctx.vkd, ctx.device, vbAlloc);
474 }
475
476 // Extra data. We use a dedicated allocator for these buffers in order to apply the memory offset. Note we lie about the
// noncoherent atom size since we want to apply the offset exactly and the non-coherent atom size is irrelevant in this case:
478 // we'll flush the whole allocation.
479 for (size_t i = 0; i < m_params.bindings.size(); ++i)
480 {
481 const auto &binding = m_params.bindings.at(i);
482 const auto &inputData = byteInputs.at(i);
483
484 SimpleAllocator offsetAllocator(
485 ctx.vkd, ctx.device, getPhysicalDeviceMemoryProperties(ctx.vki, ctx.physicalDevice),
486 tcu::just(SimpleAllocator::OffsetParams{VkDeviceSize{1u}, VkDeviceSize{binding.memoryOffset}}));
487
488 const auto vbSize = static_cast<VkDeviceSize>(de::dataSize(inputData));
489 const auto vbInfo = makeBufferCreateInfo(vbSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
490
491 vertexBuffers.emplace_back(
492 new BufferWithMemory(ctx.vkd, ctx.device, offsetAllocator, vbInfo, MemoryRequirement::HostVisible));
493
494 const auto vbAlloc = vertexBuffers.back()->getAllocation();
495 void *vbData = vbAlloc.getHostPtr();
496
497 deMemcpy(vbData, de::dataOrNull(inputData), de::dataSize(inputData));
498 // Note we're not flushing data here because it's not needed and flushing with the binding memory offset would mean hitting
499 // VUID-VkMappedMemoryRange-offset-00687.
500 //flushMappedMemoryRange(ctx.vkd, ctx.device, vbAlloc.getMemory(), binding.memoryOffset, VK_WHOLE_SIZE);
501 }
502
// Data buffers for verification.
504 const auto verifBufferOffset = static_cast<VkDeviceSize>(0);
505 BufferWithMemoryVec verifBuffers;
506 std::vector<BytesVector> referenceVecs;
507
508 verifBuffers.reserve(byteInputs.size());
509 referenceVecs.reserve(byteInputs.size());
510
511 for (size_t i = 0; i < byteInputs.size(); ++i)
512 {
513 const auto &binding = m_params.bindings.at(i);
514 const auto &inputData = byteInputs.at(i);
515
516 referenceVecs.emplace_back(getOutputData(inputData, binding, pixelCountU));
517 const auto &refData = referenceVecs.back();
518
519 const auto bufferSize = static_cast<VkDeviceSize>(de::dataSize(refData));
520 const auto createInfo = makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
521
522 verifBuffers.emplace_back(
523 new BufferWithMemory(ctx.vkd, ctx.device, ctx.allocator, createInfo, MemoryRequirement::HostVisible));
524
525 const auto allocation = verifBuffers.back()->getAllocation();
526 void *bufferData = allocation.getHostPtr();
527
528 deMemset(bufferData, 0, de::dataSize(refData));
529 flushAlloc(ctx.vkd, ctx.device, allocation);
530 }
531
532 // Descriptor pool, set, layout, etc.
533 DescriptorPoolBuilder poolBuilder;
534 poolBuilder.addType(descType, de::sizeU32(verifBuffers));
535 const auto descriptorPool =
536 poolBuilder.build(ctx.vkd, ctx.device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
537
538 DescriptorSetLayoutBuilder layoutBuilder;
539 for (size_t i = 0; i < verifBuffers.size(); ++i)
540 layoutBuilder.addSingleBinding(descType, dataStages);
541 const auto setLayout = layoutBuilder.build(ctx.vkd, ctx.device);
542 const auto descriptorSet = makeDescriptorSet(ctx.vkd, ctx.device, *descriptorPool, *setLayout);
543
544 DescriptorSetUpdateBuilder updateBuilder;
545 for (size_t i = 0; i < verifBuffers.size(); ++i)
546 {
547 const auto &buffer = *verifBuffers.at(i);
548 const auto dbDescInfo = makeDescriptorBufferInfo(buffer.get(), verifBufferOffset, buffer.getBufferSize());
549 updateBuilder.writeSingle(*descriptorSet,
550 DescriptorSetUpdateBuilder::Location::binding(static_cast<uint32_t>(i)), descType,
551 &dbDescInfo);
552 }
553 updateBuilder.update(ctx.vkd, ctx.device);
554
555 const auto pipelineLayout = PipelineLayoutWrapper(m_params.constructionType, ctx.vkd, ctx.device, *setLayout);
556 auto renderPass = RenderPassWrapper(m_params.constructionType, ctx.vkd, ctx.device, fbFormat);
557 renderPass.createFramebuffer(ctx.vkd, ctx.device, colorBuffer.getImage(), colorBuffer.getImageView(),
558 vkExtent.width, vkExtent.height);
559
560 // Modules.
561 const auto &binaries = m_context.getBinaryCollection();
562 const auto vertModule = ShaderWrapper(ctx.vkd, ctx.device, binaries.get("vert"));
563 const auto fragModule = ShaderWrapper(ctx.vkd, ctx.device, binaries.get("frag"));
564
565 const std::vector<VkViewport> viewports(1u, makeViewport(vkExtent));
566 const std::vector<VkRect2D> scissors(1u, makeRect2D(vkExtent));
567
568 const std::vector<VkDynamicState> dynamicStates{VK_DYNAMIC_STATE_VERTEX_INPUT_EXT};
569 const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
570 VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType;
571 nullptr, // const void* pNext;
572 0u, // VkPipelineDynamicStateCreateFlags flags;
573 de::sizeU32(dynamicStates), // uint32_t dynamicStateCount;
574 de::dataOrNull(dynamicStates), // const VkDynamicState* pDynamicStates;
575 };
576
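// The vertex input state is left empty on purpose: bindings and attributes are supplied dynamically through
// vkCmdSetVertexInputEXT before the draw.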
577 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = initVulkanStructure();
578
579 GraphicsPipelineWrapper pipeline(ctx.vki, ctx.vkd, ctx.physicalDevice, ctx.device, m_context.getDeviceExtensions(),
580 m_params.constructionType);
581 pipeline.setMonolithicPipelineLayout(pipelineLayout);
582 pipeline.setDynamicState(&dynamicStateCreateInfo);
583 pipeline.setDefaultTopology(VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
584 pipeline.setDefaultRasterizationState();
585 pipeline.setDefaultColorBlendState();
586 pipeline.setDefaultDepthStencilState();
587 pipeline.setDefaultMultisampleState();
588 pipeline.setDefaultPatchControlPoints(0u);
589 pipeline.setupVertexInputState(&vertexInputStateCreateInfo);
590 pipeline.setupPreRasterizationShaderState(viewports, scissors, pipelineLayout, *renderPass, 0u, vertModule);
591 pipeline.setupFragmentShaderState(pipelineLayout, *renderPass, 0u, fragModule);
592 pipeline.setupFragmentOutputState(*renderPass, 0u);
593 pipeline.buildPipeline();
594
595 CommandPoolWithBuffer cmd(ctx.vkd, ctx.device, ctx.qfIndex);
596 const auto cmdBuffer = *cmd.cmdBuffer;
597
598 std::vector<VkVertexInputBindingDescription2EXT> bindingDescriptions;
599 bindingDescriptions.reserve(vertexBuffers.size());
600
601 {
602 // Positions binding.
603 bindingDescriptions.emplace_back(VkVertexInputBindingDescription2EXT{
604 VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT, // VkStructureType sType;
605 nullptr, // void* pNext;
606 0u, // uint32_t binding;
607 static_cast<uint32_t>(sizeof(tcu::Vec4)), // uint32_t stride;
608 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
609 0u, // uint32_t divisor;
610 });
611 }
612
613 for (size_t i = 0; i < m_params.bindings.size(); ++i)
614 {
615 const auto &binding = m_params.bindings.at(i);
616
617 // Extra data bindings.
618 bindingDescriptions.emplace_back(VkVertexInputBindingDescription2EXT{
619 VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT, // VkStructureType sType;
620 nullptr, // void* pNext;
621 static_cast<uint32_t>(i + 1), // uint32_t binding;
622 binding.bindingStride, // uint32_t stride;
623 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
624 0u, // uint32_t divisor;
625 });
}
627
628 std::vector<VkVertexInputAttributeDescription2EXT> attributeDescriptions;
629 {
630 // Position.
631 attributeDescriptions.emplace_back(VkVertexInputAttributeDescription2EXT{
632 VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT, // VkStructureType sType;
633 nullptr, // void* pNext;
634 0u, // uint32_t location;
635 0u, // uint32_t binding;
636 vk::VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
637 0u, // uint32_t offset;
638 });
639 }
640
641 for (size_t i = 0; i < m_params.bindings.size(); ++i)
642 {
643 const auto &binding = m_params.bindings.at(i);
644 const auto idx = static_cast<uint32_t>(i + 1);
645
646 // Extra data attributes.
647 attributeDescriptions.emplace_back(VkVertexInputAttributeDescription2EXT{
648 VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT, // VkStructureType sType;
649 nullptr, // void* pNext;
650 idx, // uint32_t location;
651 idx, // uint32_t binding;
652 binding.format, // VkFormat format;
653 binding.attributeOffset, // uint32_t offset;
654 });
}
656
657 std::vector<VkBuffer> rawVertexBuffers;
658 rawVertexBuffers.reserve(vertexBuffers.size());
659 std::transform(begin(vertexBuffers), end(vertexBuffers), std::back_inserter(rawVertexBuffers),
660 [](const BufferWithMemoryPtr &buffer) { return buffer->get(); });
661
662 std::vector<VkDeviceSize> rawVertexBufferOffsets(rawVertexBuffers.size(), static_cast<VkDeviceSize>(0));
663
664 beginCommandBuffer(ctx.vkd, cmdBuffer);
665 renderPass.begin(ctx.vkd, cmdBuffer, scissors.at(0u), clearColor);
666 DE_ASSERT(rawVertexBuffers.size() == rawVertexBufferOffsets.size());
667 ctx.vkd.cmdBindVertexBuffers(cmdBuffer, 0u, de::sizeU32(rawVertexBuffers), de::dataOrNull(rawVertexBuffers),
668 de::dataOrNull(rawVertexBufferOffsets));
669 ctx.vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, nullptr);
670 ctx.vkd.cmdSetVertexInputEXT(cmdBuffer, de::sizeU32(bindingDescriptions), de::dataOrNull(bindingDescriptions),
671 de::sizeU32(attributeDescriptions), de::dataOrNull(attributeDescriptions));
672 pipeline.bind(cmdBuffer);
673 ctx.vkd.cmdDraw(cmdBuffer, de::sizeU32(vertices), 1u, 0u, 0u);
674 renderPass.end(ctx.vkd, cmdBuffer);
675 copyImageToBuffer(ctx.vkd, cmdBuffer, colorBuffer.getImage(), colorBuffer.getBuffer(), fbExtent.swizzle(0, 1),
676 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1u,
677 VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_ASPECT_COLOR_BIT,
678 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
679 {
680 const auto barrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
681 cmdPipelineMemoryBarrier(ctx.vkd, cmdBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
682 &barrier);
683 }
684 endCommandBuffer(ctx.vkd, cmdBuffer);
685 submitCommandsAndWait(ctx.vkd, ctx.device, ctx.queue, cmdBuffer);
686
687 // Verify color output.
688 invalidateAlloc(ctx.vkd, ctx.device, colorBuffer.getBufferAllocation());
689 tcu::PixelBufferAccess resultAccess(fbTcuFormat, fbExtent, colorBuffer.getBufferAllocation().getHostPtr());
690
691 tcu::TextureLevel referenceLevel(fbTcuFormat, fbExtent.x(), fbExtent.y());
692 auto referenceAccess = referenceLevel.getAccess();
693 tcu::clear(referenceAccess, geomColor);
694
695 auto &log = m_context.getTestContext().getLog();
696 if (!tcu::floatThresholdCompare(log, "Result", "", referenceAccess, resultAccess, colorThres,
697 tcu::COMPARE_LOG_ON_ERROR))
698 return tcu::TestStatus::fail("Unexpected color in result buffer; check log for details");
699
700 // Check storage buffers.
701 for (size_t idx = 0; idx < m_params.bindings.size(); ++idx)
702 {
703 const auto &binding = m_params.bindings.at(idx);
704
705 // Related to the vertex format.
706 const auto tcuVertexFormat = mapVkFormat(binding.format);
707 const auto vertexBitWidth = tcu::getTextureFormatBitDepth(tcuVertexFormat);
708 const auto channelClass = tcu::getTextureChannelClass(tcuVertexFormat.type);
709 const auto channelCount = tcu::getNumUsedChannels(tcuVertexFormat.order);
710
711 const auto &buffer = *verifBuffers.at(idx);
712 invalidateAlloc(ctx.vkd, ctx.device, buffer.getAllocation());
713
714 const auto &refData = referenceVecs.at(idx);
715 const void *bufferData = buffer.getAllocation().getHostPtr();
716
717 BytesVector resultData(refData.size());
718 deMemcpy(de::dataOrNull(resultData), bufferData, de::dataSize(resultData));
719
720 DE_ASSERT(resultData.size() == refData.size());
721 bool dataOK = true;
722
723 // Used for floating point conversion checks.
724 tcu::Vec4 vertexThres(0.0f);
725 {
726 // Note these thresholds are much larger than the precision requested in section "Floating-Point Format Conversions", which
727 // requires that finite values falling between two representable finite values use either of them as the conversion result.
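// For illustration, with the formulas below an 8-bit UNORM channel gets a threshold of 1/511 (~0.002) and an 8-bit
// SNORM channel 1/255 (~0.004); integer channels are compared exactly further down.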
728 if (channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT)
729 {
730 for (int i = 0; i < channelCount; ++i)
731 vertexThres[i] = 1.0f / static_cast<float>((1 << (vertexBitWidth[i] + 1)) - 1);
732 }
733 else if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT)
734 {
735 for (int i = 0; i < channelCount; ++i)
736 vertexThres[i] = 1.0f / static_cast<float>((1 << vertexBitWidth[i]) - 1);
737 }
738 }
739
740 const auto channelCountU32 = static_cast<uint32_t>(channelCount);
741 DE_ASSERT(resultData.size() > 0);
742 DE_ASSERT(resultData.size() % (k32BitsInBytes * channelCountU32) == 0u);
743
// We'll reinterpret the output data in different formats.
745 const uint32_t *resU32Ptr = reinterpret_cast<uint32_t *>(resultData.data());
746 const uint32_t *refU32Ptr = reinterpret_cast<const uint32_t *>(refData.data());
747
748 const int32_t *resI32Ptr = reinterpret_cast<int32_t *>(resultData.data());
749 const int32_t *refI32Ptr = reinterpret_cast<const int32_t *>(refData.data());
750
751 const float *resF32Ptr = reinterpret_cast<float *>(resultData.data());
752 const float *refF32Ptr = reinterpret_cast<const float *>(refData.data());
753
for (uint32_t pointIdx = 0u; pointIdx < pixelCountU; ++pointIdx)
755 {
756 for (uint32_t chIdx = 0u; chIdx < channelCountU32; ++chIdx)
757 {
758 const auto scalarIdx = pointIdx * channelCountU32 + chIdx;
759
760 if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT ||
761 channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT ||
762 channelClass == tcu::TEXTURECHANNELCLASS_FLOATING_POINT)
763 {
764 const auto res = resF32Ptr[scalarIdx];
765 const auto ref = refF32Ptr[scalarIdx];
766 const auto thr = vertexThres[chIdx];
767
768 if (de::abs(res - ref) > thr)
769 {
770 dataOK = false;
771 log << tcu::TestLog::Message << "Unexpected result in point " << pointIdx << " channel "
772 << chIdx << ": found " << res << " but expected " << ref << " (threshold " << thr << ")"
773 << tcu::TestLog::EndMessage;
774 }
775 }
776 else if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER)
777 {
778 const auto res = resI32Ptr[scalarIdx];
779 const auto ref = refI32Ptr[scalarIdx];
780
781 if (res != ref)
782 {
783 dataOK = false;
784 log << tcu::TestLog::Message << "Unexpected result in point " << pointIdx << " channel "
785 << chIdx << ": found " << res << " but expected " << ref << tcu::TestLog::EndMessage;
786 }
787 }
788 else if (channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
789 {
790 const auto res = resU32Ptr[scalarIdx];
791 const auto ref = refU32Ptr[scalarIdx];
792
793 if (res != ref)
794 {
795 dataOK = false;
796 log << tcu::TestLog::Message << "Unexpected result in point " << pointIdx << " channel "
797 << chIdx << ": found " << res << " but expected " << ref << tcu::TestLog::EndMessage;
798 }
799 }
800 else
801 DE_ASSERT(false);
802 }
803 }
804
805 if (!dataOK)
806 return tcu::TestStatus::fail("Unexpected result in output buffer; check log for details");
807 }
808
809 return tcu::TestStatus::pass("Pass");
810 }
811
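// Returns a lowercase short name, e.g. VK_FORMAT_R16G16_SFLOAT -> "r16g16_sfloat".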
std::string getFormatShortName(VkFormat format)
813 {
814 const std::string longName = getFormatName(format);
815 DE_ASSERT(de::beginsWith(longName, "VK_FORMAT_"));
816 const std::string shortName = de::toLower(longName.substr(10 /*strlen("VK_FORMAT_")*/));
817 return shortName;
818 }
819
820 using FormatVec = std::vector<VkFormat>;
std::string getFormatShortName(const FormatVec &formats)
822 {
823 std::string concat;
824 for (const auto format : formats)
825 concat += (concat.empty() ? "" : "_") + getFormatShortName(format);
826 return concat;
827 }
828
// Auxiliary helper, used to check channel bit widths below.
bool checkAny(const tcu::IVec4 values, int channelCount, const std::function<bool(int)> &condition)
831 {
832 const auto count = std::min(channelCount, static_cast<int>(tcu::IVec4::SIZE));
833 for (int i = 0; i < count; ++i)
834 if (condition(values[i]))
835 return true;
836 return false;
837 }
838
839 } // namespace
840
void createLegacyVertexAttributesTests(tcu::TestCaseGroup *group, PipelineConstructionType constructionType)
842 {
843 auto &testContext = group->getTestContext();
844
845 using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
846 GroupPtr singleGroup(new tcu::TestCaseGroup(testContext, "single_binding"));
847 GroupPtr multiGroup(new tcu::TestCaseGroup(testContext, "multi_binding"));
848
849 const VkFormat formatsToTest[] = {
850 // Formats with mandatory vertex input support.
851 VK_FORMAT_R8_UNORM,
852 VK_FORMAT_R8_SNORM,
853 VK_FORMAT_R8_UINT,
854 VK_FORMAT_R8_SINT,
855 VK_FORMAT_R8G8_UNORM,
856 VK_FORMAT_R8G8_SNORM,
857 VK_FORMAT_R8G8_UINT,
858 VK_FORMAT_R8G8_SINT,
859 VK_FORMAT_R8G8B8A8_UNORM,
860 VK_FORMAT_R8G8B8A8_SNORM,
861 VK_FORMAT_R8G8B8A8_UINT,
862 VK_FORMAT_R8G8B8A8_SINT,
863 VK_FORMAT_B8G8R8A8_UNORM, // weird
864 VK_FORMAT_A8B8G8R8_UNORM_PACK32, // pack?
865 VK_FORMAT_A8B8G8R8_SNORM_PACK32,
866 VK_FORMAT_A8B8G8R8_UINT_PACK32,
867 VK_FORMAT_A8B8G8R8_SINT_PACK32,
868 VK_FORMAT_A2B10G10R10_UNORM_PACK32, // interesting, pack
869 VK_FORMAT_R16_UNORM,
870 VK_FORMAT_R16_SNORM,
871 VK_FORMAT_R16_UINT,
872 VK_FORMAT_R16_SINT,
873 VK_FORMAT_R16_SFLOAT,
874 VK_FORMAT_R16G16_UNORM,
875 VK_FORMAT_R16G16_SNORM,
876 VK_FORMAT_R16G16_UINT,
877 VK_FORMAT_R16G16_SINT,
878 VK_FORMAT_R16G16_SFLOAT,
879 VK_FORMAT_R16G16B16A16_UNORM,
880 VK_FORMAT_R16G16B16A16_SNORM,
881 VK_FORMAT_R16G16B16A16_UINT,
882 VK_FORMAT_R16G16B16A16_SINT,
883 VK_FORMAT_R16G16B16A16_SFLOAT,
884 VK_FORMAT_R32_UINT,
885 VK_FORMAT_R32_SINT,
886 VK_FORMAT_R32_SFLOAT,
887 VK_FORMAT_R32G32_UINT,
888 VK_FORMAT_R32G32_SINT,
889 VK_FORMAT_R32G32_SFLOAT,
890 VK_FORMAT_R32G32B32_UINT,
891 VK_FORMAT_R32G32B32_SINT,
892 VK_FORMAT_R32G32B32_SFLOAT,
893 VK_FORMAT_R32G32B32A32_UINT,
894 VK_FORMAT_R32G32B32A32_SINT,
895 VK_FORMAT_R32G32B32A32_SFLOAT,
896
897 // 3-component formats do not have that feature as mandatory, but we should still try.
898 VK_FORMAT_R8G8B8_UNORM,
899 VK_FORMAT_R8G8B8_SNORM,
900 VK_FORMAT_R8G8B8_UINT,
901 VK_FORMAT_R8G8B8_SINT,
902 VK_FORMAT_R16G16B16_UNORM,
903 VK_FORMAT_R16G16B16_SNORM,
904 VK_FORMAT_R16G16B16_UINT,
905 VK_FORMAT_R16G16B16_SINT,
906 VK_FORMAT_R16G16B16_SFLOAT,
907 };
908
909 const struct
910 {
911 ShaderFormat shaderFormat;
912 const char *desc;
913 } shaderFormats[] = {
914 {ShaderFormat::SIGNED_INT, "shader_int"},
915 {ShaderFormat::UNSIGNED_INT, "shader_uint"},
916 {ShaderFormat::FLOAT, "shader_float"},
917 };
918
919 const auto lessThan32Bits = [](int width) { return width < 32 /*bits*/; };
920
921 // Single binding tests.
922 for (const auto &format : formatsToTest)
923 {
924 const auto tcuFormat = mapVkFormat(format);
925 const int formatSize = tcu::getPixelSize(tcuFormat);
926 const auto fmtClass = tcu::getTextureChannelClass(tcuFormat.type);
927 const auto vertexBitWidth = tcu::getTextureFormatBitDepth(tcuFormat);
928 const auto channelCount = tcu::getNumUsedChannels(tcuFormat.order);
929
930 const std::set<uint32_t> strides{
931 0u,
932 1u,
933 static_cast<uint32_t>(formatSize),
934 static_cast<uint32_t>(formatSize + formatSize - 1),
935 };
936
937 for (const uint32_t stride : strides)
938 for (const auto shaderFormat : shaderFormats)
939 {
940 const bool isFloatFormat = (fmtClass == tcu::TEXTURECHANNELCLASS_FLOATING_POINT ||
941 fmtClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT ||
942 fmtClass == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
943 const bool isIntegerFormat = !isFloatFormat;
944
// Float-like formats do not need to be reinterpreted as both signed and unsigned integers in the shader; one of
// them is enough.
947 if (isFloatFormat)
948 {
949 const auto fmtId = static_cast<int>(format);
950 const auto fmtMod = fmtId % 2;
951
952 if (fmtMod == 0 && shaderFormat.shaderFormat == ShaderFormat::SIGNED_INT)
953 continue;
954
955 if (fmtMod == 1 && shaderFormat.shaderFormat == ShaderFormat::UNSIGNED_INT)
956 continue;
957 }
958
959 if (isIntegerFormat && shaderFormat.shaderFormat == ShaderFormat::FLOAT)
960 {
961 // Integer formats with less than 4 bytes in any channel should not go through the shader as floats because,
962 // when the values are expanded to 32-bits, the upper byte(s) will be zeros and, if they're to be interpreted as
963 // floats, it's likely the mantissa is nonzero and the exponent zero, so it doesn't pass the denorm check we
964 // run in genInputData. Note for 24-bit channels this wouldn't always be true but it's true for half the values,
965 // which would make it unlikely that we could generate 16 inputs without wasting a lot of time.
966 bool skip = checkAny(vertexBitWidth, channelCount, lessThan32Bits);
967 if (skip)
968 continue;
969 }
970
971 for (const auto attributeOffset : {0u, 1u})
972 for (const auto memoryOffset : {0u, 1u})
973 {
974 if (attributeOffset != 0u || memoryOffset != 0u)
975 {
976 // Skip tests that do not produce unaligned access despite attempting to use attributeOffset and memoryOffset.
977 bool aligned =
978 !checkAny(vertexBitWidth, channelCount, [](int width) { return width > 8 /*bits*/; });
979 if (aligned)
980 continue;
981 }
982
983 const auto shortName = getFormatShortName(format);
984 const auto aoSuffix = ((attributeOffset > 0u) ?
985 std::string("_attribute_offset_") + std::to_string(attributeOffset) :
986 "");
987 const auto moSuffix =
988 ((memoryOffset > 0u) ? std::string("_memory_offset_") + std::to_string(memoryOffset) : "");
989 const auto testName = shortName + "_" + shaderFormat.desc + "_stride_" +
990 std::to_string(stride) + aoSuffix + moSuffix;
991
992 // Single binding.
993 const BindingParams bindingParams{format, shaderFormat.shaderFormat, stride, attributeOffset,
994 memoryOffset};
995
996 const LegacyVertexAttributesParams params{constructionType,
997 BindingParamsVec(1u, bindingParams)};
998 singleGroup->addChild(new LegacyVertexAttributesCase(testContext, testName, params));
999 }
1000 }
1001 }
1002
1003 // Tests using multiple bindings.
1004 {
// We don't want many of these tests, so the selected formats are a mix of component counts, numeric classes and bit widths.
1006 const std::vector<VkFormat> formatTuples[] = {
1007 {VK_FORMAT_R8_UNORM, VK_FORMAT_R16G16_UINT, VK_FORMAT_R32G32B32A32_SINT},
1008 {VK_FORMAT_R32_SFLOAT, VK_FORMAT_R16G16B16_SNORM, VK_FORMAT_R8G8_UINT},
1009 {VK_FORMAT_R32G32B32A32_SFLOAT, VK_FORMAT_R16_SINT, VK_FORMAT_R8G8_UNORM},
1010 };
1011
1012 for (const auto &tuple : formatTuples)
1013 {
1014 for (const bool singleByteStride : {false, true})
1015 for (const auto attributeOffset : {0u, 1u})
1016 for (const auto memoryOffset : {0u, 1u})
1017 {
1018 BindingParamsVec bindingParams;
1019 for (const auto format : tuple)
1020 {
1021 const auto tcuFormat = mapVkFormat(format);
1022 const int formatSize = tcu::getPixelSize(tcuFormat);
1023 const auto fmtClass = tcu::getTextureChannelClass(tcuFormat.type);
1024 const auto vertexBitWidth = tcu::getTextureFormatBitDepth(tcuFormat);
1025 const auto channelCount = tcu::getNumUsedChannels(tcuFormat.order);
1026
1027 ShaderFormat shaderFormat = ShaderFormat::INVALID;
1028 const bool isFloatFormat = (fmtClass == tcu::TEXTURECHANNELCLASS_FLOATING_POINT ||
1029 fmtClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT ||
1030 fmtClass == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
1031
1032 if (isFloatFormat)
1033 {
1034 // Use a signed or unsigned format in the shader.
1035 const auto fmtId = static_cast<int>(format);
1036 const auto fmtMod = fmtId % 2;
1037 const std::vector<ShaderFormat> options{ShaderFormat::SIGNED_INT,
1038 ShaderFormat::UNSIGNED_INT};
1039
1040 shaderFormat = options.at(fmtMod);
1041 }
1042 else
1043 {
1044 // For integer formats use floats if possible in the shader, or the alternative signed/unsigned
1045 // variant if not.
1046 const bool signedClass = (fmtClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
1047 const ShaderFormat integerAlternative =
1048 (signedClass ? ShaderFormat::UNSIGNED_INT : ShaderFormat::SIGNED_INT);
1049 const bool hasSmallChannels = checkAny(vertexBitWidth, channelCount, lessThan32Bits);
1050
1051 shaderFormat = (hasSmallChannels ? integerAlternative : ShaderFormat::FLOAT);
1052 }
1053
1054 DE_ASSERT(shaderFormat != ShaderFormat::INVALID);
1055
1056 const auto stride = (singleByteStride ? 1u : static_cast<uint32_t>(formatSize));
1057
1058 bindingParams.emplace_back(format, shaderFormat, stride, attributeOffset, memoryOffset);
1059 }
1060
1061 const LegacyVertexAttributesParams testParams(constructionType, bindingParams);
1062
1063 const auto shortName = getFormatShortName(tuple);
1064 const auto strideSuffix = (singleByteStride ? "_stride_1_byte" : "_stride_normal");
1065 const auto aoSuffix = ((attributeOffset > 0u) ?
1066 std::string("_attribute_offset_") + std::to_string(attributeOffset) :
1067 "");
1068 const auto moSuffix =
1069 ((memoryOffset > 0u) ? std::string("_memory_offset_") + std::to_string(memoryOffset) : "");
1070 const auto testName = shortName + strideSuffix + aoSuffix + moSuffix;
1071
1072 multiGroup->addChild(new LegacyVertexAttributesCase(testContext, testName, testParams));
1073 }
1074 }
1075 }
1076
1077 group->addChild(singleGroup.release());
1078 group->addChild(multiGroup.release());
1079 }
1080
1081 } // namespace pipeline
1082 } // namespace vkt
1083