1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2019 The Khronos Group Inc.
6 * Copyright (c) 2019 Google Inc.
7 * Copyright (c) 2017 Codeplay Software Ltd.
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 */ /*!
22 * \file
23 * \brief Subgroups Tests
24 */ /*--------------------------------------------------------------------*/
25
26 #include "vktSubgroupsClusteredTests.hpp"
27 #include "vktSubgroupsScanHelpers.hpp"
28 #include "vktSubgroupsTestsUtils.hpp"
29
30 #include <string>
31 #include <vector>
32
33 using namespace tcu;
34 using namespace std;
35 using namespace vk;
36 using namespace vkt;
37
38 namespace
39 {
// Clustered subgroup operations exercised by these tests; one entry per
// GLSL subgroupClustered* reduction flavor (mapped to Operator by getOperator).
enum OpType
{
	OPTYPE_CLUSTERED_ADD = 0,
	OPTYPE_CLUSTERED_MUL,
	OPTYPE_CLUSTERED_MIN,
	OPTYPE_CLUSTERED_MAX,
	OPTYPE_CLUSTERED_AND,
	OPTYPE_CLUSTERED_OR,
	OPTYPE_CLUSTERED_XOR,
	OPTYPE_CLUSTERED_LAST	// Not a real op: entry count / iteration end marker.
};
51
// Parameters that fully describe one clustered-subgroup test case instance.
struct CaseDefinition
{
	Operator			op;							// Reduction operator under test.
	VkShaderStageFlags	shaderStage;				// Stage (or stage set) the test shader runs in.
	VkFormat			format;						// Element format of the input data.
	de::SharedPtr<bool>	geometryPointSizeSupported;	// Written by supportedCheck, read at program-init time.
	deBool				requiredSubgroupSize;		// Iterate over explicit required subgroup sizes (VK_EXT_subgroup_size_control).
	deBool				requires8BitUniformBuffer;	// Input UBO needs 8-bit storage support.
	deBool				requires16BitUniformBuffer;	// Input UBO needs 16-bit storage support.
};
62
getOperator(OpType opType)63 static Operator getOperator (OpType opType)
64 {
65 switch (opType)
66 {
67 case OPTYPE_CLUSTERED_ADD: return OPERATOR_ADD;
68 case OPTYPE_CLUSTERED_MUL: return OPERATOR_MUL;
69 case OPTYPE_CLUSTERED_MIN: return OPERATOR_MIN;
70 case OPTYPE_CLUSTERED_MAX: return OPERATOR_MAX;
71 case OPTYPE_CLUSTERED_AND: return OPERATOR_AND;
72 case OPTYPE_CLUSTERED_OR: return OPERATOR_OR;
73 case OPTYPE_CLUSTERED_XOR: return OPERATOR_XOR;
74 default: TCU_THROW(InternalError, "Unsupported op type");
75 }
76 }
77
checkVertexPipelineStages(const void * internalData,vector<const void * > datas,deUint32 width,deUint32)78 static bool checkVertexPipelineStages (const void* internalData,
79 vector<const void*> datas,
80 deUint32 width,
81 deUint32)
82 {
83 DE_UNREF(internalData);
84
85 return subgroups::check(datas, width, 1);
86 }
87
checkComputeOrMesh(const void * internalData,vector<const void * > datas,const deUint32 numWorkgroups[3],const deUint32 localSize[3],deUint32)88 static bool checkComputeOrMesh (const void* internalData,
89 vector<const void*> datas,
90 const deUint32 numWorkgroups[3],
91 const deUint32 localSize[3],
92 deUint32)
93 {
94 DE_UNREF(internalData);
95
96 return subgroups::checkComputeOrMesh(datas, numWorkgroups, localSize, 1);
97 }
98
// Returns the GLSL built-in name for the clustered reduction of 'op'
// (e.g. "subgroupClusteredAdd"), via the shared scan-helper naming code.
string getOpTypeName (Operator op)
{
	return getScanOpName("subgroupClustered", "", op, SCAN_REDUCE);
}
103
getExtHeader(CaseDefinition & caseDef)104 string getExtHeader (CaseDefinition& caseDef)
105 {
106 return "#extension GL_KHR_shader_subgroup_clustered: enable\n"
107 "#extension GL_KHR_shader_subgroup_ballot: enable\n" +
108 subgroups::getAdditionalExtensionForFormat(caseDef.format);
109 }
110
getTestSrc(CaseDefinition & caseDef)111 string getTestSrc (CaseDefinition& caseDef)
112 {
113 const string formatName = subgroups::getFormatNameForGLSL(caseDef.format);
114 const string opTypeName = getOpTypeName(caseDef.op);
115 const string identity = getIdentity(caseDef.op, caseDef.format);
116 const string opOperation = getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]");
117 const string compare = getCompare(caseDef.op, caseDef.format, "ref", "op");
118 ostringstream bdy;
119
120 bdy << " bool tempResult = true;\n"
121 << " uvec4 mask = subgroupBallot(true);\n";
122
123 for (deUint32 i = 1; i <= subgroups::maxSupportedSubgroupSize(); i *= 2)
124 {
125 bdy << " {\n"
126 << " const uint clusterSize = " << i << ";\n"
127 << " if (clusterSize <= gl_SubgroupSize)\n"
128 << " {\n"
129 << " " << formatName << " op = "
130 << opTypeName + "(data[gl_SubgroupInvocationID], clusterSize);\n"
131 << " for (uint clusterOffset = 0; clusterOffset < gl_SubgroupSize; clusterOffset += clusterSize)\n"
132 << " {\n"
133 << " " << formatName << " ref = "
134 << identity << ";\n"
135 << " for (uint index = clusterOffset; index < (clusterOffset + clusterSize); index++)\n"
136 << " {\n"
137 << " if (subgroupBallotBitExtract(mask, index))\n"
138 << " {\n"
139 << " ref = " << opOperation << ";\n"
140 << " }\n"
141 << " }\n"
142 << " if ((clusterOffset <= gl_SubgroupInvocationID) && (gl_SubgroupInvocationID < (clusterOffset + clusterSize)))\n"
143 << " {\n"
144 << " if (!" << compare << ")\n"
145 << " {\n"
146 << " tempResult = false;\n"
147 << " }\n"
148 << " }\n"
149 << " }\n"
150 << " }\n"
151 << " }\n"
152 << " tempRes = tempResult ? 1 : 0;\n";
153 }
154
155 return bdy.str();
156 }
157
initFrameBufferPrograms(SourceCollections & programCollection,CaseDefinition caseDef)158 void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
159 {
160 const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
161 const string extHeader = getExtHeader(caseDef);
162 const string testSrc = getTestSrc(caseDef);
163
164 subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
165 }
166
initPrograms(SourceCollections & programCollection,CaseDefinition caseDef)167 void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
168 {
169 #ifndef CTS_USES_VULKANSC
170 const bool spirv14required = (isAllRayTracingStages(caseDef.shaderStage) || isAllMeshShadingStages(caseDef.shaderStage));
171 #else
172 const bool spirv14required = false;
173 #endif // CTS_USES_VULKANSC
174 const SpirvVersion spirvVersion = spirv14required ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
175 const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u, spirv14required);
176 const string extHeader = getExtHeader(caseDef);
177 const string testSrc = getTestSrc(caseDef);
178
179 subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
180 }
181
// Support check run before the test: throws NotSupportedError when the device
// lacks any feature this clustered-subgroup case needs. Also records whether
// tessellation/geometry point size is supported, for later program generation.
void supportedCheck (Context& context, CaseDefinition caseDef)
{
	if (!subgroups::isSubgroupSupported(context))
		TCU_THROW(NotSupportedError, "Subgroup operations are not supported");

	if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_CLUSTERED_BIT))
		TCU_THROW(NotSupportedError, "Device does not support subgroup clustered operations");

	if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
		TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");

	// Formats fed through a UBO may additionally need 16-bit UBO storage.
	if (caseDef.requires16BitUniformBuffer)
	{
		if (!subgroups::is16BitUBOStorageSupported(context))
		{
			TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
		}
	}

	// Likewise for 8-bit UBO storage.
	if (caseDef.requires8BitUniformBuffer)
	{
		if (!subgroups::is8BitUBOStorageSupported(context))
		{
			TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
		}
	}

	// Cases that pin the subgroup size need VK_EXT_subgroup_size_control with
	// both control features and support for the tested stage(s).
	if (caseDef.requiredSubgroupSize)
	{
		context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");

#ifndef CTS_USES_VULKANSC
		const VkPhysicalDeviceSubgroupSizeControlFeatures&		subgroupSizeControlFeatures		= context.getSubgroupSizeControlFeatures();
		const VkPhysicalDeviceSubgroupSizeControlProperties&	subgroupSizeControlProperties	= context.getSubgroupSizeControlProperties();
#else
		const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT&	subgroupSizeControlFeatures		= context.getSubgroupSizeControlFeaturesEXT();
		const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT&	subgroupSizeControlProperties	= context.getSubgroupSizeControlPropertiesEXT();
#endif // CTS_USES_VULKANSC

		if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
			TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");

		if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
			TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");

		if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
			TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
	}

	// Shared with initPrograms/initFrameBufferPrograms via the SharedPtr.
	*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);

#ifndef CTS_USES_VULKANSC
	if (isAllRayTracingStages(caseDef.shaderStage))
	{
		context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
	}
	else if (isAllMeshShadingStages(caseDef.shaderStage))
	{
		context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
		context.requireDeviceFunctionality("VK_EXT_mesh_shader");

		// Task shaders are an optional feature of VK_EXT_mesh_shader.
		if ((caseDef.shaderStage & VK_SHADER_STAGE_TASK_BIT_EXT) != 0u)
		{
			const auto& features = context.getMeshShaderFeaturesEXT();
			if (!features.taskShader)
				TCU_THROW(NotSupportedError, "Task shaders not supported");
		}
	}
#endif // CTS_USES_VULKANSC

	subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
254
noSSBOtest(Context & context,const CaseDefinition caseDef)255 TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
256 {
257 const subgroups::SSBOData inputData =
258 {
259 subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
260 subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
261 caseDef.format, // vk::VkFormat format;
262 subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
263 subgroups::SSBOData::BindingUBO, // BindingType bindingType;
264 };
265
266 switch (caseDef.shaderStage)
267 {
268 case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
269 case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
270 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
271 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
272 default: TCU_THROW(InternalError, "Unhandled shader stage");
273 }
274 }
275
// Runs one clustered test case, dispatching to the matching subgroups helper
// for compute, mesh, all-graphics or ray-tracing stage sets. For cases with
// requiredSubgroupSize set, iterates over every power-of-two size in the
// device's supported range and fails on the first failing size.
TestStatus test (Context& context, const CaseDefinition caseDef)
{
	const bool isCompute	= isAllComputeStages(caseDef.shaderStage);
#ifndef CTS_USES_VULKANSC
	const bool isMesh		= isAllMeshShadingStages(caseDef.shaderStage);
#else
	const bool isMesh		= false;
#endif // CTS_USES_VULKANSC
	DE_ASSERT(!(isCompute && isMesh));

	if (isCompute || isMesh)
	{
#ifndef CTS_USES_VULKANSC
		const VkPhysicalDeviceSubgroupSizeControlProperties&	subgroupSizeControlProperties	= context.getSubgroupSizeControlProperties();
#else
		const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT&	subgroupSizeControlProperties	= context.getSubgroupSizeControlPropertiesEXT();
#endif // CTS_USES_VULKANSC
		TestLog&	log	= context.getTestContext().getLog();

		subgroups::SSBOData inputData;
		inputData.format			= caseDef.format;
		inputData.layout			= subgroups::SSBOData::LayoutStd430;
		inputData.numElements		= subgroups::maxSupportedSubgroupSize();
		inputData.initializeType	= subgroups::SSBOData::InitializeNonZero;

		// Default path: let the implementation pick the subgroup size.
		if (caseDef.requiredSubgroupSize == DE_FALSE)
		{
			if (isCompute)
				return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh);
			else
				return subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh);
		}

		log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
			<< subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;

		// According to the spec, requiredSubgroupSize must be a power-of-two integer.
		for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
		{
			TestStatus result (QP_TEST_RESULT_INTERNAL_ERROR, "Internal Error");

			if (isCompute)
				result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh, size);
			else
				result = subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh, size);

			// Stop at the first failing size so the log pinpoints it.
			if (result.getCode() != QP_TEST_RESULT_PASS)
			{
				log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
				return result;
			}
		}

		return TestStatus::pass("OK");
	}
	else if (isAllGraphicsStages(caseDef.shaderStage))
	{
		const VkShaderStageFlags	stages		= subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
		const subgroups::SSBOData	inputData	=
		{
			subgroups::SSBOData::InitializeNonZero,	//  InputDataInitializeType		initializeType;
			subgroups::SSBOData::LayoutStd430,		//  InputDataLayoutType			layout;
			caseDef.format,							//  vk::VkFormat				format;
			subgroups::maxSupportedSubgroupSize(),	//  vk::VkDeviceSize			numElements;
			subgroups::SSBOData::BindingSSBO,		//  bool						isImage;
			4u,										//  deUint32					binding;
			stages,									//  vk::VkShaderStageFlags		stages;
		};

		return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
	}
#ifndef CTS_USES_VULKANSC
	else if (isAllRayTracingStages(caseDef.shaderStage))
	{
		const VkShaderStageFlags	stages		= subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
		const subgroups::SSBOData	inputData	=
		{
			subgroups::SSBOData::InitializeNonZero,	//  InputDataInitializeType		initializeType;
			subgroups::SSBOData::LayoutStd430,		//  InputDataLayoutType			layout;
			caseDef.format,							//  vk::VkFormat				format;
			subgroups::maxSupportedSubgroupSize(),	//  vk::VkDeviceSize			numElements;
			subgroups::SSBOData::BindingSSBO,		//  bool						isImage;
			6u,										//  deUint32					binding;
			stages,									//  vk::VkShaderStageFlags		stages;
		};

		return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
	}
#endif // CTS_USES_VULKANSC
	else
		TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
368 }
369
namespace vkt
{
namespace subgroups
{
// Builds the "clustered" test group: for every supported format and operator
// it registers compute, mesh (non-SC), all-graphics, framebuffer and ray
// tracing (non-SC) variants, skipping op/format combinations that do not make
// sense (float with bitwise ops, bool with non-bitwise ops).
TestCaseGroup* createSubgroupsClusteredTests (TestContext& testCtx)
{
	de::MovePtr<TestCaseGroup>	group				(new TestCaseGroup(testCtx, "clustered", "Subgroup clustered category tests"));
	de::MovePtr<TestCaseGroup>	graphicGroup		(new TestCaseGroup(testCtx, "graphics", "Subgroup clustered category tests: graphics"));
	de::MovePtr<TestCaseGroup>	computeGroup		(new TestCaseGroup(testCtx, "compute", "Subgroup clustered category tests: compute"));
	de::MovePtr<TestCaseGroup>	framebufferGroup	(new TestCaseGroup(testCtx, "framebuffer", "Subgroup clustered category tests: framebuffer"));
#ifndef CTS_USES_VULKANSC
	de::MovePtr<TestCaseGroup>	raytracingGroup		(new TestCaseGroup(testCtx, "ray_tracing", "Subgroup clustered category tests: ray tracing"));
	de::MovePtr<TestCaseGroup>	meshGroup			(new TestCaseGroup(testCtx, "mesh", "Subgroup clustered category tests: mesh shading"));
#endif // CTS_USES_VULKANSC
	// Stages that get individual framebuffer variants.
	const VkShaderStageFlags	fbStages[]			=
	{
		VK_SHADER_STAGE_VERTEX_BIT,
		VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
		VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
		VK_SHADER_STAGE_GEOMETRY_BIT,
	};
#ifndef CTS_USES_VULKANSC
	const VkShaderStageFlags	meshStages[]		=
	{
		VK_SHADER_STAGE_MESH_BIT_EXT,
		VK_SHADER_STAGE_TASK_BIT_EXT,
	};
#endif // CTS_USES_VULKANSC
	// Used to generate both the default and the required-subgroup-size variants.
	const deBool				boolValues[]		=
	{
		DE_FALSE,
		DE_TRUE
	};

	{
		const vector<VkFormat> formats = subgroups::getAllFormats();

		for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
		{
			const VkFormat	format					= formats[formatIndex];
			const string	formatName				= subgroups::getFormatNameForGLSL(format);
			const bool		isBool					= subgroups::isFormatBool(format);
			const bool		isFloat					= subgroups::isFormatFloat(format);
			const bool		needs8BitUBOStorage		= isFormat8bitTy(format);
			const bool		needs16BitUBOStorage	= isFormat16BitTy(format);

			for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
			{
				const OpType	opType		= static_cast<OpType>(opTypeIndex);
				const Operator	op			= getOperator(opType);
				const bool		isBitwiseOp	= (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);

				// Skip float with bitwise category.
				if (isFloat && isBitwiseOp)
					continue;

				// Skip bool when its not the bitwise category.
				if (isBool && !isBitwiseOp)
					continue;

				const string name = de::toLower(getOpTypeName(op)) + "_" + formatName;

				// Compute variants: with and without an explicit required subgroup size.
				for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
				{
					const deBool			requiredSubgroupSize	= boolValues[groupSizeNdx];
					const string			testName				= name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
					const CaseDefinition	caseDef					=
					{
						op,								//  Operator			op;
						VK_SHADER_STAGE_COMPUTE_BIT,	//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						requiredSubgroupSize,			//  deBool				requiredSubgroupSize;
						DE_FALSE,						//  deBool				requires8BitUniformBuffer;
						DE_FALSE						//  deBool				requires16BitUniformBuffer;
					};

					addFunctionCaseWithPrograms(computeGroup.get(), testName, supportedCheck, initPrograms, test, caseDef);
				}

#ifndef CTS_USES_VULKANSC
				// Mesh/task variants: same size matrix as compute, per mesh stage.
				for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
				{
					for (const auto& stage : meshStages)
					{
						const deBool			requiredSubgroupSize	= boolValues[groupSizeNdx];
						const string			testName				= name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "") + "_" + getShaderStageName(stage);
						const CaseDefinition	caseDef					=
						{
							op,								//  Operator			op;
							stage,							//  VkShaderStageFlags	shaderStage;
							format,							//  VkFormat			format;
							de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
							requiredSubgroupSize,			//  deBool				requiredSubgroupSize;
							DE_FALSE,						//  deBool				requires8BitUniformBuffer;
							DE_FALSE						//  deBool				requires16BitUniformBuffer;
						};

						addFunctionCaseWithPrograms(meshGroup.get(), testName, supportedCheck, initPrograms, test, caseDef);
					}
				}
#endif // CTS_USES_VULKANSC

				// One all-graphics variant per op/format.
				{
					const CaseDefinition caseDef =
					{
						op,								//  Operator			op;
						VK_SHADER_STAGE_ALL_GRAPHICS,	//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						DE_FALSE,						//  deBool				requiredSubgroupSize;
						DE_FALSE,						//  deBool				requires8BitUniformBuffer;
						DE_FALSE						//  deBool				requires16BitUniformBuffer;
					};

					addFunctionCaseWithPrograms(graphicGroup.get(), name, supportedCheck, initPrograms, test, caseDef);
				}

				// Framebuffer (UBO-fed) variants, one per vertex-pipeline stage;
				// these are the only cases that may need 8/16-bit UBO storage.
				for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(fbStages); ++stageIndex)
				{
					const CaseDefinition caseDef =
					{
						op,								//  Operator			op;
						fbStages[stageIndex],			//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						DE_FALSE,						//  deBool				requiredSubgroupSize;
						deBool(needs8BitUBOStorage),	//  deBool				requires8BitUniformBuffer;
						deBool(needs16BitUBOStorage)	//  deBool				requires16BitUniformBuffer;
					};
					const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);

					addFunctionCaseWithPrograms(framebufferGroup.get(), testName, supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
				}
			}
		}
	}

#ifndef CTS_USES_VULKANSC
	// Ray tracing variants use their own (smaller) format list.
	{
		const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();

		for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
		{
			const VkFormat	format		= formats[formatIndex];
			const string	formatName	= subgroups::getFormatNameForGLSL(format);
			const bool		isBool		= subgroups::isFormatBool(format);
			const bool		isFloat		= subgroups::isFormatFloat(format);

			for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
			{
				const OpType	opType		= static_cast<OpType>(opTypeIndex);
				const Operator	op			= getOperator(opType);
				const bool		isBitwiseOp	= (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);

				// Skip float with bitwise category.
				if (isFloat && isBitwiseOp)
					continue;

				// Skip bool when its not the bitwise category.
				if (isBool && !isBitwiseOp)
					continue;

				{
					const string			name	= de::toLower(getOpTypeName(op)) + "_" + formatName;
					const CaseDefinition	caseDef	=
					{
						op,								//  Operator			op;
						SHADER_STAGE_ALL_RAY_TRACING,	//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						DE_FALSE,						//  deBool				requiredSubgroupSize;
						DE_FALSE,						//  deBool				requires8BitUniformBuffer;
						DE_FALSE						//  deBool				requires16BitUniformBuffer;
					};

					addFunctionCaseWithPrograms(raytracingGroup.get(), name, supportedCheck, initPrograms, test, caseDef);
				}
			}
		}
	}
#endif // CTS_USES_VULKANSC

	group->addChild(graphicGroup.release());
	group->addChild(computeGroup.release());
	group->addChild(framebufferGroup.release());
#ifndef CTS_USES_VULKANSC
	group->addChild(raytracingGroup.release());
	group->addChild(meshGroup.release());
#endif // CTS_USES_VULKANSC

	return group.release();
}
} // subgroups
} // vkt
565