1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2019 The Khronos Group Inc.
6 * Copyright (c) 2019 Google Inc.
7 * Copyright (c) 2017 Codeplay Software Ltd.
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 */ /*!
22 * \file
23 * \brief Subgroups Tests
24 */ /*--------------------------------------------------------------------*/
25
26 #include "vktSubgroupsQuadTests.hpp"
27 #include "vktSubgroupsTestsUtils.hpp"
28
29 #include <string>
30 #include <vector>
31
32 using namespace tcu;
33 using namespace std;
34 using namespace vk;
35 using namespace vkt;
36
37 namespace
38 {
// Quad operation variants exercised by this test group.
enum OpType
{
	OPTYPE_QUAD_BROADCAST = 0,			// subgroupQuadBroadcast with compile-time constant lane ids
	OPTYPE_QUAD_BROADCAST_NONCONST,		// subgroupQuadBroadcast with non-constant lane ids (built as SPIR-V 1.5, see initPrograms)
	OPTYPE_QUAD_SWAP_HORIZONTAL,		// subgroupQuadSwapHorizontal
	OPTYPE_QUAD_SWAP_VERTICAL,			// subgroupQuadSwapVertical
	OPTYPE_QUAD_SWAP_DIAGONAL,			// subgroupQuadSwapDiagonal
	OPTYPE_LAST							// number of op types; not a real case
};
48
// Parameters describing a single test case instance.
struct CaseDefinition
{
	OpType				opType;						// which quad operation to test
	VkShaderStageFlags	shaderStage;				// stage (or stage set) the shader code runs in
	VkFormat			format;						// data format of the values being swapped/broadcast
	de::SharedPtr<bool>	geometryPointSizeSupported;	// filled in by supportedCheck, read when building programs
	deBool				requiredSubgroupSize;		// if true, re-run with every supported explicit subgroup size
};
57
checkVertexPipelineStages(const void * internalData,vector<const void * > datas,deUint32 width,deUint32)58 static bool checkVertexPipelineStages (const void* internalData,
59 vector<const void*> datas,
60 deUint32 width,
61 deUint32)
62 {
63 DE_UNREF(internalData);
64
65 return subgroups::check(datas, width, 1);
66 }
67
checkComputeOrMesh(const void * internalData,vector<const void * > datas,const deUint32 numWorkgroups[3],const deUint32 localSize[3],deUint32)68 static bool checkComputeOrMesh (const void* internalData,
69 vector<const void*> datas,
70 const deUint32 numWorkgroups[3],
71 const deUint32 localSize[3],
72 deUint32)
73 {
74 DE_UNREF(internalData);
75
76 return subgroups::checkComputeOrMesh(datas, numWorkgroups, localSize, 1);
77 }
78
getOpTypeName(OpType opType)79 string getOpTypeName (OpType opType)
80 {
81 switch (opType)
82 {
83 case OPTYPE_QUAD_BROADCAST: return "subgroupQuadBroadcast";
84 case OPTYPE_QUAD_BROADCAST_NONCONST: return "subgroupQuadBroadcast";
85 case OPTYPE_QUAD_SWAP_HORIZONTAL: return "subgroupQuadSwapHorizontal";
86 case OPTYPE_QUAD_SWAP_VERTICAL: return "subgroupQuadSwapVertical";
87 case OPTYPE_QUAD_SWAP_DIAGONAL: return "subgroupQuadSwapDiagonal";
88 default: TCU_THROW(InternalError, "Unsupported op type");
89 }
90 }
91
getOpTypeCaseName(OpType opType)92 string getOpTypeCaseName (OpType opType)
93 {
94 switch (opType)
95 {
96 case OPTYPE_QUAD_BROADCAST: return "subgroupquadbroadcast";
97 case OPTYPE_QUAD_BROADCAST_NONCONST: return "subgroupquadbroadcast_nonconst";
98 case OPTYPE_QUAD_SWAP_HORIZONTAL: return "subgroupquadswaphorizontal";
99 case OPTYPE_QUAD_SWAP_VERTICAL: return "subgroupquadswapvertical";
100 case OPTYPE_QUAD_SWAP_DIAGONAL: return "subgroupquadswapdiagonal";
101 default: TCU_THROW(InternalError, "Unsupported op type");
102 }
103 }
104
getExtHeader(VkFormat format)105 string getExtHeader (VkFormat format)
106 {
107 return "#extension GL_KHR_shader_subgroup_quad: enable\n"
108 "#extension GL_KHR_shader_subgroup_ballot: enable\n" +
109 subgroups::getAdditionalExtensionForFormat(format);
110 }
111
// Builds the GLSL test body shared by all stages. Every invocation performs
// the quad operation on data[gl_SubgroupInvocationID], computes the id of
// the partner invocation whose value the result should equal, and compares
// against data[otherID] — but only when that partner lane is active
// (checked via the subgroup ballot). tempRes stays 1 on success and is
// forced to 0 on any mismatch.
string getTestSrc (const CaseDefinition &caseDef)
{
	// Per-op partner lookup within a quad; only the three swap ops use it
	// (broadcast computes the partner from the broadcast lane id instead).
	const string swapTable[OPTYPE_LAST] =
	{
		"",
		"",
		" const uint swapTable[4] = {1, 0, 3, 2};\n",
		" const uint swapTable[4] = {2, 3, 0, 1};\n",
		" const uint swapTable[4] = {3, 2, 1, 0};\n",
	};
	// Shared validation snippet; expects `op` and `otherID` to be in scope.
	const string validate =
		" if (subgroupBallotBitExtract(mask, otherID) && op !=data[otherID])\n"
		" tempRes = 0;\n";
	const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
	const string op = getOpTypeName(caseDef.opType);
	ostringstream testSrc;

	testSrc << " uvec4 mask = subgroupBallot(true);\n"
		<< swapTable[caseDef.opType]
		<< " tempRes = 1;\n";

	if (caseDef.opType == OPTYPE_QUAD_BROADCAST)
	{
		// Constant-id broadcast: unroll all four lane ids here, at shader
		// build time, so each call site passes a literal constant.
		for (int i=0; i<4; i++)
		{
			testSrc << " {\n"
				<< " " << fmt << " op = " << op << "(data[gl_SubgroupInvocationID], " << i << ");\n"
				<< " uint otherID = (gl_SubgroupInvocationID & ~0x3) + " << i << ";\n"
				<< validate
				<< " }\n";
		}
	}
	else if (caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST)
	{
		// Non-constant broadcast: loop over lane ids inside the shader, then
		// additionally exercise lane ids that are uniform only over subsets
		// of invocations (active lanes / a single quad), which stresses the
		// dynamic-id requirements checked in supportedCheck.
		testSrc << " for (int i=0; i<4; i++)"
			<< " {\n"
			<< " " << fmt << " op = " << op << "(data[gl_SubgroupInvocationID], i);\n"
			<< " uint otherID = (gl_SubgroupInvocationID & ~0x3) + i;\n"
			<< validate
			<< " }\n"
			<< " uint quadID = gl_SubgroupInvocationID >> 2;\n"
			<< " uint quadInvocation = gl_SubgroupInvocationID & 0x3;\n"
			<< " // Test lane ID that is only uniform in active lanes\n"
			<< " if (quadInvocation >= 2)\n"
			<< " {\n"
			<< " uint id = quadInvocation & ~1;\n"
			<< " " << fmt << " op = " << op << "(data[gl_SubgroupInvocationID], id);\n"
			<< " uint otherID = 4*quadID + id;\n"
			<< validate
			<< " }\n"
			<< " // Test lane ID that is only quad uniform, not subgroup uniform\n"
			<< " {\n"
			<< " uint id = quadID & 0x3;\n"
			<< " " << fmt << " op = " << op << "(data[gl_SubgroupInvocationID], id);\n"
			<< " uint otherID = 4*quadID + id;\n"
			<< validate
			<< " }\n";
	}
	else
	{
		// Swap ops: the expected partner comes from the swapTable emitted above.
		testSrc << " " << fmt << " op = " << op << "(data[gl_SubgroupInvocationID]);\n"
			<< " uint otherID = (gl_SubgroupInvocationID & ~0x3) + swapTable[gl_SubgroupInvocationID & 0x3];\n"
			<< validate;
	}

	return testSrc.str();
}
179
initFrameBufferPrograms(SourceCollections & programCollection,CaseDefinition caseDef)180 void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
181 {
182 const SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST) ? SPIRV_VERSION_1_5 : SPIRV_VERSION_1_3;
183 const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
184
185 subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, getExtHeader(caseDef.format), getTestSrc(caseDef), "");
186 }
187
initPrograms(SourceCollections & programCollection,CaseDefinition caseDef)188 void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
189 {
190 const bool spirv15required = caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST;
191 #ifndef CTS_USES_VULKANSC
192 const bool spirv14required = (isAllRayTracingStages(caseDef.shaderStage) || isAllMeshShadingStages(caseDef.shaderStage));
193 #else
194 const bool spirv14required = false;
195 #endif // CTS_USES_VULKANSC
196 const SpirvVersion spirvVersion = spirv15required ? SPIRV_VERSION_1_5
197 : spirv14required ? SPIRV_VERSION_1_4
198 : SPIRV_VERSION_1_3;
199 const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u, (spirv14required && !spirv15required));
200 const string extHeader = getExtHeader(caseDef.format);
201 const string testSrc = getTestSrc(caseDef);
202
203 subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
204 }
205
// Runtime support check executed before each test case: throws
// NotSupportedError when the device lacks any feature the case relies on,
// and records whether tessellation/geometry point size is supported for use
// when building programs. The order of checks determines which
// NotSupportedError message is reported first.
void supportedCheck (Context& context, CaseDefinition caseDef)
{
	if (!subgroups::isSubgroupSupported(context))
		TCU_THROW(NotSupportedError, "Subgroup operations are not supported");

	if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_QUAD_BIT))
		TCU_THROW(NotSupportedError, "Device does not support subgroup quad operations");

	if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
		TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");

	// Non-constant broadcast lane ids additionally need SubgroupBroadcastDynamicId.
	if ((caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST) && !subgroups::isSubgroupBroadcastDynamicIdSupported(context))
		TCU_THROW(NotSupportedError, "Device does not support SubgroupBroadcastDynamicId");

	if (caseDef.requiredSubgroupSize)
	{
		context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");

		// Vulkan SC exposes the subgroup size control structures under the EXT names.
#ifndef CTS_USES_VULKANSC
		const VkPhysicalDeviceSubgroupSizeControlFeatures& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeatures();
		const VkPhysicalDeviceSubgroupSizeControlProperties& subgroupSizeControlProperties = context.getSubgroupSizeControlProperties();
#else
		const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
		const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
#endif // CTS_USES_VULKANSC

		if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
			TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");

		if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
			TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");

		// Every stage of the case must accept an explicit required subgroup size.
		if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
			TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
	}

	// Consumed later by initPrograms/initFrameBufferPrograms via the shared pointer.
	*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);

#ifndef CTS_USES_VULKANSC
	if (isAllRayTracingStages(caseDef.shaderStage))
	{
		context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
	}
	else if (isAllMeshShadingStages(caseDef.shaderStage))
	{
		context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
		context.requireDeviceFunctionality("VK_EXT_mesh_shader");

		// Task-shader cases need the optional taskShader feature on top of the extension.
		if ((caseDef.shaderStage & VK_SHADER_STAGE_TASK_BIT_EXT) != 0u)
		{
			const auto& features = context.getMeshShaderFeaturesEXT();
			if (!features.taskShader)
				TCU_THROW(NotSupportedError, "Task shaders not supported");
		}
	}
#endif // CTS_USES_VULKANSC

	subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
265
noSSBOtest(Context & context,const CaseDefinition caseDef)266 TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
267 {
268 subgroups::SSBOData inputData;
269 inputData.format = caseDef.format;
270 inputData.layout = subgroups::SSBOData::LayoutStd140;
271 inputData.numElements = subgroups::maxSupportedSubgroupSize();
272 inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
273 inputData.bindingType = subgroups::SSBOData::BindingUBO;
274
275 switch (caseDef.shaderStage)
276 {
277 case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
278 case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
279 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
280 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
281 default: TCU_THROW(InternalError, "Unhandled shader stage");
282 }
283 }
284
// Test body for the compute, mesh, graphics and ray tracing groups.
// Dispatches to the appropriate shared test helper based on the case's
// stage set; for compute/mesh cases with requiredSubgroupSize set, re-runs
// the test at every power-of-two subgroup size the device supports.
TestStatus test (Context& context, const CaseDefinition caseDef)
{
	const bool isCompute = isAllComputeStages(caseDef.shaderStage);
#ifndef CTS_USES_VULKANSC
	const bool isMesh = isAllMeshShadingStages(caseDef.shaderStage);
#else
	const bool isMesh = false;
#endif // CTS_USES_VULKANSC
	DE_ASSERT(!(isCompute && isMesh));

	if (isCompute || isMesh)
	{
#ifndef CTS_USES_VULKANSC
		const VkPhysicalDeviceSubgroupSizeControlProperties& subgroupSizeControlProperties = context.getSubgroupSizeControlProperties();
#else
		const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
#endif // CTS_USES_VULKANSC
		TestLog& log = context.getTestContext().getLog();
		const subgroups::SSBOData inputData
		{
			subgroups::SSBOData::InitializeNonZero,	//  InputDataInitializeType	initializeType;
			subgroups::SSBOData::LayoutStd430,		//  InputDataLayoutType		layout;
			caseDef.format,							//  vk::VkFormat			format;
			subgroups::maxSupportedSubgroupSize(),	//  vk::VkDeviceSize		numElements;
		};

		// Without an explicit required size, run once with the implementation default.
		if (caseDef.requiredSubgroupSize == DE_FALSE)
		{
			if (isCompute)
				return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh);
			else
				return subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh);
		}

		log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
			<< subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;

		// According to the spec, requiredSubgroupSize must be a power-of-two integer.
		for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
		{
			TestStatus result (QP_TEST_RESULT_INTERNAL_ERROR, "Internal Error");

			if (isCompute)
				result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh, size);
			else
				result = subgroups::makeMeshTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkComputeOrMesh, size);

			// Stop at the first failing size so the failure is attributed to it.
			if (result.getCode() != QP_TEST_RESULT_PASS)
			{
				log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
				return result;
			}
		}

		return TestStatus::pass("OK");
	}
	else if (isAllGraphicsStages(caseDef.shaderStage))
	{
		// Graphics path: run the op across all graphics stages the device supports.
		const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
		subgroups::SSBOData inputData;

		inputData.format			= caseDef.format;
		inputData.layout			= subgroups::SSBOData::LayoutStd430;
		inputData.numElements		= subgroups::maxSupportedSubgroupSize();
		inputData.initializeType	= subgroups::SSBOData::InitializeNonZero;
		inputData.binding			= 4u;
		inputData.stages			= stages;

		return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
	}
#ifndef CTS_USES_VULKANSC
	else if (isAllRayTracingStages(caseDef.shaderStage))
	{
		// Ray tracing path: run the op across all supported ray tracing stages.
		const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
		const subgroups::SSBOData inputData =
		{
			subgroups::SSBOData::InitializeNonZero,	//  InputDataInitializeType	initializeType;
			subgroups::SSBOData::LayoutStd430,		//  InputDataLayoutType		layout;
			caseDef.format,							//  vk::VkFormat			format;
			subgroups::maxSupportedSubgroupSize(),	//  vk::VkDeviceSize		numElements;
			subgroups::SSBOData::BindingSSBO,		//  bool					isImage;
			6u,										//  deUint32				binding;
			stages,									//  vk::VkShaderStageFlags	stages;
		};

		return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
	}
#endif // CTS_USES_VULKANSC
	else
		TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
376 }
377
378 namespace vkt
379 {
380 namespace subgroups
381 {
// Builds the "quad" test group: for every (format, op type) pair, registers
// compute cases (with and without an explicit required subgroup size), mesh
// and task cases, an all-graphics case, per-stage framebuffer cases, and —
// for ray tracing formats — ray tracing cases. Ownership of the returned
// group passes to the caller.
TestCaseGroup* createSubgroupsQuadTests (TestContext& testCtx)
{
	de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "quad", "Subgroup quad category tests"));
	de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
	de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup arithmetic category tests: compute"));
	de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
#ifndef CTS_USES_VULKANSC
	de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup arithmetic category tests: ray tracing"));
	de::MovePtr<TestCaseGroup> meshGroup (new TestCaseGroup(testCtx, "mesh", "Subgroup arithmetic category tests: mesh shading"));
#endif // CTS_USES_VULKANSC
	// Stages that get individual framebuffer-path cases.
	const VkShaderStageFlags fbStages[] =
	{
		VK_SHADER_STAGE_VERTEX_BIT,
		VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
		VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
		VK_SHADER_STAGE_GEOMETRY_BIT,
	};
#ifndef CTS_USES_VULKANSC
	const VkShaderStageFlags meshStages[] =
	{
		VK_SHADER_STAGE_MESH_BIT_EXT,
		VK_SHADER_STAGE_TASK_BIT_EXT,
	};
#endif // CTS_USES_VULKANSC
	// Toggles the requiredSubgroupSize variants for compute/mesh cases.
	const deBool boolValues[] =
	{
		DE_FALSE,
		DE_TRUE
	};

	{
		const vector<VkFormat> formats = subgroups::getAllFormats();

		for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
		{
			const VkFormat format = formats[formatIndex];
			const string formatName = subgroups::getFormatNameForGLSL(format);

			for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
			{
				const OpType opType = static_cast<OpType>(opTypeIndex);
				const string name = getOpTypeCaseName(opType) + "_" + formatName;

				// Compute cases: one with the default subgroup size, one with
				// an explicit required size (suffix "_requiredsubgroupsize").
				for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
				{
					const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
					const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
					const string testName = name + testNameSuffix;
					const CaseDefinition caseDef =
					{
						opType,							//  OpType				opType;
						VK_SHADER_STAGE_COMPUTE_BIT,	//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						requiredSubgroupSize,			//  deBool				requiredSubgroupSize;
					};

					addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
				}

#ifndef CTS_USES_VULKANSC
				// Mesh/task cases: same required-size matrix, one case per mesh stage.
				for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
				{
					for (const auto& stage : meshStages)
					{
						const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
						const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
						const string testName = name + testNameSuffix + "_" + getShaderStageName(stage);
						const CaseDefinition caseDef =
						{
							opType,							//  OpType				opType;
							stage,							//  VkShaderStageFlags	shaderStage;
							format,							//  VkFormat			format;
							de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
							requiredSubgroupSize,			//  deBool				requiredSubgroupSize;
						};

						addFunctionCaseWithPrograms(meshGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
					}
				}
#endif // CTS_USES_VULKANSC

				// One case running the op across all graphics stages at once.
				{
					const CaseDefinition caseDef =
					{
						opType,							//  OpType				opType;
						VK_SHADER_STAGE_ALL_GRAPHICS,	//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						DE_FALSE						//  deBool				requiredSubgroupSize;
					};

					addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
				}

				// Per-stage framebuffer (UBO input) cases.
				for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(fbStages); ++stageIndex)
				{
					const CaseDefinition caseDef =
					{
						opType,							//  OpType				opType;
						fbStages[stageIndex],			//  VkShaderStageFlags	shaderStage;
						format,							//  VkFormat			format;
						de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
						DE_FALSE						//  deBool				requiredSubgroupSize;
					};
					const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);

					addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
				}
			}
		}
	}

#ifndef CTS_USES_VULKANSC
	// Ray tracing cases use their own (typically smaller) format list.
	{
		const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();

		for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
		{
			const VkFormat format = formats[formatIndex];
			const string formatName = subgroups::getFormatNameForGLSL(format);

			for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
			{
				const OpType opType = static_cast<OpType>(opTypeIndex);
				const string testName = getOpTypeCaseName(opType) + "_" + formatName;
				const CaseDefinition caseDef =
				{
					opType,							//  OpType				opType;
					SHADER_STAGE_ALL_RAY_TRACING,	//  VkShaderStageFlags	shaderStage;
					format,							//  VkFormat			format;
					de::SharedPtr<bool>(new bool),	//  de::SharedPtr<bool>	geometryPointSizeSupported;
					DE_FALSE						//  deBool				requiredSubgroupSize;
				};

				addFunctionCaseWithPrograms(raytracingGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
			}
		}
	}
#endif // CTS_USES_VULKANSC

	group->addChild(graphicGroup.release());
	group->addChild(computeGroup.release());
	group->addChild(framebufferGroup.release());
#ifndef CTS_USES_VULKANSC
	group->addChild(raytracingGroup.release());
	group->addChild(meshGroup.release());
#endif // CTS_USES_VULKANSC

	return group.release();
}
533 } // subgroups
534 } // vkt
535