/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2020 Google LLC
 * Copyright (c) 2020 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Test new features in VK_KHR_shader_subgroup_uniform_control_flow
 *//*--------------------------------------------------------------------*/

#include <amber/amber.h>

#include "tcuDefs.hpp"

#include "vkDefs.hpp"
#include "vkDeviceUtil.hpp"
#include "vktTestGroupUtil.hpp"
#include "vktAmberTestCase.hpp"
#include "vktSubgroupUniformControlFlowTests.hpp"

namespace vkt
{
namespace subgroups
{
namespace
{

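// Parameters for a single Amber-based test: the script base name, whether the
// shaders use small workgroups, whether the test targets the subgroup size
// control path, the shader stage under test and the subgroup operations used.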
struct Case
{
	Case(const char* b, bool sw, bool use_ssc, vk::VkShaderStageFlagBits s, vk::VkSubgroupFeatureFlagBits o) :
		basename(b),
		small_workgroups(sw),
		use_subgroup_size_control(use_ssc),
		stage(s)
	{
		operation = (vk::VkSubgroupFeatureFlagBits)(o | vk::VK_SUBGROUP_FEATURE_BASIC_BIT);
	}
	const char* basename;
	bool small_workgroups;
	bool use_subgroup_size_control;
	vk::VkShaderStageFlagBits stage;
	vk::VkSubgroupFeatureFlagBits operation;
};

struct CaseGroup
{
	CaseGroup(const char* the_data_dir, const char* the_subdir) : data_dir(the_data_dir), subdir(the_subdir) { }
	void add(const char* basename, bool small_workgroups, bool use_subgroup_size_control, vk::VkShaderStageFlagBits stage, vk::VkSubgroupFeatureFlagBits operation = vk::VK_SUBGROUP_FEATURE_BASIC_BIT)
	{
		cases.push_back(Case(basename, small_workgroups, use_subgroup_size_control, stage, operation));
	}

	const char*	data_dir;
	const char*	subdir;
	std::vector<Case>	cases;
};

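// Amber test case that, before running the script, checks in checkSupport()
// that the device supports the required stage, the required subgroup
// operations and, where requested, the subgroup size control feature.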
class SubgroupUniformControlFlowTestCase : public cts_amber::AmberTestCase
{
public:
	SubgroupUniformControlFlowTestCase(tcu::TestContext&	testCtx,
									   const char*	name,
									   const std::string&	readFilename,
									   bool	small_workgroups,
									   bool	use_subgroup_size_control,
									   vk::VkShaderStageFlagBits stage,
									   vk::VkSubgroupFeatureFlagBits operation) :
		cts_amber::AmberTestCase(testCtx, name, "", readFilename),
		m_small_workgroups(small_workgroups),
		m_use_subgroup_size_control(use_subgroup_size_control),
		m_stage(stage),
		m_operation(operation)
	{ }

	virtual void checkSupport(Context& ctx) const;	// override
private:
	bool	m_small_workgroups;
	bool	m_use_subgroup_size_control;
	vk::VkShaderStageFlagBits	m_stage;
	vk::VkSubgroupFeatureFlagBits	m_operation;
};

void SubgroupUniformControlFlowTestCase::checkSupport(Context& ctx) const
{
	// Check required extensions.
	ctx.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
	ctx.requireDeviceFunctionality("VK_KHR_shader_subgroup_uniform_control_flow");
	if (m_use_subgroup_size_control)
	{
		ctx.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
	}

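	// Query the subgroup properties (supported stages and operations) via the
	// pNext chain of VkPhysicalDeviceProperties2.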
	vk::VkPhysicalDeviceSubgroupProperties subgroupProperties;
	subgroupProperties.sType = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
	subgroupProperties.pNext = DE_NULL;

	vk::VkPhysicalDeviceProperties2 properties2;
	properties2.sType = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
	properties2.pNext = &subgroupProperties;

	ctx.getInstanceInterface().getPhysicalDeviceProperties2(ctx.getPhysicalDevice(), &properties2);

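	// Query the subgroup size control features via the pNext chain of
	// VkPhysicalDeviceFeatures2.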
	vk::VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
	subgroupSizeControlFeatures.sType = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
	subgroupSizeControlFeatures.pNext = DE_NULL;

	vk::VkPhysicalDeviceFeatures2 features2;
	features2.sType = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
	features2.pNext = &subgroupSizeControlFeatures;

	ctx.getInstanceInterface().getPhysicalDeviceFeatures2(ctx.getPhysicalDevice(), &features2);

	// Check that the stage supports the required subgroup operations.
	if ((m_stage & subgroupProperties.supportedStages) == 0)
	{
		TCU_THROW(NotSupportedError, "Device does not support subgroup operations in this stage");
	}
	if ((m_operation & subgroupProperties.supportedOperations) != m_operation)
	{
		TCU_THROW(NotSupportedError, "Device does not support required operations");
	}

	// For the compute shader tests, there are variants for implementations
	// that support the subgroup size control extension and variants for those
	// that do not. If the implementation supports computeFullSubgroups, the
	// tests are expected to request it, so each variant is only supported on
	// the matching kind of implementation.
	if (m_stage == vk::VK_SHADER_STAGE_COMPUTE_BIT)
	{
		if (m_use_subgroup_size_control)
		{
			if (subgroupSizeControlFeatures.computeFullSubgroups != VK_TRUE)
			{
				TCU_THROW(NotSupportedError, "Implementation does not support subgroup size control");
			}
		}
		else
		{
			if (subgroupSizeControlFeatures.computeFullSubgroups == VK_TRUE)
			{
				TCU_THROW(NotSupportedError, "These tests are not enabled for subgroup size control implementations");
			}
		}
	}

	// There are large and small variants of the tests. The large variants
	// require 256 invocations in a workgroup.
	if (!m_small_workgroups)
	{
		vk::VkPhysicalDeviceProperties properties;
		ctx.getInstanceInterface().getPhysicalDeviceProperties(ctx.getPhysicalDevice(), &properties);
		if (properties.limits.maxComputeWorkGroupInvocations < 256)
		{
			TCU_THROW(NotSupportedError, "Device supports fewer than 256 invocations per workgroup");
		}
	}
}

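// Adds one Amber test case to 'tests' for every entry in 'group'. When the
// 'requirements' template parameter is true, the generated cases also declare
// the subgroup size control requirements.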
template<bool requirements> void addTestsForAmberFiles(tcu::TestCaseGroup* tests, CaseGroup group)
{
	tcu::TestContext&	testCtx = tests->getTestContext();
	const std::string	data_dir(group.data_dir);
	const std::string	subdir(group.subdir);
	const std::string	category = data_dir + "/" + subdir;
	std::vector<Case> cases(group.cases);

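	// Each case reads its Amber script from
	// data/vulkan/amber/<data_dir>/<subdir>/<basename>.amber.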
	for (unsigned i = 0; i < cases.size(); ++i)
	{
		const std::string file = std::string(cases[i].basename) + ".amber";
		std::string readFilename("vulkan/amber/");
		readFilename.append(category);
		readFilename.append("/");
		readFilename.append(file);
		SubgroupUniformControlFlowTestCase*	testCase =
			new SubgroupUniformControlFlowTestCase(testCtx,
													cases[i].basename,
													readFilename,
													cases[i].small_workgroups,
													cases[i].use_subgroup_size_control,
													cases[i].stage,
													cases[i].operation);
		DE_ASSERT(testCase != DE_NULL);
		if (requirements)
		{
			testCase->addRequirement("SubgroupSizeControl.computeFullSubgroups");
			testCase->addRequirement("SubgroupSizeControl.subgroupSizeControl");
		}
		tests->addChild(testCase);
	}
}

} // anonymous

tcu::TestCaseGroup* createSubgroupUniformControlFlowTests(tcu::TestContext& testCtx)
{
	// There are four main groups of tests. Each group runs the same set of base
	// shaders with minor variations. The groups are with or without compute full
	// subgroups and a larger or smaller number of invocations. For each group of
	// tests, shaders test that either odd or even subgroups reconverge after
	// diverging, without reconverging the whole workgroup. For the _partial
	// tests, the workgroup is launched without a full final subgroup (not enough
	// invocations).
	//
	// It is assumed that if an implementation does not support the compute full
	// subgroups feature, it will always launch full subgroups. Therefore, any
	// given implementation only runs half of the tests. Implementations that do
	// not support compute full subgroups cannot support the tests that enable
	// it, while implementations that do support the feature will (likely) not
	// pass the tests that do not enable the feature.

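	// The child groups added below are large_full[_control],
	// large_partial[_control], small_full[_control], small_partial[_control]
	// and discard.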
	de::MovePtr<tcu::TestCaseGroup>	uniformControlFlowTests(new tcu::TestCaseGroup(testCtx, "subgroup_uniform_control_flow"));

	// Location of the Amber script files under data/vulkan/amber source tree.
	const char* data_dir = "subgroup_uniform_control_flow";
	const char*	large_dir = "large";
	const char*	small_dir = "small";
	const char*	large_control_dir = "large_control";
	const char* small_control_dir = "small_control";

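	// First the large-workgroup variants: one pass without (false) and one
	// with (true) the subgroup size control path.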
	std::vector<bool> controls = {false, true};
	for (unsigned c = 0; c < controls.size(); ++c)
	{
		// Full subgroups.
		bool small = false;
		bool control = controls[c];
		vk::VkShaderStageFlagBits stage = vk::VK_SHADER_STAGE_COMPUTE_BIT;
		const char*	subdir = (control ? large_control_dir : large_dir);
		CaseGroup group(data_dir, subdir);
		// if/else diverge
		group.add("subgroup_reconverge00", small, control, stage);
		// do while diverge
		group.add("subgroup_reconverge01", small, control, stage);
		// while true with break
		group.add("subgroup_reconverge02", small, control, stage);
		// if/else diverge, volatile
		group.add("subgroup_reconverge03", small, control, stage);
		// early return and if/else diverge
		group.add("subgroup_reconverge04", small, control, stage);
		// early return and if/else volatile
		group.add("subgroup_reconverge05", small, control, stage);
		// while true with volatile conditional break and early return
		group.add("subgroup_reconverge06", small, control, stage);
		// while true return and break
		group.add("subgroup_reconverge07", small, control, stage);
		// for loop atomics with conditional break
		group.add("subgroup_reconverge08", small, control, stage);
		// diverge in for loop
		group.add("subgroup_reconverge09", small, control, stage);
		// diverge in for loop and break
		group.add("subgroup_reconverge10", small, control, stage);
		// diverge in for loop and continue
		group.add("subgroup_reconverge11", small, control, stage);
		// early return, divergent switch
		group.add("subgroup_reconverge12", small, control, stage);
		// early return, divergent switch more cases
		group.add("subgroup_reconverge13", small, control, stage);
		// divergent switch, some subgroups terminate
		group.add("subgroup_reconverge14", small, control, stage);
		// switch in switch
		group.add("subgroup_reconverge15", small, control, stage);
		// for loop unequal iterations
		group.add("subgroup_reconverge16", small, control, stage);
		// if/else with nested returns
		group.add("subgroup_reconverge17", small, control, stage);
		// if/else subgroup all equal
		group.add("subgroup_reconverge18", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// if/else subgroup any nested return
		group.add("subgroup_reconverge19", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// deeply nested
		group.add("subgroup_reconverge20", small, control, stage);
		const char*	group_name = (control ? "large_full_control" : "large_full");
		// Large Full subgroups
		uniformControlFlowTests->addChild(createTestGroup(testCtx, group_name, control ? addTestsForAmberFiles<true> : addTestsForAmberFiles<false>, group));

		// Partial subgroup.
		group = CaseGroup(data_dir, subdir);
		// if/else diverge
		group.add("subgroup_reconverge_partial00", small, control, stage);
		// do while diverge
		group.add("subgroup_reconverge_partial01", small, control, stage);
		// while true with break
		group.add("subgroup_reconverge_partial02", small, control, stage);
		// if/else diverge, volatile
		group.add("subgroup_reconverge_partial03", small, control, stage);
		// early return and if/else diverge
		group.add("subgroup_reconverge_partial04", small, control, stage);
		// early return and if/else volatile
		group.add("subgroup_reconverge_partial05", small, control, stage);
		// while true with volatile conditional break and early return
		group.add("subgroup_reconverge_partial06", small, control, stage);
		// while true return and break
		group.add("subgroup_reconverge_partial07", small, control, stage);
		// for loop atomics with conditional break
		group.add("subgroup_reconverge_partial08", small, control, stage);
		// diverge in for loop
		group.add("subgroup_reconverge_partial09", small, control, stage);
		// diverge in for loop and break
		group.add("subgroup_reconverge_partial10", small, control, stage);
		// diverge in for loop and continue
		group.add("subgroup_reconverge_partial11", small, control, stage);
		// early return, divergent switch
		group.add("subgroup_reconverge_partial12", small, control, stage);
		// early return, divergent switch more cases
		group.add("subgroup_reconverge_partial13", small, control, stage);
		// divergent switch, some subgroups terminate
		group.add("subgroup_reconverge_partial14", small, control, stage);
		// switch in switch
		group.add("subgroup_reconverge_partial15", small, control, stage);
		// for loop unequal iterations
		group.add("subgroup_reconverge_partial16", small, control, stage);
		// if/else with nested returns
		group.add("subgroup_reconverge_partial17", small, control, stage);
		// if/else subgroup all equal
		group.add("subgroup_reconverge_partial18", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// if/else subgroup any nested return
		group.add("subgroup_reconverge_partial19", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// deeply nested
		group.add("subgroup_reconverge_partial20", small, control, stage);
		group_name = (control ? "large_partial_control" : "large_partial");
		// Large Partial subgroups
		uniformControlFlowTests->addChild(createTestGroup(testCtx, group_name, control ? addTestsForAmberFiles<true> : addTestsForAmberFiles<false>, group));
	}

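	// Now the small-workgroup variants, which do not require support for 256
	// invocations per workgroup.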
	for (unsigned c = 0; c < controls.size(); ++c)
	{
		// Full subgroups.
		bool small = true;
		bool control = controls[c];
		vk::VkShaderStageFlagBits stage = vk::VK_SHADER_STAGE_COMPUTE_BIT;
		const char*	subdir = (control ? small_control_dir : small_dir);
		CaseGroup group(data_dir, subdir);
		// if/else diverge
		group.add("small_subgroup_reconverge00", small, control, stage);
		// do while diverge
		group.add("small_subgroup_reconverge01", small, control, stage);
		// while true with break
		group.add("small_subgroup_reconverge02", small, control, stage);
		// if/else diverge, volatile
		group.add("small_subgroup_reconverge03", small, control, stage);
		// early return and if/else diverge
		group.add("small_subgroup_reconverge04", small, control, stage);
		// early return and if/else volatile
		group.add("small_subgroup_reconverge05", small, control, stage);
		// while true with volatile conditional break and early return
		group.add("small_subgroup_reconverge06", small, control, stage);
		// while true return and break
		group.add("small_subgroup_reconverge07", small, control, stage);
		// for loop atomics with conditional break
		group.add("small_subgroup_reconverge08", small, control, stage);
		// diverge in for loop
		group.add("small_subgroup_reconverge09", small, control, stage);
		// diverge in for loop and break
		group.add("small_subgroup_reconverge10", small, control, stage);
		// diverge in for loop and continue
		group.add("small_subgroup_reconverge11", small, control, stage);
		// early return, divergent switch
		group.add("small_subgroup_reconverge12", small, control, stage);
		// early return, divergent switch more cases
		group.add("small_subgroup_reconverge13", small, control, stage);
		// divergent switch, some subgroups terminate
		group.add("small_subgroup_reconverge14", small, control, stage);
		// switch in switch
		group.add("small_subgroup_reconverge15", small, control, stage);
		// for loop unequal iterations
		group.add("small_subgroup_reconverge16", small, control, stage);
		// if/else with nested returns
		group.add("small_subgroup_reconverge17", small, control, stage);
		// if/else subgroup all equal
		group.add("small_subgroup_reconverge18", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// if/else subgroup any nested return
		group.add("small_subgroup_reconverge19", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// deeply nested
		group.add("small_subgroup_reconverge20", small, control, stage);
		const char*	group_name = (control ? "small_full_control" : "small_full");
		// Small Full subgroups
		uniformControlFlowTests->addChild(createTestGroup(testCtx, group_name, control ? addTestsForAmberFiles<true> : addTestsForAmberFiles<false>, group));

		// Partial subgroup.
		group = CaseGroup(data_dir, subdir);
		// if/else diverge
		group.add("small_subgroup_reconverge_partial00", small, control, stage);
		// do while diverge
		group.add("small_subgroup_reconverge_partial01", small, control, stage);
		// while true with break
		group.add("small_subgroup_reconverge_partial02", small, control, stage);
		// if/else diverge, volatile
		group.add("small_subgroup_reconverge_partial03", small, control, stage);
		// early return and if/else diverge
		group.add("small_subgroup_reconverge_partial04", small, control, stage);
		// early return and if/else volatile
		group.add("small_subgroup_reconverge_partial05", small, control, stage);
		// while true with volatile conditional break and early return
		group.add("small_subgroup_reconverge_partial06", small, control, stage);
		// while true return and break
		group.add("small_subgroup_reconverge_partial07", small, control, stage);
		// for loop atomics with conditional break
		group.add("small_subgroup_reconverge_partial08", small, control, stage);
		// diverge in for loop
		group.add("small_subgroup_reconverge_partial09", small, control, stage);
		// diverge in for loop and break
		group.add("small_subgroup_reconverge_partial10", small, control, stage);
		// diverge in for loop and continue
		group.add("small_subgroup_reconverge_partial11", small, control, stage);
		// early return, divergent switch
		group.add("small_subgroup_reconverge_partial12", small, control, stage);
		// early return, divergent switch more cases
		group.add("small_subgroup_reconverge_partial13", small, control, stage);
		// divergent switch, some subgroups terminate
		group.add("small_subgroup_reconverge_partial14", small, control, stage);
		// switch in switch
		group.add("small_subgroup_reconverge_partial15", small, control, stage);
		// for loop unequal iterations
		group.add("small_subgroup_reconverge_partial16", small, control, stage);
		// if/else with nested returns
		group.add("small_subgroup_reconverge_partial17", small, control, stage);
		// if/else subgroup all equal
		group.add("small_subgroup_reconverge_partial18", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// if/else subgroup any nested return
		group.add("small_subgroup_reconverge_partial19", small, control, stage, vk::VK_SUBGROUP_FEATURE_VOTE_BIT);
		// deeply nested
		group.add("small_subgroup_reconverge_partial20", small, control, stage);
		group_name = (control ? "small_partial_control" : "small_partial");
		// Small Partial subgroups
		uniformControlFlowTests->addChild(createTestGroup(testCtx, group_name, control ? addTestsForAmberFiles<true> : addTestsForAmberFiles<false>, group));
	}

	// Discard test
	CaseGroup group(data_dir, "discard");
	// discard test
	group.add("subgroup_reconverge_discard00", true, false, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
	// Discard tests
	uniformControlFlowTests->addChild(createTestGroup(testCtx, "discard", addTestsForAmberFiles<false>, group));

	return uniformControlFlowTests.release();
}

} // subgroups
} // vkt