1 
2 /*------------------------------------------------------------------------
3  * Vulkan Conformance Tests
4  * ------------------------
5  *
6  * Copyright (c) 2019 The Khronos Group Inc.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Synchronization timeline semaphore tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktSynchronizationBasicSemaphoreTests.hpp"
26 #include "vktSynchronizationOperation.hpp"
27 #include "vktSynchronizationOperationTestData.hpp"
28 #include "vktSynchronizationOperationResources.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktSynchronizationUtil.hpp"
31 #include "vktExternalMemoryUtil.hpp"
32 #include "vktCustomInstancesDevices.hpp"
33 #include "vkBarrierUtil.hpp"
34 
35 #include "vkDefs.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkDeviceUtil.hpp"
39 #include "vkCmdUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkRef.hpp"
42 #include "vkTypeUtil.hpp"
43 #include "vkBufferWithMemory.hpp"
44 #include "vkSafetyCriticalUtil.hpp"
45 
46 #include "tcuTestLog.hpp"
47 #include "tcuCommandLine.hpp"
48 
49 #include "deClock.h"
50 #include "deRandom.hpp"
51 #include "deThread.hpp"
52 #include "deUniquePtr.hpp"
53 
54 #include <limits>
55 #include <set>
56 #include <iterator>
57 #include <algorithm>
58 #include <sstream>
59 
60 namespace vkt
61 {
62 namespace synchronization
63 {
64 namespace
65 {
66 
67 using namespace vk;
68 using tcu::TestLog;
69 using de::MovePtr;
70 using de::SharedPtr;
71 
72 template<typename T>
73 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
74 {
75 	return SharedPtr<Move<T> >(new Move<T>(move));
76 }
77 
78 template<typename T>
79 inline SharedPtr<T> makeSharedPtr (de::MovePtr<T> move)
80 {
81 	return SharedPtr<T>(move.release());
82 }
83 
84 template<typename T>
85 inline SharedPtr<T> makeSharedPtr (T* ptr)
86 {
87 	return SharedPtr<T>(ptr);
88 }
89 
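// Query VkPhysicalDeviceTimelineSemaphoreProperties through the
// VkPhysicalDeviceProperties2 chain and return the implementation's
// maxTimelineSemaphoreValueDifference limit.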
90 deUint64 getMaxTimelineSemaphoreValueDifference(const InstanceInterface& vk,
91 												const VkPhysicalDevice physicalDevice)
92 {
93 	VkPhysicalDeviceTimelineSemaphoreProperties		timelineSemaphoreProperties;
94 	VkPhysicalDeviceProperties2						properties;
95 
96 	deMemset(&timelineSemaphoreProperties, 0, sizeof(timelineSemaphoreProperties));
97 	timelineSemaphoreProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES;
98 
99 	deMemset(&properties, 0, sizeof(properties));
100 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
101 	properties.pNext = &timelineSemaphoreProperties;
102 
103 	vk.getPhysicalDeviceProperties2(physicalDevice, &properties);
104 
105 	return timelineSemaphoreProperties.maxTimelineSemaphoreValueDifference;
106 }
107 
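// Submit an empty batch on the given queue that signals the timeline
// semaphore to timelineValue from the device. If a fence is provided, a
// second empty submission is made with that fence and the function blocks
// until the fence signals, so both submissions have finished executing.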
108 void deviceSignal (const DeviceInterface&		vk,
109 				   const VkDevice				device,
110 				   const VkQueue				queue,
111 				   const VkFence				fence,
112 				   const SynchronizationType	type,
113 				   const VkSemaphore			semaphore,
114 				   const deUint64				timelineValue)
115 {
116 	{
117 		VkSemaphoreSubmitInfoKHR	signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphore, timelineValue, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
118 		SynchronizationWrapperPtr	synchronizationWrapper		= getSynchronizationWrapper(type, vk, DE_TRUE);
119 		synchronizationWrapper->addSubmitInfo(
120 			0u,										// deUint32								waitSemaphoreInfoCount
121 			DE_NULL,								// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
122 			0u,										// deUint32								commandBufferInfoCount
123 			DE_NULL,								// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
124 			1u,										// deUint32								signalSemaphoreInfoCount
125 			&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
126 			DE_FALSE,
127 			DE_TRUE
128 		);
129 		VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
130 	}
131 
132 	if (fence != DE_NULL)
133 	{
134 		SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(type, vk, 1u);
135 		synchronizationWrapper->addSubmitInfo(
136 			0u,										// deUint32								waitSemaphoreInfoCount
137 			DE_NULL,								// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
138 			0u,										// deUint32								commandBufferInfoCount
139 			DE_NULL,								// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
140 			0u,										// deUint32								signalSemaphoreInfoCount
141 			DE_NULL									// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
142 		);
143 		VK_CHECK(synchronizationWrapper->queueSubmit(queue, fence));
144 		VK_CHECK(vk.waitForFences(device, 1u, &fence, VK_TRUE, ~(0ull)));
145 	}
146 }
147 
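// Signal the timeline semaphore to timelineValue from the host using
// vkSignalSemaphore.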
148 void hostSignal (const DeviceInterface& vk, const VkDevice& device, VkSemaphore semaphore, const deUint64 timelineValue)
149 {
150 	VkSemaphoreSignalInfo	ssi	=
151 	{
152 		VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,	// VkStructureType				sType;
153 		DE_NULL,									// const void*					pNext;
154 		semaphore,									// VkSemaphore					semaphore;
155 		timelineValue,								// deUint64						value;
156 	};
157 
158 	VK_CHECK(vk.signalSemaphore(device, &ssi));
159 }
160 
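// Creates 100 timeline semaphores with random target values and verifies
// that a host vkWaitSemaphores succeeds once the required points have been
// signaled: either all of them (waitAll) or a single randomly chosen one
// (wait-any), with signaling done from the device or from the host.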
161 class WaitTestInstance : public TestInstance
162 {
163 public:
164 	WaitTestInstance (Context& context, SynchronizationType type, bool waitAll, bool signalFromDevice)
165 		: TestInstance			(context)
166 		, m_type				(type)
167 		, m_waitAll				(waitAll)
168 		, m_signalFromDevice	(signalFromDevice)
169 	{
170 	}
171 
172 	tcu::TestStatus iterate (void)
173 	{
174 		const DeviceInterface&								vk				= m_context.getDeviceInterface();
175 		const VkDevice&										device			= m_context.getDevice();
176 		const VkQueue										queue			= m_context.getUniversalQueue();
177 		Unique<VkFence>										fence			(createFence(vk, device));
178 		std::vector<SharedPtr<Move<VkSemaphore > > >		semaphorePtrs	(createTimelineSemaphores(vk, device, 100));
179 		de::Random											rng				(1234);
180 		std::vector<VkSemaphore>							semaphores;
181 		std::vector<deUint64>								timelineValues;
182 
183 		for (deUint32 i = 0; i < semaphorePtrs.size(); i++)
184 		{
185 			semaphores.push_back((*semaphorePtrs[i]).get());
186 			timelineValues.push_back(rng.getInt(1, 10000));
187 		}
188 
189 		if (m_waitAll)
190 		{
191 
192 			for (deUint32 semIdx = 0; semIdx < semaphores.size(); semIdx++)
193 			{
194 				if (m_signalFromDevice)
195 				{
196 					deviceSignal(vk, device, queue, *fence, m_type, semaphores[semIdx], timelineValues[semIdx]);
197 					VK_CHECK(vk.resetFences(device, 1, &fence.get()));
198 				}
199 				else
200 					hostSignal(vk, device, semaphores[semIdx], timelineValues[semIdx]);
201 			}
202 		}
203 		else
204 		{
205 			deUint32	randomIdx	= rng.getInt(0, (deUint32)(semaphores.size() - 1));
206 
207 			if (m_signalFromDevice)
208 				deviceSignal(vk, device, queue, *fence, m_type, semaphores[randomIdx], timelineValues[randomIdx]);
209 			else
210 				hostSignal(vk, device, semaphores[randomIdx], timelineValues[randomIdx]);
211 		}
212 
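		// All required points have been signaled above, so a wait with a
		// zero timeout must return VK_SUCCESS immediately.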
213 		{
214 			const VkSemaphoreWaitInfo		waitInfo	=
215 			{
216 				VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,									// VkStructureType			sType;
217 				DE_NULL,																// const void*				pNext;
218 				m_waitAll ? 0u : (VkSemaphoreWaitFlags) VK_SEMAPHORE_WAIT_ANY_BIT,	// VkSemaphoreWaitFlagsKHR	flags;
219 				(deUint32) semaphores.size(),											// deUint32					semaphoreCount;
220 				&semaphores[0],															// const VkSemaphore*		pSemaphores;
221 				&timelineValues[0],														// const deUint64*			pValues;
222 			};
223 
224 			VkResult result = vk.waitSemaphores(device, &waitInfo, 0ull);
225 			if (result != VK_SUCCESS)
226 				return tcu::TestStatus::fail("Wait failed");
227 		}
228 
229 		VK_CHECK(vk.deviceWaitIdle(device));
230 
231 		return tcu::TestStatus::pass("Wait success");
232 	}
233 
234 private:
235 
236 	std::vector<SharedPtr<Move<VkSemaphore > > > createTimelineSemaphores(const DeviceInterface& vk, const VkDevice& device, deUint32 count)
237 	{
238 		std::vector<SharedPtr<Move<VkSemaphore > > > semaphores;
239 
240 		for (deUint32 i = 0; i < count; i++)
241 			semaphores.push_back(makeVkSharedPtr(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE)));
242 
243 		return semaphores;
244 	}
245 
246 	const SynchronizationType	m_type;
247 	bool						m_waitAll;
248 	bool						m_signalFromDevice;
249 };
250 
251 class WaitTestCase : public TestCase
252 {
253 public:
254 	WaitTestCase (tcu::TestContext& testCtx, const std::string& name, SynchronizationType type, bool waitAll, bool signalFromDevice)
255 		: TestCase				(testCtx, name.c_str())
256 		, m_type				(type)
257 		, m_waitAll				(waitAll)
258 		, m_signalFromDevice	(signalFromDevice)
259 	{
260 	}
261 
262 	void checkSupport(Context& context) const override
263 	{
264 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
265 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
266 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
267 	}
268 
269 	TestInstance* createInstance (Context& context) const override
270 	{
271 		return new WaitTestInstance(context, m_type, m_waitAll, m_signalFromDevice);
272 	}
273 
274 private:
275 	const SynchronizationType	m_type;
276 	bool						m_waitAll;
277 	bool						m_signalFromDevice;
278 };
279 
280 // This test verifies that waiting from the host on a timeline point
281 // that is itself waiting for signaling works properly.
282 class HostWaitBeforeSignalTestInstance : public TestInstance
283 {
284 public:
285 	HostWaitBeforeSignalTestInstance (Context& context, SynchronizationType type)
286 		: TestInstance			(context)
287 		, m_type				(type)
288 	{
289 	}
290 
291 	tcu::TestStatus iterate (void)
292 	{
293 		const DeviceInterface&	vk					= m_context.getDeviceInterface();
294 		const VkDevice&			device				= m_context.getDevice();
295 		const VkQueue			queue				= m_context.getUniversalQueue();
296 		Unique<VkSemaphore>		semaphore			(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
297 		de::Random				rng					(1234);
298 		std::vector<deUint64>	timelineValues;
299 
300 		// Host value we signal at the end.
301 		timelineValues.push_back(1 + rng.getInt(1, 10000));
302 
303 		for (deUint32 i = 0; i < 12; i++)
304 		{
305 			const deUint64				newTimelineValue			= (timelineValues.back() + rng.getInt(1, 10000));
306 			VkSemaphoreSubmitInfoKHR	waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(*semaphore, timelineValues.back(), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
307 			VkSemaphoreSubmitInfoKHR	signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(*semaphore, newTimelineValue, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
308 			SynchronizationWrapperPtr	synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, DE_TRUE);
309 
310 			synchronizationWrapper->addSubmitInfo(
311 				1u,										// deUint32								waitSemaphoreInfoCount
312 				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
313 				0u,										// deUint32								commandBufferInfoCount
314 				DE_NULL,								// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
315 				1u,										// deUint32								signalSemaphoreInfoCount
316 				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
317 				DE_TRUE,
318 				DE_TRUE
319 			);
320 
321 			VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
322 
323 			timelineValues.push_back(newTimelineValue);
324 		}
325 
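		// Every submitted batch is still blocked on the initial host point,
		// so waiting on any of the chained values with a zero timeout must
		// return VK_TIMEOUT.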
326 		{
327 			const VkSemaphoreWaitInfo waitInfo =
328 			{
329 				VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,											// VkStructureType			sType;
330 				DE_NULL,																		// const void*				pNext;
331 				0u,																				// VkSemaphoreWaitFlagsKHR	flags;
332 				(deUint32) 1u,																	// deUint32					semaphoreCount;
333 				&semaphore.get(),																// const VkSemaphore*		pSemaphores;
334 				&timelineValues[rng.getInt(0, static_cast<int>(timelineValues.size() - 1))],	// const deUint64*			pValues;
335 			};
336 
337 			VkResult result = vk.waitSemaphores(device, &waitInfo, 0ull);
338 			if (result != VK_TIMEOUT)
339 				return tcu::TestStatus::fail("Wait failed");
340 		}
341 
342 		hostSignal(vk, device, *semaphore, timelineValues.front());
343 
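		// Signaling the first point from the host unblocks the whole chain
		// of submissions, so the last value must now become waitable.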
344 		{
345 			const VkSemaphoreWaitInfo waitInfo =
346 			{
347 				VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,		// VkStructureType			sType;
348 				DE_NULL,									// const void*				pNext;
349 				0u,											// VkSemaphoreWaitFlagsKHR	flags;
350 				(deUint32) 1u,								// deUint32					semaphoreCount;
351 				&semaphore.get(),							// const VkSemaphore*		pSemaphores;
352 				&timelineValues.back(),						// const deUint64*			pValues;
353 			};
354 
355 			VkResult result = vk.waitSemaphores(device, &waitInfo, ~(0ull));
356 			if (result != VK_SUCCESS)
357 				return tcu::TestStatus::fail("Wait failed");
358 		}
359 
360 		VK_CHECK(vk.deviceWaitIdle(device));
361 
362 		return tcu::TestStatus::pass("Wait success");
363 	}
364 
365 private:
366 
367 	std::vector<SharedPtr<Move<VkSemaphore > > > createTimelineSemaphores(const DeviceInterface& vk, const VkDevice& device, deUint32 count)
368 	{
369 		std::vector<SharedPtr<Move<VkSemaphore > > > semaphores;
370 
371 		for (deUint32 i = 0; i < count; i++)
372 			semaphores.push_back(makeVkSharedPtr(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE)));
373 
374 		return semaphores;
375 	}
376 
377 protected:
378 
379 	const SynchronizationType m_type;
380 };
381 
382 class HostWaitBeforeSignalTestCase : public TestCase
383 {
384 public:
385 	HostWaitBeforeSignalTestCase(tcu::TestContext&		testCtx,
386 								 const std::string&		name,
387 								 SynchronizationType	type)
388 		: TestCase(testCtx, name.c_str())
389 		, m_type(type)
390 	{
391 	}
392 
393 	void checkSupport(Context& context) const override
394 	{
395 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
396 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
397 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
398 	}
399 
400 	TestInstance* createInstance(Context& context) const override
401 	{
402 		return new HostWaitBeforeSignalTestInstance(context, m_type);
403 	}
404 
405 protected:
406 	const SynchronizationType m_type;
407 };
408 
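// Signals a series of timeline semaphores (from the device or the host) and
// then polls vkGetSemaphoreCounterValue on the last one until it reaches its
// expected value or a timeout expires.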
409 class PollTestInstance : public TestInstance
410 {
411 public:
412 	PollTestInstance (Context& context, bool signalFromDevice)
413 		: TestInstance			(context)
414 		, m_signalFromDevice	(signalFromDevice)
415 	{
416 	}
417 
418 	tcu::TestStatus iterate (void)
419 	{
420 		const DeviceInterface&								vk				= m_context.getDeviceInterface();
421 		const VkDevice&										device			= m_context.getDevice();
422 		const VkQueue										queue			= m_context.getUniversalQueue();
423 		Unique<VkFence>										fence			(createFence(vk, device));
424 		std::vector<SharedPtr<Move<VkSemaphore > > >		semaphorePtrs	(createTimelineSemaphores(vk, device, 100));
425 		de::Random											rng				(1234);
426 		std::vector<VkSemaphore>							semaphores;
427 		std::vector<deUint64>								timelineValues;
428 		const deUint64										secondInMicroSeconds	= 1000ull * 1000ull * 1000ull;
429 		deUint64											startTime;
430 		VkResult											result = VK_SUCCESS;
431 
432 		for (deUint32 i = 0; i < semaphorePtrs.size(); i++)
433 		{
434 			semaphores.push_back((*semaphorePtrs[i]).get());
435 			timelineValues.push_back(rng.getInt(1, 10000));
436 		}
437 
438 		for (deUint32 semIdx = 0; semIdx < semaphores.size(); semIdx++)
439 		{
440 			if (m_signalFromDevice)
441 			{
442 				deviceSignal(vk, device, queue, semIdx == (semaphores.size() - 1) ? *fence : DE_NULL, SynchronizationType::LEGACY, semaphores[semIdx], timelineValues[semIdx]);
443 			}
444 			else
445 				hostSignal(vk, device, semaphores[semIdx], timelineValues[semIdx]);
446 		}
447 
448 		startTime = deGetMicroseconds();
449 
450 		do
451 		{
452 			deUint64	value;
453 
454 			result = vk.getSemaphoreCounterValue(device, semaphores.back(), &value);
455 
456 			if (result != VK_SUCCESS)
457 				break;
458 
459 			if (value == timelineValues.back())
460 			{
461 				if (m_signalFromDevice)
462 					VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~(0ull)));
463 				VK_CHECK(vk.deviceWaitIdle(device));
464 				return tcu::TestStatus::pass("Poll on timeline value succeeded");
465 			}
466 
467 			if (value > timelineValues.back())
468 			{
469 				result = VK_ERROR_UNKNOWN;
470 				break;
471 			}
472 		} while ((deGetMicroseconds() - startTime) < secondInMicroSeconds);
473 
474 		VK_CHECK(vk.deviceWaitIdle(device));
475 
476 		if (result != VK_SUCCESS)
477 			return tcu::TestStatus::fail("Fail");
478 		return tcu::TestStatus::fail("Timeout");
479 	}
480 
481 private:
482 
483 	std::vector<SharedPtr<Move<VkSemaphore > > > createTimelineSemaphores(const DeviceInterface& vk, const VkDevice& device, deUint32 count)
484 	{
485 		std::vector<SharedPtr<Move<VkSemaphore > > > semaphores;
486 
487 		for (deUint32 i = 0; i < count; i++)
488 			semaphores.push_back(makeVkSharedPtr(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE)));
489 
490 		return semaphores;
491 	}
492 
493 	bool m_signalFromDevice;
494 };
495 
496 class PollTestCase : public TestCase
497 {
498 public:
499 	PollTestCase (tcu::TestContext& testCtx, const std::string& name, bool signalFromDevice)
500 		: TestCase				(testCtx, name.c_str())
501 		, m_signalFromDevice	(signalFromDevice)
502 	{
503 	}
504 
505 	virtual void checkSupport(Context& context) const
506 	{
507 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
508 	}
509 
510 	TestInstance* createInstance (Context& context) const
511 	{
512 		return new PollTestInstance(context, m_signalFromDevice);
513 	}
514 
515 private:
516 	bool m_signalFromDevice;
517 };
518 
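// Helper thread that repeatedly reads the semaphore counter value and fails
// the test if the value is ever observed to decrease.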
519 class MonotonicallyIncrementChecker : public de::Thread
520 {
521 public:
522 	MonotonicallyIncrementChecker					(const DeviceInterface& vkd, VkDevice device, VkSemaphore semaphore)
523 		: de::Thread()
524 		, m_vkd(vkd)
525 		, m_device(device)
526 		, m_semaphore(semaphore)
527 		, m_running(true)
528 		, m_status(tcu::TestStatus::incomplete())
529 	{}
530 
531 	virtual			~MonotonicallyIncrementChecker	(void)	{}
532 
533 	tcu::TestStatus	getStatus						() { return m_status; }
534 	void			stop							() { m_running = false; }
535 	virtual void	run								()
536 	{
537 		deUint64 lastValue = 0;
538 
539 		while (m_running)
540 		{
541 			deUint64 value;
542 
543 			VK_CHECK(m_vkd.getSemaphoreCounterValue(m_device, m_semaphore, &value));
544 
545 			if (value < lastValue) {
546 				m_status = tcu::TestStatus::fail("Value not monotonically increasing");
547 				return;
548 			}
549 
550 			lastValue = value;
551 			deYield();
552 		}
553 
554 		m_status = tcu::TestStatus::pass("Value monotonically increasing");
555 	}
556 
557 private:
558 	const DeviceInterface&		m_vkd;
559 	VkDevice					m_device;
560 	VkSemaphore					m_semaphore;
561 	bool						m_running;
562 	tcu::TestStatus				m_status;
563 };
564 
565 void checkSupport (Context& context, SynchronizationType type)
566 {
567 	context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
568 	if (type == SynchronizationType::SYNCHRONIZATION2)
569 		context.requireDeviceFunctionality("VK_KHR_synchronization2");
570 }
571 
572 // Queue device signaling close to the edges of the
573 // maxTimelineSemaphoreValueDifference value and verify that the value
574 // of the semaphore never goes backwards.
575 tcu::TestStatus maxDifferenceValueCase (Context& context, SynchronizationType type)
576 {
577 	const DeviceInterface&							vk							= context.getDeviceInterface();
578 	const VkDevice&									device						= context.getDevice();
579 	const VkQueue									queue						= context.getUniversalQueue();
580 	const deUint64									requiredMinValueDifference	= deIntMaxValue32(32);
581 	const deUint64									maxTimelineValueDifference	= getMaxTimelineSemaphoreValueDifference(context.getInstanceInterface(), context.getPhysicalDevice());
582 	const Unique<VkSemaphore>						semaphore					(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
583 	const Unique<VkFence>							fence						(createFence(vk, device));
584 	tcu::TestLog&									log							= context.getTestContext().getLog();
585 	MonotonicallyIncrementChecker					checkerThread				(vk, device, *semaphore);
586 	deUint64										iterations;
587 	deUint64										timelineBackValue;
588 	deUint64										timelineFrontValue;
589 
590 	if (maxTimelineValueDifference < requiredMinValueDifference)
591 		return tcu::TestStatus::fail("Timeline semaphore max value difference test failed");
592 
593 	iterations = std::min<deUint64>(std::numeric_limits<deUint64>::max() / maxTimelineValueDifference, 100ull);
594 
595 	log << TestLog::Message
596 		<< " maxTimelineSemaphoreValueDifference=" << maxTimelineValueDifference
597 		<< " maxExpected=" << requiredMinValueDifference
598 		<< " iterations=" << iterations
599 		<< TestLog::EndMessage;
600 
601 	checkerThread.start();
602 
603 	timelineBackValue = timelineFrontValue = 1;
604 	hostSignal(vk, device, *semaphore, timelineFrontValue);
605 
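	// Each iteration pushes the front of the timeline forward by almost the
	// full maxTimelineSemaphoreValueDifference, surrounded by small device
	// signals; the fenced submission marks the new back value so that pending
	// signals always stay within the allowed difference window.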
606 	for (deUint64 i = 0; i < iterations; i++)
607 	{
608 		deUint64	fenceValue;
609 
610 		for (deUint32 j = 1; j <= 10; j++)
611 			deviceSignal(vk, device, queue, DE_NULL, type, *semaphore, ++timelineFrontValue);
612 
613 		timelineFrontValue = timelineBackValue + maxTimelineValueDifference - 10;
614 		fenceValue = timelineFrontValue;
615 		deviceSignal(vk, device, queue, *fence, type, *semaphore, fenceValue);
616 		for (deUint32 j = 1; j < 10; j++)
617 			deviceSignal(vk, device, queue, DE_NULL, type, *semaphore, ++timelineFrontValue);
618 
619 		deUint64 value;
620 		VK_CHECK(vk.getSemaphoreCounterValue(device, *semaphore, &value));
621 
622 		VK_CHECK(vk.waitForFences(device, 1, &fence.get(), VK_TRUE, ~(0ull)));
623 		VK_CHECK(vk.resetFences(device, 1, &fence.get()));
624 
625 		timelineBackValue = fenceValue;
626 	}
627 
628 	VK_CHECK(vk.deviceWaitIdle(device));
629 
630 	checkerThread.stop();
631 	checkerThread.join();
632 
633 	return checkerThread.getStatus();
634 }
635 
636 tcu::TestStatus initialValueCase (Context& context, SynchronizationType type)
637 {
638 	DE_UNREF(type);
639 
640 	const DeviceInterface&							vk							= context.getDeviceInterface();
641 	const VkDevice&									device						= context.getDevice();
642 	const VkQueue									queue						= context.getUniversalQueue();
643 	const deUint64									maxTimelineValueDifference	= getMaxTimelineSemaphoreValueDifference(context.getInstanceInterface(), context.getPhysicalDevice());
644 	de::Random										rng							(1234);
645 	const deUint64									nonZeroValue				= 1 + rng.getUint64() % (maxTimelineValueDifference - 1);
646 	const Unique<VkSemaphore>						semaphoreDefaultValue		(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
647 	const Unique<VkSemaphore>						semaphoreInitialValue		(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE, 0, nonZeroValue));
648 	deUint64										initialValue;
649 	VkSemaphoreWaitInfo								waitInfo					=
650 	{
651 		VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,		// VkStructureType			sType;
652 		DE_NULL,									// const void*				pNext;
653 		0u,											// VkSemaphoreWaitFlagsKHR	flags;
654 		1u,											// deUint32					semaphoreCount;
655 		DE_NULL,									// const VkSemaphore*		pSemaphores;
656 		&initialValue,								// const deUint64*			pValues;
657 	};
658 	deUint64										value;
659 	VkResult										result;
660 
661 	waitInfo.pSemaphores = &semaphoreDefaultValue.get();
662 	initialValue = 0;
663 	result = vk.waitSemaphores(device, &waitInfo, 0ull);
664 	if (result != VK_SUCCESS)
665 		return tcu::TestStatus::fail("Wait zero initial value failed");
666 
667 	{
668 		VkSemaphoreSubmitInfoKHR	waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(*semaphoreDefaultValue, initialValue, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
669 		SynchronizationWrapperPtr	synchronizationWrapper		= getSynchronizationWrapper(type, vk, DE_TRUE);
670 
671 		synchronizationWrapper->addSubmitInfo(
672 			1u,										// deUint32								waitSemaphoreInfoCount
673 			&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
674 			0u,										// deUint32								commandBufferInfoCount
675 			DE_NULL,								// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
676 			0u,										// deUint32								signalSemaphoreInfoCount
677 			DE_NULL,								// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
678 			DE_TRUE,
679 			DE_FALSE
680 		);
681 
682 		VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
683 
684 		VK_CHECK(vk.deviceWaitIdle(device));
685 	}
686 
687 	VK_CHECK(vk.getSemaphoreCounterValue(device, *semaphoreDefaultValue, &value));
688 #ifdef CTS_USES_VULKANSC
689 	if (context.getTestContext().getCommandLine().isSubProcess())
690 #endif // CTS_USES_VULKANSC
691 	{
692 		if (value != initialValue)
693 			return tcu::TestStatus::fail("Invalid zero initial value");
694 	}
695 
696 	waitInfo.pSemaphores = &semaphoreInitialValue.get();
697 	initialValue = nonZeroValue;
698 	result = vk.waitSemaphores(device, &waitInfo, 0ull);
699 	if (result != VK_SUCCESS)
700 		return tcu::TestStatus::fail("Wait non zero initial value failed");
701 
702 	VK_CHECK(vk.getSemaphoreCounterValue(device, *semaphoreInitialValue, &value));
703 #ifdef CTS_USES_VULKANSC
704 	if (context.getTestContext().getCommandLine().isSubProcess())
705 #endif // CTS_USES_VULKANSC
706 	{
707 		if (value != nonZeroValue)
708 			return tcu::TestStatus::fail("Invalid non zero initial value");
709 	}
710 
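	// If the difference limit leaves room for it, also check a semaphore
	// created with an initial value just above maxTimelineSemaphoreValueDifference.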
711 	if (maxTimelineValueDifference != std::numeric_limits<deUint64>::max())
712 	{
713 		const deUint64				nonZeroMaxValue		= maxTimelineValueDifference + 1;
714 		const Unique<VkSemaphore>	semaphoreMaxValue	(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE, 0, nonZeroMaxValue));
715 
716 		waitInfo.pSemaphores = &semaphoreMaxValue.get();
717 		initialValue = nonZeroMaxValue;
718 		result = vk.waitSemaphores(device, &waitInfo, 0ull);
719 		if (result != VK_SUCCESS)
720 			return tcu::TestStatus::fail("Wait max value failed");
721 
722 		VK_CHECK(vk.getSemaphoreCounterValue(device, *semaphoreMaxValue, &value));
723 #ifdef CTS_USES_VULKANSC
724 		if (context.getTestContext().getCommandLine().isSubProcess())
725 #endif // CTS_USES_VULKANSC
726 		{
727 			if (value != nonZeroMaxValue)
728 				return tcu::TestStatus::fail("Invalid max value initial value");
729 		}
730 	}
731 
732 	return tcu::TestStatus::pass("Initial value correct");
733 }
734 
735 class WaitTests : public tcu::TestCaseGroup
736 {
737 public:
738 	// Various wait cases of timeline semaphores
739 	WaitTests (tcu::TestContext& testCtx, SynchronizationType type)
740 		: tcu::TestCaseGroup(testCtx, "wait")
741 		, m_type(type)
742 	{
743 	}
744 
745 	void init (void)
746 	{
747 		static const struct
748 		{
749 			std::string	name;
750 			bool		waitAll;
751 			bool		signalFromDevice;
752 		}													waitCases[]	=
753 		{
754 			{ "all_signal_from_device",	true,	true },
755 			{ "one_signal_from_device",	false,	true },
756 			{ "all_signal_from_host",	true,	false },
757 			{ "one_signal_from_host",	false,	false },
758 		};
759 
760 		for (deUint32 caseIdx = 0; caseIdx < DE_LENGTH_OF_ARRAY(waitCases); caseIdx++)
761 			addChild(new WaitTestCase(m_testCtx, waitCases[caseIdx].name, m_type, waitCases[caseIdx].waitAll, waitCases[caseIdx].signalFromDevice));
762 		addChild(new HostWaitBeforeSignalTestCase(m_testCtx, "host_wait_before_signal", m_type));
763 		addChild(new PollTestCase(m_testCtx, "poll_signal_from_device", true));
764 		addChild(new PollTestCase(m_testCtx, "poll_signal_from_host", false));
765 	}
766 
767 protected:
768 	SynchronizationType m_type;
769 };
770 
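// Bundles one write/read operation pair on a shared resource together with
// the timeline points used to order the GPU write, the GPU read and the
// host-side copy that feeds the next iteration.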
771 struct TimelineIteration
772 {
773 	TimelineIteration(OperationContext&						opContext,
774 					  const ResourceDescription&			resourceDesc,
775 					  const SharedPtr<OperationSupport>&	writeOpSupport,
776 					  const SharedPtr<OperationSupport>&	readOpSupport,
777 					  deUint64								lastValue,
778 					  de::Random&							rng)
779 		: resource(makeSharedPtr(new Resource(opContext, resourceDesc, writeOpSupport->getOutResourceUsageFlags() | readOpSupport->getInResourceUsageFlags())))
780 		, writeOp(makeSharedPtr(writeOpSupport->build(opContext, *resource)))
781 		, readOp(makeSharedPtr(readOpSupport->build(opContext, *resource)))
782 	{
783 		writeValue	= lastValue + rng.getInt(1, 100);
784 		readValue	= writeValue + rng.getInt(1, 100);
785 		cpuValue	= readValue + rng.getInt(1, 100);
786 	}
787 	~TimelineIteration() {}
788 
789 	SharedPtr<Resource>		resource;
790 
791 	SharedPtr<Operation>	writeOp;
792 	SharedPtr<Operation>	readOp;
793 
794 	deUint64				writeValue;
795 	deUint64				readValue;
796 	deUint64				cpuValue;
797 };
798 
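// Host thread that, for each iteration, waits for the GPU read point, copies
// the data read back on the GPU into the next iteration's write operation and
// then signals the CPU point that unblocks the next GPU write.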
799 class HostCopyThread : public de::Thread
800 {
801 public:
802 	HostCopyThread	(const DeviceInterface& vkd, VkDevice device, VkSemaphore semaphore, const std::vector<SharedPtr<TimelineIteration> >& iterations)
803 		: de::Thread()
804 		, m_vkd(vkd)
805 		, m_device(device)
806 		, m_semaphore(semaphore)
807 		, m_iterations(iterations) {}
808 	virtual			~HostCopyThread	(void)			{}
809 
810 	virtual void	run								()
811 	{
812 		for (deUint32 iterIdx = 0; iterIdx < m_iterations.size(); iterIdx++)
813 		{
814 			// Wait on the GPU read operation.
815 			{
816 				const VkSemaphoreWaitInfo	waitInfo	=
817 				{
818 					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
819 					DE_NULL,									// const void*				pNext;
820 					0u,											// VkSemaphoreWaitFlagsKHR	flags;
821 					1u,											// deUint32					semaphoreCount
822 					&m_semaphore,								// VkSemaphore*				pSemaphores;
823 					&m_iterations[iterIdx]->readValue,			// deUint64*				pValues;
824 				};
825 				VkResult						result;
826 
827 				result = m_vkd.waitSemaphores(m_device, &waitInfo, ~(deUint64)0u);
828 				if (result != VK_SUCCESS)
829 					return;
830 			}
831 
832 			// Copy the data read on the GPU into the next GPU write operation.
833 			if (iterIdx < (m_iterations.size() - 1))
834 				m_iterations[iterIdx + 1]->writeOp->setData(m_iterations[iterIdx]->readOp->getData());
835 
836 			// Signal the next GPU write operation.
837 			{
838 				const VkSemaphoreSignalInfo	signalInfo	=
839 				{
840 					VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,	// VkStructureType			sType;
841 					DE_NULL,										// const void*				pNext;
842 					m_semaphore,									// VkSemaphore				semaphore;
843 					m_iterations[iterIdx]->cpuValue,				// deUint64					value;
844 				};
845 				VkResult						result;
846 
847 				result = m_vkd.signalSemaphore(m_device, &signalInfo);
848 				if (result != VK_SUCCESS)
849 					return;
850 			}
851 		}
852 	}
853 
854 private:
855 	const DeviceInterface&								m_vkd;
856 	VkDevice											m_device;
857 	VkSemaphore											m_semaphore;
858 	const std::vector<SharedPtr<TimelineIteration> >&	m_iterations;
859 };
860 
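// Fill the output with pseudo-random content: raw bytes for buffer resources,
// per-pixel values for image resources.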
861 void randomizeData(std::vector<deUint8>& outData, const ResourceDescription& desc)
862 {
863 	de::Random	rng	(1234);
864 
865 	if (desc.type == RESOURCE_TYPE_BUFFER) {
866 		for (deUint32 i = 0; i < outData.size(); i++)
867 			outData[i] = rng.getUint8();
868 	} else {
869 		const PlanarFormatDescription	planeDesc	= getPlanarFormatDescription(desc.imageFormat);
870 		tcu::PixelBufferAccess			access		(mapVkFormat(desc.imageFormat),
871 													 desc.size.x(), desc.size.y(), desc.size.z(),
872 													 static_cast<void *>(&outData[0]));
873 
874 		DE_ASSERT(desc.type == RESOURCE_TYPE_IMAGE);
875 
876 		for (int z = 0; z < access.getDepth(); z++) {
877 			for (int y = 0; y < access.getHeight(); y++) {
878 				for (int x = 0; x < access.getWidth(); x++) {
879 					if (isFloatFormat(desc.imageFormat)) {
880 						tcu::Vec4	value(rng.getFloat(), rng.getFloat(), rng.getFloat(), 1.0f);
881 						access.setPixel(value, x, y, z);
882 					} else {
883 						tcu::IVec4	value(rng.getInt(0, deIntMaxValue32(planeDesc.channels[0].sizeBits)),
884 										  rng.getInt(0, deIntMaxValue32(planeDesc.channels[1].sizeBits)),
885 										  rng.getInt(0, deIntMaxValue32(planeDesc.channels[2].sizeBits)),
886 										  rng.getInt(0, deIntMaxValue32(planeDesc.channels[3].sizeBits)));
887 						access.setPixel(value, x, y, z);
888 					}
889 				}
890 			}
891 		}
892 	}
893 }
894 
895 // Create a chain of operations with data copied over on the device
896 // and the host with each operation depending on the previous one and
897 // verifies that the data at the beginning & end of the chain is the
898 // same.
899 class DeviceHostTestInstance : public TestInstance
900 {
901 public:
902 	DeviceHostTestInstance (Context&							context,
903 							SynchronizationType					type,
904 							const ResourceDescription&			resourceDesc,
905 							const SharedPtr<OperationSupport>&	writeOp,
906 							const SharedPtr<OperationSupport>&	readOp,
907 							PipelineCacheData&					pipelineCacheData)
908 		: TestInstance		(context)
909 		, m_type			(type)
910 		, m_opContext		(context, type, pipelineCacheData)
911 		, m_resourceDesc	(resourceDesc)
912 	{
913 		de::Random	rng		(1234);
914 
915 		// Create a dozen write/read operation pairs and their associated
916 		// resources.
917 		for (deUint32 i = 0; i < 12; i++)
918 		{
919 			m_iterations.push_back(makeSharedPtr(new TimelineIteration(m_opContext, resourceDesc, writeOp, readOp,
920 																	   i == 0 ? 0 : m_iterations.back()->cpuValue, rng)));
921 		}
922 	}
923 
924 	tcu::TestStatus	iterate (void)
925 	{
926 		const DeviceInterface&								vk						= m_context.getDeviceInterface();
927 		const VkDevice										device					= m_context.getDevice();
928 		const VkQueue										queue					= m_context.getUniversalQueue();
929 		const deUint32										queueFamilyIndex		= m_context.getUniversalQueueFamilyIndex();
930 		const Unique<VkSemaphore>							semaphore				(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
931 		const Unique<VkCommandPool>							cmdPool					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
932 		HostCopyThread										hostCopyThread			(vk, device, *semaphore, m_iterations);
933 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffers;
934 		std::vector<VkCommandBufferSubmitInfoKHR>			commandBufferSubmitInfos(m_iterations.size() * 2, makeCommonCommandBufferSubmitInfo(0));
935 
936 		hostCopyThread.start();
937 
938 		for (deUint32 opNdx = 0; opNdx < (m_iterations.size() * 2); opNdx++)
939 		{
940 			ptrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPool)));
941 			commandBufferSubmitInfos[opNdx].commandBuffer = **(ptrCmdBuffers.back());
942 		}
943 
944 		// Randomize the data copied over.
945 		{
946 			const Data				startData		= m_iterations.front()->writeOp->getData();
947 			Data					randomizedData;
948 			std::vector<deUint8>	dataArray;
949 
950 			dataArray.resize(startData.size);
951 			randomizeData(dataArray, m_resourceDesc);
952 			randomizedData.size = dataArray.size();
953 			randomizedData.data = &dataArray[0];
954 			m_iterations.front()->writeOp->setData(randomizedData);
955 		}
956 
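		// Build one submission batch per recorded operation: each write waits
		// on the previous iteration's CPU point and signals its write point,
		// each read waits on the write point and signals the read point that
		// the host copy thread is waiting for.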
957 		SynchronizationWrapperPtr				synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, DE_TRUE, (deUint32)m_iterations.size() * 2u);
958 		std::vector<VkSemaphoreSubmitInfoKHR>	waitSemaphoreSubmitInfos	(m_iterations.size() * 2, makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
959 		std::vector<VkSemaphoreSubmitInfoKHR>	signalSemaphoreSubmitInfos	(m_iterations.size() * 2, makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
960 
961 		for (deUint32 iterIdx = 0; iterIdx < m_iterations.size(); iterIdx++)
962 		{
963 			// Write operation
964 			{
965 				deUint32 wIdx = 2 * iterIdx;
966 
967 				waitSemaphoreSubmitInfos[wIdx].value	= wIdx == 0 ? 0u : m_iterations[iterIdx - 1]->cpuValue;
968 				signalSemaphoreSubmitInfos[wIdx].value	= m_iterations[iterIdx]->writeValue;
969 
970 				synchronizationWrapper->addSubmitInfo(
971 					wIdx == 0 ? 0u : 1u,							// deUint32								waitSemaphoreInfoCount
972 					&waitSemaphoreSubmitInfos[wIdx],				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
973 					1u,												// deUint32								commandBufferInfoCount
974 					&commandBufferSubmitInfos[wIdx],				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
975 					1u,												// deUint32								signalSemaphoreInfoCount
976 					&signalSemaphoreSubmitInfos[wIdx],				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
977 					wIdx == 0 ? DE_FALSE : DE_TRUE,
978 					DE_TRUE
979 				);
980 
981 				VkCommandBuffer cmdBuffer = commandBufferSubmitInfos[wIdx].commandBuffer;
982 				beginCommandBuffer(vk, cmdBuffer);
983 				m_iterations[iterIdx]->writeOp->recordCommands(cmdBuffer);
984 
985 				{
986 					const SyncInfo	writeSync	= m_iterations[iterIdx]->writeOp->getOutSyncInfo();
987 					const SyncInfo	readSync	= m_iterations[iterIdx]->readOp->getInSyncInfo();
988 					const Resource& resource	= *(m_iterations[iterIdx]->resource);
989 
990 					if (resource.getType() == RESOURCE_TYPE_IMAGE)
991 					{
992 						DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
993 						DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
994 
995 						const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
996 							writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
997 							writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
998 							readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
999 							readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
1000 							writeSync.imageLayout,							// VkImageLayout					oldLayout
1001 							readSync.imageLayout,							// VkImageLayout					newLayout
1002 							resource.getImage().handle,						// VkImage							image
1003 							resource.getImage().subresourceRange			// VkImageSubresourceRange			subresourceRange
1004 						);
1005 						VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
1006 						synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
1007 					}
1008 					else
1009 					{
1010 						const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
1011 							writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
1012 							writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
1013 							readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
1014 							readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
1015 							resource.getBuffer().handle,					// VkBuffer							buffer
1016 							0,												// VkDeviceSize						offset
1017 							VK_WHOLE_SIZE									// VkDeviceSize						size
1018 						);
1019 						VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
1020 						synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
1021 					}
1022 				}
1023 
1024 				endCommandBuffer(vk, cmdBuffer);
1025 			}
1026 
1027 			// Read operation
1028 			{
1029 				deUint32 rIdx = 2 * iterIdx + 1;
1030 
1031 				waitSemaphoreSubmitInfos[rIdx].value = m_iterations[iterIdx]->writeValue;
1032 				signalSemaphoreSubmitInfos[rIdx].value = m_iterations[iterIdx]->readValue;
1033 
1034 				synchronizationWrapper->addSubmitInfo(
1035 					1u,												// deUint32								waitSemaphoreInfoCount
1036 					&waitSemaphoreSubmitInfos[rIdx],				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
1037 					1u,												// deUint32								commandBufferInfoCount
1038 					&commandBufferSubmitInfos[rIdx],				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
1039 					1u,												// deUint32								signalSemaphoreInfoCount
1040 					&signalSemaphoreSubmitInfos[rIdx],				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
1041 					rIdx == 0 ? DE_FALSE : DE_TRUE,
1042 					DE_TRUE
1043 				);
1044 
1045 				VkCommandBuffer cmdBuffer = commandBufferSubmitInfos[rIdx].commandBuffer;
1046 				beginCommandBuffer(vk, cmdBuffer);
1047 				m_iterations[iterIdx]->readOp->recordCommands(cmdBuffer);
1048 				endCommandBuffer(vk, cmdBuffer);
1049 			}
1050 		}
1051 
1052 		VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
1053 
1054 		VK_CHECK(vk.deviceWaitIdle(device));
1055 
1056 		hostCopyThread.join();
1057 
1058 		{
1059 			const Data	expected = m_iterations.front()->writeOp->getData();
1060 			const Data	actual	 = m_iterations.back()->readOp->getData();
1061 
1062 			if (0 != deMemCmp(expected.data, actual.data, expected.size))
1063 				return tcu::TestStatus::fail("Memory contents don't match");
1064 		}
1065 
1066 		return tcu::TestStatus::pass("OK");
1067 	}
1068 
1069 protected:
1070 	const SynchronizationType					m_type;
1071 	OperationContext							m_opContext;
1072 	const ResourceDescription					m_resourceDesc;
1073 	std::vector<SharedPtr<TimelineIteration> >	m_iterations;
1074 };
1075 
1076 class DeviceHostSyncTestCase : public TestCase
1077 {
1078 public:
1079 	DeviceHostSyncTestCase	(tcu::TestContext&			testCtx,
1080 							 const std::string&			name,
1081 							 SynchronizationType		type,
1082 							 const ResourceDescription	resourceDesc,
1083 							 const OperationName		writeOp,
1084 							 const OperationName		readOp,
1085 							 PipelineCacheData&			pipelineCacheData)
1086 		: TestCase				(testCtx, name)
1087 		, m_type				(type)
1088 		, m_resourceDesc		(resourceDesc)
1089 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
1090 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
1091 		, m_pipelineCacheData	(pipelineCacheData)
1092 	{
1093 	}
1094 
1095 	void checkSupport(Context& context) const override
1096 	{
1097 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
1098 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1099 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1100 	}
1101 
1102 	void initPrograms (SourceCollections& programCollection) const override
1103 	{
1104 		m_writeOp->initPrograms(programCollection);
1105 		m_readOp->initPrograms(programCollection);
1106 	}
1107 
1108 	TestInstance* createInstance (Context& context) const override
1109 	{
1110 		return new DeviceHostTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData);
1111 	}
1112 
1113 private:
1114 	const SynchronizationType			m_type;
1115 	const ResourceDescription			m_resourceDesc;
1116 	const SharedPtr<OperationSupport>	m_writeOp;
1117 	const SharedPtr<OperationSupport>	m_readOp;
1118 	PipelineCacheData&					m_pipelineCacheData;
1119 };
1120 
1121 class DeviceHostTestsBase : public tcu::TestCaseGroup
1122 {
1123 public:
1124 	// Synchronization of serialized device/host operations
1125 	DeviceHostTestsBase(tcu::TestContext& testCtx, SynchronizationType type)
1126 		: tcu::TestCaseGroup(testCtx, "device_host")
1127 		, m_type(type)
1128 	{
1129 	}
1130 
1131 	void initCommonTests (void)
1132 	{
1133 		static const OperationName		writeOps[]	=
1134 		{
1135 			OPERATION_NAME_WRITE_COPY_BUFFER,
1136 			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1137 			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1138 			OPERATION_NAME_WRITE_COPY_IMAGE,
1139 			OPERATION_NAME_WRITE_BLIT_IMAGE,
1140 			OPERATION_NAME_WRITE_SSBO_VERTEX,
1141 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1142 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1143 			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1144 			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1145 			OPERATION_NAME_WRITE_SSBO_COMPUTE,
1146 			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1147 			OPERATION_NAME_WRITE_IMAGE_VERTEX,
1148 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1149 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1150 			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1151 			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1152 			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1153 			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1154 		};
1155 		static const OperationName		readOps[]	=
1156 		{
1157 			OPERATION_NAME_READ_COPY_BUFFER,
1158 			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1159 			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1160 			OPERATION_NAME_READ_COPY_IMAGE,
1161 			OPERATION_NAME_READ_BLIT_IMAGE,
1162 			OPERATION_NAME_READ_UBO_VERTEX,
1163 			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1164 			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1165 			OPERATION_NAME_READ_UBO_GEOMETRY,
1166 			OPERATION_NAME_READ_UBO_FRAGMENT,
1167 			OPERATION_NAME_READ_UBO_COMPUTE,
1168 			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1169 			OPERATION_NAME_READ_SSBO_VERTEX,
1170 			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1171 			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1172 			OPERATION_NAME_READ_SSBO_GEOMETRY,
1173 			OPERATION_NAME_READ_SSBO_FRAGMENT,
1174 			OPERATION_NAME_READ_SSBO_COMPUTE,
1175 			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1176 			OPERATION_NAME_READ_IMAGE_VERTEX,
1177 			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1178 			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1179 			OPERATION_NAME_READ_IMAGE_GEOMETRY,
1180 			OPERATION_NAME_READ_IMAGE_FRAGMENT,
1181 			OPERATION_NAME_READ_IMAGE_COMPUTE,
1182 			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1183 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1184 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1185 			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1186 			OPERATION_NAME_READ_VERTEX_INPUT,
1187 		};
1188 
1189 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(writeOps); ++writeOpNdx)
1190 		for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(readOps); ++readOpNdx)
1191 		{
1192 			const OperationName	writeOp		= writeOps[writeOpNdx];
1193 			const OperationName	readOp		= readOps[readOpNdx];
1194 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1195 			bool				empty		= true;
1196 
1197 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str()));
1198 
1199 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1200 			{
1201 				const ResourceDescription&	resource	= s_resources[resourceNdx];
1202 				std::string					name		= getResourceName(resource);
1203 
1204 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1205 				{
1206 					opGroup->addChild(new DeviceHostSyncTestCase(m_testCtx, name, m_type, resource, writeOp, readOp, m_pipelineCacheData));
1207 					empty = false;
1208 				}
1209 			}
1210 			if (!empty)
1211 				addChild(opGroup.release());
1212 		}
1213 	}
1214 
1215 protected:
1216 	SynchronizationType m_type;
1217 
1218 private:
1219 	// synchronization.op tests share pipeline cache data to speed up test
1220 	// execution.
1221 	PipelineCacheData	m_pipelineCacheData;
1222 };
1223 
1224 class LegacyDeviceHostTests : public DeviceHostTestsBase
1225 {
1226 public:
1227 	LegacyDeviceHostTests(tcu::TestContext& testCtx)
1228 		: DeviceHostTestsBase(testCtx, SynchronizationType::LEGACY)
1229 	{
1230 	}
1231 
1232 	void init(void)
1233 	{
1234 		initCommonTests();
1235 
1236 		de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(m_testCtx, "misc", ""));
1237 		// Timeline semaphore properties test
1238 		addFunctionCase(miscGroup.get(), "max_difference_value", checkSupport, maxDifferenceValueCase, m_type);
1239 		// Timeline semaphore initial value test
1240 		addFunctionCase(miscGroup.get(), "initial_value", checkSupport, initialValueCase, m_type);
1241 		addChild(miscGroup.release());
1242 	}
1243 };
1244 
1245 class Sytnchronization2DeviceHostTests : public DeviceHostTestsBase
1246 {
1247 public:
1248 	Sytnchronization2DeviceHostTests(tcu::TestContext& testCtx)
1249 		: DeviceHostTestsBase(testCtx, SynchronizationType::SYNCHRONIZATION2)
1250 	{
1251 	}
1252 
1253 	void init(void)
1254 	{
1255 		initCommonTests();
1256 
1257 		de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(m_testCtx, "misc", ""));
1258 		// Timeline semaphore properties test
1259 		addFunctionCase(miscGroup.get(), "max_difference_value", checkSupport, maxDifferenceValueCase, m_type);
1260 		addChild(miscGroup.release());
1261 	}
1262 };
1263 
1264 struct QueueTimelineIteration
1265 {
1266 	QueueTimelineIteration(const SharedPtr<OperationSupport>&	_opSupport,
1267 						   deUint64								lastValue,
1268 						   VkQueue								_queue,
1269 						   deUint32								_queueFamilyIdx,
1270 						   de::Random&							rng)
1271 		: opSupport(_opSupport)
1272 		, queue(_queue)
1273 		, queueFamilyIdx(_queueFamilyIdx)
1274 	{
1275 		timelineValue	= lastValue + rng.getInt(1, 100);
1276 	}
1277 	~QueueTimelineIteration() {}
1278 
1279 	SharedPtr<OperationSupport>	opSupport;
1280 	VkQueue						queue;
1281 	deUint32					queueFamilyIdx;
1282 	deUint64					timelineValue;
1283 	SharedPtr<Operation>		op;
1284 };
1285 
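// Build one VkDeviceQueueCreateInfo per queue family, requesting every
// available queue; queue priorities are filled in later by the caller.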
1286 std::vector<VkDeviceQueueCreateInfo> getQueueCreateInfo(const std::vector<VkQueueFamilyProperties> queueFamilyProperties)
1287 {
1288 	std::vector<VkDeviceQueueCreateInfo> infos;
1289 
1290 	for (deUint32 i = 0; i < queueFamilyProperties.size(); i++) {
1291 		VkDeviceQueueCreateInfo info =
1292 		{
1293 			VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
1294 			DE_NULL,
1295 			0,
1296 			i,
1297 			queueFamilyProperties[i].queueCount,
1298 			DE_NULL
1299 		};
1300 		infos.push_back(info);
1301 	}
1302 
1303 	return infos;
1304 }
1305 
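// Create a device exposing all queues of all families, with the timeline
// semaphore feature enabled (and synchronization2 when requested), so tests
// can submit work across multiple queues.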
1306 Move<VkDevice> createTestDevice(Context& context, const VkInstance& instance, const InstanceInterface& vki, SynchronizationType type)
1307 {
1308 	const VkPhysicalDevice							physicalDevice				= chooseDevice(vki, instance, context.getTestContext().getCommandLine());
1309 	const std::vector<VkQueueFamilyProperties>		queueFamilyProperties		= getPhysicalDeviceQueueFamilyProperties(vki, physicalDevice);
1310 	std::vector<VkDeviceQueueCreateInfo>			queueCreateInfos			= getQueueCreateInfo(queueFamilyProperties);
1311 	VkPhysicalDeviceSynchronization2FeaturesKHR		synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
1312 	VkPhysicalDeviceTimelineSemaphoreFeatures		timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
1313 	VkPhysicalDeviceFeatures2						createPhysicalFeatures		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, &timelineSemaphoreFeatures, context.getDeviceFeatures() };
1314 	void**											nextPtr						= &timelineSemaphoreFeatures.pNext;
1315 
1316 	std::vector<const char*> deviceExtensions;
1317 
1318 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_timeline_semaphore"))
1319 		deviceExtensions.push_back("VK_KHR_timeline_semaphore");
1320 	if (type == SynchronizationType::SYNCHRONIZATION2)
1321 	{
1322 		deviceExtensions.push_back("VK_KHR_synchronization2");
1323 		addToChainVulkanStructure(&nextPtr, synchronization2Features);
1324 	}
1325 
1326 	void* pNext												= &createPhysicalFeatures;
1327 #ifdef CTS_USES_VULKANSC
1328 	VkDeviceObjectReservationCreateInfo memReservationInfo	= context.getTestContext().getCommandLine().isSubProcess() ? context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
1329 	memReservationInfo.pNext								= pNext;
1330 	pNext													= &memReservationInfo;
1331 
1332 	VkPhysicalDeviceVulkanSC10Features sc10Features			= createDefaultSC10Features();
1333 	sc10Features.pNext										= pNext;
1334 	pNext													= &sc10Features;
1335 
1336 	VkPipelineCacheCreateInfo			pcCI;
1337 	std::vector<VkPipelinePoolSize>		poolSizes;
1338 	if (context.getTestContext().getCommandLine().isSubProcess())
1339 	{
1340 		if (context.getResourceInterface()->getCacheDataSize() > 0)
1341 		{
1342 			pcCI =
1343 			{
1344 				VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,		// VkStructureType				sType;
1345 				DE_NULL,											// const void*					pNext;
1346 				VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
1347 					VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,	// VkPipelineCacheCreateFlags	flags;
1348 				context.getResourceInterface()->getCacheDataSize(),	// deUintptr					initialDataSize;
1349 				context.getResourceInterface()->getCacheData()		// const void*					pInitialData;
1350 			};
1351 			memReservationInfo.pipelineCacheCreateInfoCount		= 1;
1352 			memReservationInfo.pPipelineCacheCreateInfos		= &pcCI;
1353 		}
1354 
1355 		poolSizes							= context.getResourceInterface()->getPipelinePoolSizes();
1356 		if (!poolSizes.empty())
1357 		{
1358 			memReservationInfo.pipelinePoolSizeCount			= deUint32(poolSizes.size());
1359 			memReservationInfo.pPipelinePoolSizes				= poolSizes.data();
1360 		}
1361 	}
1362 #endif // CTS_USES_VULKANSC
1363 
1364 	const VkDeviceCreateInfo						deviceInfo				=
1365 	{
1366 		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,							//VkStructureType					sType;
1367 		pNext,															//const void*						pNext;
1368 		0u,																//VkDeviceCreateFlags				flags;
1369 		static_cast<deUint32>(queueCreateInfos.size()),					//deUint32							queueCreateInfoCount;
1370 		&queueCreateInfos[0],											//const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
1371 		0u,																//deUint32							enabledLayerCount;
1372 		DE_NULL,														//const char* const*				ppEnabledLayerNames;
1373 		static_cast<deUint32>(deviceExtensions.size()),					//deUint32							enabledExtensionCount;
1374 		deviceExtensions.data(),										//const char* const*				ppEnabledExtensionNames;
1375 		0u																//const VkPhysicalDeviceFeatures*	pEnabledFeatures;
1376 	};
1377 	std::vector<SharedPtr<std::vector<float> > >	queuePriorities;
1378 
1379 	for (auto& queueCreateInfo : queueCreateInfos)
1380 	{
1381 		MovePtr<std::vector<float> > priorities(new std::vector<float>);
1382 
1383 		for (deUint32 i = 0; i < queueCreateInfo.queueCount; i++)
1384 			priorities->push_back(1.0f);
1385 
1386 		queuePriorities.push_back(makeSharedPtr(priorities));
1387 
1388 		queueCreateInfo.pQueuePriorities = &(*queuePriorities.back().get())[0];
1389 	}
1390 
1391 	const auto validation = context.getTestContext().getCommandLine().isValidationEnabled();
1392 
1393 	return createCustomDevice(validation, context.getPlatformInterface(), instance,
1394 							  vki, physicalDevice, &deviceInfo);
1395 }
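// Informal sketch of the feature chain createTestDevice() passes to device creation when
// SynchronizationType::SYNCHRONIZATION2 is selected (otherwise the synchronization2 entry is absent):
//
//   VkPhysicalDeviceFeatures2
//     -> VkPhysicalDeviceTimelineSemaphoreFeatures        (timelineSemaphore = VK_TRUE)
//       -> VkPhysicalDeviceSynchronization2FeaturesKHR    (synchronization2 = VK_TRUE)
//
// On Vulkan SC the object reservation and SC 1.0 feature structures are additionally prepended
// to this chain before it reaches VkDeviceCreateInfo::pNext.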
1396 
1397 
1398 // Class to wrap a singleton instance and device
1399 class SingletonDevice
1400 {
1401 	SingletonDevice	(Context& context, SynchronizationType type)
1402 		: m_logicalDevice	(createTestDevice(context, context.getInstance(), context.getInstanceInterface(), type))
1403 	{
1404 	}
1405 
1406 public:
1407 
1408 	static const Unique<vk::VkDevice>& getDevice(Context& context, SynchronizationType type)
1409 	{
1410 		if (!m_singletonDevice)
1411 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context, type));
1412 
1413 		DE_ASSERT(m_singletonDevice);
1414 		return m_singletonDevice->m_logicalDevice;
1415 	}
1416 
1417 	static void destroy()
1418 	{
1419 		m_singletonDevice.clear();
1420 	}
1421 private:
1422 	const Unique<vk::VkDevice>			m_logicalDevice;
1423 
1424 	static SharedPtr<SingletonDevice>	m_singletonDevice;
1425 };
1426 SharedPtr<SingletonDevice>		SingletonDevice::m_singletonDevice;
1427 
1428 static void cleanupGroup ()
1429 {
1430 	// Destroy singleton object
1431 	SingletonDevice::destroy();
1432 }
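// The singleton device above is shared by every test instance in this file so the (potentially
// expensive) custom device creation happens only once per test group; each group's deinit()
// calls cleanupGroup() to release it before the next group runs.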
1433 
1434 // Create a chain of operations with data copied across queues & host
1435 // and submit the operations out of order to verify that the queues
1436 // are properly unblocked as the work progresses.
1437 class WaitBeforeSignalTestInstance : public TestInstance
1438 {
1439 public:
1440 	WaitBeforeSignalTestInstance (Context&								context,
1441 								  SynchronizationType					type,
1442 								  const ResourceDescription&			resourceDesc,
1443 								  const SharedPtr<OperationSupport>&	writeOp,
1444 								  const SharedPtr<OperationSupport>&	readOp,
1445 								  PipelineCacheData&					pipelineCacheData)
1446 		: TestInstance		(context)
1447 		, m_type			(type)
1448 		, m_resourceDesc	(resourceDesc)
1449 		, m_device			(SingletonDevice::getDevice(context, type))
1450 		, m_context			(context)
1451 #ifndef CTS_USES_VULKANSC
1452 		, m_deviceDriver	(de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *m_device, context.getUsedApiVersion())))
1453 #else
1454 		, m_deviceDriver	(de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), context.getInstance(), *m_device, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(), context.getUsedApiVersion()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_device)))
1455 #endif // CTS_USES_VULKANSC
1456 		, m_allocator		(new SimpleAllocator(*m_deviceDriver, *m_device,
1457 												 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1458 												 chooseDevice(context.getInstanceInterface(), context.getInstance(), context.getTestContext().getCommandLine()))))
1459 		, m_opContext		(context, type, *m_deviceDriver, *m_device, *m_allocator, pipelineCacheData)
1460 	{
1461 		const auto&									vki							= m_context.getInstanceInterface();
1462 		const auto									instance					= m_context.getInstance();
1463 		const DeviceInterface&						vk							= *m_deviceDriver;
1464 		const VkDevice								device						= *m_device;
1465 		const VkPhysicalDevice						physicalDevice				= chooseDevice(vki, instance, context.getTestContext().getCommandLine());
1466 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties		= getPhysicalDeviceQueueFamilyProperties(vki, physicalDevice);
1467 		const deUint32								universalQueueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1468 		de::Random									rng							(1234);
1469 		deUint32									lastCopyOpIdx				= 0;
1470 		std::set<std::pair<deUint32, deUint32> >	used_queues;
1471 
1472 		m_hostTimelineValue = rng.getInt(0, 1000);
1473 
1474 		m_iterations.push_back(makeSharedPtr(new QueueTimelineIteration(writeOp, m_hostTimelineValue,
1475 																		getDeviceQueue(vk, device,
1476 																		universalQueueFamilyIndex, 0),
1477 																		universalQueueFamilyIndex, rng)));
1478 		used_queues.insert(std::make_pair(universalQueueFamilyIndex, 0));
1479 
1480 		// Go through all the queues and try to use all the ones that
1481 		// support the type of resource we're dealing with.
1482 		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1483 			for (deUint32 instanceIdx = 0; instanceIdx < queueFamilyProperties[familyIdx].queueCount; instanceIdx++) {
1484 				// Only add each queue once.
1485 				if (used_queues.find(std::make_pair(familyIdx, instanceIdx)) != used_queues.end())
1486 					continue;
1487 
1488 				// Find an operation compatible with the queue
1489 				for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++) {
1490 					OperationName					copyOpName			= s_copyOps[(lastCopyOpIdx + copyOpIdx) % DE_LENGTH_OF_ARRAY(s_copyOps)];
1491 
1492 					if (isResourceSupported(copyOpName, resourceDesc))
1493 					{
1494 						SharedPtr<OperationSupport>	copyOpSupport		(makeOperationSupport(copyOpName, resourceDesc).release());
1495 						VkQueueFlags				copyOpQueueFlags	= copyOpSupport->getQueueFlags(m_opContext);
1496 
1497 						if ((copyOpQueueFlags & queueFamilyProperties[familyIdx].queueFlags) != copyOpQueueFlags)
1498 							continue;
1499 
1500 						// Barriers use the VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT pipeline stage, so the queue must have VK_QUEUE_GRAPHICS_BIT
1501 						if ((copyOpQueueFlags & VK_QUEUE_GRAPHICS_BIT) == 0u)
1502 							continue;
1503 
1504 						m_iterations.push_back(makeSharedPtr(new QueueTimelineIteration(copyOpSupport, m_iterations.back()->timelineValue,
1505 																						getDeviceQueue(vk, device, familyIdx, instanceIdx),
1506 																						familyIdx, rng)));
1507 						used_queues.insert(std::make_pair(familyIdx, instanceIdx));
1508 						break;
1509 					}
1510 				}
1511 			}
1512 		}
1513 
1514 		// Add the read operation on the universal queue; it should be
1515 		// submitted in order with respect to the write operation.
1516 		m_iterations.push_back(makeSharedPtr(new QueueTimelineIteration(readOp, m_iterations.back()->timelineValue,
1517 																		getDeviceQueue(vk, device,
1518 																		universalQueueFamilyIndex, 0),
1519 																		universalQueueFamilyIndex, rng)));
1520 
1521 		// Now create the resources with the usage associated to the
1522 		// operation performed on the resource.
1523 		for (deUint32 opIdx = 0; opIdx < (m_iterations.size() - 1); opIdx++)
1524 		{
1525 			deUint32 usage = m_iterations[opIdx]->opSupport->getOutResourceUsageFlags() | m_iterations[opIdx + 1]->opSupport->getInResourceUsageFlags();
1526 
1527 			m_resources.push_back(makeSharedPtr(new Resource(m_opContext, resourceDesc, usage)));
1528 		}
1529 
1530 		m_iterations.front()->op = makeSharedPtr(m_iterations.front()->opSupport->build(m_opContext, *m_resources.front()).release());
1531 		for (deUint32 opIdx = 1; opIdx < (m_iterations.size() - 1); opIdx++)
1532 		{
1533 			m_iterations[opIdx]->op = makeSharedPtr(m_iterations[opIdx]->opSupport->build(m_opContext,
1534 																						  *m_resources[opIdx - 1],
1535 																						  *m_resources[opIdx]).release());
1536 		}
1537 		m_iterations.back()->op = makeSharedPtr(m_iterations.back()->opSupport->build(m_opContext, *m_resources.back()).release());
1538 	}
1539 
1540 	~WaitBeforeSignalTestInstance()
1541 	{
1542 	}
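	// A rough picture of the timeline values used by iterate(), assuming QueueTimelineIteration
	// assigns strictly increasing values starting above m_hostTimelineValue (H):
	//
	//   host signals H  ->  iteration 0 waits H,    signals v0
	//                       iteration 1 waits v0,   signals v1
	//                       ...
	//                       final read  waits vN-1, signals vN
	//
	// GPU submissions are issued in reverse order, so every wait is submitted before the
	// corresponding signal exists; the single host signal of H then unblocks the whole chain.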
1543 
1544 	tcu::TestStatus	iterate (void)
1545 	{
1546 		const DeviceInterface&							vk							= *m_deviceDriver;
1547 		const VkDevice									device						= *m_device;
1548 		const Unique<VkSemaphore>						semaphore					(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
1549 		std::vector<SharedPtr<Move<VkCommandPool> > >	cmdPools;
1550 		std::vector<SharedPtr<Move<VkCommandBuffer> > >	ptrCmdBuffers;
1551 		std::vector<VkCommandBufferSubmitInfoKHR>		commandBufferSubmitInfos	(m_iterations.size(), makeCommonCommandBufferSubmitInfo(0));
1552 		VkSemaphoreSubmitInfoKHR						waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
1553 		VkSemaphoreSubmitInfoKHR						signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
1554 
1555 		for (deUint32 opNdx = 0; opNdx < m_iterations.size(); opNdx++)
1556 		{
1557 			cmdPools.push_back(makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
1558 																 m_iterations[opNdx]->queueFamilyIdx)));
1559 			ptrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools.back())));
1560 			commandBufferSubmitInfos[opNdx].commandBuffer = **(ptrCmdBuffers.back());
1561 		}
1562 
1563 		// Randomize the data copied over.
1564 		{
1565 			const Data				startData		= m_iterations.front()->op->getData();
1566 			Data					randomizedData;
1567 			std::vector<deUint8>	dataArray;
1568 
1569 			dataArray.resize(startData.size);
1570 			randomizeData(dataArray, m_resourceDesc);
1571 			randomizedData.size = dataArray.size();
1572 			randomizedData.data = &dataArray[0];
1573 			m_iterations.front()->op->setData(randomizedData);
1574 		}
1575 
1576 		for (deUint32 _iterIdx = 0; _iterIdx < (m_iterations.size() - 1); _iterIdx++)
1577 		{
1578 			// Submit in reverse order of the dependency order to
1579 			// exercise the wait-before-submit behavior.
1580 			deUint32					iterIdx					= (deUint32)(m_iterations.size() - 2 - _iterIdx);
1581 			VkCommandBuffer				cmdBuffer				= commandBufferSubmitInfos[iterIdx].commandBuffer;
1582 			SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_TRUE);
1583 
1584 			waitSemaphoreSubmitInfo.value		= iterIdx == 0 ? m_hostTimelineValue : m_iterations[iterIdx - 1]->timelineValue;
1585 			signalSemaphoreSubmitInfo.value		= m_iterations[iterIdx]->timelineValue;
1586 
1587 			synchronizationWrapper->addSubmitInfo(
1588 				1u,										// deUint32								waitSemaphoreInfoCount
1589 				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
1590 				1u,										// deUint32								commandBufferInfoCount
1591 				&commandBufferSubmitInfos[iterIdx],		// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
1592 				1u,										// deUint32								signalSemaphoreInfoCount
1593 				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
1594 				DE_TRUE,
1595 				DE_TRUE
1596 			);
1597 
1598 			beginCommandBuffer(vk, cmdBuffer);
1599 			m_iterations[iterIdx]->op->recordCommands(cmdBuffer);
1600 
1601 			{
1602 				const SyncInfo	writeSync	= m_iterations[iterIdx]->op->getOutSyncInfo();
1603 				const SyncInfo	readSync	= m_iterations[iterIdx + 1]->op->getInSyncInfo();
1604 				const Resource&	resource	= *m_resources[iterIdx];
1605 
1606 				if (resource.getType() == RESOURCE_TYPE_IMAGE)
1607 				{
1608 					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1609 					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1610 
1611 					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
1612 						writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
1613 						writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
1614 						readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
1615 						readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
1616 						writeSync.imageLayout,							// VkImageLayout					oldLayout
1617 						readSync.imageLayout,							// VkImageLayout					newLayout
1618 						resource.getImage().handle,						// VkImage							image
1619 						resource.getImage().subresourceRange,			// VkImageSubresourceRange			subresourceRange
1620 						m_iterations[iterIdx]->queueFamilyIdx,			// deUint32							srcQueueFamilyIndex
1621 						m_iterations[iterIdx + 1]->queueFamilyIdx		// deUint32							destQueueFamilyIndex
1622 					);
1623 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
1624 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
1625 				}
1626 				else
1627 				{
1628 					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
1629 						writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
1630 						writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
1631 						readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
1632 						readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
1633 						resource.getBuffer().handle,					// VkBuffer							buffer
1634 						0,												// VkDeviceSize						offset
1635 						VK_WHOLE_SIZE,									// VkDeviceSize						size
1636 						m_iterations[iterIdx]->queueFamilyIdx,			// deUint32							srcQueueFamilyIndex
1637 						m_iterations[iterIdx + 1]->queueFamilyIdx		// deUint32							dstQueueFamilyIndex
1638 					);
1639 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
1640 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
1641 				}
1642 			}
1643 
1644 			endCommandBuffer(vk, cmdBuffer);
1645 
1646 			VK_CHECK(synchronizationWrapper->queueSubmit(m_iterations[iterIdx]->queue, DE_NULL));
1647 		}
1648 
1649 		// Submit the last read operation in order.
1650 		{
1651 			const deUint32				iterIdx					= (deUint32) (m_iterations.size() - 1);
1652 			SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_TRUE);
1653 
1654 			waitSemaphoreSubmitInfo.value		= m_iterations[iterIdx - 1]->timelineValue;
1655 			signalSemaphoreSubmitInfo.value		= m_iterations[iterIdx]->timelineValue;
1656 
1657 			synchronizationWrapper->addSubmitInfo(
1658 				1u,										// deUint32								waitSemaphoreInfoCount
1659 				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
1660 				1u,										// deUint32								commandBufferInfoCount
1661 				&commandBufferSubmitInfos[iterIdx],		// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
1662 				1u,										// deUint32								signalSemaphoreInfoCount
1663 				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
1664 				DE_TRUE,
1665 				DE_TRUE
1666 			);
1667 
1668 			VkCommandBuffer cmdBuffer = commandBufferSubmitInfos[iterIdx].commandBuffer;
1669 			beginCommandBuffer(vk, cmdBuffer);
1670 			m_iterations[iterIdx]->op->recordCommands(cmdBuffer);
1671 			endCommandBuffer(vk, cmdBuffer);
1672 
1673 			VK_CHECK(synchronizationWrapper->queueSubmit(m_iterations[iterIdx]->queue, DE_NULL));
1674 		}
1675 
1676 		{
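		// At this point every submission is queued but blocked: each one waits on a timeline
		// value that can only be reached after the host signals m_hostTimelineValue below.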
1677 			// Kick off the whole chain from the host.
1678 			hostSignal(vk, device, *semaphore, m_hostTimelineValue);
1679 			VK_CHECK(vk.deviceWaitIdle(device));
1680 		}
1681 
1682 		{
1683 			const Data	expected = m_iterations.front()->op->getData();
1684 			const Data	actual	 = m_iterations.back()->op->getData();
1685 
1686 			if (0 != deMemCmp(expected.data, actual.data, expected.size))
1687 				return tcu::TestStatus::fail("Memory contents don't match");
1688 		}
1689 
1690 		return tcu::TestStatus::pass("OK");
1691 	}
1692 
1693 protected:
1694 	const SynchronizationType						m_type;
1695 	const ResourceDescription						m_resourceDesc;
1696 	const Unique<VkDevice>&							m_device;
1697 	const Context&									m_context;
1698 #ifndef CTS_USES_VULKANSC
1699 	de::MovePtr<vk::DeviceDriver>					m_deviceDriver;
1700 #else
1701 	de::MovePtr<DeviceDriverSC,DeinitDeviceDeleter>	m_deviceDriver;
1702 #endif // CTS_USES_VULKANSC
1703 	MovePtr<Allocator>								m_allocator;
1704 	OperationContext								m_opContext;
1705 	std::vector<SharedPtr<QueueTimelineIteration> >	m_iterations;
1706 	std::vector<SharedPtr<Resource> >				m_resources;
1707 	deUint64										m_hostTimelineValue;
1708 };
1709 
1710 class WaitBeforeSignalTestCase : public TestCase
1711 {
1712 public:
1713 	WaitBeforeSignalTestCase	(tcu::TestContext&			testCtx,
1714 								 const std::string&			name,
1715 								 SynchronizationType		type,
1716 								 const ResourceDescription	resourceDesc,
1717 								 const OperationName		writeOp,
1718 								 const OperationName		readOp,
1719 								 PipelineCacheData&			pipelineCacheData)
1720 		: TestCase				(testCtx, name)
1721 		, m_type				(type)
1722 		, m_resourceDesc		(resourceDesc)
1723 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
1724 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
1725 		, m_pipelineCacheData	(pipelineCacheData)
1726 	{
1727 	}
1728 
1729 	void checkSupport(Context& context) const override
1730 	{
1731 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
1732 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1733 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1734 	}
1735 
1736 	void initPrograms (SourceCollections& programCollection) const override
1737 	{
1738 		m_writeOp->initPrograms(programCollection);
1739 		m_readOp->initPrograms(programCollection);
1740 
1741 		for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
1742 		{
1743 			if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
1744 				makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
1745 		}
1746 	}
1747 
1748 	TestInstance* createInstance (Context& context) const override
1749 	{
1750 		return new WaitBeforeSignalTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData);
1751 	}
1752 
1753 private:
1754 	SynchronizationType					m_type;
1755 	const ResourceDescription			m_resourceDesc;
1756 	const SharedPtr<OperationSupport>	m_writeOp;
1757 	const SharedPtr<OperationSupport>	m_readOp;
1758 	PipelineCacheData&					m_pipelineCacheData;
1759 };
1760 
1761 class WaitBeforeSignalTests : public tcu::TestCaseGroup
1762 {
1763 public:
1764 	// Synchronization of out of order submissions to queues
1765 	WaitBeforeSignalTests (tcu::TestContext& testCtx, SynchronizationType type)
1766 		: tcu::TestCaseGroup(testCtx, "wait_before_signal")
1767 		, m_type(type)
1768 	{
1769 	}
1770 
1771 	void init (void)
1772 	{
1773 		static const OperationName		writeOps[]	=
1774 		{
1775 			OPERATION_NAME_WRITE_COPY_BUFFER,
1776 			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1777 			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1778 			OPERATION_NAME_WRITE_COPY_IMAGE,
1779 			OPERATION_NAME_WRITE_BLIT_IMAGE,
1780 			OPERATION_NAME_WRITE_SSBO_VERTEX,
1781 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1782 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1783 			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1784 			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1785 			OPERATION_NAME_WRITE_SSBO_COMPUTE,
1786 			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1787 			OPERATION_NAME_WRITE_IMAGE_VERTEX,
1788 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1789 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1790 			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1791 			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1792 			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1793 			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1794 		};
1795 		static const OperationName		readOps[]	=
1796 		{
1797 			OPERATION_NAME_READ_COPY_BUFFER,
1798 			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1799 			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1800 			OPERATION_NAME_READ_COPY_IMAGE,
1801 			OPERATION_NAME_READ_BLIT_IMAGE,
1802 			OPERATION_NAME_READ_UBO_VERTEX,
1803 			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1804 			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1805 			OPERATION_NAME_READ_UBO_GEOMETRY,
1806 			OPERATION_NAME_READ_UBO_FRAGMENT,
1807 			OPERATION_NAME_READ_UBO_COMPUTE,
1808 			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1809 			OPERATION_NAME_READ_SSBO_VERTEX,
1810 			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1811 			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1812 			OPERATION_NAME_READ_SSBO_GEOMETRY,
1813 			OPERATION_NAME_READ_SSBO_FRAGMENT,
1814 			OPERATION_NAME_READ_SSBO_COMPUTE,
1815 			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1816 			OPERATION_NAME_READ_IMAGE_VERTEX,
1817 			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1818 			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1819 			OPERATION_NAME_READ_IMAGE_GEOMETRY,
1820 			OPERATION_NAME_READ_IMAGE_FRAGMENT,
1821 			OPERATION_NAME_READ_IMAGE_COMPUTE,
1822 			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1823 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1824 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1825 			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1826 			OPERATION_NAME_READ_VERTEX_INPUT,
1827 		};
1828 
1829 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(writeOps); ++writeOpNdx)
1830 		for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(readOps); ++readOpNdx)
1831 		{
1832 			const OperationName	writeOp		= writeOps[writeOpNdx];
1833 			const OperationName	readOp		= readOps[readOpNdx];
1834 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1835 			bool				empty		= true;
1836 
1837 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str()));
1838 
1839 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1840 			{
1841 				const ResourceDescription&	resource	= s_resources[resourceNdx];
1842 				std::string					name		= getResourceName(resource);
1843 
1844 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1845 				{
1846 					opGroup->addChild(new WaitBeforeSignalTestCase(m_testCtx, name, m_type, resource, writeOp, readOp, m_pipelineCacheData));
1847 					empty = false;
1848 				}
1849 			}
1850 			if (!empty)
1851 				addChild(opGroup.release());
1852 		}
1853 	}
1854 
1855 	void deinit (void)
1856 	{
1857 		cleanupGroup();
1858 	}
1859 
1860 private:
1861 	SynchronizationType m_type;
1862 
1863 	// synchronization.op tests share pipeline cache data to speed up test
1864 	// execution.
1865 	PipelineCacheData	m_pipelineCacheData;
1866 };
1867 
1868 // Creates a tree of operations like this:
1869 //
1870 // WriteOp1-Queue0 --> CopyOp2-Queue1 --> ReadOp-Queue4
1871 //                 |
1872 //                 --> CopyOp3-Queue3 --> ReadOp-Queue5
1873 //
1874 // Verifies that we get the data propagated properly.
1875 class OneToNTestInstance : public TestInstance
1876 {
1877 public:
1878 	OneToNTestInstance (Context&							context,
1879 						SynchronizationType					type,
1880 						const ResourceDescription&			resourceDesc,
1881 						const SharedPtr<OperationSupport>&	writeOp,
1882 						const SharedPtr<OperationSupport>&	readOp,
1883 						PipelineCacheData&					pipelineCacheData)
1884 		: TestInstance		(context)
1885 		, m_type			(type)
1886 		, m_resourceDesc	(resourceDesc)
1887 		, m_device			(SingletonDevice::getDevice(context, type))
1888 		, m_context			(context)
1889 #ifndef CTS_USES_VULKANSC
1890 		, m_deviceDriver(de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *m_device, context.getUsedApiVersion())))
1891 #else
1892 		, m_deviceDriver(de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), context.getInstance(), *m_device, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(), context.getUsedApiVersion()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_device)))
1893 #endif // CTS_USES_VULKANSC
1894 		, m_allocator		(new SimpleAllocator(*m_deviceDriver, *m_device,
1895 												 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1896 												 chooseDevice(context.getInstanceInterface(), context.getInstance(), context.getTestContext().getCommandLine()))))
1897 		, m_opContext		(context, type, *m_deviceDriver, *m_device, *m_allocator, pipelineCacheData)
1898 	{
1899 		const auto&									vki							= m_context.getInstanceInterface();
1900 		const auto									instance					= m_context.getInstance();
1901 		const DeviceInterface&						vk							= *m_deviceDriver;
1902 		const VkDevice								device						= *m_device;
1903 		const VkPhysicalDevice						physicalDevice				= chooseDevice(vki, instance, context.getTestContext().getCommandLine());
1904 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties		= getPhysicalDeviceQueueFamilyProperties(vki, physicalDevice);
1905 		const deUint32								universalQueueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1906 		de::Random									rng							(1234);
1907 		deUint32									lastCopyOpIdx				= 0;
1908 		deUint64									lastSubmitValue;
1909 
1910 		m_hostTimelineValue = rng.getInt(0, 1000);
1911 
1912 		m_writeIteration = makeSharedPtr(new QueueTimelineIteration(writeOp, m_hostTimelineValue,
1913 																	getDeviceQueue(vk, device,
1914 																	universalQueueFamilyIndex, 0),
1915 																	universalQueueFamilyIndex, rng));
1916 		lastSubmitValue = m_writeIteration->timelineValue;
1917 
1918 		// Go through all the queues and try to use all the ones that
1919 		// support the type of resource we're dealing with.
1920 		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1921 			for (deUint32 instanceIdx = 0; instanceIdx < queueFamilyProperties[familyIdx].queueCount; instanceIdx++) {
1922 				// Find an operation compatible with the queue
1923 				for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++) {
1924 					OperationName					copyOpName			= s_copyOps[(lastCopyOpIdx + copyOpIdx) % DE_LENGTH_OF_ARRAY(s_copyOps)];
1925 
1926 					if (isResourceSupported(copyOpName, resourceDesc))
1927 					{
1928 						SharedPtr<OperationSupport>	copyOpSupport		(makeOperationSupport(copyOpName, resourceDesc).release());
1929 						VkQueueFlags				copyOpQueueFlags	= copyOpSupport->getQueueFlags(m_opContext);
1930 
1931 						if ((copyOpQueueFlags & queueFamilyProperties[familyIdx].queueFlags) != copyOpQueueFlags)
1932 							continue;
1933 
1934 						VkShaderStageFlagBits writeStage = writeOp->getShaderStage();
1935 						if (writeStage != VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM && !isStageSupported(writeStage, copyOpQueueFlags)) {
1936 							continue;
1937 						}
1938 						VkShaderStageFlagBits readStage = readOp->getShaderStage();
1939 						if (readStage != VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM && !isStageSupported(readStage, copyOpQueueFlags)) {
1940 							continue;
1941 						}
1942 
1943 						m_copyIterations.push_back(makeSharedPtr(new QueueTimelineIteration(copyOpSupport, lastSubmitValue,
1944 																							getDeviceQueue(vk, device, familyIdx, instanceIdx),
1945 																							familyIdx, rng)));
1946 						lastSubmitValue = m_copyIterations.back()->timelineValue;
1947 						break;
1948 					}
1949 				}
1950 			}
1951 		}
1952 
1953 		for (deUint32 copyOpIdx = 0; copyOpIdx < m_copyIterations.size(); copyOpIdx++) {
1954 			bool added = false;
1955 
1956 			for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size() && !added; familyIdx++) {
1957 				for (deUint32 instanceIdx = 0; instanceIdx < queueFamilyProperties[familyIdx].queueCount && !added; instanceIdx++) {
1958 					VkQueueFlags	readOpQueueFlags	= readOp->getQueueFlags(m_opContext);
1959 
1960 					// Even when the family does not advertise all bits in readOpQueueFlags, do not skip it if the transfer bit is requested
1961 					// and the family supports graphics or compute operations, because reporting transfer capability is optional for such queues.
1962 					if (((readOpQueueFlags & queueFamilyProperties[familyIdx].queueFlags) != readOpQueueFlags) &&
1963 						(((readOpQueueFlags & VK_QUEUE_TRANSFER_BIT) == 0) ||
1964 						((queueFamilyProperties[familyIdx].queueFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) == 0)))
1965 						continue;
1966 
1967 					// Add the read operation on the universal queue; it should be
1968 					// submitted in order with respect to the write operation.
1969 					m_readIterations.push_back(makeSharedPtr(new QueueTimelineIteration(readOp, lastSubmitValue,
1970 																						getDeviceQueue(vk, device,
1971 																									   universalQueueFamilyIndex, 0),
1972 																						universalQueueFamilyIndex, rng)));
1973 					lastSubmitValue = m_readIterations.back()->timelineValue;
1974 
1975 					added = true;
1976 				}
1977 			}
1978 
1979 			DE_ASSERT(added);
1980 		}
1981 
1982 		DE_ASSERT(m_copyIterations.size() == m_readIterations.size());
1983 
1984 		// Now create the resources with the usage associated to the
1985 		// operation performed on the resource.
1986 		{
1987 			deUint32 writeUsage = writeOp->getOutResourceUsageFlags();
1988 
1989 			for (deUint32 copyOpIdx = 0; copyOpIdx < m_copyIterations.size(); copyOpIdx++) {
1990 				writeUsage |= m_copyIterations[copyOpIdx]->opSupport->getInResourceUsageFlags();
1991 			}
1992 			m_writeResource = makeSharedPtr(new Resource(m_opContext, resourceDesc, writeUsage));
1993 			m_writeIteration->op = makeSharedPtr(writeOp->build(m_opContext, *m_writeResource).release());
1994 
1995 			for (deUint32 copyOpIdx = 0; copyOpIdx < m_copyIterations.size(); copyOpIdx++)
1996 			{
1997 				deUint32 usage = m_copyIterations[copyOpIdx]->opSupport->getOutResourceUsageFlags() |
1998 					m_readIterations[copyOpIdx]->opSupport->getInResourceUsageFlags();
1999 
2000 				m_copyResources.push_back(makeSharedPtr(new Resource(m_opContext, resourceDesc, usage)));
2001 
2002 				m_copyIterations[copyOpIdx]->op = makeSharedPtr(m_copyIterations[copyOpIdx]->opSupport->build(m_opContext,
2003 																											  *m_writeResource,
2004 																											  *m_copyResources[copyOpIdx]).release());
2005 				m_readIterations[copyOpIdx]->op = makeSharedPtr(readOp->build(m_opContext,
2006 																			  *m_copyResources[copyOpIdx]).release());
2007 			}
2008 		}
2009 	}
2010 
2011 	~OneToNTestInstance ()
2012 	{
2013 	}
2014 
2015 	void recordBarrier (const DeviceInterface&	vk, VkCommandBuffer cmdBuffer, const QueueTimelineIteration& inIter, const QueueTimelineIteration& outIter, const Resource& resource, bool originalLayout)
2016 	{
2017 		const SyncInfo				writeSync				= inIter.op->getOutSyncInfo();
2018 		const SyncInfo				readSync				= outIter.op->getInSyncInfo();
2019 		SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_TRUE);
2020 
2021 		if (resource.getType() == RESOURCE_TYPE_IMAGE)
2022 		{
2023 			DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
2024 			DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
2025 
2026 			const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
2027 				writeSync.stageMask,											// VkPipelineStageFlags2KHR			srcStageMask
2028 				writeSync.accessMask,											// VkAccessFlags2KHR				srcAccessMask
2029 				readSync.stageMask,												// VkPipelineStageFlags2KHR			dstStageMask
2030 				readSync.accessMask,											// VkAccessFlags2KHR				dstAccessMask
2031 				originalLayout ? writeSync.imageLayout : readSync.imageLayout,	// VkImageLayout					oldLayout
2032 				readSync.imageLayout,											// VkImageLayout					newLayout
2033 				resource.getImage().handle,										// VkImage							image
2034 				resource.getImage().subresourceRange,							// VkImageSubresourceRange			subresourceRange
2035 				inIter.queueFamilyIdx,											// deUint32							srcQueueFamilyIndex
2036 				outIter.queueFamilyIdx											// deUint32							destQueueFamilyIndex
2037 			);
2038 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
2039 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
2040 		}
2041 		else
2042 		{
2043 			const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
2044 				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
2045 				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
2046 				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
2047 				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
2048 				resource.getBuffer().handle,					// VkBuffer							buffer
2049 				0,												// VkDeviceSize						offset
2050 				VK_WHOLE_SIZE,									// VkDeviceSize						size
2051 				inIter.queueFamilyIdx,							// deUint32							srcQueueFamilyIndex
2052 				outIter.queueFamilyIdx							// deUint32							dstQueueFamilyIndex
2053 			);
2054 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
2055 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
2056 		}
2057 	}
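	// The barriers recorded above express a queue family ownership transfer from inIter's queue
	// family to outIter's and, for images, a transition into the reader's layout. The
	// originalLayout flag selects whether oldLayout is the writer's layout (first consumer of the
	// resource) or the reader's layout (the image was already transitioned by an earlier consumer).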
2058 
2059 	void submit (const DeviceInterface&	vk, VkCommandBuffer cmdBuffer, const QueueTimelineIteration& iter, VkSemaphore semaphore, const deUint64 *waitValues, const deUint32 waitValuesCount)
2060 	{
2061 		VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo[] =
2062 		{
2063 			makeCommonSemaphoreSubmitInfo(semaphore, waitValues[0], VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR),
2064 			makeCommonSemaphoreSubmitInfo(semaphore, waitValues[1], VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR)
2065 		};
2066 		VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo =
2067 			makeCommonSemaphoreSubmitInfo(semaphore, iter.timelineValue, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
2068 
2069 		VkCommandBufferSubmitInfoKHR	commandBufferSubmitInfo	= makeCommonCommandBufferSubmitInfo(cmdBuffer);
2070 		SynchronizationWrapperPtr		synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_TRUE);
2071 
2072 		synchronizationWrapper->addSubmitInfo(
2073 			waitValuesCount,						// deUint32								waitSemaphoreInfoCount
2074 			waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
2075 			1u,										// deUint32								commandBufferInfoCount
2076 			&commandBufferSubmitInfo,				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
2077 			1u,										// deUint32								signalSemaphoreInfoCount
2078 			&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
2079 			DE_TRUE,
2080 			DE_TRUE
2081 		);
2082 
2083 		VK_CHECK(synchronizationWrapper->queueSubmit(iter.queue, DE_NULL));
2084 	}
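	// waitValuesCount selects how many of the two wait entries are actually consumed: the write
	// submission and the first copy wait on a single value, while later copies and all reads wait
	// on two (the producing value plus the previous submission on the same timeline); see iterate() below.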
2085 
2086 	tcu::TestStatus	iterate (void)
2087 	{
2088 		const DeviceInterface&								vk						= *m_deviceDriver;
2089 		const VkDevice										device					= *m_device;
2090 		const Unique<VkSemaphore>							semaphore				(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
2091 		Unique<VkCommandPool>								writeCmdPool			(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
2092 																									   m_context.getUniversalQueueFamilyIndex()));
2093 		Unique<VkCommandBuffer>								writeCmdBuffer			(makeCommandBuffer(vk, device, *writeCmdPool));
2094 		std::vector<SharedPtr<Move<VkCommandPool> > >		copyCmdPools;
2095 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		copyPtrCmdBuffers;
2096 		std::vector<SharedPtr<Move<VkCommandPool> > >		readCmdPools;
2097 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		readPtrCmdBuffers;
2098 
2099 		for (deUint32 copyOpNdx = 0; copyOpNdx < m_copyIterations.size(); copyOpNdx++)
2100 		{
2101 			copyCmdPools.push_back(makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
2102 																 m_copyIterations[copyOpNdx]->queueFamilyIdx)));
2103 			copyPtrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, **copyCmdPools.back())));
2104 
2105 			readCmdPools.push_back(makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
2106 																 m_readIterations[copyOpNdx]->queueFamilyIdx)));
2107 			readPtrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, **readCmdPools.back())));
2108 		}
2109 
2110 		// Randomize the data copied over.
2111 		{
2112 			const Data				startData		= m_writeIteration->op->getData();
2113 			Data					randomizedData;
2114 			std::vector<deUint8>	dataArray;
2115 
2116 			dataArray.resize(startData.size);
2117 			randomizeData(dataArray, m_resourceDesc);
2118 			randomizedData.size = dataArray.size();
2119 			randomizedData.data = &dataArray[0];
2120 			m_writeIteration->op->setData(randomizedData);
2121 		}
2122 
2123 		// Record command buffers
2124 		{
2125 			beginCommandBuffer(vk, *writeCmdBuffer);
2126 			m_writeIteration->op->recordCommands(*writeCmdBuffer);
2127 			endCommandBuffer(vk, *writeCmdBuffer);
2128 
2129 			for (deUint32 copyOpIdx = 0; copyOpIdx < m_copyIterations.size(); copyOpIdx++)
2130 			{
2131 				beginCommandBuffer(vk, **copyPtrCmdBuffers[copyOpIdx]);
2132 				recordBarrier(vk, **copyPtrCmdBuffers[copyOpIdx], *m_writeIteration, *m_copyIterations[copyOpIdx], *m_writeResource, copyOpIdx == 0);
2133 				m_copyIterations[copyOpIdx]->op->recordCommands(**copyPtrCmdBuffers[copyOpIdx]);
2134 				endCommandBuffer(vk, **copyPtrCmdBuffers[copyOpIdx]);
2135 			}
2136 
2137 			for (deUint32 readOpIdx = 0; readOpIdx < m_readIterations.size(); readOpIdx++)
2138 			{
2139 				beginCommandBuffer(vk, **readPtrCmdBuffers[readOpIdx]);
2140 				recordBarrier(vk, **readPtrCmdBuffers[readOpIdx], *m_copyIterations[readOpIdx], *m_readIterations[readOpIdx], *m_copyResources[readOpIdx], true);
2141 				m_readIterations[readOpIdx]->op->recordCommands(**readPtrCmdBuffers[readOpIdx]);
2142 				endCommandBuffer(vk, **readPtrCmdBuffers[readOpIdx]);
2143 			}
2144 		}
2145 
2146 		// Submit
2147 		{
2148 			submit(vk, *writeCmdBuffer, *m_writeIteration, *semaphore, &m_hostTimelineValue, 1);
2149 			for (deUint32 copyOpIdx = 0; copyOpIdx < m_copyIterations.size(); copyOpIdx++)
2150 			{
2151 				deUint64 waitValues[2] =
2152 				{
2153 					m_writeIteration->timelineValue,
2154 					copyOpIdx > 0 ? m_copyIterations[copyOpIdx - 1]->timelineValue : 0,
2155 				};
2156 
2157 				submit(vk, **copyPtrCmdBuffers[copyOpIdx], *m_copyIterations[copyOpIdx],
2158 					   *semaphore, waitValues, copyOpIdx > 0 ? 2 : 1);
2159 			}
2160 			for (deUint32 readOpIdx = 0; readOpIdx < m_readIterations.size(); readOpIdx++)
2161 			{
2162 				deUint64 waitValues[2] =
2163 				{
2164 					m_copyIterations[readOpIdx]->timelineValue,
2165 					readOpIdx > 0 ? m_readIterations[readOpIdx - 1]->timelineValue : m_copyIterations.back()->timelineValue,
2166 				};
2167 
2168 				submit(vk, **readPtrCmdBuffers[readOpIdx], *m_readIterations[readOpIdx],
2169 					   *semaphore, waitValues, 2);
2170 			}
2171 
2172 			// Kick off the whole chain from the host.
2173 			hostSignal(vk, device, *semaphore, m_hostTimelineValue);
2174 			VK_CHECK(vk.deviceWaitIdle(device));
2175 		}
2176 
2177 		{
2178 			const Data	expected = m_writeIteration->op->getData();
2179 
2180 			for (deUint32 readOpIdx = 0; readOpIdx < m_readIterations.size(); readOpIdx++)
2181 			{
2182 				const Data	actual	 = m_readIterations[readOpIdx]->op->getData();
2183 
2184 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
2185 					return tcu::TestStatus::fail("Memory contents don't match");
2186 			}
2187 		}
2188 
2189 		return tcu::TestStatus::pass("OK");
2190 	}
2191 
2192 protected:
2193 	SynchronizationType								m_type;
2194 	ResourceDescription								m_resourceDesc;
2195 	const Unique<VkDevice>&							m_device;
2196 	const Context&									m_context;
2197 #ifndef CTS_USES_VULKANSC
2198 	de::MovePtr<vk::DeviceDriver>					m_deviceDriver;
2199 #else
2200 	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	m_deviceDriver;
2201 #endif // CTS_USES_VULKANSC
2202 	MovePtr<Allocator>								m_allocator;
2203 	OperationContext								m_opContext;
2204 	SharedPtr<QueueTimelineIteration>				m_writeIteration;
2205 	std::vector<SharedPtr<QueueTimelineIteration> >	m_copyIterations;
2206 	std::vector<SharedPtr<QueueTimelineIteration> >	m_readIterations;
2207 	SharedPtr<Resource>								m_writeResource;
2208 	std::vector<SharedPtr<Resource> >				m_copyResources;
2209 	deUint64										m_hostTimelineValue;
2210 };
2211 
2212 class OneToNTestCase : public TestCase
2213 {
2214 public:
2215 	OneToNTestCase	(tcu::TestContext&			testCtx,
2216 					 const std::string&			name,
2217 					 SynchronizationType		type,
2218 					 const ResourceDescription	resourceDesc,
2219 					 const OperationName		writeOp,
2220 					 const OperationName		readOp,
2221 					 PipelineCacheData&			pipelineCacheData)
2222 		: TestCase				(testCtx, name)
2223 		, m_type				(type)
2224 		, m_resourceDesc		(resourceDesc)
2225 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
2226 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
2227 		, m_pipelineCacheData	(pipelineCacheData)
2228 	{
2229 	}
2230 
2231 	void checkSupport(Context& context) const override
2232 	{
2233 		context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
2234 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
2235 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
2236 	}
2237 
2238 	void initPrograms (SourceCollections& programCollection) const override
2239 	{
2240 		m_writeOp->initPrograms(programCollection);
2241 		m_readOp->initPrograms(programCollection);
2242 
2243 		for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
2244 		{
2245 			if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
2246 				makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
2247 		}
2248 	}
2249 
2250 	TestInstance* createInstance (Context& context) const override
2251 	{
2252 		return new OneToNTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData);
2253 	}
2254 
2255 private:
2256 	SynchronizationType					m_type;
2257 	const ResourceDescription			m_resourceDesc;
2258 	const SharedPtr<OperationSupport>	m_writeOp;
2259 	const SharedPtr<OperationSupport>	m_readOp;
2260 	PipelineCacheData&					m_pipelineCacheData;
2261 };
2262 
2263 class OneToNTests : public tcu::TestCaseGroup
2264 {
2265 public:
2266 	OneToNTests (tcu::TestContext& testCtx, SynchronizationType type)
2267 		: tcu::TestCaseGroup(testCtx, "one_to_n")
2268 		, m_type(type)
2269 	{
2270 	}
2271 
2272 	void init (void)
2273 	{
2274 		static const OperationName		writeOps[]	=
2275 		{
2276 			OPERATION_NAME_WRITE_COPY_BUFFER,
2277 			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
2278 			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
2279 			OPERATION_NAME_WRITE_COPY_IMAGE,
2280 			OPERATION_NAME_WRITE_BLIT_IMAGE,
2281 			OPERATION_NAME_WRITE_SSBO_VERTEX,
2282 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
2283 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
2284 			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
2285 			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
2286 			OPERATION_NAME_WRITE_SSBO_COMPUTE,
2287 			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
2288 			OPERATION_NAME_WRITE_IMAGE_VERTEX,
2289 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
2290 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
2291 			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
2292 			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
2293 			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
2294 			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
2295 		};
2296 		static const OperationName		readOps[]	=
2297 		{
2298 			OPERATION_NAME_READ_COPY_BUFFER,
2299 			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
2300 			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
2301 			OPERATION_NAME_READ_COPY_IMAGE,
2302 			OPERATION_NAME_READ_BLIT_IMAGE,
2303 			OPERATION_NAME_READ_UBO_VERTEX,
2304 			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
2305 			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
2306 			OPERATION_NAME_READ_UBO_GEOMETRY,
2307 			OPERATION_NAME_READ_UBO_FRAGMENT,
2308 			OPERATION_NAME_READ_UBO_COMPUTE,
2309 			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
2310 			OPERATION_NAME_READ_SSBO_VERTEX,
2311 			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
2312 			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
2313 			OPERATION_NAME_READ_SSBO_GEOMETRY,
2314 			OPERATION_NAME_READ_SSBO_FRAGMENT,
2315 			OPERATION_NAME_READ_SSBO_COMPUTE,
2316 			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
2317 			OPERATION_NAME_READ_IMAGE_VERTEX,
2318 			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
2319 			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
2320 			OPERATION_NAME_READ_IMAGE_GEOMETRY,
2321 			OPERATION_NAME_READ_IMAGE_FRAGMENT,
2322 			OPERATION_NAME_READ_IMAGE_COMPUTE,
2323 			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
2324 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
2325 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
2326 			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
2327 			OPERATION_NAME_READ_VERTEX_INPUT,
2328 		};
2329 
2330 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(writeOps); ++writeOpNdx)
2331 		for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(readOps); ++readOpNdx)
2332 		{
2333 			const OperationName	writeOp		= writeOps[writeOpNdx];
2334 			const OperationName	readOp		= readOps[readOpNdx];
2335 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
2336 			bool				empty		= true;
2337 
2338 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str()));
2339 
2340 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
2341 			{
2342 				const ResourceDescription&	resource	= s_resources[resourceNdx];
2343 				std::string					name		= getResourceName(resource);
2344 
2345 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
2346 				{
2347 					opGroup->addChild(new OneToNTestCase(m_testCtx, name, m_type, resource, writeOp, readOp, m_pipelineCacheData));
2348 					empty = false;
2349 				}
2350 			}
2351 			if (!empty)
2352 				addChild(opGroup.release());
2353 		}
2354 	}
2355 
2356 	void deinit (void)
2357 	{
2358 		cleanupGroup();
2359 	}
2360 
2361 private:
2362 	SynchronizationType	m_type;
2363 
2364 	// synchronization.op tests share pipeline cache data to speed up test
2365 	// execution.
2366 	PipelineCacheData	m_pipelineCacheData;
2367 };
2368 
2369 #ifndef CTS_USES_VULKANSC
2370 
2371 // Make a nonzero initial value for a semaphore. semId is assigned to each semaphore by callers.
2372 deUint64 getInitialValue(deUint32 semId)
2373 {
2374 	return (semId + 1ull) * 1000ull;
2375 }
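// For example, semId 0 yields an initial value of 1000, semId 1 yields 2000, and so on, keeping
// the per-semaphore value ranges well separated and away from zero.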
2376 
2377 struct SparseBindParams
2378 {
2379 	deUint32 numWaitSems;
2380 	deUint32 numSignalSems;
2381 };
2382 
2383 class SparseBindCase : public vkt::TestCase
2384 {
2385 public:
2386 							SparseBindCase	(tcu::TestContext& testCtx, const std::string& name, const SparseBindParams& params);
2387 	virtual					~SparseBindCase	(void) {}
2388 
2389 	virtual TestInstance*	createInstance	(Context& context) const;
2390 	virtual void			checkSupport	(Context& context) const;
2391 
2392 private:
2393 	SparseBindParams m_params;
2394 };
2395 
2396 class SparseBindInstance : public vkt::TestInstance
2397 {
2398 public:
2399 								SparseBindInstance	(Context& context, const SparseBindParams& params);
2400 	virtual						~SparseBindInstance	(void) {}
2401 
2402 	virtual tcu::TestStatus		iterate				(void);
2403 
2404 private:
2405 	SparseBindParams m_params;
2406 };
2407 
2408 SparseBindCase::SparseBindCase (tcu::TestContext& testCtx, const std::string& name, const SparseBindParams& params)
2409 	: vkt::TestCase	(testCtx, name)
2410 	, m_params		(params)
2411 {}
2412 
2413 TestInstance* SparseBindCase::createInstance (Context& context) const
2414 {
2415 	return new SparseBindInstance(context, m_params);
2416 }
2417 
2418 void SparseBindCase::checkSupport (Context& context) const
2419 {
2420 	// Check support for sparse binding and timeline semaphores.
2421 	context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SPARSE_BINDING);
2422 	context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
2423 }
2424 
2425 SparseBindInstance::SparseBindInstance (Context& context, const SparseBindParams& params)
2426 	: vkt::TestInstance	(context)
2427 	, m_params			(params)
2428 {
2429 }
2430 
2431 void queueBindSparse (const vk::DeviceInterface& vkd, vk::VkQueue queue, deUint32 bindInfoCount, const vk::VkBindSparseInfo *pBindInfo)
2432 {
2433 	VK_CHECK(vkd.queueBindSparse(queue, bindInfoCount, pBindInfo, DE_NULL));
2434 }
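// vkQueueBindSparse takes its timeline wait/signal values from a VkTimelineSemaphoreSubmitInfo
// chained into VkBindSparseInfo::pNext, which is exactly what SparseBindInstance::iterate() does
// below; the helper here only wraps the call with VK_CHECK and a null fence.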
2435 
2436 #endif // CTS_USES_VULKANSC
2437 
2438 struct SemaphoreWithInitial
2439 {
2440 	vk::Move<vk::VkSemaphore>	semaphore;
2441 	deUint64					initialValue;
2442 
2443 	SemaphoreWithInitial (vk::Move<vk::VkSemaphore>&& sem, deUint64 initVal)
2444 		: semaphore		(sem)
2445 		, initialValue	(initVal)
2446 	{}
2447 
2448 	SemaphoreWithInitial (SemaphoreWithInitial&& other)
2449 		: semaphore		(other.semaphore)
2450 		, initialValue	(other.initialValue)
2451 	{}
2452 };
2453 
2454 using SemaphoreVec	= std::vector<SemaphoreWithInitial>;
2455 using PlainSemVec	= std::vector<vk::VkSemaphore>;
2456 using ValuesVec		= std::vector<deUint64>;
2457 
2458 #ifndef CTS_USES_VULKANSC
2459 
2460 PlainSemVec getHandles (const SemaphoreVec& semVec)
2461 {
2462 	PlainSemVec handlesVec;
2463 	handlesVec.reserve(semVec.size());
2464 
2465 	const auto conversion = [](const SemaphoreWithInitial& s) { return s.semaphore.get(); };
2466 	std::transform(begin(semVec), end(semVec), std::back_inserter(handlesVec), conversion);
2467 
2468 	return handlesVec;
2469 }
2470 
2471 ValuesVec getInitialValues (const SemaphoreVec& semVec)
2472 {
2473 	ValuesVec initialValues;
2474 	initialValues.reserve(semVec.size());
2475 
2476 	const auto conversion = [](const SemaphoreWithInitial& s) { return s.initialValue; };
2477 	std::transform(begin(semVec), end(semVec), std::back_inserter(initialValues), conversion);
2478 
2479 	return initialValues;
2480 }
2481 
2482 // Returns a copy of the values vector with each value increased by one.
2483 ValuesVec getNextValues (const ValuesVec& values)
2484 {
2485 	ValuesVec nextValues;
2486 	nextValues.reserve(values.size());
2487 
2488 	std::transform(begin(values), end(values), std::back_inserter(nextValues), [](deUint64 v) { return v + 1ull; });
2489 	return nextValues;
2490 }
2491 
2492 SemaphoreWithInitial createTimelineSemaphore (const vk::DeviceInterface& vkd, vk::VkDevice device, deUint32 semId)
2493 {
2494 	const auto initialValue = getInitialValue(semId);
2495 	return SemaphoreWithInitial(createSemaphoreType(vkd, device, vk::VK_SEMAPHORE_TYPE_TIMELINE, 0u, initialValue), initialValue);
2496 }
2497 
2498 // Signal the given semaphores with the corresponding values.
2499 void hostSignal (const vk::DeviceInterface& vkd, vk::VkDevice device, const PlainSemVec& semaphores, const ValuesVec& signalValues)
2500 {
2501 	DE_ASSERT(semaphores.size() == signalValues.size());
2502 
2503 	for (size_t i = 0; i < semaphores.size(); ++i)
2504 		hostSignal(vkd, device, semaphores[i], signalValues[i]);
2505 }
2506 
2507 // Wait for the given semaphores and their corresponding values.
2508 void hostWait (const vk::DeviceInterface& vkd, vk::VkDevice device, const PlainSemVec& semaphores, const ValuesVec& waitValues)
2509 {
2510 	DE_ASSERT(semaphores.size() == waitValues.size() && !semaphores.empty());
2511 
2512 	constexpr deUint64 kTimeout = 10000000000ull; // 10 seconds in nanoseconds.
2513 
2514 	const vk::VkSemaphoreWaitInfo waitInfo =
2515 	{
2516 		vk::VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	//	VkStructureType			sType;
2517 		nullptr,									//	const void*				pNext;
2518 		0u,											//	VkSemaphoreWaitFlags	flags;
2519 		static_cast<deUint32>(semaphores.size()),	//	deUint32				semaphoreCount;
2520 		semaphores.data(),							//	const VkSemaphore*		pSemaphores;
2521 		waitValues.data(),							//	const deUint64*			pValues;
2522 	};
2523 	VK_CHECK(vkd.waitSemaphores(device, &waitInfo, kTimeout));
2524 }
2525 
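// Submits an empty sparse binding operation that waits on and signals timeline semaphores, verifying the
// implementation does not signal before the host has signaled the wait semaphores.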
tcu::TestStatus SparseBindInstance::iterate (void)
{
	const auto&	vkd		= m_context.getDeviceInterface();
	const auto	device	= m_context.getDevice();
	const auto	queue	= m_context.getSparseQueue();

	SemaphoreVec waitSemaphores;
	SemaphoreVec signalSemaphores;

	// Create as many semaphores as needed to wait and signal.
	for (deUint32 i = 0; i < m_params.numWaitSems; ++i)
		waitSemaphores.emplace_back(createTimelineSemaphore(vkd, device, i));
	for (deUint32 i = 0; i < m_params.numSignalSems; ++i)
		signalSemaphores.emplace_back(createTimelineSemaphore(vkd, device, i + m_params.numWaitSems));

	// Get handles for all semaphores.
	const auto waitSemHandles	= getHandles(waitSemaphores);
	const auto signalSemHandles	= getHandles(signalSemaphores);

	// Get initial values for all semaphores.
	const auto waitSemValues	= getInitialValues(waitSemaphores);
	const auto signalSemValues	= getInitialValues(signalSemaphores);

	// Get next expected values for all semaphores.
	const auto waitNextValues	= getNextValues(waitSemValues);
	const auto signalNextValues	= getNextValues(signalSemValues);

	const vk::VkTimelineSemaphoreSubmitInfo timeLineSubmitInfo =
	{
		vk::VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,			//	VkStructureType	sType;
		nullptr,														//	const void*		pNext;
		static_cast<deUint32>(waitNextValues.size()),					//	deUint32		waitSemaphoreValueCount;
		(waitNextValues.empty() ? nullptr : waitNextValues.data()),		//	const deUint64*	pWaitSemaphoreValues;
		static_cast<deUint32>(signalNextValues.size()),					//	deUint32		signalSemaphoreValueCount;
		(signalNextValues.empty() ? nullptr : signalNextValues.data()),	//	const deUint64*	pSignalSemaphoreValues;
	};

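	// Sparse bind with no buffer or image binds: the submission only waits on and signals the timeline semaphores.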
	const vk::VkBindSparseInfo bindInfo =
	{
		vk::VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,							//	VkStructureType								sType;
		&timeLineSubmitInfo,											//	const void*									pNext;
		static_cast<deUint32>(waitSemHandles.size()),					//	deUint32									waitSemaphoreCount;
		(waitSemHandles.empty() ? nullptr : waitSemHandles.data()),		//	const VkSemaphore*							pWaitSemaphores;
		0u,																//	deUint32									bufferBindCount;
		nullptr,														//	const VkSparseBufferMemoryBindInfo*			pBufferBinds;
		0u,																//	deUint32									imageOpaqueBindCount;
		nullptr,														//	const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
		0u,																//	deUint32									imageBindCount;
		nullptr,														//	const VkSparseImageMemoryBindInfo*			pImageBinds;
		static_cast<deUint32>(signalSemHandles.size()),					//	deUint32									signalSemaphoreCount;
		(signalSemHandles.empty() ? nullptr : signalSemHandles.data()),	//	const VkSemaphore*							pSignalSemaphores;
	};
	queueBindSparse(vkd, queue, 1u, &bindInfo);

	// If the device needs to wait and signal, check the signal semaphores have not been signaled yet.
	if (!waitSemaphores.empty() && !signalSemaphores.empty())
	{
		deUint64 value;
		for (size_t i = 0; i < signalSemaphores.size(); ++i)
		{
			value = 0;
			VK_CHECK(vkd.getSemaphoreCounterValue(device, signalSemHandles[i], &value));

			if (!value)
				TCU_FAIL("Invalid value obtained from vkGetSemaphoreCounterValue()");

			if (value != signalSemValues[i])
			{
				std::ostringstream msg;
				msg << "vkQueueBindSparse() may not have waited before signaling semaphore " << i
					<< " (expected value " << signalSemValues[i] << " but obtained " << value << ")";
				TCU_FAIL(msg.str());
			}
		}
	}

	// Signal semaphores the sparse bind command is waiting on.
	hostSignal(vkd, device, waitSemHandles, waitNextValues);

	// Wait for semaphores the sparse bind command is supposed to signal.
	if (!signalSemaphores.empty())
		hostWait(vkd, device, signalSemHandles, signalNextValues);

	VK_CHECK(vkd.deviceWaitIdle(device));
	return tcu::TestStatus::pass("Pass");
}

class SparseBindGroup : public tcu::TestCaseGroup
{
public:
	// vkQueueBindSparse combined with timeline semaphores
	SparseBindGroup (tcu::TestContext& testCtx)
		: tcu::TestCaseGroup (testCtx, "sparse_bind")
	{}

	virtual void init (void)
	{
		static const struct
		{
			deUint32	waitSems;
			deUint32	sigSems;
			std::string	name;
		} kSparseBindCases[] =
		{
			// No semaphores to wait for or signal
			{	0u,		0u,		"no_sems"},
			// Signal semaphore without waiting for any other
			{	0u,		1u,		"no_wait_sig"},
			// Wait for semaphore but do not signal any other
			{	1u,		0u,		"wait_no_sig"},
			// Wait for semaphore and signal a second one
			{	1u,		1u,		"wait_and_sig"},
			// Wait for two semaphores and signal two other ones
			{	2u,		2u,		"wait_and_sig_2"},
		};

		for (int i = 0; i < DE_LENGTH_OF_ARRAY(kSparseBindCases); ++i)
			addChild(new SparseBindCase(m_testCtx, kSparseBindCases[i].name, SparseBindParams{kSparseBindCases[i].waitSems, kSparseBindCases[i].sigSems}));
	}
};

#endif // CTS_USES_VULKANSC

} // anonymous

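// Creates the "timeline_semaphore" test group for the legacy synchronization path.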
tcu::TestCaseGroup* createTimelineSemaphoreTests (tcu::TestContext& testCtx)
{
	const SynchronizationType			type		(SynchronizationType::LEGACY);
	de::MovePtr<tcu::TestCaseGroup>		basicTests	(new tcu::TestCaseGroup(testCtx, "timeline_semaphore"));

	basicTests->addChild(new LegacyDeviceHostTests(testCtx));
	basicTests->addChild(new OneToNTests(testCtx, type));
	basicTests->addChild(new WaitBeforeSignalTests(testCtx, type));
	basicTests->addChild(new WaitTests(testCtx, type));
#ifndef CTS_USES_VULKANSC
	basicTests->addChild(new SparseBindGroup(testCtx));
#endif // CTS_USES_VULKANSC

	return basicTests.release();
}

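// Creates the "timeline_semaphore" test group for the synchronization2 path (the sparse bind tests are only added to the legacy group above).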
tcu::TestCaseGroup* createSynchronization2TimelineSemaphoreTests(tcu::TestContext& testCtx)
{
	const SynchronizationType			type		(SynchronizationType::SYNCHRONIZATION2);
	de::MovePtr<tcu::TestCaseGroup>		basicTests	(new tcu::TestCaseGroup(testCtx, "timeline_semaphore"));

	basicTests->addChild(new Sytnchronization2DeviceHostTests(testCtx));
	basicTests->addChild(new OneToNTests(testCtx, type));
	basicTests->addChild(new WaitBeforeSignalTests(testCtx, type));
	basicTests->addChild(new WaitTests(testCtx, type));

	return basicTests.release();
}

} // synchronization
} // vkt