1 
2 /*------------------------------------------------------------------------
3  * Vulkan Conformance Tests
4  * ------------------------
5  *
6  * Copyright (c) 2019 The Khronos Group Inc.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Signal ordering tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktSynchronizationSignalOrderTests.hpp"
26 #include "vktSynchronizationOperation.hpp"
27 #include "vktSynchronizationOperationTestData.hpp"
28 #include "vktSynchronizationOperationResources.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktSynchronizationUtil.hpp"
31 #include "vktExternalMemoryUtil.hpp"
32 #include "vktCustomInstancesDevices.hpp"
33 #include "vkBarrierUtil.hpp"
34 
35 #include "vkDefs.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkImageUtil.hpp"
40 #include "vkRef.hpp"
41 #include "vkTypeUtil.hpp"
42 
43 #include "tcuTestLog.hpp"
44 #include "tcuCommandLine.hpp"
45 
46 #include "deRandom.hpp"
47 #include "deThread.hpp"
48 #include "deUniquePtr.hpp"
49 
50 #include <limits>
51 #include <set>
52 
53 namespace vkt
54 {
55 namespace synchronization
56 {
57 namespace
58 {
59 
60 using namespace vk;
61 using namespace vkt::ExternalMemoryUtil;
62 using de::MovePtr;
63 using de::SharedPtr;
64 using de::UniquePtr;
65 
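// Helpers that wrap Vulkan handle wrappers (Move<T>) and raw/owned pointers in reference-counted SharedPtrs.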
66 template<typename T>
67 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
68 {
69 	return SharedPtr<Move<T> >(new Move<T>(move));
70 }
71 
72 template<typename T>
73 inline SharedPtr<T> makeSharedPtr (de::MovePtr<T> move)
74 {
75 	return SharedPtr<T>(move.release());
76 }
77 
78 template<typename T>
79 inline SharedPtr<T> makeSharedPtr (T* ptr)
80 {
81 	return SharedPtr<T>(ptr);
82 }
83 
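// Signals the given timeline semaphore to the requested value from the host via vkSignalSemaphore.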
84 void hostSignal (const DeviceInterface& vk, const VkDevice& device, VkSemaphore semaphore, const deUint64 timelineValue)
85 {
86 	VkSemaphoreSignalInfoKHR	ssi	=
87 	{
88 		VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,	// VkStructureType				sType;
89 		DE_NULL,									// const void*					pNext;
90 		semaphore,									// VkSemaphore					semaphore;
91 		timelineValue,								// deUint64						value;
92 	};
93 
94 	VK_CHECK(vk.signalSemaphore(device, &ssi));
95 }
96 
97 // Waits for the device to be idle when destroying the guard object.
98 class DeviceWaitIdleGuard
99 {
100 public:
101 	DeviceWaitIdleGuard (const DeviceInterface& vkd, const VkDevice device)
102 		: m_vkd(vkd), m_device(device)
103 		{}
104 
105 	~DeviceWaitIdleGuard ()
106 	{
107 		VK_CHECK(m_vkd.deviceWaitIdle(m_device));
108 	}
109 
110 protected:
111 	const DeviceInterface&	m_vkd;
112 	const VkDevice			m_device;
113 };
114 
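// Creates a separate logical device, enabling timeline semaphore, external semaphore/memory and
// synchronization2 functionality when available, and requesting every queue of every queue family.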
115 Move<VkDevice> createTestDevice (const Context& context)
116 {
117 	const float									priority				= 0.0f;
118 	const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(), context.getPhysicalDevice());
119 	std::vector<deUint32>						queueFamilyIndices		(queueFamilyProperties.size(), 0xFFFFFFFFu);
120 	std::vector<const char*>					extensions;
121 
122 	VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
123 	VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
124 	VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
125 	void**										nextPtr						= &createPhysicalFeature.pNext;
126 
127 	if (context.isDeviceFunctionalitySupported("VK_KHR_timeline_semaphore"))
128 	{
129 		extensions.push_back("VK_KHR_timeline_semaphore");
130 		addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
131 	}
132 
133 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_semaphore"))
134 		extensions.push_back("VK_KHR_external_semaphore");
135 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_memory"))
136 		extensions.push_back("VK_KHR_external_memory");
137 
138 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
139 		extensions.push_back("VK_KHR_external_semaphore_fd");
140 
141 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
142 		extensions.push_back("VK_KHR_external_semaphore_win32");
143 
144 	if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
145 	{
146 		extensions.push_back("VK_KHR_synchronization2");
147 		addToChainVulkanStructure(&nextPtr, synchronization2Features);
148 	}
149 
150 	try
151 	{
152 		deUint32 maxQueueCount = 1;
153 		for (const VkQueueFamilyProperties& qfp : queueFamilyProperties)
154 			maxQueueCount = deMaxu32(qfp.queueCount, maxQueueCount);
155 
156 		std::vector<float>						queuePriorities(maxQueueCount, priority);
157 		std::vector<VkDeviceQueueCreateInfo>	queues;
158 
159 		for (size_t ndx = 0; ndx < queueFamilyProperties.size(); ndx++)
160 		{
161 			const VkDeviceQueueCreateInfo	createInfo	=
162 			{
163 				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
164 				DE_NULL,
165 				0u,
166 
167 				(deUint32)ndx,
168 				queueFamilyProperties[ndx].queueCount,
169 				queuePriorities.data()
170 			};
171 
172 			queues.push_back(createInfo);
173 		}
174 
175 		const VkDeviceCreateInfo				createInfo				=
176 		{
177 			VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
178 			&createPhysicalFeature,
179 			0u,
180 
181 			(deUint32)queues.size(),
182 			&queues[0],
183 
184 			0u,
185 			DE_NULL,
186 
187 			(deUint32)extensions.size(),
188 			extensions.empty() ? DE_NULL : &extensions[0],
189 			0u
190 		};
191 
192 		const auto validation = context.getTestContext().getCommandLine().isValidationEnabled();
193 		return createCustomDevice(validation, context.getPlatformInterface(), context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &createInfo);
194 	}
195 	catch (const vk::Error& error)
196 	{
197 		if (error.getError() == VK_ERROR_EXTENSION_NOT_PRESENT)
198 			TCU_THROW(NotSupportedError, "Required extensions not supported");
199 		else
200 			throw;
201 	}
202 }
203 
204 // Class to wrap a singleton instance and device
205 class SingletonDevice
206 {
207 	SingletonDevice	(const Context& context)
208 		: m_logicalDevice	(createTestDevice(context))
209 	{
210 	}
211 
212 public:
213 
214 	static const Unique<vk::VkDevice>& getDevice(const Context& context)
215 	{
216 		if (!m_singletonDevice)
217 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
218 
219 		DE_ASSERT(m_singletonDevice);
220 		return m_singletonDevice->m_logicalDevice;
221 	}
222 
223 	static void destroy()
224 	{
225 		m_singletonDevice.clear();
226 	}
227 
228 private:
229 	const Unique<vk::VkDevice>					m_logicalDevice;
230 
231 	static SharedPtr<SingletonDevice>	m_singletonDevice;
232 };
233 SharedPtr<SingletonDevice>		SingletonDevice::m_singletonDevice;
234 
235 static void cleanupGroup ()
236 {
237 	// Destroy singleton object
238 	SingletonDevice::destroy();
239 }
240 
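// Allocation wrapper that takes ownership of a VkDeviceMemory handle and frees it on destruction.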
241 class SimpleAllocation : public Allocation
242 {
243 public:
244 	SimpleAllocation	(const DeviceInterface&	vkd,
245 						 VkDevice				device,
246 						 const VkDeviceMemory	memory);
247 	~SimpleAllocation	(void);
248 
249 private:
250 	const DeviceInterface&	m_vkd;
251 	const VkDevice			m_device;
252 };
253 
254 SimpleAllocation::SimpleAllocation (const DeviceInterface&	vkd,
255 									VkDevice				device,
256 									const VkDeviceMemory	memory)
257 	: Allocation	(memory, 0, DE_NULL)
258 	, m_vkd			(vkd)
259 	, m_device		(device)
260 {
261 }
262 
263 SimpleAllocation::~SimpleAllocation (void)
264 {
265 	m_vkd.freeMemory(m_device, getMemory(), DE_NULL);
266 }
267 
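// Queries memory requirements through the vkGetBufferMemoryRequirements2 / vkGetImageMemoryRequirements2 entry points.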
268 vk::VkMemoryRequirements getMemoryRequirements (const DeviceInterface&				vkd,
269 												 VkDevice							device,
270 												 VkBuffer							buffer)
271 {
272 	const VkBufferMemoryRequirementsInfo2	requirementInfo =
273 	{
274 		VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
275 		DE_NULL,
276 		buffer
277 	};
278 	VkMemoryRequirements2					requirements	=
279 	{
280 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
281 		DE_NULL,
282 		{ 0u, 0u, 0u, }
283 	};
284 	vkd.getBufferMemoryRequirements2(device, &requirementInfo, &requirements);
285 	return requirements.memoryRequirements;
286 }
287 
288 vk::VkMemoryRequirements getMemoryRequirements(const DeviceInterface&				vkd,
289 												VkDevice							device,
290 												VkImage								image)
291 {
292 	const VkImageMemoryRequirementsInfo2	requirementInfo =
293 	{
294 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
295 		DE_NULL,
296 		image
297 	};
298 	VkMemoryRequirements2					requirements =
299 	{
300 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
301 		DE_NULL,
302 		{ 0u, 0u, 0u, }
303 	};
304 	vkd.getImageMemoryRequirements2(device, &requirementInfo, &requirements);
305 
306 	return requirements.memoryRequirements;
307 }
308 
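// Imports memory from the given native handle (using a dedicated allocation when a valid buffer/image is provided)
// and binds it to that resource.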
309 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
310 										 VkDevice								device,
311 										 VkBuffer								buffer,
312 										 NativeHandle&							nativeHandle,
313 										 VkExternalMemoryHandleTypeFlagBits		externalType,
314 										 const deUint32							exportedMemoryTypeIndex)
315 {
316 	const VkMemoryRequirements	requirements			= getBufferMemoryRequirements(vkd, device, buffer);
317 	Move<VkDeviceMemory>		memory;
318 
319 	if (!!buffer)
320 		memory = importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
321 	else
322 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
323 
324 	VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
325 
326 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
327 }
328 
329 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
330 										 VkDevice								device,
331 										 VkImage								image,
332 										 NativeHandle&							nativeHandle,
333 										 VkExternalMemoryHandleTypeFlagBits		externalType,
334 										 deUint32								exportedMemoryTypeIndex)
335 {
336 	const VkMemoryRequirements	requirements	= getImageMemoryRequirements(vkd, device, image);
337 	Move<VkDeviceMemory>		memory;
338 
339 	if (!!image)
340 		memory = importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
341 	else
342 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
343 
344 	VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
345 
346 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
347 }
348 
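// Per-iteration state for timeline tests: the operation, the queue that runs it and the timeline value it signals
// (randomly incremented from the previous value).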
349 struct QueueTimelineIteration
350 {
351 	QueueTimelineIteration(const SharedPtr<OperationSupport>&	_opSupport,
352 						   deUint64								lastValue,
353 						   VkQueue								_queue,
354 						   deUint32								_queueFamilyIdx,
355 						   de::Random&							rng)
356 		: opSupport(_opSupport)
357 		, queue(_queue)
358 		, queueFamilyIdx(_queueFamilyIdx)
359 	{
360 		timelineValue	= lastValue + rng.getInt(1, 100);
361 	}
362 	~QueueTimelineIteration() {}
363 
364 	SharedPtr<OperationSupport>	opSupport;
365 	VkQueue						queue;
366 	deUint32					queueFamilyIdx;
367 	deUint64					timelineValue;
368 	SharedPtr<Operation>		op;
369 };
370 
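// Recreates an image or buffer resource on the importing device and binds it to memory imported from the given native handle.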
371 de::MovePtr<Resource> importResource (const DeviceInterface&				vkd,
372 									  VkDevice								device,
373 									  const ResourceDescription&			resourceDesc,
374 									  const deUint32						queueFamilyIndex,
375 									  const OperationSupport&				readOp,
376 									  const OperationSupport&				writeOp,
377 									  NativeHandle&							nativeHandle,
378 									  VkExternalMemoryHandleTypeFlagBits	externalType,
379 									  deUint32								exportedMemoryTypeIndex)
380 {
381 	if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
382 	{
383 		const VkExtent3D					extent					=
384 		{
385 			(deUint32)resourceDesc.size.x(),
386 			de::max(1u, (deUint32)resourceDesc.size.y()),
387 			de::max(1u, (deUint32)resourceDesc.size.z())
388 		};
389 		const VkImageSubresourceRange	subresourceRange		=
390 		{
391 			resourceDesc.imageAspect,
392 			0u,
393 			1u,
394 			0u,
395 			1u
396 		};
397 		const VkImageSubresourceLayers	subresourceLayers		=
398 		{
399 			resourceDesc.imageAspect,
400 			0u,
401 			0u,
402 			1u
403 		};
404 		const VkExternalMemoryImageCreateInfo externalInfo =
405 		{
406 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
407 			DE_NULL,
408 			(VkExternalMemoryHandleTypeFlags)externalType
409 		};
410 		const VkImageTiling				tiling					= VK_IMAGE_TILING_OPTIMAL;
411 		const VkImageCreateInfo			createInfo				=
412 		{
413 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
414 			&externalInfo,
415 			0u,
416 
417 			resourceDesc.imageType,
418 			resourceDesc.imageFormat,
419 			extent,
420 			1u,
421 			1u,
422 			resourceDesc.imageSamples,
423 			tiling,
424 			readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
425 			VK_SHARING_MODE_EXCLUSIVE,
426 
427 			1u,
428 			&queueFamilyIndex,
429 			VK_IMAGE_LAYOUT_UNDEFINED
430 		};
431 
432 		Move<VkImage>			image		= createImage(vkd, device, &createInfo);
433 		MovePtr<Allocation>		allocation	= importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex);
434 
435 		return MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
436 	}
437 	else
438 	{
439 		const VkDeviceSize						offset			= 0u;
440 		const VkDeviceSize						size			= static_cast<VkDeviceSize>(resourceDesc.size.x());
441 		const VkBufferUsageFlags				usage			= readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags();
442 		const VkExternalMemoryBufferCreateInfo	externalInfo	=
443 		{
444 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
445 			DE_NULL,
446 			(VkExternalMemoryHandleTypeFlags)externalType
447 		};
448 		const VkBufferCreateInfo				createInfo		=
449 		{
450 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
451 			&externalInfo,
452 			0u,
453 
454 			size,
455 			usage,
456 			VK_SHARING_MODE_EXCLUSIVE,
457 			1u,
458 			&queueFamilyIndex
459 		};
460 		Move<VkBuffer>							buffer		= createBuffer(vkd, device, &createInfo);
461 		MovePtr<Allocation>						allocation	= importAndBindMemory(vkd,
462 																				  device,
463 																				  *buffer,
464 																				  nativeHandle,
465 																				  externalType,
466 																				  exportedMemoryTypeIndex);
467 
468 		return MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
469 	}
470 }
471 
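// Per-iteration state for the shared (cross-device) variant: resource A/B and the write/read operations acting on them.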
472 struct QueueSubmitOrderSharedIteration
473 {
474 	QueueSubmitOrderSharedIteration() {}
475 	~QueueSubmitOrderSharedIteration() {}
476 
477 	SharedPtr<Resource>			resourceA;
478 	SharedPtr<Resource>			resourceB;
479 
480 	SharedPtr<Operation>		writeOp;
481 	SharedPtr<Operation>		readOp;
482 };
483 
484 // Verifies the signaling order of the semaphores in multiple
485 // VkSubmitInfo structures given to vkQueueSubmit(), with queueA & queueB
486 // coming from different VkDevices.
487 //
488 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
489 // vkQueueSubmit(queueB, [read0-6])
490 //
491 // With read0-6 waiting on write6, all the data should be available
492 // for reading given that signal operations are supposed to happen in
493 // order.
494 class QueueSubmitSignalOrderSharedTestInstance : public TestInstance
495 {
496 public:
497 	QueueSubmitSignalOrderSharedTestInstance (Context&									context,
498 											  SynchronizationType						type,
499 											  const SharedPtr<OperationSupport>			writeOpSupport,
500 											  const SharedPtr<OperationSupport>			readOpSupport,
501 											  const ResourceDescription&				resourceDesc,
502 											  VkExternalMemoryHandleTypeFlagBits		memoryHandleType,
503 											  VkSemaphoreType							semaphoreType,
504 											  VkExternalSemaphoreHandleTypeFlagBits		semaphoreHandleType,
505 											  PipelineCacheData&						pipelineCacheData)
506 		: TestInstance			(context)
507 		, m_type				(type)
508 		, m_writeOpSupport		(writeOpSupport)
509 		, m_readOpSupport		(readOpSupport)
510 		, m_resourceDesc		(resourceDesc)
511 		, m_memoryHandleType	(memoryHandleType)
512 		, m_semaphoreType		(semaphoreType)
513 		, m_semaphoreHandleType	(semaphoreHandleType)
514 		, m_pipelineCacheData	(pipelineCacheData)
515 		, m_rng					(1234)
516 
517 	{
518 		const InstanceInterface&					vki					= context.getInstanceInterface();
519 		const VkSemaphoreTypeCreateInfoKHR			semaphoreTypeInfo	=
520 		{
521 			VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
522 			DE_NULL,
523 			semaphoreType,
524 			0,
525 		};
526 		const VkPhysicalDeviceExternalSemaphoreInfo	info				=
527 		{
528 			VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
529 			&semaphoreTypeInfo,
530 			semaphoreHandleType
531 		};
532 		VkExternalSemaphoreProperties				properties			=
533 		{
534 			VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
535 			DE_NULL,
536 			0u,
537 			0u,
538 			0u
539 		};
540 
541 		vki.getPhysicalDeviceExternalSemaphoreProperties(context.getPhysicalDevice(), &info, &properties);
542 
543 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
544 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
545 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
546 
547 		if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
548 			|| (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
549 			TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
550 
551 		if (!isResourceExportable())
552 			TCU_THROW(NotSupportedError, "Resource not exportable");
553 
554 	}
555 
556 	Move<VkImage> createImage (const vk::DeviceInterface&	vkd,
557 							   vk::VkDevice					device,
558 							   const vk::VkExtent3D&		extent,
559 							   deUint32						queueFamilyIndex,
560 							   vk::VkImageTiling			tiling)
561 	{
562 		const VkExternalMemoryImageCreateInfo externalInfo =
563 		{
564 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
565 			DE_NULL,
566 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
567 		};
568 		const VkImageCreateInfo createInfo =
569 		{
570 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
571 			&externalInfo,
572 			0u,
573 
574 			m_resourceDesc.imageType,
575 			m_resourceDesc.imageFormat,
576 			extent,
577 			1u,
578 			1u,
579 			m_resourceDesc.imageSamples,
580 			tiling,
581 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
582 			VK_SHARING_MODE_EXCLUSIVE,
583 
584 			1u,
585 			&queueFamilyIndex,
586 			VK_IMAGE_LAYOUT_UNDEFINED
587 		};
588 
589 		return vk::createImage(vkd, device, &createInfo);
590 	}
591 
592 	Move<VkBuffer> createBuffer (const vk::DeviceInterface&		vkd,
593 								 vk::VkDevice					device,
594 								 const vk::VkDeviceSize&		size,
595 								 deUint32						queueFamilyIndex)
596 	{
597 		const VkExternalMemoryBufferCreateInfo	externalInfo =
598 		{
599 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
600 			DE_NULL,
601 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
602 		};
603 		const VkBufferCreateInfo				createInfo =
604 		{
605 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
606 			&externalInfo,
607 			0u,
608 
609 			size,
610 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
611 			VK_SHARING_MODE_EXCLUSIVE,
612 			1u,
613 			&queueFamilyIndex
614 		};
615 		return vk::createBuffer(vkd, device, &createInfo);
616 	}
617 
618 	tcu::TestStatus iterate (void)
619 	{
620 		// We're using 2 devices to make sure we have 2 queues even on
621 		// implementations that only have a single queue.
622 		const bool											isTimelineSemaphore			(m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
623 		const VkDevice&										deviceA						= m_context.getDevice();
624 		const Unique<VkDevice>&								deviceB						(SingletonDevice::getDevice(m_context));
625 		const DeviceInterface&								vkA							= m_context.getDeviceInterface();
626 		const DeviceDriver									vkB							(m_context.getPlatformInterface(), m_context.getInstance(), *deviceB, m_context.getUsedApiVersion());
627 		UniquePtr<SimpleAllocator>							allocatorA					(new SimpleAllocator(vkA, deviceA, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
628 																																								 m_context.getPhysicalDevice())));
629 		UniquePtr<SimpleAllocator>							allocatorB					(new SimpleAllocator(vkB, *deviceB, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
630 																																								  m_context.getPhysicalDevice())));
631 		UniquePtr<OperationContext>							operationContextA			(new OperationContext(m_context, m_type, vkA, deviceA, *allocatorA, m_pipelineCacheData));
632 		UniquePtr<OperationContext>							operationContextB			(new OperationContext(m_context, m_type, vkB, *deviceB, *allocatorB, m_pipelineCacheData));
633 		const deUint32										universalQueueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
634 		const VkQueue										queueA						= m_context.getUniversalQueue();
635 		const VkQueue										queueB						= getDeviceQueue(vkB, *deviceB, m_context.getUniversalQueueFamilyIndex(), 0);
636 		Unique<VkFence>										fenceA						(createFence(vkA, deviceA));
637 		Unique<VkFence>										fenceB						(createFence(vkB, *deviceB));
638 		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vkA, deviceA, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
639 		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vkB, *deviceB, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
640 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
641 		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
642 		std::vector<VkCommandBuffer>						cmdBuffersA;
643 		VkCommandBuffer										cmdBufferB;
644 		std::vector<Move<VkSemaphore> >						semaphoresA;
645 		std::vector<Move<VkSemaphore> >						semaphoresB;
646 		std::vector<VkSemaphore>							semaphoreHandlesA;
647 		std::vector<VkSemaphore>							semaphoreHandlesB;
648 		std::vector<deUint64>								timelineValuesA;
649 		std::vector<deUint64>								timelineValuesB;
650 		std::vector<QueueSubmitOrderSharedIteration>		iterations(12);
651 		std::vector<VkPipelineStageFlags2KHR>				stageBits;
652 
653 		// These guards will wait for the device to be idle before tearing down the resources above.
654 		const DeviceWaitIdleGuard							idleGuardA					(vkA, deviceA);
655 		const DeviceWaitIdleGuard							idleGuardB					(vkB, *deviceB);
656 
657 		// Create a dozen sets of write/read operations.
658 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
659 		{
660 			QueueSubmitOrderSharedIteration&	iter				= iterations[iterIdx];
661 			deUint32							memoryTypeIndex;
662 			NativeHandle						nativeMemoryHandle;
663 
664 			if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
665 			{
666 				const VkExtent3D				extent =
667 				{
668 					(deUint32)m_resourceDesc.size.x(),
669 					de::max(1u, (deUint32)m_resourceDesc.size.y()),
670 					de::max(1u, (deUint32)m_resourceDesc.size.z())
671 				};
672 				const VkImageSubresourceRange	subresourceRange =
673 				{
674 					m_resourceDesc.imageAspect,
675 					0u,
676 					1u,
677 					0u,
678 					1u
679 				};
680 				const VkImageSubresourceLayers	subresourceLayers =
681 				{
682 					m_resourceDesc.imageAspect,
683 					0u,
684 					0u,
685 					1u
686 				};
687 
688 				const vk::VkImageTiling					tiling			= VK_IMAGE_TILING_OPTIMAL;
689 				Move<VkImage>							image			= createImage(vkA, deviceA, extent, universalQueueFamilyIndex, tiling);
690 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *image);
691 														memoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
692 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *image);
693 
694 				VK_CHECK(vkA.bindImageMemory(deviceA, *image, *memory, 0u));
695 
696 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
697 				iter.resourceA = makeSharedPtr(new Resource(image, allocation, extent, m_resourceDesc.imageType, m_resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
698 			}
699 			else
700 			{
701 				const VkDeviceSize						offset			= 0u;
702 				const VkDeviceSize						size			= static_cast<VkDeviceSize>(m_resourceDesc.size.x());
703 				Move<VkBuffer>							buffer			= createBuffer(vkA, deviceA, size, universalQueueFamilyIndex);
704 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *buffer);
705 														memoryTypeIndex	= chooseMemoryType(requirements.memoryTypeBits);
706 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *buffer);
707 
708 				VK_CHECK(vkA.bindBufferMemory(deviceA, *buffer, *memory, 0u));
709 
710 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
711 				iter.resourceA = makeSharedPtr(new Resource(m_resourceDesc.type, buffer, allocation, offset, size));
712 			}
713 
714 			getMemoryNative(vkA, deviceA, iter.resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
715 			iter.resourceB	= makeSharedPtr(importResource(vkB, *deviceB,
716 														   m_resourceDesc,
717 														   universalQueueFamilyIndex,
718 														   *m_readOpSupport,
719 														   *m_writeOpSupport,
720 														   nativeMemoryHandle,
721 														   m_memoryHandleType,
722 														   memoryTypeIndex));
723 
724 			iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*operationContextA,
725 																 *iter.resourceA));
726 			iter.readOp = makeSharedPtr(m_readOpSupport->build(*operationContextB,
727 															   *iter.resourceB));
728 		}
729 
730 		// Record each write operation into its own command buffer.
731 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
732 		{
733 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
734 			const Resource&						resource	= *iter.resourceA;
735 			const SyncInfo						writeSync	= iter.writeOp->getOutSyncInfo();
736 			const SyncInfo						readSync	= iter.readOp->getInSyncInfo();
737 
738 			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vkA, deviceA, *cmdPoolA)));
739 
740 			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
741 
742 			beginCommandBuffer(vkA, cmdBuffersA.back());
743 
744 			iter.writeOp->recordCommands(cmdBuffersA.back());
745 
746 			{
747 				SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore);
748 
749 				if (resource.getType() == RESOURCE_TYPE_IMAGE)
750 				{
751 					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
752 					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
753 
754 					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
755 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
756 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
757 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
758 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
759 						writeSync.imageLayout,								// VkImageLayout					oldLayout
760 						readSync.imageLayout,								// VkImageLayout					newLayout
761 						resource.getImage().handle,							// VkImage							image
762 						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
763 					);
764 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
765 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
766 				}
767 				else
768 				{
769 					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
770 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
771 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
772 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
773 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
774 						resource.getBuffer().handle,						// VkBuffer							buffer
775 						0,													// VkDeviceSize						offset
776 						VK_WHOLE_SIZE										// VkDeviceSize						size
777 					);
778 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
779 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
780 				}
781 
782 				stageBits.push_back(writeSync.stageMask);
783 			}
784 
785 			endCommandBuffer(vkA, cmdBuffersA.back());
786 
787 			addSemaphore(vkA, deviceA, semaphoresA, semaphoreHandlesA, timelineValuesA, iterIdx == (iterations.size() - 1), 2u);
788 		}
789 
790 		DE_ASSERT(stageBits.size() == iterations.size());
791 		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
792 
793 		// Record all read operations into a single command buffer and record the union of their stage masks.
794 		VkPipelineStageFlags2KHR readStages = 0;
795 		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vkB, *deviceB, *cmdPoolB));
796 		cmdBufferB = **(ptrCmdBufferB);
797 		beginCommandBuffer(vkB, cmdBufferB);
798 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
799 		{
800 			QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
801 			readStages |= iter.readOp->getInSyncInfo().stageMask;
802 			iter.readOp->recordCommands(cmdBufferB);
803 		}
804 		endCommandBuffer(vkB, cmdBufferB);
805 
806 		// Export the last semaphore for use on deviceB and create another semaphore to signal on deviceB.
807 		{
808 			VkSemaphore		lastSemaphoreA			= semaphoreHandlesA.back();
809 			NativeHandle	nativeSemaphoreHandle;
810 
811 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, true, timelineValuesA.back());
812 
813 			getSemaphoreNative(vkA, deviceA, lastSemaphoreA, m_semaphoreHandleType, nativeSemaphoreHandle);
814 			importSemaphore(vkB, *deviceB, semaphoreHandlesB.back(), m_semaphoreHandleType, nativeSemaphoreHandle, 0u);
815 
816 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, false, timelineValuesA.back());
817 		}
818 
819 		// Submit writes, each in its own VkSubmitInfo. With binary
820 		// semaphores, submissions don't wait on anything; with
821 		// timeline semaphores, submissions wait on a host signal
822 		// operation done below.
823 		{
824 			std::vector<VkCommandBufferSubmitInfoKHR>	cmdBuffersInfo				(iterations.size(), makeCommonCommandBufferSubmitInfo(0u));
825 			std::vector<VkSemaphoreSubmitInfoKHR>		waitSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
826 			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
827 			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore, static_cast<deUint32>(iterations.size()));
828 
829 			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
830 			{
831 				waitSemaphoreSubmitInfos[iterIdx].semaphore		= semaphoreHandlesA.front();
832 				waitSemaphoreSubmitInfos[iterIdx].stageMask		= stageBits[iterIdx];
833 				signalSemaphoreSubmitInfos[iterIdx].semaphore	= semaphoreHandlesA[iterIdx];
834 				signalSemaphoreSubmitInfos[iterIdx].value		= timelineValuesA[iterIdx];
835 				cmdBuffersInfo[iterIdx].commandBuffer			= cmdBuffersA[iterIdx];
836 
837 				synchronizationWrapper->addSubmitInfo(
838 					isTimelineSemaphore,
839 					isTimelineSemaphore ? &waitSemaphoreSubmitInfos[iterIdx] : DE_NULL,
840 					1u,
841 					&cmdBuffersInfo[iterIdx],
842 					1u,
843 					&signalSemaphoreSubmitInfos[iterIdx],
844 					isTimelineSemaphore,
845 					isTimelineSemaphore
846 				);
847 			}
848 
849 			VK_CHECK(synchronizationWrapper->queueSubmit(queueA, *fenceA));
850 		}
851 
852 		// Submit reads, waiting only on the last write
853 		// operation; the ordering of signal operations should guarantee
854 		// that when the read operations kick in, all writes have completed.
855 		{
856 			VkCommandBufferSubmitInfoKHR	cmdBuffersInfo				= makeCommonCommandBufferSubmitInfo(cmdBufferB);
857 			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.front(), timelineValuesA.back(), readStages);
858 			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
859 			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vkB, isTimelineSemaphore);
860 
861 			synchronizationWrapper->addSubmitInfo(
862 				1u,
863 				&waitSemaphoreSubmitInfo,
864 				1u,
865 				&cmdBuffersInfo,
866 				1u,
867 				&signalSemaphoreSubmitInfo,
868 				isTimelineSemaphore,
869 				isTimelineSemaphore
870 			);
871 
872 			VK_CHECK(synchronizationWrapper->queueSubmit(queueB, *fenceB));
873 
874 			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
875 			{
876 				const VkSemaphoreWaitInfo		waitInfo	=
877 				{
878 					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
879 					DE_NULL,								// const void*				pNext;
880 					0u,										// VkSemaphoreWaitFlagsKHR	flags;
881 					1u,										// deUint32					semaphoreCount;
882 					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
883 					&timelineValuesB.back(),				// const deUint64*			pValues;
884 				};
885 
886 				// Unblock the whole lot.
887 				hostSignal(vkA, deviceA, semaphoreHandlesA.front(), 2);
888 
889 				VK_CHECK(vkB.waitSemaphores(*deviceB, &waitInfo, ~0ull));
890 			}
891 			else
892 			{
893 				VK_CHECK(vkB.waitForFences(*deviceB, 1, &fenceB.get(), VK_TRUE, ~0ull));
894 			}
895 		}
896 
897 		// Verify the result of the operations.
898 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
899 		{
900 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
901 			const Data							expected	= iter.writeOp->getData();
902 			const Data							actual		= iter.readOp->getData();
903 
904 			if (isIndirectBuffer(iter.resourceA->getType()))
905 			{
906 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
907 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
908 
909 				if (actualValue < expectedValue)
910 					return tcu::TestStatus::fail("Counter value is smaller than expected");
911 			}
912 			else
913 			{
914 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
915 					return tcu::TestStatus::fail("Memory contents don't match");
916 			}
917 		}
918 
919 		return tcu::TestStatus::pass("Success");
920 	}
921 
922 private:
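	// Appends a semaphore handle and a randomly increased timeline value; for timeline semaphores a single
	// exportable semaphore is created once and reused across iterations.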
923 	void addSemaphore (const DeviceInterface&			vk,
924 					   VkDevice							device,
925 					   std::vector<Move<VkSemaphore> >&	semaphores,
926 					   std::vector<VkSemaphore>&		semaphoreHandles,
927 					   std::vector<deUint64>&			timelineValues,
928 					   bool								exportable,
929 					   deUint64							firstTimelineValue)
930 	{
931 		Move<VkSemaphore>	semaphore;
932 
933 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
934 		{
935 			// Only allocate a single exportable semaphore.
936 			if (semaphores.empty())
937 			{
938 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
939 			}
940 		}
941 		else
942 		{
943 			if (exportable)
944 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
945 			else
946 				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
947 		}
948 
949 		semaphoreHandles.push_back(*semaphores.back());
950 		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
951 	}
952 
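	// Checks whether the test resource can be exported from one device and imported into another
	// with the selected external memory handle type.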
953 	bool isResourceExportable ()
954 	{
955 		const InstanceInterface&					vki				= m_context.getInstanceInterface();
956 		VkPhysicalDevice							physicalDevice	= m_context.getPhysicalDevice();
957 
958 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
959 		{
960 			const VkPhysicalDeviceExternalImageFormatInfo	externalInfo		=
961 			{
962 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
963 				DE_NULL,
964 				m_memoryHandleType
965 			};
966 			const VkPhysicalDeviceImageFormatInfo2			imageFormatInfo		=
967 			{
968 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
969 				&externalInfo,
970 				m_resourceDesc.imageFormat,
971 				m_resourceDesc.imageType,
972 				VK_IMAGE_TILING_OPTIMAL,
973 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
974 				0u
975 			};
976 			VkExternalImageFormatProperties					externalProperties	=
977 			{
978 				VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
979 				DE_NULL,
980 				{ 0u, 0u, 0u }
981 			};
982 			VkImageFormatProperties2						formatProperties	=
983 			{
984 				VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
985 				&externalProperties,
986 				{
987 					{ 0u, 0u, 0u },
988 					0u,
989 					0u,
990 					0u,
991 					0u,
992 				}
993 			};
994 
995 			{
996 				const VkResult res = vki.getPhysicalDeviceImageFormatProperties2(physicalDevice, &imageFormatInfo, &formatProperties);
997 
998 				if (res == VK_ERROR_FORMAT_NOT_SUPPORTED)
999 					return false;
1000 
1001 				VK_CHECK(res); // Check other errors
1002 			}
1003 
1004 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
1005 				return false;
1006 
1007 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
1008 				return false;
1009 
1010 			return true;
1011 		}
1012 		else
1013 		{
1014 			const VkPhysicalDeviceExternalBufferInfo	info	=
1015 			{
1016 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
1017 				DE_NULL,
1018 
1019 				0u,
1020 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
1021 				m_memoryHandleType
1022 			};
1023 			VkExternalBufferProperties					properties			=
1024 			{
1025 				VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
1026 				DE_NULL,
1027 				{ 0u, 0u, 0u}
1028 			};
1029 			vki.getPhysicalDeviceExternalBufferProperties(physicalDevice, &info, &properties);
1030 
1031 			if ((properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
1032 				|| (properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
1033 				return false;
1034 
1035 			return true;
1036 		}
1037 	}
1038 
1039 	SynchronizationType							m_type;
1040 	SharedPtr<OperationSupport>					m_writeOpSupport;
1041 	SharedPtr<OperationSupport>					m_readOpSupport;
1042 	const ResourceDescription&					m_resourceDesc;
1043 	VkExternalMemoryHandleTypeFlagBits			m_memoryHandleType;
1044 	VkSemaphoreType								m_semaphoreType;
1045 	VkExternalSemaphoreHandleTypeFlagBits		m_semaphoreHandleType;
1046 	PipelineCacheData&							m_pipelineCacheData;
1047 	de::Random									m_rng;
1048 };
1049 
1050 class QueueSubmitSignalOrderSharedTestCase : public TestCase
1051 {
1052 public:
1053 	QueueSubmitSignalOrderSharedTestCase (tcu::TestContext&						testCtx,
1054 										  SynchronizationType					type,
1055 										  const std::string&					name,
1056 										  OperationName							writeOp,
1057 										  OperationName							readOp,
1058 										  const ResourceDescription&			resourceDesc,
1059 										  VkExternalMemoryHandleTypeFlagBits	memoryHandleType,
1060 										  VkSemaphoreType						semaphoreType,
1061 										  VkExternalSemaphoreHandleTypeFlagBits	semaphoreHandleType,
1062 										  PipelineCacheData&					pipelineCacheData)
1063 		: TestCase				(testCtx, name.c_str())
1064 		, m_type				(type)
1065 		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
1066 		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
1067 		, m_resourceDesc		(resourceDesc)
1068 		, m_memoryHandleType	(memoryHandleType)
1069 		, m_semaphoreType		(semaphoreType)
1070 		, m_semaphoreHandleType	(semaphoreHandleType)
1071 		, m_pipelineCacheData	(pipelineCacheData)
1072 	{
1073 	}
1074 
1075 	virtual void checkSupport(Context& context) const
1076 	{
1077 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1078 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1079 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1080 
1081 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT ||
1082 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) &&
1083 			 !context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
1084 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_fd not supported");
1085 
1086 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
1087 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT) &&
1088 			!context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
1089 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_win32 not supported");
1090 
1091 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1092 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1093 	}
1094 
1095 	TestInstance* createInstance (Context& context) const
1096 	{
1097 		return new QueueSubmitSignalOrderSharedTestInstance(context,
1098 															m_type,
1099 															m_writeOpSupport,
1100 															m_readOpSupport,
1101 															m_resourceDesc,
1102 															m_memoryHandleType,
1103 															m_semaphoreType,
1104 															m_semaphoreHandleType,
1105 															m_pipelineCacheData);
1106 	}
1107 
1108 	void initPrograms (SourceCollections& programCollection) const
1109 	{
1110 		m_writeOpSupport->initPrograms(programCollection);
1111 		m_readOpSupport->initPrograms(programCollection);
1112 	}
1113 
1114 private:
1115 	SynchronizationType						m_type;
1116 	SharedPtr<OperationSupport>				m_writeOpSupport;
1117 	SharedPtr<OperationSupport>				m_readOpSupport;
1118 	const ResourceDescription&				m_resourceDesc;
1119 	VkExternalMemoryHandleTypeFlagBits		m_memoryHandleType;
1120 	VkSemaphoreType							m_semaphoreType;
1121 	VkExternalSemaphoreHandleTypeFlagBits	m_semaphoreHandleType;
1122 	PipelineCacheData&						m_pipelineCacheData;
1123 };
1124 
1125 class QueueSubmitSignalOrderSharedTests : public tcu::TestCaseGroup
1126 {
1127 public:
1128 	QueueSubmitSignalOrderSharedTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
1129 		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
1130 		, m_type				(type)
1131 		, m_semaphoreType		(semaphoreType)
1132 	{
1133 	}
1134 
1135 	void init (void)
1136 	{
1137 		static const OperationName	writeOps[]	=
1138 		{
1139 			OPERATION_NAME_WRITE_COPY_BUFFER,
1140 			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1141 			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1142 			OPERATION_NAME_WRITE_COPY_IMAGE,
1143 			OPERATION_NAME_WRITE_BLIT_IMAGE,
1144 			OPERATION_NAME_WRITE_SSBO_VERTEX,
1145 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1146 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1147 			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1148 			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1149 			OPERATION_NAME_WRITE_SSBO_COMPUTE,
1150 			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1151 			OPERATION_NAME_WRITE_IMAGE_VERTEX,
1152 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1153 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1154 			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1155 			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1156 			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1157 			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1158 		};
1159 		static const OperationName	readOps[]	=
1160 		{
1161 			OPERATION_NAME_READ_COPY_BUFFER,
1162 			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1163 			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1164 			OPERATION_NAME_READ_COPY_IMAGE,
1165 			OPERATION_NAME_READ_BLIT_IMAGE,
1166 			OPERATION_NAME_READ_UBO_VERTEX,
1167 			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1168 			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1169 			OPERATION_NAME_READ_UBO_GEOMETRY,
1170 			OPERATION_NAME_READ_UBO_FRAGMENT,
1171 			OPERATION_NAME_READ_UBO_COMPUTE,
1172 			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1173 			OPERATION_NAME_READ_SSBO_VERTEX,
1174 			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1175 			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1176 			OPERATION_NAME_READ_SSBO_GEOMETRY,
1177 			OPERATION_NAME_READ_SSBO_FRAGMENT,
1178 			OPERATION_NAME_READ_SSBO_COMPUTE,
1179 			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1180 			OPERATION_NAME_READ_IMAGE_VERTEX,
1181 			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1182 			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1183 			OPERATION_NAME_READ_IMAGE_GEOMETRY,
1184 			OPERATION_NAME_READ_IMAGE_FRAGMENT,
1185 			OPERATION_NAME_READ_IMAGE_COMPUTE,
1186 			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1187 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1188 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1189 			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1190 			OPERATION_NAME_READ_VERTEX_INPUT,
1191 		};
1192 		static const struct
1193 		{
1194 			VkExternalMemoryHandleTypeFlagBits		memoryType;
1195 			VkExternalSemaphoreHandleTypeFlagBits	semaphoreType;
1196 		}	exportCases[] =
1197 		{
1198 			// Only semaphore handle types having reference semantics
1199 			// are valid for this test.
1200 			{
1201 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
1202 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
1203 			},
1204 			{
1205 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1206 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1207 			},
1208 			{
1209 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1210 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1211 			},
1212 		};
1213 
1214 		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
1215 		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
1216 		{
1217 			const OperationName	writeOp		= writeOps[writeOpIdx];
1218 			const OperationName	readOp		= readOps[readOpIdx];
1219 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1220 			bool				empty		= true;
1221 
1222 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str()));
1223 
1224 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1225 			{
1226 				const ResourceDescription&	resource	= s_resources[resourceNdx];
1227 
1228 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1229 				{
1230 					for (deUint32 exportIdx = 0; exportIdx < DE_LENGTH_OF_ARRAY(exportCases); exportIdx++)
1231 					{
1232 						std::string					caseName	= getResourceName(resource) + "_" +
1233 							externalSemaphoreTypeToName(exportCases[exportIdx].semaphoreType);
1234 
1235 						opGroup->addChild(new QueueSubmitSignalOrderSharedTestCase(m_testCtx,
1236 																				   m_type,
1237 																				   caseName,
1238 																				   writeOp,
1239 																				   readOp,
1240 																				   resource,
1241 																				   exportCases[exportIdx].memoryType,
1242 																				   m_semaphoreType,
1243 																				   exportCases[exportIdx].semaphoreType,
1244 																				   m_pipelineCacheData));
1245 						empty = false;
1246 					}
1247 				}
1248 			}
1249 			if (!empty)
1250 				addChild(opGroup.release());
1251 		}
1252 	}
1253 
1254 	void deinit (void)
1255 	{
1256 		cleanupGroup();
1257 	}
1258 
1259 private:
1260 	SynchronizationType	m_type;
1261 	VkSemaphoreType		m_semaphoreType;
1262 	// synchronization.op tests share pipeline cache data to speed up test
1263 	// execution.
1264 	PipelineCacheData	m_pipelineCacheData;
1265 };
1266 
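// Per-iteration state for the single-device variant: the shared resource and the write/read operations acting on it.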
1267 struct QueueSubmitOrderIteration
1268 {
1269 	QueueSubmitOrderIteration() {}
1270 	~QueueSubmitOrderIteration() {}
1271 
1272 	SharedPtr<Resource>			resource;
1273 
1274 	SharedPtr<Operation>		writeOp;
1275 	SharedPtr<Operation>		readOp;
1276 };
1277 
1278 // Verifies the signaling order of the semaphores in multiple
1279 // VkSubmitInfo structures given to vkQueueSubmit(), with queueA & queueB
1280 // coming from the same VkDevice.
1281 //
1282 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
1283 // vkQueueSubmit(queueB, [read0-6])
1284 //
1285 // With read0-6 waiting on write6, all the data should be available
1286 // for reading given that signal operations are supposed to happen in
1287 // order.
1288 class QueueSubmitSignalOrderTestInstance : public TestInstance
1289 {
1290 public:
1291 	QueueSubmitSignalOrderTestInstance (Context&									context,
1292 										SynchronizationType							type,
1293 										const SharedPtr<OperationSupport>			writeOpSupport,
1294 										const SharedPtr<OperationSupport>			readOpSupport,
1295 										const ResourceDescription&					resourceDesc,
1296 										VkSemaphoreType								semaphoreType,
1297 										PipelineCacheData&							pipelineCacheData)
1298 		: TestInstance			(context)
1299 		, m_type				(type)
1300 		, m_writeOpSupport		(writeOpSupport)
1301 		, m_readOpSupport		(readOpSupport)
1302 		, m_resourceDesc		(resourceDesc)
1303 		, m_semaphoreType		(semaphoreType)
1304 		, m_device				(SingletonDevice::getDevice(context))
1305 		, m_deviceInterface		(context.getPlatformInterface(), context.getInstance(), *m_device, context.getUsedApiVersion())
1306 		, m_allocator			(new SimpleAllocator(m_deviceInterface,
1307 													 *m_device,
1308 													 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1309 																					   context.getPhysicalDevice())))
1310 		, m_operationContext	(new OperationContext(context, type, m_deviceInterface, *m_device, *m_allocator, pipelineCacheData))
1311 		, m_queueA				(DE_NULL)
1312 		, m_queueB				(DE_NULL)
1313 		, m_rng					(1234)
1314 
1315 	{
1316 		const std::vector<VkQueueFamilyProperties> queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(),
1317 																													 context.getPhysicalDevice());
1318 
1319 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1320 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1321 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1322 
		VkQueueFlags writeOpQueueFlags = m_writeOpSupport->getQueueFlags(*m_operationContext);
		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++)
		{
			if (((queueFamilyProperties[familyIdx].queueFlags & writeOpQueueFlags) == writeOpQueueFlags) ||
				((writeOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
				 (((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
				  ((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT))))
			{
				m_queueA = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, 0);
				m_queueFamilyIndexA = familyIdx;
				break;
			}
		}
		if (m_queueA == DE_NULL)
			TCU_THROW(NotSupportedError, "No queue supporting write operation");

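		// Pick a second queue for the read operation, distinct from the write queue.
		// It may come from the same family or from a different one.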
		VkQueueFlags readOpQueueFlags = m_readOpSupport->getQueueFlags(*m_operationContext);
		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++)
		{
			if (((queueFamilyProperties[familyIdx].queueFlags & readOpQueueFlags) == readOpQueueFlags) ||
				((readOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
				 (((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
				  ((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT))))
			{
				for (deUint32 queueIdx = 0; queueIdx < queueFamilyProperties[familyIdx].queueCount; queueIdx++)
				{
					VkQueue queue = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, queueIdx);

					if (queue == m_queueA)
						continue;

					m_queueB = queue;
					m_queueFamilyIndexB = familyIdx;
					break;
				}

				if (m_queueB != DE_NULL)
					break;
			}
		}
		if (m_queueB == DE_NULL)
			TCU_THROW(NotSupportedError, "No queue supporting read operation");
	}

	tcu::TestStatus iterate (void)
	{
		const bool											isTimelineSemaphore			= (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
		const VkDevice&										device						= *m_device;
		const DeviceInterface&								vk							= m_deviceInterface;
		Unique<VkFence>										fence						(createFence(vk, device));
		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexA));
		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexB));
		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
		std::vector<VkCommandBuffer>						cmdBuffersA;
		VkCommandBuffer										cmdBufferB;
		std::vector<Move<VkSemaphore> >						semaphoresA;
		std::vector<Move<VkSemaphore> >						semaphoresB;
		std::vector<VkSemaphore>							semaphoreHandlesA;
		std::vector<VkSemaphore>							semaphoreHandlesB;
		std::vector<deUint64>								timelineValuesA;
		std::vector<deUint64>								timelineValuesB;
		std::vector<QueueSubmitOrderIteration>				iterations;
		std::vector<VkPipelineStageFlags2KHR>				stageBits;
		std::vector<deUint32>								queueFamilies;
		SynchronizationWrapperPtr							syncWrapper					= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);

		// This guard will wait for the device to be idle before tearing down the resources above.
		const DeviceWaitIdleGuard							idleGuard					(vk, device);

		queueFamilies.push_back(m_queueFamilyIndexA);
		queueFamilies.push_back(m_queueFamilyIndexB);

		// Create a dozen sets of write/read operations.
		iterations.resize(12);
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&		iter				= iterations[iterIdx];

			iter.resource	= makeSharedPtr(new Resource(*m_operationContext,
														 m_resourceDesc,
														 m_writeOpSupport->getOutResourceUsageFlags() |
														 m_readOpSupport->getInResourceUsageFlags(),
														 VK_SHARING_MODE_EXCLUSIVE,
														 queueFamilies));

			iter.writeOp	= makeSharedPtr(m_writeOpSupport->build(*m_operationContext,
																	*iter.resource));
			iter.readOp		= makeSharedPtr(m_readOpSupport->build(*m_operationContext,
																   *iter.resource));
		}

		// Record each write operation into its own command buffer.
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&	iter	= iterations[iterIdx];

			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolA)));
			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));

			beginCommandBuffer(vk, cmdBuffersA.back());
			iter.writeOp->recordCommands(cmdBuffersA.back());

			{
				SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE);
				const SyncInfo				writeSync				= iter.writeOp->getOutSyncInfo();
				const SyncInfo				readSync				= iter.readOp->getInSyncInfo();
				const Resource&				resource				= *iter.resource;

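				// Record a barrier covering the written resource: the source scope is the
				// write's stages/accesses, the destination scope is what the read expects
				// (including the image layout transition for image resources).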
				if (resource.getType() == RESOURCE_TYPE_IMAGE)
				{
					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);

					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						writeSync.imageLayout,								// VkImageLayout					oldLayout
						readSync.imageLayout,								// VkImageLayout					newLayout
						resource.getImage().handle,							// VkImage							image
						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
				}
				else
				{
					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						resource.getBuffer().handle,						// VkBuffer							buffer
						0,													// VkDeviceSize						offset
						VK_WHOLE_SIZE										// VkDeviceSize						size
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
				}

				stageBits.push_back(writeSync.stageMask);
			}

			endCommandBuffer(vk, cmdBuffersA.back());

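			// One signal per write submission. With timeline semaphores, addSemaphore()
			// reuses a single semaphore and only grows the signal value.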
			addSemaphore(vk, device, semaphoresA, semaphoreHandlesA, timelineValuesA, 2u);
		}

		DE_ASSERT(stageBits.size() == iterations.size());
		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());

		// Record all read operations into a single command buffer.
		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolB));
		cmdBufferB = **(ptrCmdBufferB);
		beginCommandBuffer(vk, cmdBufferB);
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration& iter = iterations[iterIdx];
			iter.readOp->recordCommands(cmdBufferB);
		}
		endCommandBuffer(vk, cmdBufferB);

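		// Semaphore signaled by the read submission; for timeline semaphores its value
		// continues past the last value signaled on queueA.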
		addSemaphore(vk, device, semaphoresB, semaphoreHandlesB, timelineValuesB, timelineValuesA.back());

		// Submit the writes, each in its own VkSubmitInfo. With binary
		// semaphores the submissions don't wait on anything; with
		// timeline semaphores they wait on a host signal operation
		// performed below.
		{
			VkSemaphoreSubmitInfoKHR					waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.front(), 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfo	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
			std::vector<VkCommandBufferSubmitInfoKHR>	commandBufferSubmitInfos	(iterations.size(), makeCommonCommandBufferSubmitInfo(0));
			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore, (deUint32)iterations.size());

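			// Build one VkSubmitInfo per write command buffer. Only the timeline variant
			// adds a wait, on value 1 of the first semaphore, which is signaled from the
			// host after both queue submissions have been made.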
			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
			{
				commandBufferSubmitInfos[iterIdx].commandBuffer		= cmdBuffersA[iterIdx];
				signalSemaphoreSubmitInfo[iterIdx].semaphore		= semaphoreHandlesA[iterIdx];
				signalSemaphoreSubmitInfo[iterIdx].value			= timelineValuesA[iterIdx];

				synchronizationWrapper->addSubmitInfo(
					isTimelineSemaphore,
					isTimelineSemaphore ? &waitSemaphoreSubmitInfo : DE_NULL,
					1u,
					&commandBufferSubmitInfos[iterIdx],
					1u,
					&signalSemaphoreSubmitInfo[iterIdx],
					isTimelineSemaphore,
					isTimelineSemaphore
				);
			}

			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueA, DE_NULL));
		}

		// Submit the reads, waiting only on the semaphore of the last write
		// operation; the ordering of signal operations should guarantee that
		// all writes have completed by the time the reads kick in.
		{
			VkCommandBufferSubmitInfoKHR	commandBufferSubmitInfos	= makeCommonCommandBufferSubmitInfo(cmdBufferB);
			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.back(), timelineValuesA.back(), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);

			synchronizationWrapper->addSubmitInfo(
				1u,										// deUint32								waitSemaphoreInfoCount
				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
				1u,										// deUint32								commandBufferInfoCount
				&commandBufferSubmitInfos,				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
				1u,										// deUint32								signalSemaphoreInfoCount
				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
				isTimelineSemaphore,
				isTimelineSemaphore
			);

			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueB, *fence));

			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
			{
				const VkSemaphoreWaitInfo		waitInfo	=
				{
					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
					DE_NULL,								// const void*				pNext;
					0u,										// VkSemaphoreWaitFlagsKHR	flags;
					1u,										// deUint32					semaphoreCount;
					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
					&timelineValuesB.back(),				// const deUint64*			pValues;
				};

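				// The write submissions above wait on timeline value 1, which is only
				// signaled from the host here, so no GPU work starts before every
				// submission has been queued.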
				// Unblock the whole lot.
				hostSignal(vk, device, semaphoreHandlesA.front(), 1);

				VK_CHECK(vk.waitSemaphores(device, &waitInfo, ~0ull));
			}
			else
			{
				VK_CHECK(vk.waitForFences(device, 1, &fence.get(), VK_TRUE, ~0ull));
			}
		}

		// Verify the result of the operations.
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&		iter		= iterations[iterIdx];
			const Data						expected	= iter.writeOp->getData();
			const Data						actual		= iter.readOp->getData();

			if (isIndirectBuffer(iter.resource->getType()))
			{
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		return tcu::TestStatus::pass("Success");
	}

private:
	void addSemaphore (const DeviceInterface&			vk,
					   VkDevice							device,
					   std::vector<Move<VkSemaphore> >&	semaphores,
					   std::vector<VkSemaphore>&		semaphoreHandles,
					   std::vector<deUint64>&			timelineValues,
					   deUint64							firstTimelineValue)
	{
		Move<VkSemaphore>	semaphore;

		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
		{
			// Only allocate a single timeline semaphore, reused with increasing values.
			if (semaphores.empty())
			{
				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
			}
		}
		else
		{
			semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
		}

		semaphoreHandles.push_back(*semaphores.back());
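		// Timeline signal values must be strictly increasing, hence a random positive
		// increment over the previous value (or over firstTimelineValue for the first one).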
		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
	}

	SynchronizationType							m_type;
	SharedPtr<OperationSupport>					m_writeOpSupport;
	SharedPtr<OperationSupport>					m_readOpSupport;
	const ResourceDescription&					m_resourceDesc;
	VkSemaphoreType								m_semaphoreType;
	const Unique<VkDevice>&						m_device;
	const DeviceDriver							m_deviceInterface;
	UniquePtr<SimpleAllocator>					m_allocator;
	UniquePtr<OperationContext>					m_operationContext;
	VkQueue										m_queueA;
	VkQueue										m_queueB;
	deUint32									m_queueFamilyIndexA;
	deUint32									m_queueFamilyIndexB;
	de::Random									m_rng;
};

class QueueSubmitSignalOrderTestCase : public TestCase
{
public:
	QueueSubmitSignalOrderTestCase (tcu::TestContext&			testCtx,
									SynchronizationType			type,
									const std::string&			name,
									OperationName				writeOp,
									OperationName				readOp,
									const ResourceDescription&	resourceDesc,
									VkSemaphoreType				semaphoreType,
									PipelineCacheData&			pipelineCacheData)
		: TestCase				(testCtx, name.c_str())
		, m_type				(type)
		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
		, m_resourceDesc		(resourceDesc)
		, m_semaphoreType		(semaphoreType)
		, m_pipelineCacheData	(pipelineCacheData)
	{
	}

	virtual void checkSupport (Context& context) const
	{
		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
		if (m_type == SynchronizationType::SYNCHRONIZATION2)
			context.requireDeviceFunctionality("VK_KHR_synchronization2");
	}

	TestInstance* createInstance (Context& context) const
	{
		return new QueueSubmitSignalOrderTestInstance(context,
													  m_type,
													  m_writeOpSupport,
													  m_readOpSupport,
													  m_resourceDesc,
													  m_semaphoreType,
													  m_pipelineCacheData);
	}

	void initPrograms (SourceCollections& programCollection) const
	{
		m_writeOpSupport->initPrograms(programCollection);
		m_readOpSupport->initPrograms(programCollection);
	}

private:
	SynchronizationType						m_type;
	SharedPtr<OperationSupport>				m_writeOpSupport;
	SharedPtr<OperationSupport>				m_readOpSupport;
	const ResourceDescription&				m_resourceDesc;
	VkSemaphoreType							m_semaphoreType;
	PipelineCacheData&						m_pipelineCacheData;
};

class QueueSubmitSignalOrderTests : public tcu::TestCaseGroup
{
public:
	QueueSubmitSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char* name)
		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
		, m_type				(type)
		, m_semaphoreType		(semaphoreType)
	{
	}

	void init (void)
	{
		static const OperationName	writeOps[]	=
		{
			OPERATION_NAME_WRITE_COPY_BUFFER,
			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_WRITE_COPY_IMAGE,
			OPERATION_NAME_WRITE_BLIT_IMAGE,
			OPERATION_NAME_WRITE_SSBO_VERTEX,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
			OPERATION_NAME_WRITE_SSBO_COMPUTE,
			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_WRITE_IMAGE_VERTEX,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
		};
		static const OperationName	readOps[]	=
		{
			OPERATION_NAME_READ_COPY_BUFFER,
			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_READ_COPY_IMAGE,
			OPERATION_NAME_READ_BLIT_IMAGE,
			OPERATION_NAME_READ_UBO_VERTEX,
			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_UBO_GEOMETRY,
			OPERATION_NAME_READ_UBO_FRAGMENT,
			OPERATION_NAME_READ_UBO_COMPUTE,
			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_SSBO_VERTEX,
			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_SSBO_GEOMETRY,
			OPERATION_NAME_READ_SSBO_FRAGMENT,
			OPERATION_NAME_READ_SSBO_COMPUTE,
			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_IMAGE_VERTEX,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_IMAGE_GEOMETRY,
			OPERATION_NAME_READ_IMAGE_FRAGMENT,
			OPERATION_NAME_READ_IMAGE_COMPUTE,
			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
			OPERATION_NAME_READ_VERTEX_INPUT,
		};

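		// Create one sub-group per (write, read) operation pair, containing one test
		// case for each resource description supported by both operations.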
		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
		{
			const OperationName	writeOp		= writeOps[writeOpIdx];
			const OperationName	readOp		= readOps[readOpIdx];
			const std::string	opGroupName	= getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str()));

			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];

				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					opGroup->addChild(new QueueSubmitSignalOrderTestCase(m_testCtx,
																		 m_type,
																		 getResourceName(resource),
																		 writeOp,
																		 readOp,
																		 resource,
																		 m_semaphoreType,
																		 m_pipelineCacheData));
					empty = false;
				}
			}
			if (!empty)
				addChild(opGroup.release());
		}
	}

	void deinit (void)
	{
		cleanupGroup();
	}

private:
	SynchronizationType	m_type;
	VkSemaphoreType		m_semaphoreType;
	// synchronization.op tests share pipeline cache data to speed up test
	// execution.
	PipelineCacheData	m_pipelineCacheData;
};

} // anonymous

tcu::TestCaseGroup* createSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type)
{
	de::MovePtr<tcu::TestCaseGroup> orderingTests (new tcu::TestCaseGroup(testCtx, "signal_order"));

	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "binary_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "timeline_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "shared_binary_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "shared_timeline_semaphore"));

	return orderingTests.release();
}

} // synchronization
} // vkt