1 
2 /*------------------------------------------------------------------------
3  * Vulkan Conformance Tests
4  * ------------------------
5  *
6  * Copyright (c) 2019 The Khronos Group Inc.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Signal ordering tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktSynchronizationSignalOrderTests.hpp"
26 #include "vktSynchronizationOperation.hpp"
27 #include "vktSynchronizationOperationTestData.hpp"
28 #include "vktSynchronizationOperationResources.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktSynchronizationUtil.hpp"
31 #include "vktExternalMemoryUtil.hpp"
32 #include "vktCustomInstancesDevices.hpp"
33 #include "vkBarrierUtil.hpp"
34 
35 #include "vkDefs.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkImageUtil.hpp"
40 #include "vkRef.hpp"
41 #include "vkTypeUtil.hpp"
42 
43 #include "tcuTestLog.hpp"
44 #include "tcuCommandLine.hpp"
45 
46 #include "deRandom.hpp"
47 #include "deThread.hpp"
48 #include "deUniquePtr.hpp"
49 
50 #include <limits>
51 #include <set>
52 
53 namespace vkt
54 {
55 namespace synchronization
56 {
57 namespace
58 {
59 
60 using namespace vk;
61 using namespace vkt::ExternalMemoryUtil;
62 using tcu::TestLog;
63 using de::MovePtr;
64 using de::SharedPtr;
65 using de::UniquePtr;
66 
67 template<typename T>
68 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
69 {
70 	return SharedPtr<Move<T> >(new Move<T>(move));
71 }
72 
73 template<typename T>
74 inline SharedPtr<T> makeSharedPtr (de::MovePtr<T> move)
75 {
76 	return SharedPtr<T>(move.release());
77 }
78 
79 template<typename T>
80 inline SharedPtr<T> makeSharedPtr (T* ptr)
81 {
82 	return SharedPtr<T>(ptr);
83 }
84 
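// Signals the given semaphore from the host to the requested timeline value
// via vkSignalSemaphore (VK_KHR_timeline_semaphore / Vulkan 1.2 core).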
85 void hostSignal (const DeviceInterface& vk, const VkDevice& device, VkSemaphore semaphore, const deUint64 timelineValue)
86 {
87 	VkSemaphoreSignalInfoKHR	ssi	=
88 	{
89 		VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,	// VkStructureType				sType;
90 		DE_NULL,									// const void*					pNext;
91 		semaphore,									// VkSemaphore					semaphore;
92 		timelineValue,								// deUint64						value;
93 	};
94 
95 	VK_CHECK(vk.signalSemaphore(device, &ssi));
96 }
97 
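// Creates a second logical device exposing every queue of every family, with
// timeline semaphores, synchronization2 and the external semaphore/memory
// extensions enabled when the implementation supports them.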
98 Move<VkDevice> createTestDevice (const Context& context)
99 {
100 	const float									priority				= 0.0f;
101 	const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(), context.getPhysicalDevice());
102 	std::vector<deUint32>						queueFamilyIndices		(queueFamilyProperties.size(), 0xFFFFFFFFu);
103 	std::vector<const char*>					extensions;
104 
105 	VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
106 	VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
107 	VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
108 	void**										nextPtr						= &createPhysicalFeature.pNext;
109 
110 	if (context.isDeviceFunctionalitySupported("VK_KHR_timeline_semaphore"))
111 	{
112 		extensions.push_back("VK_KHR_timeline_semaphore");
113 		addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
114 	}
115 
116 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_semaphore"))
117 		extensions.push_back("VK_KHR_external_semaphore");
118 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_memory"))
119 		extensions.push_back("VK_KHR_external_memory");
120 
121 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
122 		extensions.push_back("VK_KHR_external_semaphore_fd");
123 
124 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
125 		extensions.push_back("VK_KHR_external_semaphore_win32");
126 
127 	if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
128 	{
129 		extensions.push_back("VK_KHR_synchronization2");
130 		addToChainVulkanStructure(&nextPtr, synchronization2Features);
131 	}
132 
133 	try
134 	{
135 		deUint32 maxQueueCount = 1;
136 		for (const VkQueueFamilyProperties& qfp : queueFamilyProperties)
137 			maxQueueCount = deMaxu32(qfp.queueCount, maxQueueCount);
138 
139 		std::vector<float>						queuePriorities(maxQueueCount, priority);
140 		std::vector<VkDeviceQueueCreateInfo>	queues;
141 
142 		for (size_t ndx = 0; ndx < queueFamilyProperties.size(); ndx++)
143 		{
144 			const VkDeviceQueueCreateInfo	createInfo	=
145 			{
146 				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
147 				DE_NULL,
148 				0u,
149 
150 				(deUint32)ndx,
151 				queueFamilyProperties[ndx].queueCount,
152 				queuePriorities.data()
153 			};
154 
155 			queues.push_back(createInfo);
156 		}
157 
158 		const VkDeviceCreateInfo				createInfo				=
159 		{
160 			VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
161 			&createPhysicalFeature,
162 			0u,
163 
164 			(deUint32)queues.size(),
165 			&queues[0],
166 
167 			0u,
168 			DE_NULL,
169 
170 			(deUint32)extensions.size(),
171 			extensions.empty() ? DE_NULL : &extensions[0],
172 			0u
173 		};
174 
175 		const auto validation = context.getTestContext().getCommandLine().isValidationEnabled();
176 		return createCustomDevice(validation, context.getPlatformInterface(), context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &createInfo);
177 	}
178 	catch (const vk::Error& error)
179 	{
180 		if (error.getError() == VK_ERROR_EXTENSION_NOT_PRESENT)
181 			TCU_THROW(NotSupportedError, "Required extensions not supported");
182 		else
183 			throw;
184 	}
185 }
186 
187 // Wraps a singleton logical device, created lazily and shared by the tests in this file.
188 class SingletonDevice
189 {
190 	SingletonDevice	(const Context& context)
191 		: m_logicalDevice	(createTestDevice(context))
192 	{
193 	}
194 
195 public:
196 
197 	static const Unique<vk::VkDevice>& getDevice(const Context& context)
198 	{
199 		if (!m_singletonDevice)
200 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
201 
202 		DE_ASSERT(m_singletonDevice);
203 		return m_singletonDevice->m_logicalDevice;
204 	}
205 
206 	static void destroy()
207 	{
208 		m_singletonDevice.clear();
209 	}
210 
211 private:
212 	const Unique<vk::VkDevice>					m_logicalDevice;
213 
214 	static SharedPtr<SingletonDevice>	m_singletonDevice;
215 };
216 SharedPtr<SingletonDevice>		SingletonDevice::m_singletonDevice;
217 
218 static void cleanupGroup ()
219 {
220 	// Destroy singleton object
221 	SingletonDevice::destroy();
222 }
223 
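// Minimal Allocation wrapper that takes ownership of a VkDeviceMemory handle
// (e.g. imported or exportable memory) and frees it on destruction.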
224 class SimpleAllocation : public Allocation
225 {
226 public:
227 	SimpleAllocation	(const DeviceInterface&	vkd,
228 						 VkDevice				device,
229 						 const VkDeviceMemory	memory);
230 	~SimpleAllocation	(void);
231 
232 private:
233 	const DeviceInterface&	m_vkd;
234 	const VkDevice			m_device;
235 };
236 
237 SimpleAllocation::SimpleAllocation (const DeviceInterface&	vkd,
238 									VkDevice				device,
239 									const VkDeviceMemory	memory)
240 	: Allocation	(memory, 0, DE_NULL)
241 	, m_vkd			(vkd)
242 	, m_device		(device)
243 {
244 }
245 
246 SimpleAllocation::~SimpleAllocation (void)
247 {
248 	m_vkd.freeMemory(m_device, getMemory(), DE_NULL);
249 }
250 
251 vk::VkMemoryRequirements getMemoryRequirements (const DeviceInterface&				vkd,
252 												 VkDevice							device,
253 												 VkBuffer							buffer)
254 {
255 	const VkBufferMemoryRequirementsInfo2	requirementInfo =
256 	{
257 		VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
258 		DE_NULL,
259 		buffer
260 	};
261 	VkMemoryRequirements2					requirements	=
262 	{
263 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
264 		DE_NULL,
265 		{ 0u, 0u, 0u, }
266 	};
267 	vkd.getBufferMemoryRequirements2(device, &requirementInfo, &requirements);
268 	return requirements.memoryRequirements;
269 }
270 
271 vk::VkMemoryRequirements getMemoryRequirements(const DeviceInterface&				vkd,
272 												VkDevice							device,
273 												VkImage								image)
274 {
275 	const VkImageMemoryRequirementsInfo2	requirementInfo =
276 	{
277 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
278 		DE_NULL,
279 		image
280 	};
281 	VkMemoryRequirements2					requirements =
282 	{
283 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
284 		DE_NULL,
285 		{ 0u, 0u, 0u, }
286 	};
287 	vkd.getImageMemoryRequirements2(device, &requirementInfo, &requirements);
288 
289 	return requirements.memoryRequirements;
290 }
291 
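// Imports memory from the given native handle (using a dedicated allocation
// when a valid buffer handle is provided) and binds it to the buffer.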
292 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
293 										 VkDevice								device,
294 										 VkBuffer								buffer,
295 										 NativeHandle&							nativeHandle,
296 										 VkExternalMemoryHandleTypeFlagBits		externalType,
297 										 const deUint32							exportedMemoryTypeIndex)
298 {
299 	const VkMemoryRequirements	requirements			= getBufferMemoryRequirements(vkd, device, buffer);
300 	Move<VkDeviceMemory>		memory;
301 
302 	if (!!buffer)
303 		memory = importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
304 	else
305 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
306 
307 	VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
308 
309 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
310 }
311 
312 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
313 										 VkDevice								device,
314 										 VkImage								image,
315 										 NativeHandle&							nativeHandle,
316 										 VkExternalMemoryHandleTypeFlagBits		externalType,
317 										 deUint32								exportedMemoryTypeIndex)
318 {
319 	const VkMemoryRequirements	requirements	= getImageMemoryRequirements(vkd, device, image);
320 	Move<VkDeviceMemory>		memory;
321 
322 	if (!!image)
323 		memory = importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
324 	else
325 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
326 
327 	VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
328 
329 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
330 }
331 
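// Bookkeeping for a single iteration submitted to a queue: the operation to
// run and the timeline value it signals (the previous value plus a random
// 1-100 increment).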
332 struct QueueTimelineIteration
333 {
334 	QueueTimelineIteration(const SharedPtr<OperationSupport>&	_opSupport,
335 						   deUint64								lastValue,
336 						   VkQueue								_queue,
337 						   deUint32								_queueFamilyIdx,
338 						   de::Random&							rng)
339 		: opSupport(_opSupport)
340 		, queue(_queue)
341 		, queueFamilyIdx(_queueFamilyIdx)
342 	{
343 		timelineValue	= lastValue + rng.getInt(1, 100);
344 	}
345 	~QueueTimelineIteration() {}
346 
347 	SharedPtr<OperationSupport>	opSupport;
348 	VkQueue						queue;
349 	deUint32					queueFamilyIdx;
350 	deUint64					timelineValue;
351 	SharedPtr<Operation>		op;
352 };
353 
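// Re-creates the image or buffer described by resourceDesc on the importing
// device and binds it to memory imported from the given native handle.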
354 de::MovePtr<Resource> importResource (const DeviceInterface&				vkd,
355 									  VkDevice								device,
356 									  const ResourceDescription&			resourceDesc,
357 									  const deUint32						queueFamilyIndex,
358 									  const OperationSupport&				readOp,
359 									  const OperationSupport&				writeOp,
360 									  NativeHandle&							nativeHandle,
361 									  VkExternalMemoryHandleTypeFlagBits	externalType,
362 									  deUint32								exportedMemoryTypeIndex)
363 {
364 	if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
365 	{
366 		const VkExtent3D					extent					=
367 		{
368 			(deUint32)resourceDesc.size.x(),
369 			de::max(1u, (deUint32)resourceDesc.size.y()),
370 			de::max(1u, (deUint32)resourceDesc.size.z())
371 		};
372 		const VkImageSubresourceRange	subresourceRange		=
373 		{
374 			resourceDesc.imageAspect,
375 			0u,
376 			1u,
377 			0u,
378 			1u
379 		};
380 		const VkImageSubresourceLayers	subresourceLayers		=
381 		{
382 			resourceDesc.imageAspect,
383 			0u,
384 			0u,
385 			1u
386 		};
387 		const VkExternalMemoryImageCreateInfo externalInfo =
388 		{
389 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
390 			DE_NULL,
391 			(VkExternalMemoryHandleTypeFlags)externalType
392 		};
393 		const VkImageCreateInfo			createInfo				=
394 		{
395 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
396 			&externalInfo,
397 			0u,
398 
399 			resourceDesc.imageType,
400 			resourceDesc.imageFormat,
401 			extent,
402 			1u,
403 			1u,
404 			resourceDesc.imageSamples,
405 			VK_IMAGE_TILING_OPTIMAL,
406 			readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
407 			VK_SHARING_MODE_EXCLUSIVE,
408 
409 			1u,
410 			&queueFamilyIndex,
411 			VK_IMAGE_LAYOUT_UNDEFINED
412 		};
413 
414 		Move<VkImage>			image		= createImage(vkd, device, &createInfo);
415 		MovePtr<Allocation>		allocation	= importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex);
416 
417 		return MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers));
418 	}
419 	else
420 	{
421 		const VkDeviceSize						offset			= 0u;
422 		const VkDeviceSize						size			= static_cast<VkDeviceSize>(resourceDesc.size.x());
423 		const VkBufferUsageFlags				usage			= readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags();
424 		const VkExternalMemoryBufferCreateInfo	externalInfo	=
425 		{
426 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
427 			DE_NULL,
428 			(VkExternalMemoryHandleTypeFlags)externalType
429 		};
430 		const VkBufferCreateInfo				createInfo		=
431 		{
432 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
433 			&externalInfo,
434 			0u,
435 
436 			size,
437 			usage,
438 			VK_SHARING_MODE_EXCLUSIVE,
439 			1u,
440 			&queueFamilyIndex
441 		};
442 		Move<VkBuffer>							buffer		= createBuffer(vkd, device, &createInfo);
443 		MovePtr<Allocation>						allocation	= importAndBindMemory(vkd,
444 																				  device,
445 																				  *buffer,
446 																				  nativeHandle,
447 																				  externalType,
448 																				  exportedMemoryTypeIndex);
449 
450 		return MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
451 	}
452 }
453 
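// Per-iteration state for the two-device variant: the same underlying memory
// is exposed as resourceA on the exporting device and as resourceB on the
// importing device.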
454 struct QueueSubmitOrderSharedIteration
455 {
456 	QueueSubmitOrderSharedIteration() {}
457 	~QueueSubmitOrderSharedIteration() {}
458 
459 	SharedPtr<Resource>			resourceA;
460 	SharedPtr<Resource>			resourceB;
461 
462 	SharedPtr<Operation>		writeOp;
463 	SharedPtr<Operation>		readOp;
464 };
465 
466 // Verifies the signaling order of the semaphores in multiple
467 // VkSubmitInfo structures given to vkQueueSubmit(), with queueA & queueB
468 // coming from two different VkDevices.
469 //
470 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
471 // vkQueueSubmit(queueB, [read0-6])
472 //
473 // With read0-6 waiting on write6, all the data should be available
474 // for reading given that signal operations are supposed to happen in
475 // order.
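//
// Illustrative sketch (timeline semaphore case, not literal code): each write
// batch submitted to queueA looks roughly like
//
//   wait   (semA, value 1, write stage mask)   // released by hostSignal()
//   execute write[i]
//   signal (semA, timelineValuesA[i])
//
// while the single read batch on queueB waits on the imported copy of semA
// at timelineValuesA.back() before executing all the reads.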
476 class QueueSubmitSignalOrderSharedTestInstance : public TestInstance
477 {
478 public:
479 	QueueSubmitSignalOrderSharedTestInstance (Context&									context,
480 											  SynchronizationType						type,
481 											  const SharedPtr<OperationSupport>			writeOpSupport,
482 											  const SharedPtr<OperationSupport>			readOpSupport,
483 											  const ResourceDescription&				resourceDesc,
484 											  VkExternalMemoryHandleTypeFlagBits		memoryHandleType,
485 											  VkSemaphoreType							semaphoreType,
486 											  VkExternalSemaphoreHandleTypeFlagBits		semaphoreHandleType,
487 											  PipelineCacheData&						pipelineCacheData)
488 		: TestInstance			(context)
489 		, m_type				(type)
490 		, m_writeOpSupport		(writeOpSupport)
491 		, m_readOpSupport		(readOpSupport)
492 		, m_resourceDesc		(resourceDesc)
493 		, m_memoryHandleType	(memoryHandleType)
494 		, m_semaphoreType		(semaphoreType)
495 		, m_semaphoreHandleType	(semaphoreHandleType)
496 		, m_pipelineCacheData	(pipelineCacheData)
497 		, m_rng					(1234)
498 
499 	{
500 		const InstanceInterface&					vki					= context.getInstanceInterface();
501 		const VkSemaphoreTypeCreateInfoKHR			semaphoreTypeInfo	=
502 		{
503 			VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
504 			DE_NULL,
505 			semaphoreType,
506 			0,
507 		};
508 		const VkPhysicalDeviceExternalSemaphoreInfo	info				=
509 		{
510 			VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
511 			&semaphoreTypeInfo,
512 			semaphoreHandleType
513 		};
514 		VkExternalSemaphoreProperties				properties			=
515 		{
516 			VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
517 			DE_NULL,
518 			0u,
519 			0u,
520 			0u
521 		};
522 
523 		vki.getPhysicalDeviceExternalSemaphoreProperties(context.getPhysicalDevice(), &info, &properties);
524 
525 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
526 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
527 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
528 
529 		if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
530 			|| (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
531 			TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
532 
533 		if (!isResourceExportable())
534 			TCU_THROW(NotSupportedError, "Resource not exportable");
535 
536 	}
537 
538 	Move<VkImage> createImage (const vk::DeviceInterface&	vkd,
539 							   vk::VkDevice					device,
540 							   const vk::VkExtent3D&		extent,
541 							   deUint32						queueFamilyIndex)
542 	{
543 		const VkExternalMemoryImageCreateInfo externalInfo =
544 		{
545 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
546 			DE_NULL,
547 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
548 		};
549 		const VkImageCreateInfo createInfo =
550 		{
551 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
552 			&externalInfo,
553 			0u,
554 
555 			m_resourceDesc.imageType,
556 			m_resourceDesc.imageFormat,
557 			extent,
558 			1u,
559 			1u,
560 			m_resourceDesc.imageSamples,
561 			VK_IMAGE_TILING_OPTIMAL,
562 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
563 			VK_SHARING_MODE_EXCLUSIVE,
564 
565 			1u,
566 			&queueFamilyIndex,
567 			VK_IMAGE_LAYOUT_UNDEFINED
568 		};
569 
570 		return vk::createImage(vkd, device, &createInfo);
571 	}
572 
573 	Move<VkBuffer> createBuffer (const vk::DeviceInterface&		vkd,
574 								 vk::VkDevice					device,
575 								 const vk::VkDeviceSize&		size,
576 								 deUint32						queueFamilyIndex)
577 	{
578 		const VkExternalMemoryBufferCreateInfo	externalInfo =
579 		{
580 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
581 			DE_NULL,
582 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
583 		};
584 		const VkBufferCreateInfo				createInfo =
585 		{
586 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
587 			&externalInfo,
588 			0u,
589 
590 			size,
591 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
592 			VK_SHARING_MODE_EXCLUSIVE,
593 			1u,
594 			&queueFamilyIndex
595 		};
596 		return vk::createBuffer(vkd, device, &createInfo);
597 	}
598 
599 	tcu::TestStatus iterate (void)
600 	{
601 		// We're using 2 devices to make sure we have 2 queues even on
602 		// implementations that only have a single queue.
603 		const bool											isTimelineSemaphore			(m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
604 		const VkDevice&										deviceA						= m_context.getDevice();
605 		const Unique<VkDevice>&								deviceB						(SingletonDevice::getDevice(m_context));
606 		const DeviceInterface&								vkA							= m_context.getDeviceInterface();
607 		const DeviceDriver									vkB							(m_context.getPlatformInterface(), m_context.getInstance(), *deviceB);
608 		UniquePtr<SimpleAllocator>							allocatorA					(new SimpleAllocator(vkA, deviceA, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
609 																																								 m_context.getPhysicalDevice())));
610 		UniquePtr<SimpleAllocator>							allocatorB					(new SimpleAllocator(vkB, *deviceB, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
611 																																								  m_context.getPhysicalDevice())));
612 		UniquePtr<OperationContext>							operationContextA			(new OperationContext(m_context, m_type, vkA, deviceA, *allocatorA, m_pipelineCacheData));
613 		UniquePtr<OperationContext>							operationContextB			(new OperationContext(m_context, m_type, vkB, *deviceB, *allocatorB, m_pipelineCacheData));
614 		const deUint32										universalQueueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
615 		const VkQueue										queueA						= m_context.getUniversalQueue();
616 		const VkQueue										queueB						= getDeviceQueue(vkB, *deviceB, m_context.getUniversalQueueFamilyIndex(), 0);
617 		Unique<VkFence>										fenceA						(createFence(vkA, deviceA));
618 		Unique<VkFence>										fenceB						(createFence(vkB, *deviceB));
619 		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vkA, deviceA, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
620 		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vkB, *deviceB, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
621 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
622 		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
623 		std::vector<VkCommandBuffer>						cmdBuffersA;
624 		VkCommandBuffer										cmdBufferB;
625 		std::vector<Move<VkSemaphore> >						semaphoresA;
626 		std::vector<Move<VkSemaphore> >						semaphoresB;
627 		std::vector<VkSemaphore>							semaphoreHandlesA;
628 		std::vector<VkSemaphore>							semaphoreHandlesB;
629 		std::vector<deUint64>								timelineValuesA;
630 		std::vector<deUint64>								timelineValuesB;
631 		std::vector<QueueSubmitOrderSharedIteration>		iterations(12);
632 		std::vector<VkPipelineStageFlags2KHR>				stageBits;
633 
634 		// Create a dozen sets of write/read operations.
635 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
636 		{
637 			QueueSubmitOrderSharedIteration&	iter				= iterations[iterIdx];
638 			deUint32							memoryTypeIndex;
639 			NativeHandle						nativeMemoryHandle;
640 
641 			if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
642 			{
643 				const VkExtent3D				extent =
644 				{
645 					(deUint32)m_resourceDesc.size.x(),
646 					de::max(1u, (deUint32)m_resourceDesc.size.y()),
647 					de::max(1u, (deUint32)m_resourceDesc.size.z())
648 				};
649 				const VkImageSubresourceRange	subresourceRange =
650 				{
651 					m_resourceDesc.imageAspect,
652 					0u,
653 					1u,
654 					0u,
655 					1u
656 				};
657 				const VkImageSubresourceLayers	subresourceLayers =
658 				{
659 					m_resourceDesc.imageAspect,
660 					0u,
661 					0u,
662 					1u
663 				};
664 
665 				Move<VkImage>							image			= createImage(vkA, deviceA, extent, universalQueueFamilyIndex);
666 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *image);
667 														memoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
668 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *image);
669 
670 				VK_CHECK(vkA.bindImageMemory(deviceA, *image, *memory, 0u));
671 
672 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
673 				iter.resourceA = makeSharedPtr(new Resource(image, allocation, extent, m_resourceDesc.imageType, m_resourceDesc.imageFormat, subresourceRange, subresourceLayers));
674 			}
675 			else
676 			{
677 				const VkDeviceSize						offset			= 0u;
678 				const VkDeviceSize						size			= static_cast<VkDeviceSize>(m_resourceDesc.size.x());
679 				Move<VkBuffer>							buffer			= createBuffer(vkA, deviceA, size, universalQueueFamilyIndex);
680 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *buffer);
681 														memoryTypeIndex	= chooseMemoryType(requirements.memoryTypeBits);
682 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *buffer);
683 
684 				VK_CHECK(vkA.bindBufferMemory(deviceA, *buffer, *memory, 0u));
685 
686 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
687 				iter.resourceA = makeSharedPtr(new Resource(m_resourceDesc.type, buffer, allocation, offset, size));
688 			}
689 
690 			getMemoryNative(vkA, deviceA, iter.resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
691 			iter.resourceB	= makeSharedPtr(importResource(vkB, *deviceB,
692 														   m_resourceDesc,
693 														   universalQueueFamilyIndex,
694 														   *m_readOpSupport,
695 														   *m_writeOpSupport,
696 														   nativeMemoryHandle,
697 														   m_memoryHandleType,
698 														   memoryTypeIndex));
699 
700 			iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*operationContextA,
701 																 *iter.resourceA));
702 			iter.readOp = makeSharedPtr(m_readOpSupport->build(*operationContextB,
703 															   *iter.resourceB));
704 		}
705 
706 		// Record each write operation into its own command buffer.
707 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
708 		{
709 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
710 			const Resource&						resource	= *iter.resourceA;
711 			const SyncInfo						writeSync	= iter.writeOp->getOutSyncInfo();
712 			const SyncInfo						readSync	= iter.readOp->getInSyncInfo();
713 
714 			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vkA, deviceA, *cmdPoolA)));
715 
716 			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
717 
718 			beginCommandBuffer(vkA, cmdBuffersA.back());
719 
720 			iter.writeOp->recordCommands(cmdBuffersA.back());
721 
722 			{
723 				SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore);
724 
725 				if (resource.getType() == RESOURCE_TYPE_IMAGE)
726 				{
727 					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
728 					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
729 
730 					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
731 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
732 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
733 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
734 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
735 						writeSync.imageLayout,								// VkImageLayout					oldLayout
736 						readSync.imageLayout,								// VkImageLayout					newLayout
737 						resource.getImage().handle,							// VkImage							image
738 						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
739 					);
740 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
741 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
742 				}
743 				else
744 				{
745 					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
746 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
747 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
748 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
749 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
750 						resource.getBuffer().handle,						// VkBuffer							buffer
751 						0,													// VkDeviceSize						offset
752 						VK_WHOLE_SIZE										// VkDeviceSize						size
753 					);
754 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
755 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
756 				}
757 
758 				stageBits.push_back(writeSync.stageMask);
759 			}
760 
761 			endCommandBuffer(vkA, cmdBuffersA.back());
762 
763 			addSemaphore(vkA, deviceA, semaphoresA, semaphoreHandlesA, timelineValuesA, iterIdx == (iterations.size() - 1), 2u);
764 		}
765 
766 		DE_ASSERT(stageBits.size() == iterations.size());
767 		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
768 
769 		// Record all read operations into a single command buffer and record the union of their stage masks.
770 		VkPipelineStageFlags2KHR readStages = 0;
771 		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vkB, *deviceB, *cmdPoolB));
772 		cmdBufferB = **(ptrCmdBufferB);
773 		beginCommandBuffer(vkB, cmdBufferB);
774 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
775 		{
776 			QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
777 			readStages |= iter.readOp->getInSyncInfo().stageMask;
778 			iter.readOp->recordCommands(cmdBufferB);
779 		}
780 		endCommandBuffer(vkB, cmdBufferB);
781 
782 		// Export the last semaphore for use on deviceB and create another semaphore to signal on deviceB.
783 		{
784 			VkSemaphore		lastSemaphoreA			= semaphoreHandlesA.back();
785 			NativeHandle	nativeSemaphoreHandle;
786 
787 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, true, timelineValuesA.back());
788 
789 			getSemaphoreNative(vkA, deviceA, lastSemaphoreA, m_semaphoreHandleType, nativeSemaphoreHandle);
790 			importSemaphore(vkB, *deviceB, semaphoreHandlesB.back(), m_semaphoreHandleType, nativeSemaphoreHandle, 0u);
791 
792 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, false, timelineValuesA.back());
793 		}
794 
795 		// Submit writes, each in its own VkSubmitInfo. With binary
796 		// semaphores, submissions don't wait on anything; with
797 		// timeline semaphores, submissions wait on a host signal
798 		// operation performed below.
799 		{
800 			std::vector<VkCommandBufferSubmitInfoKHR>	cmdBuffersInfo				(iterations.size(), makeCommonCommandBufferSubmitInfo(0u));
801 			std::vector<VkSemaphoreSubmitInfoKHR>		waitSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
802 			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
803 			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore, static_cast<deUint32>(iterations.size()));
804 
805 			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
806 			{
807 				waitSemaphoreSubmitInfos[iterIdx].semaphore		= semaphoreHandlesA.front();
808 				waitSemaphoreSubmitInfos[iterIdx].stageMask		= stageBits[iterIdx];
809 				signalSemaphoreSubmitInfos[iterIdx].semaphore	= semaphoreHandlesA[iterIdx];
810 				signalSemaphoreSubmitInfos[iterIdx].value		= timelineValuesA[iterIdx];
811 				cmdBuffersInfo[iterIdx].commandBuffer			= cmdBuffersA[iterIdx];
812 
813 				synchronizationWrapper->addSubmitInfo(
814 					isTimelineSemaphore,
815 					isTimelineSemaphore ? &waitSemaphoreSubmitInfos[iterIdx] : DE_NULL,
816 					1u,
817 					&cmdBuffersInfo[iterIdx],
818 					1u,
819 					&signalSemaphoreSubmitInfos[iterIdx],
820 					isTimelineSemaphore,
821 					isTimelineSemaphore
822 				);
823 			}
824 
825 			VK_CHECK(synchronizationWrapper->queueSubmit(queueA, *fenceA));
826 		}
827 
828 		// Submit reads, waiting only on the last write operation;
829 		// the ordering of signal operations should guarantee that
830 		// all writes have completed by the time the reads kick in.
831 		{
832 			VkCommandBufferSubmitInfoKHR	cmdBuffersInfo				= makeCommonCommandBufferSubmitInfo(cmdBufferB);
833 			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.front(), timelineValuesA.back(), readStages);
834 			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
835 			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vkB, isTimelineSemaphore);
836 
837 			synchronizationWrapper->addSubmitInfo(
838 				1u,
839 				&waitSemaphoreSubmitInfo,
840 				1u,
841 				&cmdBuffersInfo,
842 				1u,
843 				&signalSemaphoreSubmitInfo,
844 				isTimelineSemaphore,
845 				isTimelineSemaphore
846 			);
847 
848 			VK_CHECK(synchronizationWrapper->queueSubmit(queueB, *fenceB));
849 
850 			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
851 			{
852 				const VkSemaphoreWaitInfo		waitInfo	=
853 				{
854 					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
855 					DE_NULL,								// const void*				pNext;
856 					0u,										// VkSemaphoreWaitFlagsKHR	flags;
857 					1u,										// deUint32					semaphoreCount;
858 					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
859 					&timelineValuesB.back(),				// const deUint64*			pValues;
860 				};
861 
862 				// Unblock all the write submissions at once by signaling the timeline semaphore from the host.
863 				hostSignal(vkA, deviceA, semaphoreHandlesA.front(), 1);
864 
865 				VK_CHECK(vkB.waitSemaphores(*deviceB, &waitInfo, ~0ull));
866 			}
867 			else
868 			{
869 				VK_CHECK(vkB.waitForFences(*deviceB, 1, &fenceB.get(), VK_TRUE, ~0ull));
870 			}
871 		}
872 
873 		// Verify the result of the operations.
874 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
875 		{
876 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
877 			const Data							expected	= iter.writeOp->getData();
878 			const Data							actual		= iter.readOp->getData();
879 
880 			if (isIndirectBuffer(iter.resourceA->getType()))
881 			{
882 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
883 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
884 
885 				if (actualValue < expectedValue)
886 					return tcu::TestStatus::fail("Counter value is smaller than expected");
887 			}
888 			else
889 			{
890 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
891 					return tcu::TestStatus::fail("Memory contents don't match");
892 			}
893 		}
894 
895 		VK_CHECK(vkA.deviceWaitIdle(deviceA));
896 		VK_CHECK(vkB.deviceWaitIdle(*deviceB));
897 
898 		return tcu::TestStatus::pass("Success");
899 	}
900 
901 private:
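	// Creates a semaphore (for timeline semaphores a single one is reused),
	// records its handle and appends a strictly increasing timeline value.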
902 	void addSemaphore (const DeviceInterface&			vk,
903 					   VkDevice							device,
904 					   std::vector<Move<VkSemaphore> >&	semaphores,
905 					   std::vector<VkSemaphore>&		semaphoreHandles,
906 					   std::vector<deUint64>&			timelineValues,
907 					   bool								exportable,
908 					   deUint64							firstTimelineValue)
909 	{
910 		Move<VkSemaphore>	semaphore;
911 
912 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
913 		{
914 			// Only allocate a single exportable semaphore.
915 			if (semaphores.empty())
916 			{
917 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
918 			}
919 		}
920 		else
921 		{
922 			if (exportable)
923 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
924 			else
925 				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
926 		}
927 
928 		semaphoreHandles.push_back(*semaphores.back());
929 		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
930 	}
931 
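	// Queries whether the test resource can be both exported and re-imported
	// with the requested external memory handle type.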
932 	bool isResourceExportable ()
933 	{
934 		const InstanceInterface&					vki				= m_context.getInstanceInterface();
935 		VkPhysicalDevice							physicalDevice	= m_context.getPhysicalDevice();
936 
937 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
938 		{
939 			const VkPhysicalDeviceExternalImageFormatInfo	externalInfo		=
940 			{
941 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
942 				DE_NULL,
943 				m_memoryHandleType
944 			};
945 			const VkPhysicalDeviceImageFormatInfo2			imageFormatInfo		=
946 			{
947 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
948 				&externalInfo,
949 				m_resourceDesc.imageFormat,
950 				m_resourceDesc.imageType,
951 				VK_IMAGE_TILING_OPTIMAL,
952 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
953 				0u
954 			};
955 			VkExternalImageFormatProperties					externalProperties	=
956 			{
957 				VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
958 				DE_NULL,
959 				{ 0u, 0u, 0u }
960 			};
961 			VkImageFormatProperties2						formatProperties	=
962 			{
963 				VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
964 				&externalProperties,
965 				{
966 					{ 0u, 0u, 0u },
967 					0u,
968 					0u,
969 					0u,
970 					0u,
971 				}
972 			};
973 
974 			{
975 				const VkResult res = vki.getPhysicalDeviceImageFormatProperties2(physicalDevice, &imageFormatInfo, &formatProperties);
976 
977 				if (res == VK_ERROR_FORMAT_NOT_SUPPORTED)
978 					return false;
979 
980 				VK_CHECK(res); // Check other errors
981 			}
982 
983 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
984 				return false;
985 
986 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
987 				return false;
988 
989 			return true;
990 		}
991 		else
992 		{
993 			const VkPhysicalDeviceExternalBufferInfo	info	=
994 			{
995 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
996 				DE_NULL,
997 
998 				0u,
999 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
1000 				m_memoryHandleType
1001 			};
1002 			VkExternalBufferProperties					properties			=
1003 			{
1004 				VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
1005 				DE_NULL,
1006 				{ 0u, 0u, 0u}
1007 			};
1008 			vki.getPhysicalDeviceExternalBufferProperties(physicalDevice, &info, &properties);
1009 
1010 			if ((properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
1011 				|| (properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
1012 				return false;
1013 
1014 			return true;
1015 		}
1016 	}
1017 
1018 	SynchronizationType							m_type;
1019 	SharedPtr<OperationSupport>					m_writeOpSupport;
1020 	SharedPtr<OperationSupport>					m_readOpSupport;
1021 	const ResourceDescription&					m_resourceDesc;
1022 	VkExternalMemoryHandleTypeFlagBits			m_memoryHandleType;
1023 	VkSemaphoreType								m_semaphoreType;
1024 	VkExternalSemaphoreHandleTypeFlagBits		m_semaphoreHandleType;
1025 	PipelineCacheData&							m_pipelineCacheData;
1026 	de::Random									m_rng;
1027 };
1028 
1029 class QueueSubmitSignalOrderSharedTestCase : public TestCase
1030 {
1031 public:
1032 	QueueSubmitSignalOrderSharedTestCase (tcu::TestContext&						testCtx,
1033 										  SynchronizationType					type,
1034 										  const std::string&					name,
1035 										  OperationName							writeOp,
1036 										  OperationName							readOp,
1037 										  const ResourceDescription&			resourceDesc,
1038 										  VkExternalMemoryHandleTypeFlagBits	memoryHandleType,
1039 										  VkSemaphoreType						semaphoreType,
1040 										  VkExternalSemaphoreHandleTypeFlagBits	semaphoreHandleType,
1041 										  PipelineCacheData&					pipelineCacheData)
1042 		: TestCase				(testCtx, name.c_str(), "")
1043 		, m_type				(type)
1044 		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
1045 		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
1046 		, m_resourceDesc		(resourceDesc)
1047 		, m_memoryHandleType	(memoryHandleType)
1048 		, m_semaphoreType		(semaphoreType)
1049 		, m_semaphoreHandleType	(semaphoreHandleType)
1050 		, m_pipelineCacheData	(pipelineCacheData)
1051 	{
1052 	}
1053 
1054 	virtual void checkSupport(Context& context) const
1055 	{
1056 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1057 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1058 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1059 
1060 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT ||
1061 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) &&
1062 			 !context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
1063 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_fd not supported");
1064 
1065 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
1066 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT) &&
1067 			!context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
1068 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_win32 not supported");
1069 
1070 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1071 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1072 	}
1073 
1074 	TestInstance* createInstance (Context& context) const
1075 	{
1076 		return new QueueSubmitSignalOrderSharedTestInstance(context,
1077 															m_type,
1078 															m_writeOpSupport,
1079 															m_readOpSupport,
1080 															m_resourceDesc,
1081 															m_memoryHandleType,
1082 															m_semaphoreType,
1083 															m_semaphoreHandleType,
1084 															m_pipelineCacheData);
1085 	}
1086 
1087 	void initPrograms (SourceCollections& programCollection) const
1088 	{
1089 		m_writeOpSupport->initPrograms(programCollection);
1090 		m_readOpSupport->initPrograms(programCollection);
1091 	}
1092 
1093 private:
1094 	SynchronizationType						m_type;
1095 	SharedPtr<OperationSupport>				m_writeOpSupport;
1096 	SharedPtr<OperationSupport>				m_readOpSupport;
1097 	const ResourceDescription&				m_resourceDesc;
1098 	VkExternalMemoryHandleTypeFlagBits		m_memoryHandleType;
1099 	VkSemaphoreType							m_semaphoreType;
1100 	VkExternalSemaphoreHandleTypeFlagBits	m_semaphoreHandleType;
1101 	PipelineCacheData&						m_pipelineCacheData;
1102 };
1103 
1104 class QueueSubmitSignalOrderSharedTests : public tcu::TestCaseGroup
1105 {
1106 public:
1107 	QueueSubmitSignalOrderSharedTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
1108 		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
1109 		, m_type				(type)
1110 		, m_semaphoreType		(semaphoreType)
1111 	{
1112 	}
1113 
1114 	void init (void)
1115 	{
1116 		static const OperationName	writeOps[]	=
1117 		{
1118 			OPERATION_NAME_WRITE_COPY_BUFFER,
1119 			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1120 			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1121 			OPERATION_NAME_WRITE_COPY_IMAGE,
1122 			OPERATION_NAME_WRITE_BLIT_IMAGE,
1123 			OPERATION_NAME_WRITE_SSBO_VERTEX,
1124 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1125 			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1126 			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1127 			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1128 			OPERATION_NAME_WRITE_SSBO_COMPUTE,
1129 			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1130 			OPERATION_NAME_WRITE_IMAGE_VERTEX,
1131 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1132 			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1133 			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1134 			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1135 			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1136 			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1137 		};
1138 		static const OperationName	readOps[]	=
1139 		{
1140 			OPERATION_NAME_READ_COPY_BUFFER,
1141 			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1142 			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1143 			OPERATION_NAME_READ_COPY_IMAGE,
1144 			OPERATION_NAME_READ_BLIT_IMAGE,
1145 			OPERATION_NAME_READ_UBO_VERTEX,
1146 			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1147 			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1148 			OPERATION_NAME_READ_UBO_GEOMETRY,
1149 			OPERATION_NAME_READ_UBO_FRAGMENT,
1150 			OPERATION_NAME_READ_UBO_COMPUTE,
1151 			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1152 			OPERATION_NAME_READ_SSBO_VERTEX,
1153 			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1154 			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1155 			OPERATION_NAME_READ_SSBO_GEOMETRY,
1156 			OPERATION_NAME_READ_SSBO_FRAGMENT,
1157 			OPERATION_NAME_READ_SSBO_COMPUTE,
1158 			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1159 			OPERATION_NAME_READ_IMAGE_VERTEX,
1160 			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1161 			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1162 			OPERATION_NAME_READ_IMAGE_GEOMETRY,
1163 			OPERATION_NAME_READ_IMAGE_FRAGMENT,
1164 			OPERATION_NAME_READ_IMAGE_COMPUTE,
1165 			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1166 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1167 			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1168 			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1169 			OPERATION_NAME_READ_VERTEX_INPUT,
1170 		};
1171 		static const struct
1172 		{
1173 			VkExternalMemoryHandleTypeFlagBits		memoryType;
1174 			VkExternalSemaphoreHandleTypeFlagBits	semaphoreType;
1175 		}	exportCases[] =
1176 		{
1177 			// Only semaphore handle types with reference semantics
1178 			// are valid for this test.
1179 			{
1180 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
1181 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
1182 			},
1183 			{
1184 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1185 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1186 			},
1187 			{
1188 				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1189 				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1190 			},
1191 		};
1192 
1193 		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
1194 		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
1195 		{
1196 			const OperationName	writeOp		= writeOps[writeOpIdx];
1197 			const OperationName	readOp		= readOps[readOpIdx];
1198 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1199 			bool				empty		= true;
1200 
1201 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));
1202 
1203 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1204 			{
1205 				const ResourceDescription&	resource	= s_resources[resourceNdx];
1206 
1207 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1208 				{
1209 					for (deUint32 exportIdx = 0; exportIdx < DE_LENGTH_OF_ARRAY(exportCases); exportIdx++)
1210 					{
1211 						std::string					caseName	= getResourceName(resource) + "_" +
1212 							externalSemaphoreTypeToName(exportCases[exportIdx].semaphoreType);
1213 
1214 						opGroup->addChild(new QueueSubmitSignalOrderSharedTestCase(m_testCtx,
1215 																				   m_type,
1216 																				   caseName,
1217 																				   writeOp,
1218 																				   readOp,
1219 																				   resource,
1220 																				   exportCases[exportIdx].memoryType,
1221 																				   m_semaphoreType,
1222 																				   exportCases[exportIdx].semaphoreType,
1223 																				   m_pipelineCacheData));
1224 						empty = false;
1225 					}
1226 				}
1227 			}
1228 			if (!empty)
1229 				addChild(opGroup.release());
1230 		}
1231 	}
1232 
1233 	void deinit (void)
1234 	{
1235 		cleanupGroup();
1236 	}
1237 
1238 private:
1239 	SynchronizationType	m_type;
1240 	VkSemaphoreType		m_semaphoreType;
1241 	// synchronization.op tests share pipeline cache data to speed up test
1242 	// execution.
1243 	PipelineCacheData	m_pipelineCacheData;
1244 };
1245 
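// Per-iteration state for the single-device variant: one resource shared by
// the write and read operations.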
1246 struct QueueSubmitOrderIteration
1247 {
1248 	QueueSubmitOrderIteration() {}
1249 	~QueueSubmitOrderIteration() {}
1250 
1251 	SharedPtr<Resource>			resource;
1252 
1253 	SharedPtr<Operation>		writeOp;
1254 	SharedPtr<Operation>		readOp;
1255 };
1256 
1257 // Verifies the signaling order of the semaphores in multiple
1258 // VkSubmitInfo structures given to vkQueueSubmit(), with queueA & queueB
1259 // coming from the same VkDevice.
1260 //
1261 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
1262 // vkQueueSubmit(queueB, [read0-6])
1263 //
1264 // With read0-6 waiting on write6, all the data should be available
1265 // for reading given that signal operations are supposed to happen in
1266 // order.
1267 class QueueSubmitSignalOrderTestInstance : public TestInstance
1268 {
1269 public:
1270 	QueueSubmitSignalOrderTestInstance (Context&									context,
1271 										SynchronizationType							type,
1272 										const SharedPtr<OperationSupport>			writeOpSupport,
1273 										const SharedPtr<OperationSupport>			readOpSupport,
1274 										const ResourceDescription&					resourceDesc,
1275 										VkSemaphoreType								semaphoreType,
1276 										PipelineCacheData&							pipelineCacheData)
1277 		: TestInstance			(context)
1278 		, m_type				(type)
1279 		, m_writeOpSupport		(writeOpSupport)
1280 		, m_readOpSupport		(readOpSupport)
1281 		, m_resourceDesc		(resourceDesc)
1282 		, m_semaphoreType		(semaphoreType)
1283 		, m_device				(SingletonDevice::getDevice(context))
1284 		, m_deviceInterface		(context.getPlatformInterface(), context.getInstance(), *m_device)
1285 		, m_allocator			(new SimpleAllocator(m_deviceInterface,
1286 													 *m_device,
1287 													 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1288 																					   context.getPhysicalDevice())))
1289 		, m_operationContext	(new OperationContext(context, type, m_deviceInterface, *m_device, *m_allocator, pipelineCacheData))
1290 		, m_queueA				(DE_NULL)
1291 		, m_queueB				(DE_NULL)
1292 		, m_rng					(1234)
1293 
1294 	{
1295 		const std::vector<VkQueueFamilyProperties> queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(),
1296 																													 context.getPhysicalDevice());
1297 
1298 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1299 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1300 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1301 
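		// Pick two distinct queues from the same device: queueA for the write
		// operation and queueB for the read operation. Transfer-only work is
		// also accepted on graphics- or compute-capable families.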
1302 		VkQueueFlags writeOpQueueFlags = m_writeOpSupport->getQueueFlags(*m_operationContext);
1303 		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1304 			if (((queueFamilyProperties[familyIdx].queueFlags & writeOpQueueFlags) == writeOpQueueFlags) ||
1305 			((writeOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
1306 			(((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
1307 			((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT)))) {
1308 				m_queueA = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, 0);
1309 				m_queueFamilyIndexA = familyIdx;
1310 				break;
1311 			}
1312 		}
1313 		if (m_queueA == DE_NULL)
1314 			TCU_THROW(NotSupportedError, "No queue supporting write operation");
1315 
		VkQueueFlags readOpQueueFlags = m_readOpSupport->getQueueFlags(*m_operationContext);
		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++)
		{
			if (((queueFamilyProperties[familyIdx].queueFlags & readOpQueueFlags) == readOpQueueFlags) ||
				((readOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
				 (((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
				  ((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT))))
			{
				for (deUint32 queueIdx = 0; queueIdx < queueFamilyProperties[familyIdx].queueCount; queueIdx++)
				{
					VkQueue queue = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, queueIdx);

					if (queue == m_queueA)
						continue;

					m_queueB = queue;
					m_queueFamilyIndexB = familyIdx;
					break;
				}

				if (m_queueB != DE_NULL)
					break;
			}
		}
		if (m_queueB == DE_NULL)
			TCU_THROW(NotSupportedError, "No queue supporting read operation");
	}

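	// Records one write operation per iteration on queue A, each signaling its own
	// binary semaphore (or an increasing value on a single timeline semaphore), then
	// submits all read operations on queue B waiting only on the last signal. The
	// ordering of signal operations should guarantee that every earlier write has
	// completed by the time the reads execute.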
	tcu::TestStatus iterate (void)
	{
		const bool											isTimelineSemaphore			= (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
		const VkDevice&										device						= *m_device;
		const DeviceInterface&								vk							= m_deviceInterface;
		Unique<VkFence>										fence						(createFence(vk, device));
		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexA));
		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexB));
		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
		std::vector<VkCommandBuffer>						cmdBuffersA;
		VkCommandBuffer										cmdBufferB;
		std::vector<Move<VkSemaphore> >						semaphoresA;
		std::vector<Move<VkSemaphore> >						semaphoresB;
		std::vector<VkSemaphore>							semaphoreHandlesA;
		std::vector<VkSemaphore>							semaphoreHandlesB;
		std::vector<deUint64>								timelineValuesA;
		std::vector<deUint64>								timelineValuesB;
		std::vector<QueueSubmitOrderIteration>				iterations;
		std::vector<VkPipelineStageFlags2KHR>				stageBits;
		std::vector<deUint32>								queueFamilies;
		SynchronizationWrapperPtr							syncWrapper					= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);

		queueFamilies.push_back(m_queueFamilyIndexA);
		queueFamilies.push_back(m_queueFamilyIndexB);

		// Create a dozen sets of write/read operations.
		iterations.resize(12);
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&		iter				= iterations[iterIdx];

			iter.resource	= makeSharedPtr(new Resource(*m_operationContext,
														 m_resourceDesc,
														 m_writeOpSupport->getOutResourceUsageFlags() |
														 m_readOpSupport->getInResourceUsageFlags(),
														 VK_SHARING_MODE_EXCLUSIVE,
														 queueFamilies));

			iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*m_operationContext,
																 *iter.resource));
			iter.readOp = makeSharedPtr(m_readOpSupport->build(*m_operationContext,
															   *iter.resource));
		}

		// Record each write operation into its own command buffer.
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&	iter	= iterations[iterIdx];

			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolA)));
			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));

			beginCommandBuffer(vk, cmdBuffersA.back());
			iter.writeOp->recordCommands(cmdBuffersA.back());

			{
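				// Record a barrier that makes the write results available to the read
				// operation and, for image resources, transitions the image to the
				// layout the read expects.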
				SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE);
				const SyncInfo				writeSync				= iter.writeOp->getOutSyncInfo();
				const SyncInfo				readSync				= iter.readOp->getInSyncInfo();
				const Resource&				resource				= *iter.resource;

				if (resource.getType() == RESOURCE_TYPE_IMAGE)
				{
					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);

					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						writeSync.imageLayout,								// VkImageLayout					oldLayout
						readSync.imageLayout,								// VkImageLayout					newLayout
						resource.getImage().handle,							// VkImage							image
						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
				}
				else
				{
					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						resource.getBuffer().handle,						// VkBuffer							buffer
						0,													// VkDeviceSize						offset
						VK_WHOLE_SIZE										// VkDeviceSize						size
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
				}

				stageBits.push_back(writeSync.stageMask);
			}

			endCommandBuffer(vk, cmdBuffersA.back());

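			// Associate a signal semaphore with this write. Binary mode creates a new
			// semaphore per submission; timeline mode reuses a single semaphore and
			// picks a strictly increasing value, starting above 1 so that value 1
			// remains free for the host signal that unblocks the chain later.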
			addSemaphore(vk, device, semaphoresA, semaphoreHandlesA, timelineValuesA, 2u);
		}

		DE_ASSERT(stageBits.size() == iterations.size());
		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());

		// Record all read operations into a single command buffer and track the union of their execution stages.
		VkPipelineStageFlags2KHR readStages = 0;
		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolB));
		cmdBufferB = **(ptrCmdBufferB);
		beginCommandBuffer(vk, cmdBufferB);
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration& iter = iterations[iterIdx];
			readStages |= iter.readOp->getInSyncInfo().stageMask;
			iter.readOp->recordCommands(cmdBufferB);
		}
		endCommandBuffer(vk, cmdBufferB);

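		// Semaphore signaled by the read submission; its timeline value continues past
		// the last value signaled on queue A.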
		addSemaphore(vk, device, semaphoresB, semaphoreHandlesB, timelineValuesB, timelineValuesA.back());

		// Submit the writes, each in its own VkSubmitInfo. With binary semaphores the
		// submissions don't wait on anything; with timeline semaphores they all wait
		// on a host signal operation performed below.
		{
			VkSemaphoreSubmitInfoKHR					waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.front(), 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfo	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
			std::vector<VkCommandBufferSubmitInfoKHR>	commandBufferSubmitInfos	(iterations.size(), makeCommonCommandBufferSubmitInfo(0));
			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore, (deUint32)iterations.size());

			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
			{
				commandBufferSubmitInfos[iterIdx].commandBuffer		= cmdBuffersA[iterIdx];
				signalSemaphoreSubmitInfo[iterIdx].semaphore		= semaphoreHandlesA[iterIdx];
				signalSemaphoreSubmitInfo[iterIdx].value			= timelineValuesA[iterIdx];

				synchronizationWrapper->addSubmitInfo(
					isTimelineSemaphore,
					isTimelineSemaphore ? &waitSemaphoreSubmitInfo : DE_NULL,
					1u,
					&commandBufferSubmitInfos[iterIdx],
					1u,
					&signalSemaphoreSubmitInfo[iterIdx],
					isTimelineSemaphore,
					isTimelineSemaphore
				);
			}

			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueA, DE_NULL));
		}

		// Submit the reads, waiting only on the last write's signal; the ordering of
		// signal operations should guarantee that all writes have completed by the
		// time the read operations kick in.
		{
			VkCommandBufferSubmitInfoKHR	commandBufferSubmitInfos	= makeCommonCommandBufferSubmitInfo(cmdBufferB);
			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.back(), timelineValuesA.back(), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);

			synchronizationWrapper->addSubmitInfo(
				1u,										// deUint32								waitSemaphoreInfoCount
				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
				1u,										// deUint32								commandBufferInfoCount
				&commandBufferSubmitInfos,				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
				1u,										// deUint32								signalSemaphoreInfoCount
				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
				isTimelineSemaphore,
				isTimelineSemaphore
			);

			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueB, *fence));

			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
			{
				const VkSemaphoreWaitInfo		waitInfo	=
				{
					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
					DE_NULL,								// const void*				pNext;
					0u,										// VkSemaphoreWaitFlagsKHR	flags;
					1u,										// deUint32					semaphoreCount;
					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
					&timelineValuesB.back(),				// const deUint64*			pValues;
				};

				// Unblock the whole lot.
				hostSignal(vk, device, semaphoreHandlesA.front(), 1);

				VK_CHECK(vk.waitSemaphores(device, &waitInfo, ~0ull));
			}
			else
			{
				VK_CHECK(vk.waitForFences(device, 1, &fence.get(), VK_TRUE, ~0ull));
			}
		}

		// Verify the result of the operations.
		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
		{
			QueueSubmitOrderIteration&		iter		= iterations[iterIdx];
			const Data						expected	= iter.writeOp->getData();
			const Data						actual		= iter.readOp->getData();

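			// Indirect buffers are only checked on their first 32-bit counter, which
			// must have reached at least the expected value; all other resources must
			// match the written data exactly.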
			if (isIndirectBuffer(iter.resource->getType()))
			{
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		VK_CHECK(vk.deviceWaitIdle(device));

		return tcu::TestStatus::pass("Success");
	}

private:
	void addSemaphore (const DeviceInterface&			vk,
					   VkDevice							device,
					   std::vector<Move<VkSemaphore> >&	semaphores,
					   std::vector<VkSemaphore>&		semaphoreHandles,
					   std::vector<deUint64>&			timelineValues,
					   deUint64							firstTimelineValue)
	{
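		// Appends the semaphore handle to signal next and, for timeline semaphores, a
		// strictly increasing value to signal it with.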
		Move<VkSemaphore>	semaphore;

		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
		{
			// Only allocate a single timeline semaphore; it is reused for every submission.
			if (semaphores.empty())
			{
				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
			}
		}
		else
		{
			semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
		}

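		// Each new timeline value is a random increment over the previous one (or over
		// firstTimelineValue for the first entry), keeping the sequence strictly increasing.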
		semaphoreHandles.push_back(*semaphores.back());
		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
	}

	SynchronizationType							m_type;
	SharedPtr<OperationSupport>					m_writeOpSupport;
	SharedPtr<OperationSupport>					m_readOpSupport;
	const ResourceDescription&					m_resourceDesc;
	VkSemaphoreType								m_semaphoreType;
	const Unique<VkDevice>&						m_device;
	const DeviceDriver							m_deviceInterface;
	UniquePtr<SimpleAllocator>					m_allocator;
	UniquePtr<OperationContext>					m_operationContext;
	VkQueue										m_queueA;
	VkQueue										m_queueB;
	deUint32									m_queueFamilyIndexA;
	deUint32									m_queueFamilyIndexB;
	de::Random									m_rng;
};

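// Test case wrapper: checks the required features and extensions, builds the
// operation programs, and instantiates the test instance above.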
class QueueSubmitSignalOrderTestCase : public TestCase
{
public:
	QueueSubmitSignalOrderTestCase (tcu::TestContext&			testCtx,
									SynchronizationType			type,
									const std::string&			name,
									OperationName				writeOp,
									OperationName				readOp,
									const ResourceDescription&	resourceDesc,
									VkSemaphoreType				semaphoreType,
									PipelineCacheData&			pipelineCacheData)
		: TestCase				(testCtx, name.c_str(), "")
		, m_type				(type)
		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
		, m_resourceDesc		(resourceDesc)
		, m_semaphoreType		(semaphoreType)
		, m_pipelineCacheData	(pipelineCacheData)
	{
	}

	virtual void checkSupport(Context& context) const
	{
		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
		if (m_type == SynchronizationType::SYNCHRONIZATION2)
			context.requireDeviceFunctionality("VK_KHR_synchronization2");
	}

	TestInstance* createInstance (Context& context) const
	{
		return new QueueSubmitSignalOrderTestInstance(context,
													  m_type,
													  m_writeOpSupport,
													  m_readOpSupport,
													  m_resourceDesc,
													  m_semaphoreType,
													  m_pipelineCacheData);
	}

	void initPrograms (SourceCollections& programCollection) const
	{
		m_writeOpSupport->initPrograms(programCollection);
		m_readOpSupport->initPrograms(programCollection);
	}

private:
	SynchronizationType						m_type;
	SharedPtr<OperationSupport>				m_writeOpSupport;
	SharedPtr<OperationSupport>				m_readOpSupport;
	const ResourceDescription&				m_resourceDesc;
	VkSemaphoreType							m_semaphoreType;
	PipelineCacheData&						m_pipelineCacheData;
};

class QueueSubmitSignalOrderTests : public tcu::TestCaseGroup
{
public:
	QueueSubmitSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
		, m_type				(type)
		, m_semaphoreType		(semaphoreType)
	{
	}

	void init (void)
	{
		static const OperationName	writeOps[]	=
		{
			OPERATION_NAME_WRITE_COPY_BUFFER,
			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_WRITE_COPY_IMAGE,
			OPERATION_NAME_WRITE_BLIT_IMAGE,
			OPERATION_NAME_WRITE_SSBO_VERTEX,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
			OPERATION_NAME_WRITE_SSBO_COMPUTE,
			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_WRITE_IMAGE_VERTEX,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
		};
		static const OperationName	readOps[]	=
		{
			OPERATION_NAME_READ_COPY_BUFFER,
			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_READ_COPY_IMAGE,
			OPERATION_NAME_READ_BLIT_IMAGE,
			OPERATION_NAME_READ_UBO_VERTEX,
			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_UBO_GEOMETRY,
			OPERATION_NAME_READ_UBO_FRAGMENT,
			OPERATION_NAME_READ_UBO_COMPUTE,
			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_SSBO_VERTEX,
			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_SSBO_GEOMETRY,
			OPERATION_NAME_READ_SSBO_FRAGMENT,
			OPERATION_NAME_READ_SSBO_COMPUTE,
			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_IMAGE_VERTEX,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_IMAGE_GEOMETRY,
			OPERATION_NAME_READ_IMAGE_FRAGMENT,
			OPERATION_NAME_READ_IMAGE_COMPUTE,
			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
			OPERATION_NAME_READ_VERTEX_INPUT,
		};

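		// Build one group per (write, read) operation pair and add one test per
		// resource description that both operations support.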
		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
		{
			const OperationName	writeOp		= writeOps[writeOpIdx];
			const OperationName	readOp		= readOps[readOpIdx];
			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));

			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];

				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					opGroup->addChild(new QueueSubmitSignalOrderTestCase(m_testCtx,
																		 m_type,
																		 getResourceName(resource),
																		 writeOp,
																		 readOp,
																		 resource,
																		 m_semaphoreType,
																		 m_pipelineCacheData));
					empty = false;
				}
			}
			if (!empty)
				addChild(opGroup.release());
		}
	}

	void deinit (void)
	{
		cleanupGroup();
	}

private:
	SynchronizationType	m_type;
	VkSemaphoreType		m_semaphoreType;
	// synchronization.op tests share pipeline cache data to speed up test
	// execution.
	PipelineCacheData	m_pipelineCacheData;
};

} // anonymous

tcu::TestCaseGroup* createSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type)
{
	de::MovePtr<tcu::TestCaseGroup> orderingTests(new tcu::TestCaseGroup(testCtx, "signal_order", "Signal ordering tests"));

	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "binary_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "timeline_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "shared_binary_semaphore"));
	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "shared_timeline_semaphore"));

	return orderingTests.release();
}

} // synchronization
} // vkt