• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 
2 /*------------------------------------------------------------------------
3  * Vulkan Conformance Tests
4  * ------------------------
5  *
6  * Copyright (c) 2019 The Khronos Group Inc.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Signal ordering tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktSynchronizationSignalOrderTests.hpp"
26 #include "vktSynchronizationOperation.hpp"
27 #include "vktSynchronizationOperationTestData.hpp"
28 #include "vktSynchronizationOperationResources.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktSynchronizationUtil.hpp"
31 #include "vktExternalMemoryUtil.hpp"
32 #include "vktCustomInstancesDevices.hpp"
33 #include "vkBarrierUtil.hpp"
34 
35 #include "vkDefs.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkImageUtil.hpp"
40 #include "vkRef.hpp"
41 #include "vkTypeUtil.hpp"
42 
43 #include "tcuTestLog.hpp"
44 #include "tcuCommandLine.hpp"
45 
46 #include "deRandom.hpp"
47 #include "deThread.hpp"
48 #include "deUniquePtr.hpp"
49 
50 #include <limits>
51 #include <set>
52 
53 namespace vkt
54 {
55 namespace synchronization
56 {
57 namespace
58 {
59 
60 using namespace vk;
61 using namespace vkt::ExternalMemoryUtil;
62 using de::MovePtr;
63 using de::SharedPtr;
64 using de::UniquePtr;
65 
66 template<typename T>
makeVkSharedPtr(Move<T> move)67 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
68 {
69 	return SharedPtr<Move<T> >(new Move<T>(move));
70 }
71 
72 template<typename T>
makeSharedPtr(de::MovePtr<T> move)73 inline SharedPtr<T> makeSharedPtr (de::MovePtr<T> move)
74 {
75 	return SharedPtr<T>(move.release());
76 }
77 
78 template<typename T>
makeSharedPtr(T * ptr)79 inline SharedPtr<T> makeSharedPtr (T* ptr)
80 {
81 	return SharedPtr<T>(ptr);
82 }
83 
hostSignal(const DeviceInterface & vk,const VkDevice & device,VkSemaphore semaphore,const deUint64 timelineValue)84 void hostSignal (const DeviceInterface& vk, const VkDevice& device, VkSemaphore semaphore, const deUint64 timelineValue)
85 {
86 	VkSemaphoreSignalInfoKHR	ssi	=
87 	{
88 		VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,	// VkStructureType				sType;
89 		DE_NULL,									// const void*					pNext;
90 		semaphore,									// VkSemaphore					semaphore;
91 		timelineValue,								// deUint64						value;
92 	};
93 
94 	VK_CHECK(vk.signalSemaphore(device, &ssi));
95 }
96 
createTestDevice(const Context & context)97 Move<VkDevice> createTestDevice (const Context& context)
98 {
99 	const float									priority				= 0.0f;
100 	const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(), context.getPhysicalDevice());
101 	std::vector<deUint32>						queueFamilyIndices		(queueFamilyProperties.size(), 0xFFFFFFFFu);
102 	std::vector<const char*>					extensions;
103 
104 	VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
105 	VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
106 	VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
107 	void**										nextPtr						= &createPhysicalFeature.pNext;
108 
109 	if (context.isDeviceFunctionalitySupported("VK_KHR_timeline_semaphore"))
110 	{
111 		extensions.push_back("VK_KHR_timeline_semaphore");
112 		addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
113 	}
114 
115 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_semaphore"))
116 		extensions.push_back("VK_KHR_external_semaphore");
117 	if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_memory"))
118 		extensions.push_back("VK_KHR_external_memory");
119 
120 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
121 		extensions.push_back("VK_KHR_external_semaphore_fd");
122 
123 	if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
124 		extensions.push_back("VK_KHR_external_semaphore_win32");
125 
126 	if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
127 	{
128 		extensions.push_back("VK_KHR_synchronization2");
129 		addToChainVulkanStructure(&nextPtr, synchronization2Features);
130 	}
131 
132 	try
133 	{
134 		deUint32 maxQueueCount = 1;
135 		for (const VkQueueFamilyProperties& qfp : queueFamilyProperties)
136 			maxQueueCount = deMaxu32(qfp.queueCount, maxQueueCount);
137 
138 		std::vector<float>						queuePriorities(maxQueueCount, priority);
139 		std::vector<VkDeviceQueueCreateInfo>	queues;
140 
141 		for (size_t ndx = 0; ndx < queueFamilyProperties.size(); ndx++)
142 		{
143 			const VkDeviceQueueCreateInfo	createInfo	=
144 			{
145 				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
146 				DE_NULL,
147 				0u,
148 
149 				(deUint32)ndx,
150 				queueFamilyProperties[ndx].queueCount,
151 				queuePriorities.data()
152 			};
153 
154 			queues.push_back(createInfo);
155 		}
156 
157 		const VkDeviceCreateInfo				createInfo				=
158 		{
159 			VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
160 			&createPhysicalFeature,
161 			0u,
162 
163 			(deUint32)queues.size(),
164 			&queues[0],
165 
166 			0u,
167 			DE_NULL,
168 
169 			(deUint32)extensions.size(),
170 			extensions.empty() ? DE_NULL : &extensions[0],
171 			0u
172 		};
173 
174 		const auto validation = context.getTestContext().getCommandLine().isValidationEnabled();
175 		return createCustomDevice(validation, context.getPlatformInterface(), context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &createInfo);
176 	}
177 	catch (const vk::Error& error)
178 	{
179 		if (error.getError() == VK_ERROR_EXTENSION_NOT_PRESENT)
180 			TCU_THROW(NotSupportedError, "Required extensions not supported");
181 		else
182 			throw;
183 	}
184 }
185 
186 // Class to wrap a singleton instance and device
187 class SingletonDevice
188 {
SingletonDevice(const Context & context)189 	SingletonDevice	(const Context& context)
190 		: m_logicalDevice	(createTestDevice(context))
191 	{
192 	}
193 
194 public:
195 
getDevice(const Context & context)196 	static const Unique<vk::VkDevice>& getDevice(const Context& context)
197 	{
198 		if (!m_singletonDevice)
199 			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
200 
201 		DE_ASSERT(m_singletonDevice);
202 		return m_singletonDevice->m_logicalDevice;
203 	}
204 
destroy()205 	static void destroy()
206 	{
207 		m_singletonDevice.clear();
208 	}
209 
210 private:
211 	const Unique<vk::VkDevice>					m_logicalDevice;
212 
213 	static SharedPtr<SingletonDevice>	m_singletonDevice;
214 };
215 SharedPtr<SingletonDevice>		SingletonDevice::m_singletonDevice;
216 
cleanupGroup()217 static void cleanupGroup ()
218 {
219 	// Destroy singleton object
220 	SingletonDevice::destroy();
221 }
222 
223 class SimpleAllocation : public Allocation
224 {
225 public:
226 	SimpleAllocation	(const DeviceInterface&	vkd,
227 						 VkDevice				device,
228 						 const VkDeviceMemory	memory);
229 	~SimpleAllocation	(void);
230 
231 private:
232 	const DeviceInterface&	m_vkd;
233 	const VkDevice			m_device;
234 };
235 
SimpleAllocation(const DeviceInterface & vkd,VkDevice device,const VkDeviceMemory memory)236 SimpleAllocation::SimpleAllocation (const DeviceInterface&	vkd,
237 									VkDevice				device,
238 									const VkDeviceMemory	memory)
239 	: Allocation	(memory, 0, DE_NULL)
240 	, m_vkd			(vkd)
241 	, m_device		(device)
242 {
243 }
244 
~SimpleAllocation(void)245 SimpleAllocation::~SimpleAllocation (void)
246 {
247 	m_vkd.freeMemory(m_device, getMemory(), DE_NULL);
248 }
249 
getMemoryRequirements(const DeviceInterface & vkd,VkDevice device,VkBuffer buffer)250 vk::VkMemoryRequirements getMemoryRequirements (const DeviceInterface&				vkd,
251 												 VkDevice							device,
252 												 VkBuffer							buffer)
253 {
254 	const VkBufferMemoryRequirementsInfo2	requirementInfo =
255 	{
256 		VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
257 		DE_NULL,
258 		buffer
259 	};
260 	VkMemoryRequirements2					requirements	=
261 	{
262 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
263 		DE_NULL,
264 		{ 0u, 0u, 0u, }
265 	};
266 	vkd.getBufferMemoryRequirements2(device, &requirementInfo, &requirements);
267 	return requirements.memoryRequirements;
268 }
269 
getMemoryRequirements(const DeviceInterface & vkd,VkDevice device,VkImage image)270 vk::VkMemoryRequirements getMemoryRequirements(const DeviceInterface&				vkd,
271 												VkDevice							device,
272 												VkImage								image)
273 {
274 	const VkImageMemoryRequirementsInfo2	requirementInfo =
275 	{
276 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
277 		DE_NULL,
278 		image
279 	};
280 	VkMemoryRequirements2					requirements =
281 	{
282 		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
283 		DE_NULL,
284 		{ 0u, 0u, 0u, }
285 	};
286 	vkd.getImageMemoryRequirements2(device, &requirementInfo, &requirements);
287 
288 	return requirements.memoryRequirements;
289 }
290 
importAndBindMemory(const DeviceInterface & vkd,VkDevice device,VkBuffer buffer,NativeHandle & nativeHandle,VkExternalMemoryHandleTypeFlagBits externalType,const deUint32 exportedMemoryTypeIndex)291 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
292 										 VkDevice								device,
293 										 VkBuffer								buffer,
294 										 NativeHandle&							nativeHandle,
295 										 VkExternalMemoryHandleTypeFlagBits		externalType,
296 										 const deUint32							exportedMemoryTypeIndex)
297 {
298 	const VkMemoryRequirements	requirements			= getBufferMemoryRequirements(vkd, device, buffer);
299 	Move<VkDeviceMemory>		memory;
300 
301 	if (!!buffer)
302 		memory = importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
303 	else
304 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
305 
306 	VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
307 
308 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
309 }
310 
importAndBindMemory(const DeviceInterface & vkd,VkDevice device,VkImage image,NativeHandle & nativeHandle,VkExternalMemoryHandleTypeFlagBits externalType,deUint32 exportedMemoryTypeIndex)311 MovePtr<Allocation> importAndBindMemory (const DeviceInterface&					vkd,
312 										 VkDevice								device,
313 										 VkImage								image,
314 										 NativeHandle&							nativeHandle,
315 										 VkExternalMemoryHandleTypeFlagBits		externalType,
316 										 deUint32								exportedMemoryTypeIndex)
317 {
318 	const VkMemoryRequirements	requirements	= getImageMemoryRequirements(vkd, device, image);
319 	Move<VkDeviceMemory>		memory;
320 
321 	if (!!image)
322 		memory = importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
323 	else
324 		memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
325 
326 	VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
327 
328 	return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
329 }
330 
331 struct QueueTimelineIteration
332 {
QueueTimelineIterationvkt::synchronization::__anon570917b90111::QueueTimelineIteration333 	QueueTimelineIteration(const SharedPtr<OperationSupport>&	_opSupport,
334 						   deUint64								lastValue,
335 						   VkQueue								_queue,
336 						   deUint32								_queueFamilyIdx,
337 						   de::Random&							rng)
338 		: opSupport(_opSupport)
339 		, queue(_queue)
340 		, queueFamilyIdx(_queueFamilyIdx)
341 	{
342 		timelineValue	= lastValue + rng.getInt(1, 100);
343 	}
~QueueTimelineIterationvkt::synchronization::__anon570917b90111::QueueTimelineIteration344 	~QueueTimelineIteration() {}
345 
346 	SharedPtr<OperationSupport>	opSupport;
347 	VkQueue						queue;
348 	deUint32					queueFamilyIdx;
349 	deUint64					timelineValue;
350 	SharedPtr<Operation>		op;
351 };
352 
importResource(const DeviceInterface & vkd,VkDevice device,const ResourceDescription & resourceDesc,const deUint32 queueFamilyIndex,const OperationSupport & readOp,const OperationSupport & writeOp,NativeHandle & nativeHandle,VkExternalMemoryHandleTypeFlagBits externalType,deUint32 exportedMemoryTypeIndex)353 de::MovePtr<Resource> importResource (const DeviceInterface&				vkd,
354 									  VkDevice								device,
355 									  const ResourceDescription&			resourceDesc,
356 									  const deUint32						queueFamilyIndex,
357 									  const OperationSupport&				readOp,
358 									  const OperationSupport&				writeOp,
359 									  NativeHandle&							nativeHandle,
360 									  VkExternalMemoryHandleTypeFlagBits	externalType,
361 									  deUint32								exportedMemoryTypeIndex)
362 {
363 	if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
364 	{
365 		const VkExtent3D					extent					=
366 		{
367 			(deUint32)resourceDesc.size.x(),
368 			de::max(1u, (deUint32)resourceDesc.size.y()),
369 			de::max(1u, (deUint32)resourceDesc.size.z())
370 		};
371 		const VkImageSubresourceRange	subresourceRange		=
372 		{
373 			resourceDesc.imageAspect,
374 			0u,
375 			1u,
376 			0u,
377 			1u
378 		};
379 		const VkImageSubresourceLayers	subresourceLayers		=
380 		{
381 			resourceDesc.imageAspect,
382 			0u,
383 			0u,
384 			1u
385 		};
386 		const VkExternalMemoryImageCreateInfo externalInfo =
387 		{
388 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
389 			DE_NULL,
390 			(VkExternalMemoryHandleTypeFlags)externalType
391 		};
392 		const VkImageTiling				tiling					= VK_IMAGE_TILING_OPTIMAL;
393 		const VkImageCreateInfo			createInfo				=
394 		{
395 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
396 			&externalInfo,
397 			0u,
398 
399 			resourceDesc.imageType,
400 			resourceDesc.imageFormat,
401 			extent,
402 			1u,
403 			1u,
404 			resourceDesc.imageSamples,
405 			tiling,
406 			readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
407 			VK_SHARING_MODE_EXCLUSIVE,
408 
409 			1u,
410 			&queueFamilyIndex,
411 			VK_IMAGE_LAYOUT_UNDEFINED
412 		};
413 
414 		Move<VkImage>			image		= createImage(vkd, device, &createInfo);
415 		MovePtr<Allocation>		allocation	= importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex);
416 
417 		return MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
418 	}
419 	else
420 	{
421 		const VkDeviceSize						offset			= 0u;
422 		const VkDeviceSize						size			= static_cast<VkDeviceSize>(resourceDesc.size.x());
423 		const VkBufferUsageFlags				usage			= readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags();
424 		const VkExternalMemoryBufferCreateInfo	externalInfo	=
425 		{
426 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
427 			DE_NULL,
428 			(VkExternalMemoryHandleTypeFlags)externalType
429 		};
430 		const VkBufferCreateInfo				createInfo		=
431 		{
432 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
433 			&externalInfo,
434 			0u,
435 
436 			size,
437 			usage,
438 			VK_SHARING_MODE_EXCLUSIVE,
439 			1u,
440 			&queueFamilyIndex
441 		};
442 		Move<VkBuffer>							buffer		= createBuffer(vkd, device, &createInfo);
443 		MovePtr<Allocation>						allocation	= importAndBindMemory(vkd,
444 																				  device,
445 																				  *buffer,
446 																				  nativeHandle,
447 																				  externalType,
448 																				  exportedMemoryTypeIndex);
449 
450 		return MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
451 	}
452 }
453 
454 struct QueueSubmitOrderSharedIteration
455 {
QueueSubmitOrderSharedIterationvkt::synchronization::__anon570917b90111::QueueSubmitOrderSharedIteration456 	QueueSubmitOrderSharedIteration() {}
~QueueSubmitOrderSharedIterationvkt::synchronization::__anon570917b90111::QueueSubmitOrderSharedIteration457 	~QueueSubmitOrderSharedIteration() {}
458 
459 	SharedPtr<Resource>			resourceA;
460 	SharedPtr<Resource>			resourceB;
461 
462 	SharedPtr<Operation>		writeOp;
463 	SharedPtr<Operation>		readOp;
464 };
465 
466 // Verifies the signaling order of the semaphores in multiple
467 // VkSubmitInfo given to vkQueueSubmit() with queueA & queueB from a
468 // different VkDevice.
469 //
470 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
471 // vkQueueSubmit(queueB, [read0-6])
472 //
473 // With read0-6 waiting on write6, all the data should be available
474 // for reading given that signal operations are supposed to happen in
475 // order.
476 class QueueSubmitSignalOrderSharedTestInstance : public TestInstance
477 {
478 public:
QueueSubmitSignalOrderSharedTestInstance(Context & context,SynchronizationType type,const SharedPtr<OperationSupport> writeOpSupport,const SharedPtr<OperationSupport> readOpSupport,const ResourceDescription & resourceDesc,VkExternalMemoryHandleTypeFlagBits memoryHandleType,VkSemaphoreType semaphoreType,VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType,PipelineCacheData & pipelineCacheData)479 	QueueSubmitSignalOrderSharedTestInstance (Context&									context,
480 											  SynchronizationType						type,
481 											  const SharedPtr<OperationSupport>			writeOpSupport,
482 											  const SharedPtr<OperationSupport>			readOpSupport,
483 											  const ResourceDescription&				resourceDesc,
484 											  VkExternalMemoryHandleTypeFlagBits		memoryHandleType,
485 											  VkSemaphoreType							semaphoreType,
486 											  VkExternalSemaphoreHandleTypeFlagBits		semaphoreHandleType,
487 											  PipelineCacheData&						pipelineCacheData)
488 		: TestInstance			(context)
489 		, m_type				(type)
490 		, m_writeOpSupport		(writeOpSupport)
491 		, m_readOpSupport		(readOpSupport)
492 		, m_resourceDesc		(resourceDesc)
493 		, m_memoryHandleType	(memoryHandleType)
494 		, m_semaphoreType		(semaphoreType)
495 		, m_semaphoreHandleType	(semaphoreHandleType)
496 		, m_pipelineCacheData	(pipelineCacheData)
497 		, m_rng					(1234)
498 
499 	{
500 		const InstanceInterface&					vki					= context.getInstanceInterface();
501 		const VkSemaphoreTypeCreateInfoKHR			semaphoreTypeInfo	=
502 		{
503 			VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
504 			DE_NULL,
505 			semaphoreType,
506 			0,
507 		};
508 		const VkPhysicalDeviceExternalSemaphoreInfo	info				=
509 		{
510 			VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
511 			&semaphoreTypeInfo,
512 			semaphoreHandleType
513 		};
514 		VkExternalSemaphoreProperties				properties			=
515 		{
516 			VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
517 			DE_NULL,
518 			0u,
519 			0u,
520 			0u
521 		};
522 
523 		vki.getPhysicalDeviceExternalSemaphoreProperties(context.getPhysicalDevice(), &info, &properties);
524 
525 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
526 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
527 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
528 
529 		if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
530 			|| (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
531 			TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
532 
533 		if (!isResourceExportable())
534 			TCU_THROW(NotSupportedError, "Resource not exportable");
535 
536 	}
537 
createImage(const vk::DeviceInterface & vkd,vk::VkDevice device,const vk::VkExtent3D & extent,deUint32 queueFamilyIndex,vk::VkImageTiling tiling)538 	Move<VkImage> createImage (const vk::DeviceInterface&	vkd,
539 							   vk::VkDevice					device,
540 							   const vk::VkExtent3D&		extent,
541 							   deUint32						queueFamilyIndex,
542 							   vk::VkImageTiling			tiling)
543 	{
544 		const VkExternalMemoryImageCreateInfo externalInfo =
545 		{
546 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
547 			DE_NULL,
548 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
549 		};
550 		const VkImageCreateInfo createInfo =
551 		{
552 			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
553 			&externalInfo,
554 			0u,
555 
556 			m_resourceDesc.imageType,
557 			m_resourceDesc.imageFormat,
558 			extent,
559 			1u,
560 			1u,
561 			m_resourceDesc.imageSamples,
562 			tiling,
563 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
564 			VK_SHARING_MODE_EXCLUSIVE,
565 
566 			1u,
567 			&queueFamilyIndex,
568 			VK_IMAGE_LAYOUT_UNDEFINED
569 		};
570 
571 		return vk::createImage(vkd, device, &createInfo);
572 	}
573 
createBuffer(const vk::DeviceInterface & vkd,vk::VkDevice device,const vk::VkDeviceSize & size,deUint32 queueFamilyIndex)574 	Move<VkBuffer> createBuffer (const vk::DeviceInterface&		vkd,
575 								 vk::VkDevice					device,
576 								 const vk::VkDeviceSize&		size,
577 								 deUint32						queueFamilyIndex)
578 	{
579 		const VkExternalMemoryBufferCreateInfo	externalInfo =
580 		{
581 			VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
582 			DE_NULL,
583 			(VkExternalMemoryHandleTypeFlags)m_memoryHandleType
584 		};
585 		const VkBufferCreateInfo				createInfo =
586 		{
587 			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
588 			&externalInfo,
589 			0u,
590 
591 			size,
592 			m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
593 			VK_SHARING_MODE_EXCLUSIVE,
594 			1u,
595 			&queueFamilyIndex
596 		};
597 		return vk::createBuffer(vkd, device, &createInfo);
598 	}
599 
iterate(void)600 	tcu::TestStatus iterate (void)
601 	{
602 		// We're using 2 devices to make sure we have 2 queues even on
603 		// implementations that only have a single queue.
604 		const bool											isTimelineSemaphore			(m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
605 		const VkDevice&										deviceA						= m_context.getDevice();
606 		const Unique<VkDevice>&								deviceB						(SingletonDevice::getDevice(m_context));
607 		const DeviceInterface&								vkA							= m_context.getDeviceInterface();
608 		const DeviceDriver									vkB							(m_context.getPlatformInterface(), m_context.getInstance(), *deviceB);
609 		UniquePtr<SimpleAllocator>							allocatorA					(new SimpleAllocator(vkA, deviceA, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
610 																																								 m_context.getPhysicalDevice())));
611 		UniquePtr<SimpleAllocator>							allocatorB					(new SimpleAllocator(vkB, *deviceB, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
612 																																								  m_context.getPhysicalDevice())));
613 		UniquePtr<OperationContext>							operationContextA			(new OperationContext(m_context, m_type, vkA, deviceA, *allocatorA, m_pipelineCacheData));
614 		UniquePtr<OperationContext>							operationContextB			(new OperationContext(m_context, m_type, vkB, *deviceB, *allocatorB, m_pipelineCacheData));
615 		const deUint32										universalQueueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
616 		const VkQueue										queueA						= m_context.getUniversalQueue();
617 		const VkQueue										queueB						= getDeviceQueue(vkB, *deviceB, m_context.getUniversalQueueFamilyIndex(), 0);
618 		Unique<VkFence>										fenceA						(createFence(vkA, deviceA));
619 		Unique<VkFence>										fenceB						(createFence(vkB, *deviceB));
620 		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vkA, deviceA, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
621 		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vkB, *deviceB, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
622 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
623 		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
624 		std::vector<VkCommandBuffer>						cmdBuffersA;
625 		VkCommandBuffer										cmdBufferB;
626 		std::vector<Move<VkSemaphore> >						semaphoresA;
627 		std::vector<Move<VkSemaphore> >						semaphoresB;
628 		std::vector<VkSemaphore>							semaphoreHandlesA;
629 		std::vector<VkSemaphore>							semaphoreHandlesB;
630 		std::vector<deUint64>								timelineValuesA;
631 		std::vector<deUint64>								timelineValuesB;
632 		std::vector<QueueSubmitOrderSharedIteration>		iterations(12);
633 		std::vector<VkPipelineStageFlags2KHR>				stageBits;
634 
635 		// Create a dozen of set of write/read operations.
636 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
637 		{
638 			QueueSubmitOrderSharedIteration&	iter				= iterations[iterIdx];
639 			deUint32							memoryTypeIndex;
640 			NativeHandle						nativeMemoryHandle;
641 
642 			if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
643 			{
644 				const VkExtent3D				extent =
645 				{
646 					(deUint32)m_resourceDesc.size.x(),
647 					de::max(1u, (deUint32)m_resourceDesc.size.y()),
648 					de::max(1u, (deUint32)m_resourceDesc.size.z())
649 				};
650 				const VkImageSubresourceRange	subresourceRange =
651 				{
652 					m_resourceDesc.imageAspect,
653 					0u,
654 					1u,
655 					0u,
656 					1u
657 				};
658 				const VkImageSubresourceLayers	subresourceLayers =
659 				{
660 					m_resourceDesc.imageAspect,
661 					0u,
662 					0u,
663 					1u
664 				};
665 
666 				const vk::VkImageTiling					tiling			= VK_IMAGE_TILING_OPTIMAL;
667 				Move<VkImage>							image			= createImage(vkA, deviceA, extent, universalQueueFamilyIndex, tiling);
668 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *image);
669 														memoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
670 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *image);
671 
672 				VK_CHECK(vkA.bindImageMemory(deviceA, *image, *memory, 0u));
673 
674 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
675 				iter.resourceA = makeSharedPtr(new Resource(image, allocation, extent, m_resourceDesc.imageType, m_resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
676 			}
677 			else
678 			{
679 				const VkDeviceSize						offset			= 0u;
680 				const VkDeviceSize						size			= static_cast<VkDeviceSize>(m_resourceDesc.size.x());
681 				Move<VkBuffer>							buffer			= createBuffer(vkA, deviceA, size, universalQueueFamilyIndex);
682 				const vk::VkMemoryRequirements			requirements	= getMemoryRequirements(vkA, deviceA, *buffer);
683 														memoryTypeIndex	= chooseMemoryType(requirements.memoryTypeBits);
684 				vk::Move<vk::VkDeviceMemory>			memory			= allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *buffer);
685 
686 				VK_CHECK(vkA.bindBufferMemory(deviceA, *buffer, *memory, 0u));
687 
688 				MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
689 				iter.resourceA = makeSharedPtr(new Resource(m_resourceDesc.type, buffer, allocation, offset, size));
690 			}
691 
692 			getMemoryNative(vkA, deviceA, iter.resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
693 			iter.resourceB	= makeSharedPtr(importResource(vkB, *deviceB,
694 														   m_resourceDesc,
695 														   universalQueueFamilyIndex,
696 														   *m_readOpSupport,
697 														   *m_writeOpSupport,
698 														   nativeMemoryHandle,
699 														   m_memoryHandleType,
700 														   memoryTypeIndex));
701 
702 			iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*operationContextA,
703 																 *iter.resourceA));
704 			iter.readOp = makeSharedPtr(m_readOpSupport->build(*operationContextB,
705 															   *iter.resourceB));
706 		}
707 
708 		// Record each write operation into its own command buffer.
709 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
710 		{
711 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
712 			const Resource&						resource	= *iter.resourceA;
713 			const SyncInfo						writeSync	= iter.writeOp->getOutSyncInfo();
714 			const SyncInfo						readSync	= iter.readOp->getInSyncInfo();
715 
716 			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vkA, deviceA, *cmdPoolA)));
717 
718 			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
719 
720 			beginCommandBuffer(vkA, cmdBuffersA.back());
721 
722 			iter.writeOp->recordCommands(cmdBuffersA.back());
723 
724 			{
725 				SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore);
726 
727 				if (resource.getType() == RESOURCE_TYPE_IMAGE)
728 				{
729 					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
730 					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
731 
732 					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
733 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
734 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
735 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
736 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
737 						writeSync.imageLayout,								// VkImageLayout					oldLayout
738 						readSync.imageLayout,								// VkImageLayout					newLayout
739 						resource.getImage().handle,							// VkImage							image
740 						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
741 					);
742 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
743 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
744 				}
745 				else
746 				{
747 					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
748 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
749 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
750 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
751 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
752 						resource.getBuffer().handle,						// VkBuffer							buffer
753 						0,													// VkDeviceSize						offset
754 						VK_WHOLE_SIZE										// VkDeviceSize						size
755 					);
756 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
757 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
758 				}
759 
760 				stageBits.push_back(writeSync.stageMask);
761 			}
762 
763 			endCommandBuffer(vkA, cmdBuffersA.back());
764 
765 			addSemaphore(vkA, deviceA, semaphoresA, semaphoreHandlesA, timelineValuesA, iterIdx == (iterations.size() - 1), 2u);
766 		}
767 
768 		DE_ASSERT(stageBits.size() == iterations.size());
769 		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
770 
771 		// Record all read operations into a single command buffer and record the union of their stage masks.
772 		VkPipelineStageFlags2KHR readStages = 0;
773 		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vkB, *deviceB, *cmdPoolB));
774 		cmdBufferB = **(ptrCmdBufferB);
775 		beginCommandBuffer(vkB, cmdBufferB);
776 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
777 		{
778 			QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
779 			readStages |= iter.readOp->getInSyncInfo().stageMask;
780 			iter.readOp->recordCommands(cmdBufferB);
781 		}
782 		endCommandBuffer(vkB, cmdBufferB);
783 
784 		// Export the last semaphore for use on deviceB and create another semaphore to signal on deviceB.
785 		{
786 			VkSemaphore		lastSemaphoreA			= semaphoreHandlesA.back();
787 			NativeHandle	nativeSemaphoreHandle;
788 
789 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, true, timelineValuesA.back());
790 
791 			getSemaphoreNative(vkA, deviceA, lastSemaphoreA, m_semaphoreHandleType, nativeSemaphoreHandle);
792 			importSemaphore(vkB, *deviceB, semaphoreHandlesB.back(), m_semaphoreHandleType, nativeSemaphoreHandle, 0u);
793 
794 			addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, false, timelineValuesA.back());
795 		}
796 
		// Submit writes, each in its own VkSubmitInfo. With binary
		// semaphores, submissions don't wait on anything; with
		// timeline semaphores, submissions wait on a host signal
		// operation done below.
801 		{
802 			std::vector<VkCommandBufferSubmitInfoKHR>	cmdBuffersInfo				(iterations.size(), makeCommonCommandBufferSubmitInfo(0u));
803 			std::vector<VkSemaphoreSubmitInfoKHR>		waitSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
804 			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfos	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
805 			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore, static_cast<deUint32>(iterations.size()));
806 
807 			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
808 			{
809 				waitSemaphoreSubmitInfos[iterIdx].semaphore		= semaphoreHandlesA.front();
810 				waitSemaphoreSubmitInfos[iterIdx].stageMask		= stageBits[iterIdx];
811 				signalSemaphoreSubmitInfos[iterIdx].semaphore	= semaphoreHandlesA[iterIdx];
812 				signalSemaphoreSubmitInfos[iterIdx].value		= timelineValuesA[iterIdx];
813 				cmdBuffersInfo[iterIdx].commandBuffer			= cmdBuffersA[iterIdx];
814 
815 				synchronizationWrapper->addSubmitInfo(
816 					isTimelineSemaphore,
817 					isTimelineSemaphore ? &waitSemaphoreSubmitInfos[iterIdx] : DE_NULL,
818 					1u,
819 					&cmdBuffersInfo[iterIdx],
820 					1u,
821 					&signalSemaphoreSubmitInfos[iterIdx],
822 					isTimelineSemaphore,
823 					isTimelineSemaphore
824 				);
825 			}
826 
827 			VK_CHECK(synchronizationWrapper->queueSubmit(queueA, *fenceA));
828 		}
829 
		// Submit reads, waiting only on the last write operation;
		// the ordering of signaling should guarantee that when the
		// read operations kick in, all writes have completed.
833 		{
834 			VkCommandBufferSubmitInfoKHR	cmdBuffersInfo				= makeCommonCommandBufferSubmitInfo(cmdBufferB);
835 			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.front(), timelineValuesA.back(), readStages);
836 			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
837 			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vkB, isTimelineSemaphore);
838 
839 			synchronizationWrapper->addSubmitInfo(
840 				1u,
841 				&waitSemaphoreSubmitInfo,
842 				1u,
843 				&cmdBuffersInfo,
844 				1u,
845 				&signalSemaphoreSubmitInfo,
846 				isTimelineSemaphore,
847 				isTimelineSemaphore
848 			);
849 
850 			VK_CHECK(synchronizationWrapper->queueSubmit(queueB, *fenceB));
851 
852 			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
853 			{
854 				const VkSemaphoreWaitInfo		waitInfo	=
855 				{
856 					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
857 					DE_NULL,								// const void*				pNext;
858 					0u,										// VkSemaphoreWaitFlagsKHR	flags;
859 					1u,										// deUint32					semaphoreCount;
860 					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
861 					&timelineValuesB.back(),				// const deUint64*			pValues;
862 				};
863 
864 				// Unblock the whole lot.
865 				hostSignal(vkA, deviceA, semaphoreHandlesA.front(), 2);
866 
867 				VK_CHECK(vkB.waitSemaphores(*deviceB, &waitInfo, ~0ull));
868 			}
869 			else
870 			{
871 				VK_CHECK(vkB.waitForFences(*deviceB, 1, &fenceB.get(), VK_TRUE, ~0ull));
872 			}
873 		}
874 
875 		// Verify the result of the operations.
876 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
877 		{
878 			QueueSubmitOrderSharedIteration&	iter		= iterations[iterIdx];
879 			const Data							expected	= iter.writeOp->getData();
880 			const Data							actual		= iter.readOp->getData();
881 
882 			if (isIndirectBuffer(iter.resourceA->getType()))
883 			{
884 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
885 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
886 
887 				if (actualValue < expectedValue)
888 					return tcu::TestStatus::fail("Counter value is smaller than expected");
889 			}
890 			else
891 			{
892 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
893 					return tcu::TestStatus::fail("Memory contents don't match");
894 			}
895 		}
896 
897 		VK_CHECK(vkA.deviceWaitIdle(deviceA));
898 		VK_CHECK(vkB.deviceWaitIdle(*deviceB));
899 
900 		return tcu::TestStatus::pass("Success");
901 	}
902 
903 private:
addSemaphore(const DeviceInterface & vk,VkDevice device,std::vector<Move<VkSemaphore>> & semaphores,std::vector<VkSemaphore> & semaphoreHandles,std::vector<deUint64> & timelineValues,bool exportable,deUint64 firstTimelineValue)904 	void addSemaphore (const DeviceInterface&			vk,
905 					   VkDevice							device,
906 					   std::vector<Move<VkSemaphore> >&	semaphores,
907 					   std::vector<VkSemaphore>&		semaphoreHandles,
908 					   std::vector<deUint64>&			timelineValues,
909 					   bool								exportable,
910 					   deUint64							firstTimelineValue)
911 	{
912 		Move<VkSemaphore>	semaphore;
913 
914 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
915 		{
916 			// Only allocate a single exportable semaphore.
917 			if (semaphores.empty())
918 			{
919 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
920 			}
921 		}
922 		else
923 		{
924 			if (exportable)
925 				semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
926 			else
927 				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
928 		}
929 
930 		semaphoreHandles.push_back(*semaphores.back());
931 		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
932 	}
933 
isResourceExportable()934 	bool isResourceExportable ()
935 	{
936 		const InstanceInterface&					vki				= m_context.getInstanceInterface();
937 		VkPhysicalDevice							physicalDevice	= m_context.getPhysicalDevice();
938 
939 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
940 		{
941 			const VkPhysicalDeviceExternalImageFormatInfo	externalInfo		=
942 			{
943 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
944 				DE_NULL,
945 				m_memoryHandleType
946 			};
947 			const VkPhysicalDeviceImageFormatInfo2			imageFormatInfo		=
948 			{
949 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
950 				&externalInfo,
951 				m_resourceDesc.imageFormat,
952 				m_resourceDesc.imageType,
953 				VK_IMAGE_TILING_OPTIMAL,
954 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
955 				0u
956 			};
957 			VkExternalImageFormatProperties					externalProperties	=
958 			{
959 				VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
960 				DE_NULL,
961 				{ 0u, 0u, 0u }
962 			};
963 			VkImageFormatProperties2						formatProperties	=
964 			{
965 				VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
966 				&externalProperties,
967 				{
968 					{ 0u, 0u, 0u },
969 					0u,
970 					0u,
971 					0u,
972 					0u,
973 				}
974 			};
975 
976 			{
977 				const VkResult res = vki.getPhysicalDeviceImageFormatProperties2(physicalDevice, &imageFormatInfo, &formatProperties);
978 
979 				if (res == VK_ERROR_FORMAT_NOT_SUPPORTED)
980 					return false;
981 
982 				VK_CHECK(res); // Check other errors
983 			}
984 
985 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
986 				return false;
987 
988 			if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
989 				return false;
990 
991 			return true;
992 		}
993 		else
994 		{
995 			const VkPhysicalDeviceExternalBufferInfo	info	=
996 			{
997 				VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
998 				DE_NULL,
999 
1000 				0u,
1001 				m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
1002 				m_memoryHandleType
1003 			};
1004 			VkExternalBufferProperties					properties			=
1005 			{
1006 				VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
1007 				DE_NULL,
1008 				{ 0u, 0u, 0u}
1009 			};
1010 			vki.getPhysicalDeviceExternalBufferProperties(physicalDevice, &info, &properties);
1011 
1012 			if ((properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
1013 				|| (properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
1014 				return false;
1015 
1016 			return true;
1017 		}
1018 	}
1019 
	SynchronizationType							m_type;					// Legacy vs VK_KHR_synchronization2 code paths
	SharedPtr<OperationSupport>					m_writeOpSupport;		// Factory for the write (producer) operation
	SharedPtr<OperationSupport>					m_readOpSupport;		// Factory for the read (consumer) operation
	const ResourceDescription&					m_resourceDesc;			// Description of the shared image/buffer under test
	VkExternalMemoryHandleTypeFlagBits			m_memoryHandleType;		// External handle type used to share the resource memory
	VkSemaphoreType								m_semaphoreType;		// Binary or timeline semaphores
	VkExternalSemaphoreHandleTypeFlagBits		m_semaphoreHandleType;	// External handle type used to share semaphores
	PipelineCacheData&							m_pipelineCacheData;	// Shared across tests to speed up pipeline creation
	de::Random									m_rng;					// Drives the random timeline value increments
1029 };
1030 
1031 class QueueSubmitSignalOrderSharedTestCase : public TestCase
1032 {
1033 public:
QueueSubmitSignalOrderSharedTestCase(tcu::TestContext & testCtx,SynchronizationType type,const std::string & name,OperationName writeOp,OperationName readOp,const ResourceDescription & resourceDesc,VkExternalMemoryHandleTypeFlagBits memoryHandleType,VkSemaphoreType semaphoreType,VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType,PipelineCacheData & pipelineCacheData)1034 	QueueSubmitSignalOrderSharedTestCase (tcu::TestContext&						testCtx,
1035 										  SynchronizationType					type,
1036 										  const std::string&					name,
1037 										  OperationName							writeOp,
1038 										  OperationName							readOp,
1039 										  const ResourceDescription&			resourceDesc,
1040 										  VkExternalMemoryHandleTypeFlagBits	memoryHandleType,
1041 										  VkSemaphoreType						semaphoreType,
1042 										  VkExternalSemaphoreHandleTypeFlagBits	semaphoreHandleType,
1043 										  PipelineCacheData&					pipelineCacheData)
1044 		: TestCase				(testCtx, name.c_str(), "")
1045 		, m_type				(type)
1046 		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
1047 		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
1048 		, m_resourceDesc		(resourceDesc)
1049 		, m_memoryHandleType	(memoryHandleType)
1050 		, m_semaphoreType		(semaphoreType)
1051 		, m_semaphoreHandleType	(semaphoreHandleType)
1052 		, m_pipelineCacheData	(pipelineCacheData)
1053 	{
1054 	}
1055 
checkSupport(Context & context) const1056 	virtual void checkSupport(Context& context) const
1057 	{
1058 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1059 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1060 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1061 
1062 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT ||
1063 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) &&
1064 			 !context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
1065 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_fd not supported");
1066 
1067 		if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
1068 			 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT) &&
1069 			!context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
1070 			TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_win32 not supported");
1071 
1072 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1073 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1074 	}
1075 
createInstance(Context & context) const1076 	TestInstance* createInstance (Context& context) const
1077 	{
1078 		return new QueueSubmitSignalOrderSharedTestInstance(context,
1079 															m_type,
1080 															m_writeOpSupport,
1081 															m_readOpSupport,
1082 															m_resourceDesc,
1083 															m_memoryHandleType,
1084 															m_semaphoreType,
1085 															m_semaphoreHandleType,
1086 															m_pipelineCacheData);
1087 	}
1088 
initPrograms(SourceCollections & programCollection) const1089 	void initPrograms (SourceCollections& programCollection) const
1090 	{
1091 		m_writeOpSupport->initPrograms(programCollection);
1092 		m_readOpSupport->initPrograms(programCollection);
1093 	}
1094 
1095 private:
1096 	SynchronizationType						m_type;
1097 	SharedPtr<OperationSupport>				m_writeOpSupport;
1098 	SharedPtr<OperationSupport>				m_readOpSupport;
1099 	const ResourceDescription&				m_resourceDesc;
1100 	VkExternalMemoryHandleTypeFlagBits		m_memoryHandleType;
1101 	VkSemaphoreType							m_semaphoreType;
1102 	VkExternalSemaphoreHandleTypeFlagBits	m_semaphoreHandleType;
1103 	PipelineCacheData&						m_pipelineCacheData;
1104 };
1105 
// Test group that enumerates all supported (write op, read op, resource,
// external handle type) combinations and adds one
// QueueSubmitSignalOrderSharedTestCase per combination.
class QueueSubmitSignalOrderSharedTests : public tcu::TestCaseGroup
{
public:
	QueueSubmitSignalOrderSharedTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
		, m_type				(type)
		, m_semaphoreType		(semaphoreType)
	{
	}

	// Populates the group. Cases are generated as the cross product of the
	// write/read operation tables below, the shared resource descriptions
	// (s_resources) and the external memory/semaphore handle type pairs.
	void init (void)
	{
		// All write (producer) operations exercised by this group.
		static const OperationName	writeOps[]	=
		{
			OPERATION_NAME_WRITE_COPY_BUFFER,
			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_WRITE_COPY_IMAGE,
			OPERATION_NAME_WRITE_BLIT_IMAGE,
			OPERATION_NAME_WRITE_SSBO_VERTEX,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
			OPERATION_NAME_WRITE_SSBO_COMPUTE,
			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_WRITE_IMAGE_VERTEX,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
		};
		// All read (consumer) operations exercised by this group.
		static const OperationName	readOps[]	=
		{
			OPERATION_NAME_READ_COPY_BUFFER,
			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_READ_COPY_IMAGE,
			OPERATION_NAME_READ_BLIT_IMAGE,
			OPERATION_NAME_READ_UBO_VERTEX,
			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_UBO_GEOMETRY,
			OPERATION_NAME_READ_UBO_FRAGMENT,
			OPERATION_NAME_READ_UBO_COMPUTE,
			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_SSBO_VERTEX,
			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_SSBO_GEOMETRY,
			OPERATION_NAME_READ_SSBO_FRAGMENT,
			OPERATION_NAME_READ_SSBO_COMPUTE,
			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_IMAGE_VERTEX,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_IMAGE_GEOMETRY,
			OPERATION_NAME_READ_IMAGE_FRAGMENT,
			OPERATION_NAME_READ_IMAGE_COMPUTE,
			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
			OPERATION_NAME_READ_VERTEX_INPUT,
		};
		// Pairs of external memory / semaphore handle types tested together.
		static const struct
		{
			VkExternalMemoryHandleTypeFlagBits		memoryType;
			VkExternalSemaphoreHandleTypeFlagBits	semaphoreType;
		}	exportCases[] =
		{
			// Only semaphore handle types having reference semantic
			// are valid for this test.
			{
				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
			},
			{
				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
			},
			{
				VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
				VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
			},
		};

		// One sub-group per write/read operation pair, one case per
		// compatible resource and handle-type combination.
		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
		{
			const OperationName	writeOp		= writeOps[writeOpIdx];
			const OperationName	readOp		= readOps[readOpIdx];
			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));

			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];

				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					for (deUint32 exportIdx = 0; exportIdx < DE_LENGTH_OF_ARRAY(exportCases); exportIdx++)
					{
						std::string					caseName	= getResourceName(resource) + "_" +
							externalSemaphoreTypeToName(exportCases[exportIdx].semaphoreType);

						opGroup->addChild(new QueueSubmitSignalOrderSharedTestCase(m_testCtx,
																				   m_type,
																				   caseName,
																				   writeOp,
																				   readOp,
																				   resource,
																				   exportCases[exportIdx].memoryType,
																				   m_semaphoreType,
																				   exportCases[exportIdx].semaphoreType,
																				   m_pipelineCacheData));
						empty = false;
					}
				}
			}
			// Skip operation pairs for which no resource is supported.
			if (!empty)
				addChild(opGroup.release());
		}
	}

	void deinit (void)
	{
		cleanupGroup();
	}

private:
	SynchronizationType	m_type;
	VkSemaphoreType		m_semaphoreType;
	// synchronization.op tests share pipeline cache data to speed up test
	// execution.
	PipelineCacheData	m_pipelineCacheData;
};
1247 
1248 struct QueueSubmitOrderIteration
1249 {
QueueSubmitOrderIterationvkt::synchronization::__anon570917b90111::QueueSubmitOrderIteration1250 	QueueSubmitOrderIteration() {}
~QueueSubmitOrderIterationvkt::synchronization::__anon570917b90111::QueueSubmitOrderIteration1251 	~QueueSubmitOrderIteration() {}
1252 
1253 	SharedPtr<Resource>			resource;
1254 
1255 	SharedPtr<Operation>		writeOp;
1256 	SharedPtr<Operation>		readOp;
1257 };
1258 
1259 // Verifies the signaling order of the semaphores in multiple
1260 // VkSubmitInfo given to vkQueueSubmit() with queueA & queueB from the
1261 // same VkDevice.
1262 //
1263 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
1264 // vkQueueSubmit(queueB, [read0-6])
1265 //
1266 // With read0-6 waiting on write6, all the data should be available
1267 // for reading given that signal operations are supposed to happen in
1268 // order.
1269 class QueueSubmitSignalOrderTestInstance : public TestInstance
1270 {
1271 public:
QueueSubmitSignalOrderTestInstance(Context & context,SynchronizationType type,const SharedPtr<OperationSupport> writeOpSupport,const SharedPtr<OperationSupport> readOpSupport,const ResourceDescription & resourceDesc,VkSemaphoreType semaphoreType,PipelineCacheData & pipelineCacheData)1272 	QueueSubmitSignalOrderTestInstance (Context&									context,
1273 										SynchronizationType							type,
1274 										const SharedPtr<OperationSupport>			writeOpSupport,
1275 										const SharedPtr<OperationSupport>			readOpSupport,
1276 										const ResourceDescription&					resourceDesc,
1277 										VkSemaphoreType								semaphoreType,
1278 										PipelineCacheData&							pipelineCacheData)
1279 		: TestInstance			(context)
1280 		, m_type				(type)
1281 		, m_writeOpSupport		(writeOpSupport)
1282 		, m_readOpSupport		(readOpSupport)
1283 		, m_resourceDesc		(resourceDesc)
1284 		, m_semaphoreType		(semaphoreType)
1285 		, m_device				(SingletonDevice::getDevice(context))
1286 		, m_deviceInterface		(context.getPlatformInterface(), context.getInstance(), *m_device)
1287 		, m_allocator			(new SimpleAllocator(m_deviceInterface,
1288 													 *m_device,
1289 													 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1290 																					   context.getPhysicalDevice())))
1291 		, m_operationContext	(new OperationContext(context, type, m_deviceInterface, *m_device, *m_allocator, pipelineCacheData))
1292 		, m_queueA				(DE_NULL)
1293 		, m_queueB				(DE_NULL)
1294 		, m_rng					(1234)
1295 
1296 	{
1297 		const std::vector<VkQueueFamilyProperties> queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(),
1298 																													 context.getPhysicalDevice());
1299 
1300 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1301 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1302 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1303 
1304 		VkQueueFlags writeOpQueueFlags = m_writeOpSupport->getQueueFlags(*m_operationContext);
1305 		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1306 			if (((queueFamilyProperties[familyIdx].queueFlags & writeOpQueueFlags) == writeOpQueueFlags) ||
1307 			((writeOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
1308 			(((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
1309 			((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT)))) {
1310 				m_queueA = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, 0);
1311 				m_queueFamilyIndexA = familyIdx;
1312 				break;
1313 			}
1314 		}
1315 		if (m_queueA == DE_NULL)
1316 			TCU_THROW(NotSupportedError, "No queue supporting write operation");
1317 
1318 		VkQueueFlags readOpQueueFlags = m_readOpSupport->getQueueFlags(*m_operationContext);
1319 		for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1320 			if (((queueFamilyProperties[familyIdx].queueFlags & readOpQueueFlags) == readOpQueueFlags) ||
1321 			((readOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
1322 			(((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
1323 			((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT)))) {
1324 				for (deUint32 queueIdx = 0; queueIdx < queueFamilyProperties[familyIdx].queueCount; queueIdx++) {
1325 					VkQueue queue = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, queueIdx);
1326 
1327 					if (queue == m_queueA)
1328 						continue;
1329 
1330 					m_queueB = queue;
1331 					m_queueFamilyIndexB = familyIdx;
1332 					break;
1333 				}
1334 
1335 				if (m_queueB != DE_NULL)
1336 					break;
1337 			}
1338 		}
1339 		if (m_queueB == DE_NULL)
1340 			TCU_THROW(NotSupportedError, "No queue supporting read operation");
1341 	}
1342 
iterate(void)1343 	tcu::TestStatus iterate (void)
1344 	{
1345 		const bool											isTimelineSemaphore			= (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
1346 		const VkDevice&										device						= *m_device;
1347 		const DeviceInterface&								vk							= m_deviceInterface;
1348 		Unique<VkFence>										fence						(createFence(vk, device));
1349 		const Unique<VkCommandPool>							cmdPoolA					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexA));
1350 		const Unique<VkCommandPool>							cmdPoolB					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexB));
1351 		std::vector<SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffersA;
1352 		SharedPtr<Move<VkCommandBuffer> >					ptrCmdBufferB;
1353 		std::vector<VkCommandBuffer>						cmdBuffersA;
1354 		VkCommandBuffer										cmdBufferB;
1355 		std::vector<Move<VkSemaphore> >						semaphoresA;
1356 		std::vector<Move<VkSemaphore> >						semaphoresB;
1357 		std::vector<VkSemaphore>							semaphoreHandlesA;
1358 		std::vector<VkSemaphore>							semaphoreHandlesB;
1359 		std::vector<deUint64>								timelineValuesA;
1360 		std::vector<deUint64>								timelineValuesB;
1361 		std::vector<QueueSubmitOrderIteration>				iterations;
1362 		std::vector<VkPipelineStageFlags2KHR>				stageBits;
1363 		std::vector<deUint32>								queueFamilies;
1364 		SynchronizationWrapperPtr							syncWrapper					= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);
1365 
1366 		queueFamilies.push_back(m_queueFamilyIndexA);
1367 		queueFamilies.push_back(m_queueFamilyIndexB);
1368 
1369 		// Create a dozen of set of write/read operations.
1370 		iterations.resize(12);
1371 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1372 		{
1373 			QueueSubmitOrderIteration&		iter				= iterations[iterIdx];
1374 
1375 			iter.resource	= makeSharedPtr(new Resource(*m_operationContext,
1376 														 m_resourceDesc,
1377 														 m_writeOpSupport->getOutResourceUsageFlags() |
1378 														 m_readOpSupport->getInResourceUsageFlags(),
1379 														 VK_SHARING_MODE_EXCLUSIVE,
1380 														 queueFamilies));
1381 
1382 			iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*m_operationContext,
1383 																 *iter.resource));
1384 			iter.readOp = makeSharedPtr(m_readOpSupport->build(*m_operationContext,
1385 															   *iter.resource));
1386 		}
1387 
1388 		// Record each write operation into its own command buffer.
1389 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1390 		{
1391 			QueueSubmitOrderIteration&	iter	= iterations[iterIdx];
1392 
1393 			ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolA)));
1394 			cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
1395 
1396 			beginCommandBuffer(vk, cmdBuffersA.back());
1397 			iter.writeOp->recordCommands(cmdBuffersA.back());
1398 
1399 			{
1400 				SynchronizationWrapperPtr	synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE);
1401 				const SyncInfo				writeSync				= iter.writeOp->getOutSyncInfo();
1402 				const SyncInfo				readSync				= iter.readOp->getInSyncInfo();
1403 				const Resource&				resource				= *iter.resource;
1404 
1405 				if (resource.getType() == RESOURCE_TYPE_IMAGE)
1406 				{
1407 					DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1408 					DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1409 
1410 					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
1411 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
1412 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
1413 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
1414 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
1415 						writeSync.imageLayout,								// VkImageLayout					oldLayout
1416 						readSync.imageLayout,								// VkImageLayout					newLayout
1417 						resource.getImage().handle,							// VkImage							image
1418 						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
1419 					);
1420 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
1421 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
1422 				}
1423 				else
1424 				{
1425 					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
1426 						writeSync.stageMask,								// VkPipelineStageFlags2KHR			srcStageMask
1427 						writeSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
1428 						readSync.stageMask,									// VkPipelineStageFlags2KHR			dstStageMask
1429 						readSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
1430 						resource.getBuffer().handle,						// VkBuffer							buffer
1431 						0,													// VkDeviceSize						offset
1432 						VK_WHOLE_SIZE										// VkDeviceSize						size
1433 					);
1434 					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
1435 					synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
1436 				}
1437 
1438 				stageBits.push_back(writeSync.stageMask);
1439 			}
1440 
1441 			endCommandBuffer(vk, cmdBuffersA.back());
1442 
1443 			addSemaphore(vk, device, semaphoresA, semaphoreHandlesA, timelineValuesA, 2u);
1444 		}
1445 
1446 		DE_ASSERT(stageBits.size() == iterations.size());
1447 		DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
1448 
1449 		// Record all read operations into a single command buffer and track the union of their execution stages.
1450 		ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolB));
1451 		cmdBufferB = **(ptrCmdBufferB);
1452 		beginCommandBuffer(vk, cmdBufferB);
1453 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1454 		{
1455 			QueueSubmitOrderIteration& iter = iterations[iterIdx];
1456 			iter.readOp->recordCommands(cmdBufferB);
1457 		}
1458 		endCommandBuffer(vk, cmdBufferB);
1459 
1460 		addSemaphore(vk, device, semaphoresB, semaphoreHandlesB, timelineValuesB, timelineValuesA.back());
1461 
		// Submit writes, each in its own VkSubmitInfo. With binary
		// semaphores, submissions don't wait on anything; with
		// timeline semaphores, submissions wait on a host signal
		// operation done below.
1466 		{
1467 			VkSemaphoreSubmitInfoKHR					waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.front(), 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
1468 			std::vector<VkSemaphoreSubmitInfoKHR>		signalSemaphoreSubmitInfo	(iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
1469 			std::vector<VkCommandBufferSubmitInfoKHR>	commandBufferSubmitInfos	(iterations.size(), makeCommonCommandBufferSubmitInfo(0));
1470 			SynchronizationWrapperPtr					synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore, (deUint32)iterations.size());
1471 
1472 			for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1473 			{
1474 				commandBufferSubmitInfos[iterIdx].commandBuffer		= cmdBuffersA[iterIdx];
1475 				signalSemaphoreSubmitInfo[iterIdx].semaphore		= semaphoreHandlesA[iterIdx];
1476 				signalSemaphoreSubmitInfo[iterIdx].value			= timelineValuesA[iterIdx];
1477 
1478 				synchronizationWrapper->addSubmitInfo(
1479 					isTimelineSemaphore,
1480 					isTimelineSemaphore ? &waitSemaphoreSubmitInfo : DE_NULL,
1481 					1u,
1482 					&commandBufferSubmitInfos[iterIdx],
1483 					1u,
1484 					&signalSemaphoreSubmitInfo[iterIdx],
1485 					isTimelineSemaphore,
1486 					isTimelineSemaphore
1487 				);
1488 			}
1489 
1490 			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueA, DE_NULL));
1491 		}
1492 
		// Submit reads, waiting only on the last write operation's
		// signal; the ordering of the signal operations guarantees that
		// by the time the read operations kick in, all writes have
		// completed.
1496 		{
1497 			VkCommandBufferSubmitInfoKHR	commandBufferSubmitInfos	= makeCommonCommandBufferSubmitInfo(cmdBufferB);
1498 			VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo		= makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.back(), timelineValuesA.back(), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
1499 			VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo	= makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
1500 			SynchronizationWrapperPtr		synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);
1501 
1502 			synchronizationWrapper->addSubmitInfo(
1503 				1u,										// deUint32								waitSemaphoreInfoCount
1504 				&waitSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos
1505 				1u,										// deUint32								commandBufferInfoCount
1506 				&commandBufferSubmitInfos,				// const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos
1507 				1u,										// deUint32								signalSemaphoreInfoCount
1508 				&signalSemaphoreSubmitInfo,				// const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos
1509 				isTimelineSemaphore,
1510 				isTimelineSemaphore
1511 			);
1512 
1513 			VK_CHECK(synchronizationWrapper->queueSubmit(m_queueB, *fence));
1514 
1515 			if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1516 			{
1517 				const VkSemaphoreWaitInfo		waitInfo	=
1518 				{
1519 					VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,	// VkStructureType			sType;
1520 					DE_NULL,								// const void*				pNext;
1521 					0u,										// VkSemaphoreWaitFlagsKHR	flags;
1522 					1u,										// deUint32					semaphoreCount;
1523 					&semaphoreHandlesB.back(),				// const VkSemaphore*		pSemaphores;
1524 					&timelineValuesB.back(),				// const deUint64*			pValues;
1525 				};
1526 
1527 				// Unblock the whole lot.
1528 				hostSignal(vk, device, semaphoreHandlesA.front(), 1);
1529 
1530 				VK_CHECK(vk.waitSemaphores(device, &waitInfo, ~0ull));
1531 			}
1532 			else
1533 			{
1534 				VK_CHECK(vk.waitForFences(device, 1, &fence.get(), VK_TRUE, ~0ull));
1535 			}
1536 		}
1537 
1538 		// Verify the result of the operations.
1539 		for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1540 		{
1541 			QueueSubmitOrderIteration&		iter		= iterations[iterIdx];
1542 			const Data						expected	= iter.writeOp->getData();
1543 			const Data						actual		= iter.readOp->getData();
1544 
1545 			if (isIndirectBuffer(iter.resource->getType()))
1546 			{
1547 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
1548 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
1549 
1550 				if (actualValue < expectedValue)
1551 					return tcu::TestStatus::fail("Counter value is smaller than expected");
1552 			}
1553 			else
1554 			{
1555 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
1556 					return tcu::TestStatus::fail("Memory contents don't match");
1557 			}
1558 		}
1559 
1560 		VK_CHECK(vk.deviceWaitIdle(device));
1561 
1562 		return tcu::TestStatus::pass("Success");
1563 	}
1564 
1565 private:
addSemaphore(const DeviceInterface & vk,VkDevice device,std::vector<Move<VkSemaphore>> & semaphores,std::vector<VkSemaphore> & semaphoreHandles,std::vector<deUint64> & timelineValues,deUint64 firstTimelineValue)1566 	void addSemaphore (const DeviceInterface&			vk,
1567 					   VkDevice							device,
1568 					   std::vector<Move<VkSemaphore> >&	semaphores,
1569 					   std::vector<VkSemaphore>&		semaphoreHandles,
1570 					   std::vector<deUint64>&			timelineValues,
1571 					   deUint64							firstTimelineValue)
1572 	{
1573 		Move<VkSemaphore>	semaphore;
1574 
1575 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1576 		{
1577 			// Only allocate a single exportable semaphore.
1578 			if (semaphores.empty())
1579 			{
1580 				semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
1581 			}
1582 		}
1583 		else
1584 		{
1585 			semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
1586 		}
1587 
1588 		semaphoreHandles.push_back(*semaphores.back());
1589 		timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
1590 	}
1591 
	// Synchronization API variant under test (legacy vs. VK_KHR_synchronization2).
	SynchronizationType							m_type;
	// Factories for the write and read operations exercised by this instance.
	SharedPtr<OperationSupport>					m_writeOpSupport;
	SharedPtr<OperationSupport>					m_readOpSupport;
	// Description of the buffer/image resource the operations act on.
	const ResourceDescription&					m_resourceDesc;
	// VK_SEMAPHORE_TYPE_BINARY_KHR or VK_SEMAPHORE_TYPE_TIMELINE_KHR.
	VkSemaphoreType								m_semaphoreType;
	// Device used by the test; reference only — owned outside this class.
	const Unique<VkDevice>&						m_device;
	// Device-level entry points for m_device.
	const DeviceDriver							m_deviceInterface;
	UniquePtr<SimpleAllocator>					m_allocator;
	UniquePtr<OperationContext>					m_operationContext;
	// Two queues: writes are submitted to A, reads to B (see iterate()).
	VkQueue										m_queueA;
	VkQueue										m_queueB;
	deUint32									m_queueFamilyIndexA;
	deUint32									m_queueFamilyIndexB;
	// Source of randomized timeline-value increments (see addSemaphore()).
	de::Random									m_rng;
};
1607 
1608 class QueueSubmitSignalOrderTestCase : public TestCase
1609 {
1610 public:
QueueSubmitSignalOrderTestCase(tcu::TestContext & testCtx,SynchronizationType type,const std::string & name,OperationName writeOp,OperationName readOp,const ResourceDescription & resourceDesc,VkSemaphoreType semaphoreType,PipelineCacheData & pipelineCacheData)1611 	QueueSubmitSignalOrderTestCase (tcu::TestContext&			testCtx,
1612 									SynchronizationType			type,
1613 									const std::string&			name,
1614 									OperationName				writeOp,
1615 									OperationName				readOp,
1616 									const ResourceDescription&	resourceDesc,
1617 									VkSemaphoreType				semaphoreType,
1618 									PipelineCacheData&			pipelineCacheData)
1619 		: TestCase				(testCtx, name.c_str(), "")
1620 		, m_type				(type)
1621 		, m_writeOpSupport		(makeOperationSupport(writeOp, resourceDesc).release())
1622 		, m_readOpSupport		(makeOperationSupport(readOp, resourceDesc).release())
1623 		, m_resourceDesc		(resourceDesc)
1624 		, m_semaphoreType		(semaphoreType)
1625 		, m_pipelineCacheData	(pipelineCacheData)
1626 	{
1627 	}
1628 
checkSupport(Context & context) const1629 	virtual void checkSupport(Context& context) const
1630 	{
1631 		if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1632 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
1633 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1634 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
1635 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
1636 	}
1637 
createInstance(Context & context) const1638 	TestInstance* createInstance (Context& context) const
1639 	{
1640 		return new QueueSubmitSignalOrderTestInstance(context,
1641 													  m_type,
1642 													  m_writeOpSupport,
1643 													  m_readOpSupport,
1644 													  m_resourceDesc,
1645 													  m_semaphoreType,
1646 													  m_pipelineCacheData);
1647 	}
1648 
initPrograms(SourceCollections & programCollection) const1649 	void initPrograms (SourceCollections& programCollection) const
1650 	{
1651 		m_writeOpSupport->initPrograms(programCollection);
1652 		m_readOpSupport->initPrograms(programCollection);
1653 	}
1654 
1655 private:
1656 	SynchronizationType						m_type;
1657 	SharedPtr<OperationSupport>				m_writeOpSupport;
1658 	SharedPtr<OperationSupport>				m_readOpSupport;
1659 	const ResourceDescription&				m_resourceDesc;
1660 	VkSemaphoreType							m_semaphoreType;
1661 	PipelineCacheData&						m_pipelineCacheData;
1662 };
1663 
// Group of signal-ordering test cases for one synchronization type and one
// semaphore type (binary or timeline); the individual cases are generated in
// init() as the cross product of write and read operations over all resources
// supported by both.
class QueueSubmitSignalOrderTests : public tcu::TestCaseGroup
{
public:
	QueueSubmitSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
		: tcu::TestCaseGroup	(testCtx, name, "Signal ordering of semaphores")
		, m_type				(type)
		, m_semaphoreType		(semaphoreType)
	{
	}

	// Builds the test tree: one sub-group per (writeOp, readOp) pair, with one
	// case per resource description both operations can work on.
	void init (void)
	{
		// Write operations to pair with each of the read operations below.
		static const OperationName	writeOps[]	=
		{
			OPERATION_NAME_WRITE_COPY_BUFFER,
			OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_WRITE_COPY_IMAGE,
			OPERATION_NAME_WRITE_BLIT_IMAGE,
			OPERATION_NAME_WRITE_SSBO_VERTEX,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_SSBO_GEOMETRY,
			OPERATION_NAME_WRITE_SSBO_FRAGMENT,
			OPERATION_NAME_WRITE_SSBO_COMPUTE,
			OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_WRITE_IMAGE_VERTEX,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
			OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE,
			OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
		};
		// Read operations covering copies, shader reads, indirect draws/dispatch
		// and vertex input.
		static const OperationName	readOps[]	=
		{
			OPERATION_NAME_READ_COPY_BUFFER,
			OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
			OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
			OPERATION_NAME_READ_COPY_IMAGE,
			OPERATION_NAME_READ_BLIT_IMAGE,
			OPERATION_NAME_READ_UBO_VERTEX,
			OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_UBO_GEOMETRY,
			OPERATION_NAME_READ_UBO_FRAGMENT,
			OPERATION_NAME_READ_UBO_COMPUTE,
			OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_SSBO_VERTEX,
			OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_SSBO_GEOMETRY,
			OPERATION_NAME_READ_SSBO_FRAGMENT,
			OPERATION_NAME_READ_SSBO_COMPUTE,
			OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_IMAGE_VERTEX,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
			OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
			OPERATION_NAME_READ_IMAGE_GEOMETRY,
			OPERATION_NAME_READ_IMAGE_FRAGMENT,
			OPERATION_NAME_READ_IMAGE_COMPUTE,
			OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
			OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
			OPERATION_NAME_READ_VERTEX_INPUT,
		};

		for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
		for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
		{
			const OperationName	writeOp		= writeOps[writeOpIdx];
			const OperationName	readOp		= readOps[readOpIdx];
			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));

			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];

				// Only create a case when the resource type works for both operations.
				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					opGroup->addChild(new QueueSubmitSignalOrderTestCase(m_testCtx,
																		 m_type,
																		 getResourceName(resource),
																		 writeOp,
																		 readOp,
																		 resource,
																		 m_semaphoreType,
																		 m_pipelineCacheData));
					empty = false;
				}
			}
			// Drop (write, read) combinations that have no supported resource.
			if (!empty)
				addChild(opGroup.release());
		}
	}

	void deinit (void)
	{
		cleanupGroup();
	}

private:
	SynchronizationType	m_type;
	VkSemaphoreType		m_semaphoreType;
	// synchronization.op tests share pipeline cache data to speed up test
	// execution.
	PipelineCacheData	m_pipelineCacheData;
};
1776 
1777 } // anonymous
1778 
createSignalOrderTests(tcu::TestContext & testCtx,SynchronizationType type)1779 tcu::TestCaseGroup* createSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type)
1780 {
1781 	de::MovePtr<tcu::TestCaseGroup> orderingTests(new tcu::TestCaseGroup(testCtx, "signal_order", "Signal ordering tests"));
1782 
1783 	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "binary_semaphore"));
1784 	orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "timeline_semaphore"));
1785 	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "shared_binary_semaphore"));
1786 	orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "shared_timeline_semaphore"));
1787 
1788 	return orderingTests.release();
1789 }
1790 
1791 } // synchronization
1792 } // vkt
1793