• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Synchronization primitive tests with multi queue
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSynchronizationOperationMultiQueueTests.hpp"
25 #include "vktCustomInstancesDevices.hpp"
26 #include "vkDefs.hpp"
27 #include "vktTestCase.hpp"
28 #include "vktTestCaseUtil.hpp"
29 #include "vkRef.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkBarrierUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkDeviceUtil.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkSafetyCriticalUtil.hpp"
39 #include "deRandom.hpp"
40 #include "deUniquePtr.hpp"
41 #include "deSharedPtr.hpp"
42 #include "tcuTestLog.hpp"
43 #include "vktSynchronizationUtil.hpp"
44 #include "vktSynchronizationOperation.hpp"
45 #include "vktSynchronizationOperationTestData.hpp"
46 #include "vktSynchronizationOperationResources.hpp"
47 #include "vktTestGroupUtil.hpp"
48 #include "tcuCommandLine.hpp"
49 
50 #include <set>
51 
52 namespace vkt
53 {
54 
55 namespace synchronization
56 {
57 
58 namespace
59 {
60 using namespace vk;
61 using de::MovePtr;
62 using de::SharedPtr;
63 using de::UniquePtr;
64 using de::SharedPtr;
65 
// Role of a queue in a write/read pair. The enumerator values
// (QUEUETYPE_WRITE == 0, QUEUETYPE_READ == 1) are used directly as indices
// into the per-queue arrays (command pools, command buffers, submit infos,
// synchronization wrappers) in the test instances below.
66 enum QueueType
67 {
68 	QUEUETYPE_WRITE,
69 	QUEUETYPE_READ
70 };
71 
72 struct QueuePair
73 {
QueuePairvkt::synchronization::__anon1b17ad200111::QueuePair74 	QueuePair	(const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
75 		: familyIndexWrite	(familyWrite)
76 		, familyIndexRead	(familyRead)
77 		, queueWrite		(write)
78 		, queueRead			(read)
79 	{}
80 
81 	deUint32	familyIndexWrite;
82 	deUint32	familyIndexRead;
83 	VkQueue		queueWrite;
84 	VkQueue		queueRead;
85 };
86 
87 struct Queue
88 {
Queuevkt::synchronization::__anon1b17ad200111::Queue89 	Queue	(const deUint32 familyOp, const VkQueue queueOp)
90 		:	family	(familyOp)
91 		,	queue	(queueOp)
92 	{}
93 
94 	deUint32	family;
95 	VkQueue		queue;
96 };
97 
checkQueueFlags(VkQueueFlags availableFlags,const VkQueueFlags neededFlags)98 bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
99 {
100 	if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
101 		availableFlags |= VK_QUEUE_TRANSFER_BIT;
102 
103 	return (availableFlags & neededFlags) != 0;
104 }
105 
// Process-wide helper (lazily created singleton, see getInstance()) that
// builds one custom logical device exposing up to two queues from every queue
// family reported by the physical device. Tests use it to obtain pairs of
// distinct queues for cross-queue synchronization while paying the device
// creation cost only once.
106 class MultiQueues
107 {
	// Per-family bookkeeping: the family's capability flags and the queue
	// handles retrieved from the created device (at most 2 per family).
108 	struct QueueData
109 	{
110 		VkQueueFlags			flags;
111 		std::vector<VkQueue>	queue;
112 	};
113 
	// Private: construct only through getInstance().
MultiQueues(Context & context,SynchronizationType type,bool timelineSemaphore)114 	MultiQueues	(Context& context, SynchronizationType type, bool timelineSemaphore)
115 #ifdef CTS_USES_VULKANSC
116 		: m_instance	(createCustomInstanceFromContext(context)),
117 #else
118 		:
119 #endif // CTS_USES_VULKANSC
120 		m_queueCount	(0)
121 	{
122 #ifdef CTS_USES_VULKANSC
123 		const InstanceInterface&					instanceDriver			= m_instance.getDriver();
124 		const VkPhysicalDevice						physicalDevice			= chooseDevice(instanceDriver, m_instance, context.getTestContext().getCommandLine());
125 		const VkInstance							instance				= m_instance;
126 #else
127 		const InstanceInterface&					instanceDriver			= context.getInstanceInterface();
128 		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
129 		const VkInstance							instance				= context.getInstance();
130 #endif // CTS_USES_VULKANSC
131 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);
132 
		// Register every family, clamping the requested queue count to 2 so
		// that same-family queue pairs exist where the family allows it.
133 		for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
134 		{
135 			addQueueIndex(queuePropertiesNdx,
136 						  std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
137 						  queueFamilyProperties[queuePropertiesNdx].queueFlags);
138 		}
139 
140 		std::vector<VkDeviceQueueCreateInfo>	queueInfos;
141 		const float								queuePriorities[2] = { 1.0f, 1.0f };	//get max 2 queues from one family
142 
		// One VkDeviceQueueCreateInfo per registered family.
143 		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
144 		{
145 			const VkDeviceQueueCreateInfo queueInfo	=
146 			{
147 				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		//VkStructureType			sType;
148 				DE_NULL,										//const void*				pNext;
149 				(VkDeviceQueueCreateFlags)0u,					//VkDeviceQueueCreateFlags	flags;
150 				it->first,										//deUint32					queueFamilyIndex;
151 				static_cast<deUint32>(it->second.queue.size()),	//deUint32					queueCount;
152 				&queuePriorities[0]								//const float*				pQueuePriorities;
153 			};
154 			queueInfos.push_back(queueInfo);
155 		}
156 
157 		{
			// Feature query/enable chain: timeline-semaphore and
			// synchronization2 feature structs are appended to the pNext
			// chain only when the corresponding functionality is requested.
158 			VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
159 			VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
160 			VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
161 			void**										nextPtr						= &createPhysicalFeature.pNext;
162 
163 			std::vector<const char*> deviceExtensions;
164 			if (timelineSemaphore)
165 			{
				// Only list the extension when it is not already core in the
				// API version used by the test run.
166 				if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_timeline_semaphore"))
167 					deviceExtensions.push_back("VK_KHR_timeline_semaphore");
168 				addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
169 			}
170 			if (type == SynchronizationType::SYNCHRONIZATION2)
171 			{
172 				deviceExtensions.push_back("VK_KHR_synchronization2");
173 				addToChainVulkanStructure(&nextPtr, synchronization2Features);
174 			}
175 
176 			void* pNext												= &createPhysicalFeature;
177 #ifdef CTS_USES_VULKANSC
			// Vulkan SC: prepend object-reservation and SC 1.0 feature
			// structs to the device pNext chain; in sub-process runs also
			// attach the recorded pipeline cache and pipeline pool sizes.
178 			VkDeviceObjectReservationCreateInfo memReservationInfo	= context.getTestContext().getCommandLine().isSubProcess() ? context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
179 			memReservationInfo.pNext								= pNext;
180 			pNext													= &memReservationInfo;
181 
182 			VkPhysicalDeviceVulkanSC10Features sc10Features			= createDefaultSC10Features();
183 			sc10Features.pNext										= pNext;
184 			pNext													= &sc10Features;
185 
186 			VkPipelineCacheCreateInfo			pcCI;
187 			std::vector<VkPipelinePoolSize>		poolSizes;
188 			if (context.getTestContext().getCommandLine().isSubProcess())
189 			{
190 				if (context.getResourceInterface()->getCacheDataSize() > 0)
191 				{
192 					pcCI =
193 					{
194 						VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,		// VkStructureType				sType;
195 						DE_NULL,											// const void*					pNext;
196 						VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
197 							VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,	// VkPipelineCacheCreateFlags	flags;
198 						context.getResourceInterface()->getCacheDataSize(),	// deUintptr					initialDataSize;
199 						context.getResourceInterface()->getCacheData()		// const void*					pInitialData;
200 					};
201 					memReservationInfo.pipelineCacheCreateInfoCount		= 1;
202 					memReservationInfo.pPipelineCacheCreateInfos		= &pcCI;
203 				}
204 
205 				poolSizes							= context.getResourceInterface()->getPipelinePoolSizes();
206 				if (!poolSizes.empty())
207 				{
208 					memReservationInfo.pipelinePoolSizeCount			= deUint32(poolSizes.size());
209 					memReservationInfo.pPipelinePoolSizes				= poolSizes.data();
210 				}
211 			}
212 #endif // CTS_USES_VULKANSC
213 
214 			const VkDeviceCreateInfo deviceInfo =
215 			{
216 				VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,							//VkStructureType					sType;
217 				pNext,															//const void*						pNext;
218 				0u,																//VkDeviceCreateFlags				flags;
219 				static_cast<deUint32>(queueInfos.size()),						//deUint32							queueCreateInfoCount;
220 				&queueInfos[0],													//const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
221 				0u,																//deUint32							enabledLayerCount;
222 				DE_NULL,														//const char* const*				ppEnabledLayerNames;
223 				static_cast<deUint32>(deviceExtensions.size()),					//deUint32							enabledExtensionCount;
224 				deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0],		//const char* const*				ppEnabledExtensionNames;
225 				DE_NULL															//const VkPhysicalDeviceFeatures*	pEnabledFeatures;
226 			};
227 
228 			m_logicalDevice	= createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(), instance, instanceDriver, physicalDevice, &deviceInfo);
229 #ifndef CTS_USES_VULKANSC
230 			m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance, *m_logicalDevice));
231 #else
232 			m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), context.getDeviceVulkanSC10Properties(), context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
233 #endif // CTS_USES_VULKANSC
234 			m_allocator		= MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instanceDriver, physicalDevice)));
235 
			// Fetch the actual queue handles for every slot reserved above.
236 			for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
237 			for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
238 				m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
239 		}
240 	}
241 
	// Registers a queue family with 'count' queue slots and its capability
	// flags. Note: m_queueCount is incremented once per call (i.e. it counts
	// registered families, not individual queues).
addQueueIndex(const deUint32 queueFamilyIndex,const deUint32 count,const VkQueueFlags flags)242 	void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
243 	{
244 		QueueData dataToPush;
245 		dataToPush.flags = flags;
246 		dataToPush.queue.resize(count);
247 		m_queues[queueFamilyIndex] = dataToPush;
248 
249 		m_queueCount++;
250 	}
251 
252 public:
~MultiQueues()253 	~MultiQueues()
254 	{
255 	}
256 
	// Returns all pairs of distinct queues where the first queue supports
	// 'flagsWrite' and the second supports 'flagsRead'. At most one pair is
	// produced per (write family, read family) combination. Throws
	// NotSupportedError when no such pair exists.
getQueuesPairs(const VkQueueFlags flagsWrite,const VkQueueFlags flagsRead) const257 	std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead) const
258 	{
259 		std::map<deUint32, QueueData>	queuesWrite;
260 		std::map<deUint32, QueueData>	queuesRead;
261 		std::vector<QueuePair>			queuesPairs;
262 
		// Classify each family as write-capable, read-capable, or both.
263 		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
264 		{
265 			const bool writeQueue	= checkQueueFlags(it->second.flags, flagsWrite);
266 			const bool readQueue	= checkQueueFlags(it->second.flags, flagsRead);
267 
268 			if (!(writeQueue || readQueue))
269 				continue;
270 
271 			if (writeQueue && readQueue)
272 			{
273 				queuesWrite[it->first]	= it->second;
274 				queuesRead[it->first]	= it->second;
275 			}
276 			else if (writeQueue)
277 				queuesWrite[it->first]	= it->second;
278 			else if (readQueue)
279 				queuesRead[it->first]	= it->second;
280 		}
281 
		// For every family combination pick the first pair of queues that are
		// two different VkQueue handles (same family is fine if it exposes
		// two queues).
282 		for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
283 		for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
284 		{
285 			const int writeSize	= static_cast<int>(write->second.queue.size());
286 			const int readSize	= static_cast<int>(read->second.queue.size());
287 
288 			for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
289 			for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
290 			{
291 				if (write->second.queue[writeNdx] != read->second.queue[readNdx])
292 				{
293 					queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
294 					writeNdx = readNdx = std::max(writeSize, readSize);	//exit from the loops
295 				}
296 			}
297 		}
298 
299 		if (queuesPairs.empty())
300 			TCU_THROW(NotSupportedError, "Queue not found");
301 
302 		return queuesPairs;
303 	}
304 
	// Returns the first queue of the first family that satisfies 'flagsOp';
	// throws NotSupportedError when no family matches.
getDefaultQueue(const VkQueueFlags flagsOp) const305 	Queue getDefaultQueue(const VkQueueFlags flagsOp) const
306 	{
307 		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
308 		{
309 			if (checkQueueFlags(it->second.flags, flagsOp))
310 				return Queue(it->first, it->second.queue[0]);
311 		}
312 
313 		TCU_THROW(NotSupportedError, "Queue not found");
314 	}
315 
	// Returns queue 'queueIdx' of family 'familyIdx' (no bounds checking).
getQueue(const deUint32 familyIdx,const deUint32 queueIdx)316 	Queue getQueue (const deUint32 familyIdx, const deUint32 queueIdx)
317 	{
318 		return Queue(familyIdx, m_queues[familyIdx].queue[queueIdx]);
319 	}
320 
getQueueFamilyFlags(const deUint32 familyIdx)321 	VkQueueFlags getQueueFamilyFlags (const deUint32 familyIdx)
322 	{
323 		return m_queues[familyIdx].flags;
324 	}
325 
	// Number of queues obtained from the given family (1 or 2).
queueFamilyCount(const deUint32 familyIdx)326 	deUint32 queueFamilyCount (const deUint32 familyIdx)
327 	{
328 		return (deUint32) m_queues[familyIdx].queue.size();
329 	}
330 
familyCount(void) const331 	deUint32 familyCount (void) const
332 	{
333 		return (deUint32) m_queues.size();
334 	}
335 
	// Total number of queue handles across all families.
totalQueueCount(void)336 	deUint32 totalQueueCount (void)
337 	{
338 		deUint32	count	= 0;
339 
340 		for (deUint32 familyIdx = 0; familyIdx < familyCount(); familyIdx++)
341 		{
342 			count	+= queueFamilyCount(familyIdx);
343 		}
344 
345 		return count;
346 	}
347 
getDevice(void) const348 	VkDevice getDevice (void) const
349 	{
350 		return *m_logicalDevice;
351 	}
352 
getDeviceInterface(void) const353 	const DeviceInterface& getDeviceInterface (void) const
354 	{
355 		return *m_deviceDriver;
356 	}
357 
getAllocator(void)358 	Allocator& getAllocator (void)
359 	{
360 		return *m_allocator;
361 	}
362 
	// Lazily creates the shared instance on first use. NOTE(review): the
	// first caller's 'type'/'timelineSemaphore' arguments determine the
	// device configuration for all later callers; no synchronization is
	// visible here, so concurrent first calls are presumably not expected.
getInstance(Context & context,SynchronizationType type,bool timelineSemaphore)363 	static SharedPtr<MultiQueues> getInstance(Context& context, SynchronizationType type, bool timelineSemaphore)
364 	{
365 		if (!m_multiQueues)
366 			m_multiQueues = SharedPtr<MultiQueues>(new MultiQueues(context, type, timelineSemaphore));
367 
368 		return m_multiQueues;
369 	}
	// Drops the shared reference so the device can be destroyed.
destroy()370 	static void destroy()
371 	{
372 		m_multiQueues.clear();
373 	}
374 
375 private:
376 #ifdef CTS_USES_VULKANSC
377 	CustomInstance					m_instance;
378 #endif // CTS_USES_VULKANSC
379 	Move<VkDevice>					m_logicalDevice;
380 #ifndef CTS_USES_VULKANSC
381 	de::MovePtr<vk::DeviceDriver>	m_deviceDriver;
382 #else
383 	de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>	m_deviceDriver;
384 #endif // CTS_USES_VULKANSC
385 	MovePtr<Allocator>				m_allocator;
386 	std::map<deUint32, QueueData>	m_queues;		// per-family flags and queue handles
387 	deUint32						m_queueCount;	// number of families registered (see addQueueIndex)
388 
389 	static SharedPtr<MultiQueues>	m_multiQueues;
390 };
// Definition of the singleton storage declared above.
391 SharedPtr<MultiQueues>				MultiQueues::m_multiQueues;
392 
createBarrierMultiQueue(SynchronizationWrapperPtr synchronizationWrapper,const VkCommandBuffer & cmdBuffer,const SyncInfo & writeSync,const SyncInfo & readSync,const Resource & resource,const deUint32 writeFamily,const deUint32 readFamily,const VkSharingMode sharingMode,const bool secondQueue=false)393 void createBarrierMultiQueue (SynchronizationWrapperPtr synchronizationWrapper,
394 							  const VkCommandBuffer&	cmdBuffer,
395 							  const SyncInfo&			writeSync,
396 							  const SyncInfo&			readSync,
397 							  const Resource&			resource,
398 							  const deUint32			writeFamily,
399 							  const deUint32			readFamily,
400 							  const VkSharingMode		sharingMode,
401 							  const bool				secondQueue = false)
402 {
403 	if (resource.getType() == RESOURCE_TYPE_IMAGE)
404 	{
405 		VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
406 			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
407 			secondQueue ? 0u : writeSync.accessMask,
408 			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
409 			!secondQueue ? 0u : readSync.accessMask,
410 			writeSync.imageLayout,
411 			readSync.imageLayout,
412 			resource.getImage().handle,
413 			resource.getImage().subresourceRange
414 		);
415 
416 		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
417 		{
418 			imageMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
419 			imageMemoryBarrier2.dstQueueFamilyIndex = readFamily;
420 
421 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
422 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
423 		}
424 		else if (!secondQueue)
425 		{
426 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
427 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
428 		}
429 	}
430 	else
431 	{
432 		VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
433 			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
434 			secondQueue ? 0u : writeSync.accessMask,
435 			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
436 			!secondQueue ? 0u : readSync.accessMask,
437 			resource.getBuffer().handle,
438 			resource.getBuffer().offset,
439 			resource.getBuffer().size
440 		);
441 
442 		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
443 		{
444 			bufferMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
445 			bufferMemoryBarrier2.dstQueueFamilyIndex = readFamily;
446 		}
447 
448 		VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
449 		synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
450 	}
451 }
452 
// Common state for the multi-queue synchronization tests: acquires the shared
// MultiQueues helper and builds an OperationContext bound to its custom
// device, allocator and device interface.
453 class BaseTestInstance : public TestInstance
454 {
455 public:
BaseTestInstance(Context & context,SynchronizationType type,const ResourceDescription & resourceDesc,const OperationSupport & writeOp,const OperationSupport & readOp,PipelineCacheData & pipelineCacheData,bool timelineSemaphore)456 	BaseTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, bool timelineSemaphore)
457 		: TestInstance		(context)
458 		, m_type			(type)
459 		, m_queues			(MultiQueues::getInstance(context, type, timelineSemaphore))
		// m_opContext is initialized after m_queues (declaration order below)
		// because it consumes the queues' device, interface and allocator.
460 		, m_opContext		(new OperationContext(context, type, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator(), pipelineCacheData))
461 		, m_resourceDesc	(resourceDesc)
462 		, m_writeOp			(writeOp)
463 		, m_readOp			(readOp)
464 	{
465 	}
466 
467 protected:
468 	const SynchronizationType			m_type;
469 	const SharedPtr<MultiQueues>		m_queues;		// shared device + queues (must precede m_opContext)
470 	const UniquePtr<OperationContext>	m_opContext;
471 	const ResourceDescription			m_resourceDesc;
472 	const OperationSupport&				m_writeOp;		// factory for the producing operation
473 	const OperationSupport&				m_readOp;		// factory for the consuming operation
474 };
475 
// Verifies a write -> read dependency across two queues using a binary
// semaphore: the write submission signals it and the read submission waits on
// it. The test is repeated for every eligible pair of distinct queues.
476 class BinarySemaphoreTestInstance : public BaseTestInstance
477 {
478 public:
BinarySemaphoreTestInstance(Context & context,SynchronizationType type,const ResourceDescription & resourceDesc,const OperationSupport & writeOp,const OperationSupport & readOp,PipelineCacheData & pipelineCacheData,const VkSharingMode sharingMode)479 	BinarySemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
480 		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
481 		, m_sharingMode		(sharingMode)
482 	{
483 	}
484 
iterate(void)485 	tcu::TestStatus	iterate (void)
486 	{
487 		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
488 		const VkDevice					device		= m_opContext->getDevice();
489 		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));
490 
		// Run the write/read round-trip once per queue pair.
491 		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
492 		{
493 			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
494 			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
495 			const UniquePtr<Operation>		readOp			(m_readOp.build (*m_opContext, *resource));
496 
			// One command pool/buffer per queue family (indexed by QueueType).
497 			const Move<VkCommandPool>			cmdPool[]		=
498 			{
499 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
500 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
501 			};
502 			const Move<VkCommandBuffer>			ptrCmdBuffer[]	=
503 			{
504 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
505 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
506 			};
507 			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]	=
508 			{
509 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
510 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ]),
511 			};
			// Binary semaphore: signaled by the write submission, waited on
			// by the read submission.
512 			const Unique<VkSemaphore>			semaphore		(createSemaphore(vk, device));
513 			VkSemaphoreSubmitInfoKHR			waitSemaphoreSubmitInfo =
514 				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
515 			VkSemaphoreSubmitInfoKHR			signalSemaphoreSubmitInfo =
516 				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
517 			SynchronizationWrapperPtr			synchronizationWrapper[]
518 			{
519 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
520 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
521 			};
522 
523 			synchronizationWrapper[QUEUETYPE_WRITE]->addSubmitInfo(
524 				0u,
525 				DE_NULL,
526 				1u,
527 				&cmdBufferInfos[QUEUETYPE_WRITE],
528 				1u,
529 				&signalSemaphoreSubmitInfo
530 			);
531 			synchronizationWrapper[QUEUETYPE_READ]->addSubmitInfo(
532 				1u,
533 				&waitSemaphoreSubmitInfo,
534 				1u,
535 				&cmdBufferInfos[QUEUETYPE_READ],
536 				0u,
537 				DE_NULL
538 			);
539 
540 			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
541 			const SyncInfo					readSync		= readOp->getInSyncInfo();
542 			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
543 			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;
544 
			// Write queue: record the write, then the barrier (release half
			// of the ownership transfer when families differ, see
			// createBarrierMultiQueue).
545 			beginCommandBuffer		(vk, writeCmdBuffer);
546 			writeOp->recordCommands	(writeCmdBuffer);
547 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
548 			endCommandBuffer		(vk, writeCmdBuffer);
549 
			// Read queue: acquire half of the barrier first (secondQueue=true),
			// then the read itself.
550 			beginCommandBuffer		(vk, readCmdBuffer);
551 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
552 			readOp->recordCommands	(readCmdBuffer);
553 			endCommandBuffer		(vk, readCmdBuffer);
554 
555 			VK_CHECK(synchronizationWrapper[QUEUETYPE_WRITE]->queueSubmit(queuePairs[pairNdx].queueWrite, DE_NULL));
556 			VK_CHECK(synchronizationWrapper[QUEUETYPE_READ]->queueSubmit(queuePairs[pairNdx].queueRead, DE_NULL));
557 			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
558 			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));
559 
			// Compare what the read operation observed against what the write
			// operation produced.
560 			{
561 				const Data	expected	= writeOp->getData();
562 				const Data	actual		= readOp->getData();
563 
564 #ifdef CTS_USES_VULKANSC
				// Vulkan SC main process only records resources; results are
				// validated in the sub-process run.
565 				if (m_context.getTestContext().getCommandLine().isSubProcess())
566 #endif // CTS_USES_VULKANSC
567 				{
568 					if (isIndirectBuffer(m_resourceDesc.type))
569 					{
						// Indirect buffers carry a counter; it may only grow.
570 						const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
571 						const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
572 
573 						if (actualValue < expectedValue)
574 							return tcu::TestStatus::fail("Counter value is smaller than expected");
575 					}
576 					else
577 					{
578 						if (0 != deMemCmp(expected.data, actual.data, expected.size))
579 							return tcu::TestStatus::fail("Memory contents don't match");
580 					}
581 				}
582 			}
583 		}
584 		return tcu::TestStatus::pass("OK");
585 	}
586 
587 private:
588 	const VkSharingMode	m_sharingMode;
589 };
590 
// Wraps a Move<T> (unique ownership of a Vulkan handle) into a reference
// counted SharedPtr so the handle can be stored in copyable containers such
// as std::vector; the by-value argument hands the handle over to the
// heap-allocated Move<T>.
591 template<typename T>
makeVkSharedPtr(Move<T> move)592 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
593 {
594 	return SharedPtr<Move<T> >(new Move<T>(move));
595 }
596 
597 class TimelineSemaphoreTestInstance : public BaseTestInstance
598 {
599 public:
TimelineSemaphoreTestInstance(Context & context,SynchronizationType type,const ResourceDescription & resourceDesc,const SharedPtr<OperationSupport> & writeOp,const SharedPtr<OperationSupport> & readOp,PipelineCacheData & pipelineCacheData,const VkSharingMode sharingMode)600 	TimelineSemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const SharedPtr<OperationSupport>& writeOp, const SharedPtr<OperationSupport>& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
601 		: BaseTestInstance	(context, type, resourceDesc, *writeOp, *readOp, pipelineCacheData, true)
602 		, m_sharingMode		(sharingMode)
603 	{
604 		deUint32				maxQueues		= 0;
605 		std::vector<deUint32>	queueFamilies;
606 
607 		if (m_queues->totalQueueCount() < 2)
608 			TCU_THROW(NotSupportedError, "Not enough queues");
609 
610 		for (deUint32 familyNdx = 0; familyNdx < m_queues->familyCount(); familyNdx++)
611 		{
612 			maxQueues = std::max(m_queues->queueFamilyCount(familyNdx), maxQueues);
613 			queueFamilies.push_back(familyNdx);
614 		}
615 
616 		// Create a chain of operations copying data from one resource
617 		// to another across at least every single queue of the system
618 		// at least once. Each of the operation will be executing with
619 		// a dependency on the previous using timeline points.
620 		m_opSupports.push_back(writeOp);
621 		m_opQueues.push_back(m_queues->getDefaultQueue(writeOp->getQueueFlags(*m_opContext)));
622 
623 		for (deUint32 queueIdx = 0; queueIdx < maxQueues; queueIdx++)
624 		{
625 			for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
626 			{
627 				for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++)
628 				{
629 					if (isResourceSupported(s_copyOps[copyOpIdx], resourceDesc))
630 					{
631 						SharedPtr<OperationSupport>	opSupport	(makeOperationSupport(s_copyOps[copyOpIdx], m_resourceDesc).release());
632 
633 						if (!checkQueueFlags(opSupport->getQueueFlags(*m_opContext), m_queues->getQueueFamilyFlags(familyIdx)))
634 							continue;
635 
636 						m_opSupports.push_back(opSupport);
637 						m_opQueues.push_back(m_queues->getQueue(familyIdx, queueIdx % m_queues->queueFamilyCount(familyIdx)));
638 						break;
639 					}
640 				}
641 			}
642 		}
643 
644 		m_opSupports.push_back(readOp);
645 		m_opQueues.push_back(m_queues->getDefaultQueue(readOp->getQueueFlags(*m_opContext)));
646 
647 		// Now create the resources with the usage associated to the
648 		// operation performed on the resource.
649 		for (deUint32 opIdx = 0; opIdx < (m_opSupports.size() - 1); opIdx++)
650 		{
651 			deUint32 usage = m_opSupports[opIdx]->getOutResourceUsageFlags() | m_opSupports[opIdx + 1]->getInResourceUsageFlags();
652 
653 			m_resources.push_back(SharedPtr<Resource>(new Resource(*m_opContext, m_resourceDesc, usage, m_sharingMode, queueFamilies)));
654 		}
655 
656 		// Finally create the operations using the resources.
657 		m_ops.push_back(SharedPtr<Operation>(m_opSupports[0]->build(*m_opContext, *m_resources[0]).release()));
658 		for (deUint32 opIdx = 1; opIdx < (m_opSupports.size() - 1); opIdx++)
659 			m_ops.push_back(SharedPtr<Operation>(m_opSupports[opIdx]->build(*m_opContext, *m_resources[opIdx - 1], *m_resources[opIdx]).release()));
660 		m_ops.push_back(SharedPtr<Operation>(m_opSupports[m_opSupports.size() - 1]->build(*m_opContext, *m_resources.back()).release()));
661 	}
662 
iterate(void)663 	tcu::TestStatus	iterate (void)
664 	{
665 		const DeviceInterface&							vk				= m_opContext->getDeviceInterface();
666 		const VkDevice									device			= m_opContext->getDevice();
667 		de::Random										rng				(1234);
668 		const Unique<VkSemaphore>						semaphore		(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
669 		std::vector<SharedPtr<Move<VkCommandPool> > >	cmdPools;
670 		std::vector<SharedPtr<Move<VkCommandBuffer> > >	ptrCmdBuffers;
671 		std::vector<VkCommandBufferSubmitInfoKHR>		cmdBufferInfos;
672 		std::vector<deUint64>							timelineValues;
673 
674 		cmdPools.resize(m_queues->familyCount());
675 		for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
676 			cmdPools[familyIdx] = makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, familyIdx));
677 
678 		ptrCmdBuffers.resize(m_ops.size());
679 		cmdBufferInfos.resize(m_ops.size());
680 		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
681 		{
682 			deUint64	increment	= 1 + rng.getUint8();
683 
684 			ptrCmdBuffers[opIdx] = makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools[m_opQueues[opIdx].family]));
685 			cmdBufferInfos[opIdx] = makeCommonCommandBufferSubmitInfo(**ptrCmdBuffers[opIdx]);
686 
687 			timelineValues.push_back(timelineValues.empty() ? increment : (timelineValues.back() + increment));
688 		}
689 
690 		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
691 		{
692 			VkCommandBuffer				cmdBuffer = cmdBufferInfos[opIdx].commandBuffer;
693 			VkSemaphoreSubmitInfoKHR	waitSemaphoreSubmitInfo =
694 				makeCommonSemaphoreSubmitInfo(*semaphore, (opIdx == 0 ? 0u : timelineValues[opIdx - 1]), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
695 			VkSemaphoreSubmitInfoKHR	signalSemaphoreSubmitInfo =
696 				makeCommonSemaphoreSubmitInfo(*semaphore, timelineValues[opIdx], VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
697 			SynchronizationWrapperPtr	synchronizationWrapper = getSynchronizationWrapper(m_type, vk, DE_TRUE);
698 
699 			synchronizationWrapper->addSubmitInfo(
700 				opIdx == 0 ? 0u : 1u,
701 				&waitSemaphoreSubmitInfo,
702 				1u,
703 				&cmdBufferInfos[opIdx],
704 				1u,
705 				&signalSemaphoreSubmitInfo,
706 				opIdx == 0 ? DE_FALSE : DE_TRUE,
707 				DE_TRUE
708 			);
709 
710 			beginCommandBuffer(vk, cmdBuffer);
711 
712 			if (opIdx > 0)
713 			{
714 				const SyncInfo	writeSync	= m_ops[opIdx - 1]->getOutSyncInfo();
715 				const SyncInfo	readSync	= m_ops[opIdx]->getInSyncInfo();
716 				const Resource&	resource	= *m_resources[opIdx - 1].get();
717 
718 				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx - 1].family, m_opQueues[opIdx].family, m_sharingMode, true);
719 			}
720 
721 			m_ops[opIdx]->recordCommands(cmdBuffer);
722 
723 			if (opIdx < (m_ops.size() - 1))
724 			{
725 				const SyncInfo	writeSync	= m_ops[opIdx]->getOutSyncInfo();
726 				const SyncInfo	readSync	= m_ops[opIdx + 1]->getInSyncInfo();
727 				const Resource&	resource	= *m_resources[opIdx].get();
728 
729 				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx].family, m_opQueues[opIdx + 1].family, m_sharingMode);
730 			}
731 
732 			endCommandBuffer(vk, cmdBuffer);
733 
734 			VK_CHECK(synchronizationWrapper->queueSubmit(m_opQueues[opIdx].queue, DE_NULL));
735 		}
736 
737 
738 		VK_CHECK(vk.queueWaitIdle(m_opQueues.back().queue));
739 
740 		{
741 			const Data	expected	= m_ops.front()->getData();
742 			const Data	actual		= m_ops.back()->getData();
743 
744 			if (isIndirectBuffer(m_resourceDesc.type))
745 			{
746 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
747 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
748 
749 				if (actualValue < expectedValue)
750 					return tcu::TestStatus::fail("Counter value is smaller than expected");
751 			}
752 			else
753 			{
754 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
755 					return tcu::TestStatus::fail("Memory contents don't match");
756 			}
757 		}
758 
759 		// Make the validation layers happy.
760 		for (deUint32 opIdx = 0; opIdx < m_opQueues.size(); opIdx++)
761 			VK_CHECK(vk.queueWaitIdle(m_opQueues[opIdx].queue));
762 
763 		return tcu::TestStatus::pass("OK");
764 	}
765 
766 private:
767 	const VkSharingMode							m_sharingMode;
768 	std::vector<SharedPtr<OperationSupport> >	m_opSupports;
769 	std::vector<SharedPtr<Operation> >			m_ops;
770 	std::vector<SharedPtr<Resource> >			m_resources;
771 	std::vector<Queue>							m_opQueues;
772 };
773 
774 class FenceTestInstance : public BaseTestInstance
775 {
776 public:
FenceTestInstance(Context & context,SynchronizationType type,const ResourceDescription & resourceDesc,const OperationSupport & writeOp,const OperationSupport & readOp,PipelineCacheData & pipelineCacheData,const VkSharingMode sharingMode)777 	FenceTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
778 		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
779 		, m_sharingMode		(sharingMode)
780 	{
781 	}
782 
iterate(void)783 	tcu::TestStatus	iterate (void)
784 	{
785 		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
786 		const VkDevice					device		= m_opContext->getDevice();
787 		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));
788 
789 		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
790 		{
791 			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
792 			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
793 			const UniquePtr<Operation>		readOp			(m_readOp.build(*m_opContext, *resource));
794 			const Move<VkCommandPool>		cmdPool[]
795 			{
796 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
797 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
798 			};
799 			const Move<VkCommandBuffer>		ptrCmdBuffer[]
800 			{
801 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
802 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
803 			};
804 			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]
805 			{
806 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
807 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ])
808 			};
809 			SynchronizationWrapperPtr		synchronizationWrapper[]
810 			{
811 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
812 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
813 			};
814 			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
815 			const SyncInfo					readSync		= readOp->getInSyncInfo();
816 			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
817 			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;
818 
819 			beginCommandBuffer		(vk, writeCmdBuffer);
820 			writeOp->recordCommands	(writeCmdBuffer);
821 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
822 			endCommandBuffer		(vk, writeCmdBuffer);
823 
824 			submitCommandsAndWait	(synchronizationWrapper[QUEUETYPE_WRITE], vk, device, queuePairs[pairNdx].queueWrite, writeCmdBuffer);
825 
826 			beginCommandBuffer		(vk, readCmdBuffer);
827 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
828 			readOp->recordCommands	(readCmdBuffer);
829 			endCommandBuffer		(vk, readCmdBuffer);
830 
831 			submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_READ], vk, device, queuePairs[pairNdx].queueRead, readCmdBuffer);
832 
833 			{
834 				const Data	expected = writeOp->getData();
835 				const Data	actual	 = readOp->getData();
836 
837 #ifdef CTS_USES_VULKANSC
838 				if (m_context.getTestContext().getCommandLine().isSubProcess())
839 #endif // CTS_USES_VULKANSC
840 				{
841 					if (isIndirectBuffer(m_resourceDesc.type))
842 					{
843 						const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
844 						const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
845 
846 						if (actualValue < expectedValue)
847 							return tcu::TestStatus::fail("Counter value is smaller than expected");
848 					}
849 					else
850 					{
851 						if (0 != deMemCmp(expected.data, actual.data, expected.size))
852 							return tcu::TestStatus::fail("Memory contents don't match");
853 					}
854 				}
855 			}
856 		}
857 		return tcu::TestStatus::pass("OK");
858 	}
859 
860 private:
861 	const VkSharingMode	m_sharingMode;
862 };
863 
864 class BaseTestCase : public TestCase
865 {
866 public:
BaseTestCase(tcu::TestContext & testCtx,const std::string & name,const std::string & description,SynchronizationType type,const SyncPrimitive syncPrimitive,const ResourceDescription resourceDesc,const OperationName writeOp,const OperationName readOp,const VkSharingMode sharingMode,PipelineCacheData & pipelineCacheData)867 	BaseTestCase (tcu::TestContext&			testCtx,
868 				  const std::string&		name,
869 				  const std::string&		description,
870 				  SynchronizationType		type,
871 				  const SyncPrimitive		syncPrimitive,
872 				  const ResourceDescription	resourceDesc,
873 				  const OperationName		writeOp,
874 				  const OperationName		readOp,
875 				  const VkSharingMode		sharingMode,
876 				  PipelineCacheData&		pipelineCacheData)
877 		: TestCase				(testCtx, name, description)
878 		, m_type				(type)
879 		, m_resourceDesc		(resourceDesc)
880 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
881 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
882 		, m_syncPrimitive		(syncPrimitive)
883 		, m_sharingMode			(sharingMode)
884 		, m_pipelineCacheData	(pipelineCacheData)
885 	{
886 	}
887 
initPrograms(SourceCollections & programCollection) const888 	void initPrograms (SourceCollections& programCollection) const
889 	{
890 		m_writeOp->initPrograms(programCollection);
891 		m_readOp->initPrograms(programCollection);
892 
893 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
894 		{
895 			for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
896 			{
897 				if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
898 					makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
899 			}
900 		}
901 	}
902 
checkSupport(Context & context) const903 	void checkSupport(Context& context) const
904 	{
905 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
906 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
907 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
908 			context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
909 
910 		const InstanceInterface&					instance				= context.getInstanceInterface();
911 		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
912 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
913 		if (m_sharingMode == VK_SHARING_MODE_CONCURRENT && queueFamilyProperties.size() < 2)
914 			TCU_THROW(NotSupportedError, "Concurrent requires more than 1 queue family");
915 
916 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
917 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
918 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
919 
920 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
921 		{
922 			VkImageFormatProperties	imageFormatProperties;
923 			const deUint32			usage					= m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
924 			const VkResult			formatResult			= instance.getPhysicalDeviceImageFormatProperties(physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage, (VkImageCreateFlags)0, &imageFormatProperties);
925 
926 			if (formatResult != VK_SUCCESS)
927 				TCU_THROW(NotSupportedError, "Image format is not supported");
928 
929 			if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
930 				TCU_THROW(NotSupportedError, "Requested sample count is not supported");
931 		}
932 	}
933 
createInstance(Context & context) const934 	TestInstance* createInstance (Context& context) const
935 	{
936 		switch (m_syncPrimitive)
937 		{
938 			case SYNC_PRIMITIVE_FENCE:
939 				return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
940 			case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
941 				return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
942 			case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
943 				return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData, m_sharingMode);
944 			default:
945 				DE_ASSERT(0);
946 				return DE_NULL;
947 		}
948 	}
949 
950 private:
951 	const SynchronizationType				m_type;
952 	const ResourceDescription				m_resourceDesc;
953 	const SharedPtr<OperationSupport>		m_writeOp;
954 	const SharedPtr<OperationSupport>		m_readOp;
955 	const SyncPrimitive						m_syncPrimitive;
956 	const VkSharingMode						m_sharingMode;
957 	PipelineCacheData&						m_pipelineCacheData;
958 };
959 
// Parameters forwarded to createTests()/cleanupGroup() via createTestGroup().
struct TestData
{
	// Which synchronization API variant the generated tests target
	// (legacy vs VK_KHR_synchronization2).
	SynchronizationType		type;
	// Shared pipeline cache data passed to every generated test case (non-owning).
	PipelineCacheData*		pipelineCacheData;
};
965 
createTests(tcu::TestCaseGroup * group,TestData data)966 void createTests (tcu::TestCaseGroup* group, TestData data)
967 {
968 	tcu::TestContext& testCtx = group->getTestContext();
969 
970 	static const struct
971 	{
972 		const char*		name;
973 		SyncPrimitive	syncPrimitive;
974 		int				numOptions;
975 	} groups[] =
976 	{
977 		{ "fence",				SYNC_PRIMITIVE_FENCE,				1 },
978 		{ "binary_semaphore",	SYNC_PRIMITIVE_BINARY_SEMAPHORE,	1 },
979 		{ "timeline_semaphore",	SYNC_PRIMITIVE_TIMELINE_SEMAPHORE,	1 }
980 	};
981 
982 	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
983 	{
984 		MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));
985 
986 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
987 		for (int readOpNdx  = 0; readOpNdx  < DE_LENGTH_OF_ARRAY(s_readOps);  ++readOpNdx)
988 		{
989 			const OperationName	writeOp		= s_writeOps[writeOpNdx];
990 			const OperationName	readOp		= s_readOps[readOpNdx];
991 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
992 			bool				empty		= true;
993 
994 			MovePtr<tcu::TestCaseGroup> opGroup		(new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));
995 
996 			for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
997 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
998 			{
999 				const ResourceDescription&	resource	= s_resources[resourceNdx];
1000 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1001 				{
1002 					std::string					name		= getResourceName(resource);
1003 					VkSharingMode				sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1004 
1005 					// queue family sharing mode used for resource
1006 					if (optionNdx)
1007 					{
1008 						name += "_concurrent";
1009 						sharingMode = VK_SHARING_MODE_CONCURRENT;
1010 					}
1011 					else
1012 						name += "_exclusive";
1013 
1014 					opGroup->addChild(new BaseTestCase(testCtx, name, "", data.type, groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *data.pipelineCacheData));
1015 					empty = false;
1016 				}
1017 			}
1018 			if (!empty)
1019 				synchGroup->addChild(opGroup.release());
1020 		}
1021 		group->addChild(synchGroup.release());
1022 	}
1023 }
1024 
cleanupGroup(tcu::TestCaseGroup * group,TestData data)1025 void cleanupGroup (tcu::TestCaseGroup* group, TestData data)
1026 {
1027 	DE_UNREF(group);
1028 	DE_UNREF(data.pipelineCacheData);
1029 	// Destroy singleton object
1030 	MultiQueues::destroy();
1031 }
1032 
1033 } // anonymous
1034 
createSynchronizedOperationMultiQueueTests(tcu::TestContext & testCtx,SynchronizationType type,PipelineCacheData & pipelineCacheData)1035 tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, SynchronizationType type, PipelineCacheData& pipelineCacheData)
1036 {
1037 	TestData data
1038 	{
1039 		type,
1040 		&pipelineCacheData
1041 	};
1042 
1043 	return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, data, cleanupGroup);
1044 }
1045 
1046 } // synchronization
1047 } // vkt
1048