• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Synchronization primitive tests with multi queue
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSynchronizationOperationMultiQueueTests.hpp"
25 #include "vktCustomInstancesDevices.hpp"
26 #include "vkDefs.hpp"
27 #include "vktTestCase.hpp"
28 #include "vktTestCaseUtil.hpp"
29 #include "vkRef.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkBarrierUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkDeviceUtil.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkSafetyCriticalUtil.hpp"
39 #include "deRandom.hpp"
40 #include "deUniquePtr.hpp"
41 #include "deSharedPtr.hpp"
42 #include "tcuTestLog.hpp"
43 #include "vktSynchronizationUtil.hpp"
44 #include "vktSynchronizationOperation.hpp"
45 #include "vktSynchronizationOperationTestData.hpp"
46 #include "vktSynchronizationOperationResources.hpp"
47 #include "vktTestGroupUtil.hpp"
48 #include "tcuCommandLine.hpp"
49 
50 #include <set>
51 
52 namespace vkt
53 {
54 
55 namespace synchronization
56 {
57 
58 namespace
59 {
60 using namespace vk;
61 using de::MovePtr;
62 using de::SharedPtr;
63 using de::UniquePtr;
64 using de::SharedPtr;
65 
// Role of a queue within a write/read pair.  Also used as an index into
// per-queue arrays (command pools, command buffers, synchronization wrappers).
enum QueueType
{
	QUEUETYPE_WRITE,
	QUEUETYPE_READ
};
71 
72 struct QueuePair
73 {
QueuePairvkt::synchronization::__anon438aa3a70111::QueuePair74 	QueuePair	(const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
75 		: familyIndexWrite	(familyWrite)
76 		, familyIndexRead	(familyRead)
77 		, queueWrite		(write)
78 		, queueRead			(read)
79 	{}
80 
81 	deUint32	familyIndexWrite;
82 	deUint32	familyIndexRead;
83 	VkQueue		queueWrite;
84 	VkQueue		queueRead;
85 };
86 
87 struct Queue
88 {
Queuevkt::synchronization::__anon438aa3a70111::Queue89 	Queue	(const deUint32 familyOp, const VkQueue queueOp)
90 		:	family	(familyOp)
91 		,	queue	(queueOp)
92 	{}
93 
94 	deUint32	family;
95 	VkQueue		queue;
96 };
97 
checkQueueFlags(VkQueueFlags availableFlags,const VkQueueFlags neededFlags)98 bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
99 {
100 	if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
101 		availableFlags |= VK_QUEUE_TRANSFER_BIT;
102 
103 	return (availableFlags & neededFlags) != 0;
104 }
105 
// Lazily-created singleton (see getInstance()/destroy()) that owns a custom
// instance/device exposing every available queue family, with up to two queues
// requested per family.  All multi-queue test instances in this group share it.
class MultiQueues
{
	// Per-family bookkeeping: the family's capability flags and the queue
	// handles retrieved from the created device (up to two per family).
	struct QueueData
	{
		VkQueueFlags			flags;
		std::vector<VkQueue>	queue;
	};

	// Private constructor; use the static getInstance() factory.  Creates the
	// device, optionally enabling timeline-semaphore and synchronization2
	// features/extensions, then fetches every queue handle.
	MultiQueues	(Context& context, SynchronizationType type, bool timelineSemaphore)
		: m_instance	(createCustomInstanceFromContext(context))
		, m_queueCount	(0)
	{
		const InstanceInterface&					instanceDriver			= m_instance.getDriver();
		const VkPhysicalDevice						physicalDevice			= chooseDevice(instanceDriver, m_instance, context.getTestContext().getCommandLine());
		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

		// Register every family, clamping the requested queue count to two.
		for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
		{
			addQueueIndex(queuePropertiesNdx,
						  std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
						  queueFamilyProperties[queuePropertiesNdx].queueFlags);
		}

		std::vector<VkDeviceQueueCreateInfo>	queueInfos;
		const float								queuePriorities[2] = { 1.0f, 1.0f };	//get max 2 queues from one family

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
		{
			const VkDeviceQueueCreateInfo queueInfo	=
			{
				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		//VkStructureType			sType;
				DE_NULL,										//const void*				pNext;
				(VkDeviceQueueCreateFlags)0u,					//VkDeviceQueueCreateFlags	flags;
				it->first,										//deUint32					queueFamilyIndex;
				static_cast<deUint32>(it->second.queue.size()),	//deUint32					queueCount;
				&queuePriorities[0]								//const float*				pQueuePriorities;
			};
			queueInfos.push_back(queueInfo);
		}

		{
			// Build the feature pNext chain: features2 is the head, and the
			// optional feature structs are appended through nextPtr.
			VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
			VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
			VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
			void**										nextPtr						= &createPhysicalFeature.pNext;

			std::vector<const char*> deviceExtensions;
			if (timelineSemaphore)
			{
				// Only request the extension when it is not already core API.
				if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_timeline_semaphore"))
					deviceExtensions.push_back("VK_KHR_timeline_semaphore");
				addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
			}
			if (type == SynchronizationType::SYNCHRONIZATION2)
			{
				deviceExtensions.push_back("VK_KHR_synchronization2");
				addToChainVulkanStructure(&nextPtr, synchronization2Features);
			}

			void* pNext												= &createPhysicalFeature;
#ifdef CTS_USES_VULKANSC
			// Vulkan SC: prepend object-reservation and SC 1.0 feature structs
			// to the chain, and replay pipeline cache data / pool sizes when
			// running as a subprocess.
			VkDeviceObjectReservationCreateInfo memReservationInfo	= context.getTestContext().getCommandLine().isSubProcess() ? context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
			memReservationInfo.pNext								= pNext;
			pNext													= &memReservationInfo;

			VkPhysicalDeviceVulkanSC10Features sc10Features			= createDefaultSC10Features();
			sc10Features.pNext										= pNext;
			pNext													= &sc10Features;

			VkPipelineCacheCreateInfo			pcCI;
			std::vector<VkPipelinePoolSize>		poolSizes;
			if (context.getTestContext().getCommandLine().isSubProcess())
			{
				if (context.getResourceInterface()->getCacheDataSize() > 0)
				{
					pcCI =
					{
						VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,		// VkStructureType				sType;
						DE_NULL,											// const void*					pNext;
						VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
							VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,	// VkPipelineCacheCreateFlags	flags;
						context.getResourceInterface()->getCacheDataSize(),	// deUintptr					initialDataSize;
						context.getResourceInterface()->getCacheData()		// const void*					pInitialData;
					};
					memReservationInfo.pipelineCacheCreateInfoCount		= 1;
					memReservationInfo.pPipelineCacheCreateInfos		= &pcCI;
				}

				poolSizes							= context.getResourceInterface()->getPipelinePoolSizes();
				if (!poolSizes.empty())
				{
					memReservationInfo.pipelinePoolSizeCount			= deUint32(poolSizes.size());
					memReservationInfo.pPipelinePoolSizes				= poolSizes.data();
				}
			}
#endif // CTS_USES_VULKANSC

			const VkDeviceCreateInfo deviceInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,							//VkStructureType					sType;
				pNext,															//const void*						pNext;
				0u,																//VkDeviceCreateFlags				flags;
				static_cast<deUint32>(queueInfos.size()),						//deUint32							queueCreateInfoCount;
				&queueInfos[0],													//const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
				0u,																//deUint32							enabledLayerCount;
				DE_NULL,														//const char* const*				ppEnabledLayerNames;
				static_cast<deUint32>(deviceExtensions.size()),					//deUint32							enabledExtensionCount;
				deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0],		//const char* const*				ppEnabledExtensionNames;
				DE_NULL															//const VkPhysicalDeviceFeatures*	pEnabledFeatures;
			};

			m_logicalDevice	= createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(), m_instance, instanceDriver, physicalDevice, &deviceInfo);
#ifndef CTS_USES_VULKANSC
			m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), m_instance, *m_logicalDevice));
#else
			m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), m_instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), context.getDeviceVulkanSC10Properties(), context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
			m_allocator		= MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instanceDriver, physicalDevice)));

			// Fetch the actual queue handles for every registered family slot.
			for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
			for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
				m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
		}
	}

	// Registers a queue family in m_queues with 'count' (still empty) handle
	// slots that the constructor fills in after device creation.
	// NOTE(review): m_queueCount is incremented once per *family*, so despite
	// its name it tracks the number of families, not queues; it is not read
	// anywhere in this file — confirm before relying on it.
	void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
	{
		QueueData dataToPush;
		dataToPush.flags = flags;
		dataToPush.queue.resize(count);
		m_queues[queueFamilyIndex] = dataToPush;

		m_queueCount++;
	}

public:
	~MultiQueues()
	{
	}

	// Returns every (write queue, read queue) pair of *distinct* queue handles
	// where the write family serves flagsWrite and the read family serves
	// flagsRead; at most one pair is emitted per family combination.
	// Throws NotSupportedError when no pair exists.
	std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead) const
	{
		std::map<deUint32, QueueData>	queuesWrite;
		std::map<deUint32, QueueData>	queuesRead;
		std::vector<QueuePair>			queuesPairs;

		// Bucket families by the capability sets they can serve (a family may
		// land in both buckets).
		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const bool writeQueue	= checkQueueFlags(it->second.flags, flagsWrite);
			const bool readQueue	= checkQueueFlags(it->second.flags, flagsRead);

			if (!(writeQueue || readQueue))
				continue;

			if (writeQueue && readQueue)
			{
				queuesWrite[it->first]	= it->second;
				queuesRead[it->first]	= it->second;
			}
			else if (writeQueue)
				queuesWrite[it->first]	= it->second;
			else if (readQueue)
				queuesRead[it->first]	= it->second;
		}

		for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
		for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
		{
			const int writeSize	= static_cast<int>(write->second.queue.size());
			const int readSize	= static_cast<int>(read->second.queue.size());

			// Pick the first pair of distinct handles for this family combo.
			for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
			for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
			{
				if (write->second.queue[writeNdx] != read->second.queue[readNdx])
				{
					queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
					writeNdx = readNdx = std::max(writeSize, readSize);	//exit from the loops
				}
			}
		}

		if (queuesPairs.empty())
			TCU_THROW(NotSupportedError, "Queue not found");

		return queuesPairs;
	}

	// Returns the first queue of the first family that serves flagsOp;
	// throws NotSupportedError when none qualifies.
	Queue getDefaultQueue(const VkQueueFlags flagsOp) const
	{
		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
		{
			if (checkQueueFlags(it->second.flags, flagsOp))
				return Queue(it->first, it->second.queue[0]);
		}

		TCU_THROW(NotSupportedError, "Queue not found");
	}

	// Returns the queueIdx-th queue of family familyIdx (no bounds checking).
	Queue getQueue (const deUint32 familyIdx, const deUint32 queueIdx)
	{
		return Queue(familyIdx, m_queues[familyIdx].queue[queueIdx]);
	}

	// Capability flags of the given queue family.
	VkQueueFlags getQueueFamilyFlags (const deUint32 familyIdx)
	{
		return m_queues[familyIdx].flags;
	}

	// Number of queues retrieved for the given family (at most two).
	deUint32 queueFamilyCount (const deUint32 familyIdx)
	{
		return (deUint32) m_queues[familyIdx].queue.size();
	}

	// Number of queue families on the device.
	deUint32 familyCount (void) const
	{
		return (deUint32) m_queues.size();
	}

	// Total number of queues across all families.
	deUint32 totalQueueCount (void)
	{
		deUint32	count	= 0;

		for (deUint32 familyIdx = 0; familyIdx < familyCount(); familyIdx++)
		{
			count	+= queueFamilyCount(familyIdx);
		}

		return count;
	}

	VkDevice getDevice (void) const
	{
		return *m_logicalDevice;
	}

	const DeviceInterface& getDeviceInterface (void) const
	{
		return *m_deviceDriver;
	}

	Allocator& getAllocator (void)
	{
		return *m_allocator;
	}

	// Returns the shared singleton, creating it on first call.  The creation
	// parameters are taken from the *first* caller only; later callers receive
	// the same object regardless of their arguments.
	static SharedPtr<MultiQueues> getInstance(Context& context, SynchronizationType type, bool timelineSemaphore)
	{
		if (!m_multiQueues)
			m_multiQueues = SharedPtr<MultiQueues>(new MultiQueues(context, type, timelineSemaphore));

		return m_multiQueues;
	}
	// Drops the singleton reference so the device can be destroyed.
	static void destroy()
	{
		m_multiQueues.clear();
	}

private:
	CustomInstance					m_instance;
	Move<VkDevice>					m_logicalDevice;
#ifndef CTS_USES_VULKANSC
	de::MovePtr<vk::DeviceDriver>	m_deviceDriver;
#else
	de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>	m_deviceDriver;
#endif // CTS_USES_VULKANSC
	MovePtr<Allocator>				m_allocator;
	std::map<deUint32, QueueData>	m_queues;		// keyed by queue family index
	deUint32						m_queueCount;	// see NOTE in addQueueIndex()

	static SharedPtr<MultiQueues>	m_multiQueues;	// the shared singleton
};
SharedPtr<MultiQueues>				MultiQueues::m_multiQueues;
379 
createBarrierMultiQueue(SynchronizationWrapperPtr synchronizationWrapper,const VkCommandBuffer & cmdBuffer,const SyncInfo & writeSync,const SyncInfo & readSync,const Resource & resource,const deUint32 writeFamily,const deUint32 readFamily,const VkSharingMode sharingMode,const bool secondQueue=false)380 void createBarrierMultiQueue (SynchronizationWrapperPtr synchronizationWrapper,
381 							  const VkCommandBuffer&	cmdBuffer,
382 							  const SyncInfo&			writeSync,
383 							  const SyncInfo&			readSync,
384 							  const Resource&			resource,
385 							  const deUint32			writeFamily,
386 							  const deUint32			readFamily,
387 							  const VkSharingMode		sharingMode,
388 							  const bool				secondQueue = false)
389 {
390 	if (resource.getType() == RESOURCE_TYPE_IMAGE)
391 	{
392 		VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
393 			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
394 			secondQueue ? 0u : writeSync.accessMask,
395 			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
396 			!secondQueue ? 0u : readSync.accessMask,
397 			writeSync.imageLayout,
398 			readSync.imageLayout,
399 			resource.getImage().handle,
400 			resource.getImage().subresourceRange
401 		);
402 
403 		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
404 		{
405 			imageMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
406 			imageMemoryBarrier2.dstQueueFamilyIndex = readFamily;
407 
408 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
409 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
410 		}
411 		else if (!secondQueue)
412 		{
413 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
414 			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
415 		}
416 	}
417 	else
418 	{
419 		VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
420 			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
421 			secondQueue ? 0u : writeSync.accessMask,
422 			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
423 			!secondQueue ? 0u : readSync.accessMask,
424 			resource.getBuffer().handle,
425 			resource.getBuffer().offset,
426 			resource.getBuffer().size
427 		);
428 
429 		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
430 		{
431 			bufferMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
432 			bufferMemoryBarrier2.dstQueueFamilyIndex = readFamily;
433 		}
434 
435 		VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
436 		synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
437 	}
438 }
439 
// Common state shared by every multi-queue synchronization test instance:
// grabs the MultiQueues singleton and builds an OperationContext that targets
// its device, driver and allocator.
class BaseTestInstance : public TestInstance
{
public:
	// timelineSemaphore selects whether the shared device is created with
	// timeline-semaphore support; the singleton is built by the first instance
	// (see MultiQueues::getInstance()).
	BaseTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, bool timelineSemaphore)
		: TestInstance		(context)
		, m_type			(type)
		, m_queues			(MultiQueues::getInstance(context, type, timelineSemaphore))
		, m_opContext		(new OperationContext(context, type, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator(), pipelineCacheData))
		, m_resourceDesc	(resourceDesc)
		, m_writeOp			(writeOp)
		, m_readOp			(readOp)
	{
	}

protected:
	const SynchronizationType			m_type;			// which synchronization API variant to exercise
	const SharedPtr<MultiQueues>		m_queues;		// shared device + queues singleton
	const UniquePtr<OperationContext>	m_opContext;	// context bound to the shared device
	const ResourceDescription			m_resourceDesc;
	const OperationSupport&				m_writeOp;		// factory for the writing operation
	const OperationSupport&				m_readOp;		// factory for the reading operation
};
462 
// Write/read ordered by a binary semaphore: the write is submitted to one
// queue signalling the semaphore, the read to a second queue waiting on it.
// Every usable (write, read) queue pair of the device is exercised.
class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
	BinarySemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			// Fresh resource and operations for every queue pair.
			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>		readOp			(m_readOp.build (*m_opContext, *resource));

			// One command pool/buffer per involved family, indexed by QueueType.
			const Move<VkCommandPool>			cmdPool[]		=
			{
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>			ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]	=
			{
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ]),
			};
			// Binary semaphore: signalled by the write submission, waited on
			// (from the top of the pipe) by the read submission.
			const Unique<VkSemaphore>			semaphore		(createSemaphore(vk, device));
			VkSemaphoreSubmitInfoKHR			waitSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR			signalSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr			synchronizationWrapper[]
			{
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
			};

			synchronizationWrapper[QUEUETYPE_WRITE]->addSubmitInfo(
				0u,
				DE_NULL,
				1u,
				&cmdBufferInfos[QUEUETYPE_WRITE],
				1u,
				&signalSemaphoreSubmitInfo
			);
			synchronizationWrapper[QUEUETYPE_READ]->addSubmitInfo(
				1u,
				&waitSemaphoreSubmitInfo,
				1u,
				&cmdBufferInfos[QUEUETYPE_READ],
				0u,
				DE_NULL
			);

			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
			const SyncInfo					readSync		= readOp->getInSyncInfo();
			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

			// Write queue: record the operation, then the release barrier.
			beginCommandBuffer		(vk, writeCmdBuffer);
			writeOp->recordCommands	(writeCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, writeCmdBuffer);

			// Read queue: record the acquire barrier (secondQueue = true)
			// before the read operation.
			beginCommandBuffer		(vk, readCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(readCmdBuffer);
			endCommandBuffer		(vk, readCmdBuffer);

			VK_CHECK(synchronizationWrapper[QUEUETYPE_WRITE]->queueSubmit(queuePairs[pairNdx].queueWrite, DE_NULL));
			VK_CHECK(synchronizationWrapper[QUEUETYPE_READ]->queueSubmit(queuePairs[pairNdx].queueRead, DE_NULL));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

#ifdef CTS_USES_VULKANSC
				// Vulkan SC: results are only meaningful in the subprocess run.
				if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
				{
					if (isIndirectBuffer(m_resourceDesc.type))
					{
						// Indirect buffers hold a draw counter; it may legally
						// exceed the expected value, but never be below it.
						const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
						const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

						if (actualValue < expectedValue)
							return tcu::TestStatus::fail("Counter value is smaller than expected");
					}
					else
					{
						if (0 != deMemCmp(expected.data, actual.data, expected.size))
							return tcu::TestStatus::fail("Memory contents don't match");
					}
				}
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};
577 
578 template<typename T>
makeVkSharedPtr(Move<T> move)579 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
580 {
581 	return SharedPtr<Move<T> >(new Move<T>(move));
582 }
583 
// Chains the write operation, a series of copy operations touching every queue
// of the device, and the read operation, ordering each step after the previous
// one with increasing points on a single timeline semaphore.
class TimelineSemaphoreTestInstance : public BaseTestInstance
{
public:
	TimelineSemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const SharedPtr<OperationSupport>& writeOp, const SharedPtr<OperationSupport>& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, *writeOp, *readOp, pipelineCacheData, true)
		, m_sharingMode		(sharingMode)
	{
		deUint32				maxQueues		= 0;	// largest per-family queue count
		std::vector<deUint32>	queueFamilies;

		if (m_queues->totalQueueCount() < 2)
			TCU_THROW(NotSupportedError, "Not enough queues");

		for (deUint32 familyNdx = 0; familyNdx < m_queues->familyCount(); familyNdx++)
		{
			maxQueues = std::max(m_queues->queueFamilyCount(familyNdx), maxQueues);
			queueFamilies.push_back(familyNdx);
		}

		// Create a chain of operations copying data from one resource
		// to another across at least every single queue of the system
		// at least once. Each of the operation will be executing with
		// a dependency on the previous using timeline points.
		m_opSupports.push_back(writeOp);
		m_opQueues.push_back(m_queues->getDefaultQueue(writeOp->getQueueFlags(*m_opContext)));

		for (deUint32 queueIdx = 0; queueIdx < maxQueues; queueIdx++)
		{
			for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
			{
				// Use the first copy operation that both supports the resource
				// and can run on this family.
				for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++)
				{
					if (isResourceSupported(s_copyOps[copyOpIdx], resourceDesc))
					{
						SharedPtr<OperationSupport>	opSupport	(makeOperationSupport(s_copyOps[copyOpIdx], m_resourceDesc).release());

						if (!checkQueueFlags(opSupport->getQueueFlags(*m_opContext), m_queues->getQueueFamilyFlags(familyIdx)))
							continue;

						m_opSupports.push_back(opSupport);
						// Wrap queueIdx so families with fewer queues are reused.
						m_opQueues.push_back(m_queues->getQueue(familyIdx, queueIdx % m_queues->queueFamilyCount(familyIdx)));
						break;
					}
				}
			}
		}

		m_opSupports.push_back(readOp);
		m_opQueues.push_back(m_queues->getDefaultQueue(readOp->getQueueFlags(*m_opContext)));

		// Now create the resources with the usage associated to the
		// operation performed on the resource.
		for (deUint32 opIdx = 0; opIdx < (m_opSupports.size() - 1); opIdx++)
		{
			deUint32 usage = m_opSupports[opIdx]->getOutResourceUsageFlags() | m_opSupports[opIdx + 1]->getInResourceUsageFlags();

			m_resources.push_back(SharedPtr<Resource>(new Resource(*m_opContext, m_resourceDesc, usage, m_sharingMode, queueFamilies)));
		}

		// Finally create the operations using the resources.
		// First op writes resource 0, middle ops copy i-1 -> i, last op reads
		// the final resource.
		m_ops.push_back(SharedPtr<Operation>(m_opSupports[0]->build(*m_opContext, *m_resources[0]).release()));
		for (deUint32 opIdx = 1; opIdx < (m_opSupports.size() - 1); opIdx++)
			m_ops.push_back(SharedPtr<Operation>(m_opSupports[opIdx]->build(*m_opContext, *m_resources[opIdx - 1], *m_resources[opIdx]).release()));
		m_ops.push_back(SharedPtr<Operation>(m_opSupports[m_opSupports.size() - 1]->build(*m_opContext, *m_resources.back()).release()));
	}

	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&							vk				= m_opContext->getDeviceInterface();
		const VkDevice									device			= m_opContext->getDevice();
		de::Random										rng				(1234);
		const Unique<VkSemaphore>						semaphore		(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
		std::vector<SharedPtr<Move<VkCommandPool> > >	cmdPools;
		std::vector<SharedPtr<Move<VkCommandBuffer> > >	ptrCmdBuffers;
		std::vector<VkCommandBufferSubmitInfoKHR>		cmdBufferInfos;
		std::vector<deUint64>							timelineValues;

		// One command pool per queue family.
		cmdPools.resize(m_queues->familyCount());
		for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
			cmdPools[familyIdx] = makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, familyIdx));

		// One command buffer per operation, plus a strictly increasing timeline
		// value for each (random increments so values are non-consecutive).
		ptrCmdBuffers.resize(m_ops.size());
		cmdBufferInfos.resize(m_ops.size());
		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
		{
			deUint64	increment	= 1 + rng.getUint8();

			ptrCmdBuffers[opIdx] = makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools[m_opQueues[opIdx].family]));
			cmdBufferInfos[opIdx] = makeCommonCommandBufferSubmitInfo(**ptrCmdBuffers[opIdx]);

			timelineValues.push_back(timelineValues.empty() ? increment : (timelineValues.back() + increment));
		}

		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
		{
			VkCommandBuffer				cmdBuffer = cmdBufferInfos[opIdx].commandBuffer;
			// Each submission waits on the previous operation's timeline value
			// (except the first) and signals its own.
			VkSemaphoreSubmitInfoKHR	waitSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, (opIdx == 0 ? 0u : timelineValues[opIdx - 1]), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR	signalSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, timelineValues[opIdx], VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr	synchronizationWrapper = getSynchronizationWrapper(m_type, vk, DE_TRUE);

			synchronizationWrapper->addSubmitInfo(
				opIdx == 0 ? 0u : 1u,
				&waitSemaphoreSubmitInfo,
				1u,
				&cmdBufferInfos[opIdx],
				1u,
				&signalSemaphoreSubmitInfo,
				opIdx == 0 ? DE_FALSE : DE_TRUE,
				DE_TRUE
			);

			beginCommandBuffer(vk, cmdBuffer);

			// Acquire barrier for the resource written by the previous op.
			if (opIdx > 0)
			{
				const SyncInfo	writeSync	= m_ops[opIdx - 1]->getOutSyncInfo();
				const SyncInfo	readSync	= m_ops[opIdx]->getInSyncInfo();
				const Resource&	resource	= *m_resources[opIdx - 1].get();

				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx - 1].family, m_opQueues[opIdx].family, m_sharingMode, true);
			}

			m_ops[opIdx]->recordCommands(cmdBuffer);

			// Release barrier for the resource the next op will read.
			if (opIdx < (m_ops.size() - 1))
			{
				const SyncInfo	writeSync	= m_ops[opIdx]->getOutSyncInfo();
				const SyncInfo	readSync	= m_ops[opIdx + 1]->getInSyncInfo();
				const Resource&	resource	= *m_resources[opIdx].get();

				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx].family, m_opQueues[opIdx + 1].family, m_sharingMode);
			}

			endCommandBuffer(vk, cmdBuffer);

			VK_CHECK(synchronizationWrapper->queueSubmit(m_opQueues[opIdx].queue, DE_NULL));
		}


		VK_CHECK(vk.queueWaitIdle(m_opQueues.back().queue));

		{
			const Data	expected	= m_ops.front()->getData();
			const Data	actual		= m_ops.back()->getData();

			if (isIndirectBuffer(m_resourceDesc.type))
			{
				// Indirect buffers hold a counter; larger-than-expected is OK,
				// smaller is a failure.
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		// Make the validation layers happy.
		for (deUint32 opIdx = 0; opIdx < m_opQueues.size(); opIdx++)
			VK_CHECK(vk.queueWaitIdle(m_opQueues[opIdx].queue));

		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode							m_sharingMode;
	std::vector<SharedPtr<OperationSupport> >	m_opSupports;	// op factories, in chain order
	std::vector<SharedPtr<Operation> >			m_ops;			// built operations (size == m_opSupports.size())
	std::vector<SharedPtr<Resource> >			m_resources;	// one resource between each adjacent op pair
	std::vector<Queue>							m_opQueues;		// queue used by each operation
};
760 
// Tests the write->read hand-off between two queues using fence-based host
// synchronization: the write submission is fully waited on (fence inside
// submitCommandsAndWait) before the read submission is made, for every
// available (write, read) queue pair.
class FenceTestInstance : public BaseTestInstance
{
public:
	FenceTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		// All queue pairs whose families satisfy the write/read operations' queue flags.
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			// Fresh resource and operation instances for each queue pair.
			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>		readOp			(m_readOp.build(*m_opContext, *resource));
			// One command pool + buffer per side, indexed by QUEUETYPE_WRITE / QUEUETYPE_READ.
			const Move<VkCommandPool>		cmdPool[]
			{
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>		ptrCmdBuffer[]
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]
			{
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ])
			};
			// No semaphores involved (DE_FALSE): each submission is synchronized by a fence wait.
			SynchronizationWrapperPtr		synchronizationWrapper[]
			{
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
			};
			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
			const SyncInfo					readSync		= readOp->getInSyncInfo();
			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

			// Record the write and the first half of the cross-queue barrier,
			// then submit and wait for full completion on the host.
			beginCommandBuffer		(vk, writeCmdBuffer);
			writeOp->recordCommands	(writeCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, writeCmdBuffer);

			submitCommandsAndWait	(synchronizationWrapper[QUEUETYPE_WRITE], vk, device, queuePairs[pairNdx].queueWrite, writeCmdBuffer);

			// Record the matching barrier on the read queue (final 'true' argument
			// marks this as the second-queue half) before the read commands.
			beginCommandBuffer		(vk, readCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(readCmdBuffer);
			endCommandBuffer		(vk, readCmdBuffer);

			submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_READ], vk, device, queuePairs[pairNdx].queueRead, readCmdBuffer);

			// Verify what the read operation observed against what the write produced.
			{
				const Data	expected = writeOp->getData();
				const Data	actual	 = readOp->getData();

#ifdef CTS_USES_VULKANSC
				// Vulkan SC: only validate results when running as the sub-process.
				if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
				{
					if (isIndirectBuffer(m_resourceDesc.type))
					{
						// Indirect buffers carry a counter; only require it to be
						// at least the expected value, not exactly equal.
						const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
						const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

						if (actualValue < expectedValue)
							return tcu::TestStatus::fail("Counter value is smaller than expected");
					}
					else
					{
						if (0 != deMemCmp(expected.data, actual.data, expected.size))
							return tcu::TestStatus::fail("Memory contents don't match");
					}
				}
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};
850 
851 class BaseTestCase : public TestCase
852 {
853 public:
BaseTestCase(tcu::TestContext & testCtx,const std::string & name,const std::string & description,SynchronizationType type,const SyncPrimitive syncPrimitive,const ResourceDescription resourceDesc,const OperationName writeOp,const OperationName readOp,const VkSharingMode sharingMode,PipelineCacheData & pipelineCacheData)854 	BaseTestCase (tcu::TestContext&			testCtx,
855 				  const std::string&		name,
856 				  const std::string&		description,
857 				  SynchronizationType		type,
858 				  const SyncPrimitive		syncPrimitive,
859 				  const ResourceDescription	resourceDesc,
860 				  const OperationName		writeOp,
861 				  const OperationName		readOp,
862 				  const VkSharingMode		sharingMode,
863 				  PipelineCacheData&		pipelineCacheData)
864 		: TestCase				(testCtx, name, description)
865 		, m_type				(type)
866 		, m_resourceDesc		(resourceDesc)
867 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
868 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
869 		, m_syncPrimitive		(syncPrimitive)
870 		, m_sharingMode			(sharingMode)
871 		, m_pipelineCacheData	(pipelineCacheData)
872 	{
873 	}
874 
initPrograms(SourceCollections & programCollection) const875 	void initPrograms (SourceCollections& programCollection) const
876 	{
877 		m_writeOp->initPrograms(programCollection);
878 		m_readOp->initPrograms(programCollection);
879 
880 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
881 		{
882 			for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
883 			{
884 				if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
885 					makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
886 			}
887 		}
888 	}
889 
checkSupport(Context & context) const890 	void checkSupport(Context& context) const
891 	{
892 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
893 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
894 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
895 			context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
896 
897 		const InstanceInterface&					instance				= context.getInstanceInterface();
898 		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
899 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
900 		if (m_sharingMode == VK_SHARING_MODE_CONCURRENT && queueFamilyProperties.size() < 2)
901 			TCU_THROW(NotSupportedError, "Concurrent requires more than 1 queue family");
902 
903 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
904 			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
905 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
906 
907 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
908 		{
909 			VkImageFormatProperties	imageFormatProperties;
910 			const deUint32			usage					= m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
911 			const VkResult			formatResult			= instance.getPhysicalDeviceImageFormatProperties(physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage, (VkImageCreateFlags)0, &imageFormatProperties);
912 
913 			if (formatResult != VK_SUCCESS)
914 				TCU_THROW(NotSupportedError, "Image format is not supported");
915 
916 			if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
917 				TCU_THROW(NotSupportedError, "Requested sample count is not supported");
918 		}
919 	}
920 
createInstance(Context & context) const921 	TestInstance* createInstance (Context& context) const
922 	{
923 		switch (m_syncPrimitive)
924 		{
925 			case SYNC_PRIMITIVE_FENCE:
926 				return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
927 			case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
928 				return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
929 			case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
930 				return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData, m_sharingMode);
931 			default:
932 				DE_ASSERT(0);
933 				return DE_NULL;
934 		}
935 	}
936 
937 private:
938 	const SynchronizationType				m_type;
939 	const ResourceDescription				m_resourceDesc;
940 	const SharedPtr<OperationSupport>		m_writeOp;
941 	const SharedPtr<OperationSupport>		m_readOp;
942 	const SyncPrimitive						m_syncPrimitive;
943 	const VkSharingMode						m_sharingMode;
944 	PipelineCacheData&						m_pipelineCacheData;
945 };
946 
// Parameters shared by every test case in the "multi_queue" group.
struct TestData
{
	SynchronizationType		type;				// Which synchronization API variant to exercise.
	PipelineCacheData*		pipelineCacheData;	// Non-owning; cache object lives in the caller.
};
952 
createTests(tcu::TestCaseGroup * group,TestData data)953 void createTests (tcu::TestCaseGroup* group, TestData data)
954 {
955 	tcu::TestContext& testCtx = group->getTestContext();
956 
957 	static const struct
958 	{
959 		const char*		name;
960 		SyncPrimitive	syncPrimitive;
961 		int				numOptions;
962 	} groups[] =
963 	{
964 		{ "fence",				SYNC_PRIMITIVE_FENCE,				1 },
965 		{ "binary_semaphore",	SYNC_PRIMITIVE_BINARY_SEMAPHORE,	1 },
966 		{ "timeline_semaphore",	SYNC_PRIMITIVE_TIMELINE_SEMAPHORE,	1 }
967 	};
968 
969 	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
970 	{
971 		MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));
972 
973 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
974 		for (int readOpNdx  = 0; readOpNdx  < DE_LENGTH_OF_ARRAY(s_readOps);  ++readOpNdx)
975 		{
976 			const OperationName	writeOp		= s_writeOps[writeOpNdx];
977 			const OperationName	readOp		= s_readOps[readOpNdx];
978 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
979 			bool				empty		= true;
980 
981 			MovePtr<tcu::TestCaseGroup> opGroup		(new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));
982 
983 			for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
984 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
985 			{
986 				const ResourceDescription&	resource	= s_resources[resourceNdx];
987 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
988 				{
989 					std::string					name		= getResourceName(resource);
990 					VkSharingMode				sharingMode = VK_SHARING_MODE_EXCLUSIVE;
991 
992 					// queue family sharing mode used for resource
993 					if (optionNdx)
994 					{
995 						name += "_concurrent";
996 						sharingMode = VK_SHARING_MODE_CONCURRENT;
997 					}
998 					else
999 						name += "_exclusive";
1000 
1001 					opGroup->addChild(new BaseTestCase(testCtx, name, "", data.type, groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *data.pipelineCacheData));
1002 					empty = false;
1003 				}
1004 			}
1005 			if (!empty)
1006 				synchGroup->addChild(opGroup.release());
1007 		}
1008 		group->addChild(synchGroup.release());
1009 	}
1010 }
1011 
cleanupGroup(tcu::TestCaseGroup * group,TestData data)1012 void cleanupGroup (tcu::TestCaseGroup* group, TestData data)
1013 {
1014 	DE_UNREF(group);
1015 	DE_UNREF(data.pipelineCacheData);
1016 	// Destroy singleton object
1017 	MultiQueues::destroy();
1018 }
1019 
1020 } // anonymous
1021 
createSynchronizedOperationMultiQueueTests(tcu::TestContext & testCtx,SynchronizationType type,PipelineCacheData & pipelineCacheData)1022 tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, SynchronizationType type, PipelineCacheData& pipelineCacheData)
1023 {
1024 	TestData data
1025 	{
1026 		type,
1027 		&pipelineCacheData
1028 	};
1029 
1030 	return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, data, cleanupGroup);
1031 }
1032 
1033 } // synchronization
1034 } // vkt
1035