• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Synchronization primitive tests with multi queue
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSynchronizationOperationMultiQueueTests.hpp"
25 #include "vktCustomInstancesDevices.hpp"
26 #include "vkDefs.hpp"
27 #include "vktTestCase.hpp"
28 #include "vktTestCaseUtil.hpp"
29 #include "vkRef.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkBarrierUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkPlatform.hpp"
36 #include "vkCmdUtil.hpp"
37 #include "deRandom.hpp"
38 #include "deUniquePtr.hpp"
39 #include "deSharedPtr.hpp"
40 #include "tcuTestLog.hpp"
41 #include "vktSynchronizationUtil.hpp"
42 #include "vktSynchronizationOperation.hpp"
43 #include "vktSynchronizationOperationTestData.hpp"
44 #include "vktSynchronizationOperationResources.hpp"
45 #include "vktTestGroupUtil.hpp"
46 #include "tcuCommandLine.hpp"
47 
48 #include <set>
49 
50 namespace vkt
51 {
52 
53 namespace synchronization
54 {
55 
56 namespace
57 {
58 using namespace vk;
59 using de::MovePtr;
60 using de::SharedPtr;
61 using de::UniquePtr;
62 using de::SharedPtr;
63 
// Role a queue plays within a write/read pair; also used below to index the
// per-role command pools, command buffers and synchronization wrappers.
enum QueueType
{
	QUEUETYPE_WRITE,
	QUEUETYPE_READ
};
69 
70 struct QueuePair
71 {
QueuePairvkt::synchronization::__anone6b21dfa0111::QueuePair72 	QueuePair	(const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
73 		: familyIndexWrite	(familyWrite)
74 		, familyIndexRead	(familyRead)
75 		, queueWrite		(write)
76 		, queueRead			(read)
77 	{}
78 
79 	deUint32	familyIndexWrite;
80 	deUint32	familyIndexRead;
81 	VkQueue		queueWrite;
82 	VkQueue		queueRead;
83 };
84 
85 struct Queue
86 {
Queuevkt::synchronization::__anone6b21dfa0111::Queue87 	Queue	(const deUint32 familyOp, const VkQueue queueOp)
88 		:	family	(familyOp)
89 		,	queue	(queueOp)
90 	{}
91 
92 	deUint32	family;
93 	VkQueue		queue;
94 };
95 
checkQueueFlags(VkQueueFlags availableFlags,const VkQueueFlags neededFlags)96 bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
97 {
98 	if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
99 		availableFlags |= VK_QUEUE_TRANSFER_BIT;
100 
101 	return (availableFlags & neededFlags) != 0;
102 }
103 
// Lazily-created singleton owning a custom logical device that exposes up to
// two queues from every queue family of the physical device.  Test instances
// share this one device (via getInstance()) instead of creating a device per
// test, and query it for write/read queue pairs across or within families.
class MultiQueues
{
	// Per-family bookkeeping: the family's capability flags and the queue
	// handles retrieved from it (at most two, see the constructor).
	struct QueueData
	{
		VkQueueFlags			flags;
		std::vector<VkQueue>	queue;
	};

	// Private: instances are created only through getInstance().  Enumerates
	// all queue families, requests min(2, queueCount) queues from each, then
	// creates the device with the timeline-semaphore and/or synchronization2
	// feature structs chained in as requested by the test variant.
	MultiQueues	(const Context& context, SynchronizationType type, bool timelineSemaphore)
		: m_queueCount	(0)
	{
		const InstanceInterface&					instance				= context.getInstanceInterface();
		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);

		for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
		{
			addQueueIndex(queuePropertiesNdx,
						  std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
						  queueFamilyProperties[queuePropertiesNdx].queueFlags);
		}

		std::vector<VkDeviceQueueCreateInfo>	queueInfos;
		const float								queuePriorities[2] = { 1.0f, 1.0f };	//get max 2 queues from one family

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
		{
			const VkDeviceQueueCreateInfo queueInfo	=
			{
				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		//VkStructureType			sType;
				DE_NULL,										//const void*				pNext;
				(VkDeviceQueueCreateFlags)0u,					//VkDeviceQueueCreateFlags	flags;
				it->first,										//deUint32					queueFamilyIndex;
				static_cast<deUint32>(it->second.queue.size()),	//deUint32					queueCount;
				&queuePriorities[0]								//const float*				pQueuePriorities;
			};
			queueInfos.push_back(queueInfo);
		}

		{
			// Start from the context's enabled core features and append the
			// optional feature structs to the pNext chain only when needed.
			VkPhysicalDeviceFeatures2					createPhysicalFeature		{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
			VkPhysicalDeviceTimelineSemaphoreFeatures	timelineSemaphoreFeatures	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
			VkPhysicalDeviceSynchronization2FeaturesKHR	synchronization2Features	{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
			void**										nextPtr						= &createPhysicalFeature.pNext;

			std::vector<const char*> deviceExtensions;
			if (timelineSemaphore)
			{
				deviceExtensions.push_back("VK_KHR_timeline_semaphore");
				addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
			}
			if (type == SynchronizationType::SYNCHRONIZATION2)
			{
				deviceExtensions.push_back("VK_KHR_synchronization2");
				addToChainVulkanStructure(&nextPtr, synchronization2Features);
			}

			const VkDeviceCreateInfo deviceInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,							//VkStructureType					sType;
				&createPhysicalFeature,											//const void*						pNext;
				0u,																//VkDeviceCreateFlags				flags;
				static_cast<deUint32>(queueInfos.size()),						//deUint32							queueCreateInfoCount;
				&queueInfos[0],													//const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
				0u,																//deUint32							enabledLayerCount;
				DE_NULL,														//const char* const*				ppEnabledLayerNames;
				static_cast<deUint32>(deviceExtensions.size()),					//deUint32							enabledExtensionCount;
				deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0],		//const char* const*				ppEnabledExtensionNames;
				DE_NULL															//const VkPhysicalDeviceFeatures*	pEnabledFeatures;
			};

			m_logicalDevice	= createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(), context.getInstance(), instance, physicalDevice, &deviceInfo);
			m_deviceDriver	= MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *m_logicalDevice));
			m_allocator		= MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, physicalDevice)));

			// Fetch every queue handle we requested from the new device.
			for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
			for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
				m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
		}
	}

	// Registers a family in m_queues with 'count' (not yet retrieved) queue
	// slots and its capability flags.
	// NOTE(review): m_queueCount is incremented once per *family* here, not by
	// 'count' — despite its name it tracks the number of families; confirm
	// intent if it is ever used for anything beyond bookkeeping.
	void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
	{
		QueueData dataToPush;
		dataToPush.flags = flags;
		dataToPush.queue.resize(count);
		m_queues[queueFamilyIndex] = dataToPush;

		m_queueCount++;
	}

public:
	// Returns all (write, read) queue pairs where the write family supports
	// 'flagsWrite' and the read family supports 'flagsRead'.  For each
	// write-family/read-family combination at most one pair of *distinct*
	// queue handles is emitted.  Throws NotSupportedError if no pair exists.
	std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead) const
	{
		std::map<deUint32, QueueData>	queuesWrite;
		std::map<deUint32, QueueData>	queuesRead;
		std::vector<QueuePair>			queuesPairs;

		// Partition the families by the capabilities they can serve; a family
		// may land in both maps.
		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const bool writeQueue	= checkQueueFlags(it->second.flags, flagsWrite);
			const bool readQueue	= checkQueueFlags(it->second.flags, flagsRead);

			if (!(writeQueue || readQueue))
				continue;

			if (writeQueue && readQueue)
			{
				queuesWrite[it->first]	= it->second;
				queuesRead[it->first]	= it->second;
			}
			else if (writeQueue)
				queuesWrite[it->first]	= it->second;
			else if (readQueue)
				queuesRead[it->first]	= it->second;
		}

		for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
		for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
		{
			const int writeSize	= static_cast<int>(write->second.queue.size());
			const int readSize	= static_cast<int>(read->second.queue.size());

			// Take the first combination whose two handles differ (same family
			// is fine as long as they are different queues of that family).
			for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
			for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
			{
				if (write->second.queue[writeNdx] != read->second.queue[readNdx])
				{
					queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
					writeNdx = readNdx = std::max(writeSize, readSize);	//exit from the loops
				}
			}
		}

		if (queuesPairs.empty())
			TCU_THROW(NotSupportedError, "Queue not found");

		return queuesPairs;
	}

	// Returns the first queue of the first family that supports 'flagsOp';
	// throws NotSupportedError when no family qualifies.
	Queue getDefaultQueue(const VkQueueFlags flagsOp) const
	{
		for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it!= m_queues.end(); ++it)
		{
			if (checkQueueFlags(it->second.flags, flagsOp))
				return Queue(it->first, it->second.queue[0]);
		}

		TCU_THROW(NotSupportedError, "Queue not found");
	}

	// Returns queue 'queueIdx' of family 'familyIdx' (no bounds checking).
	Queue getQueue (const deUint32 familyIdx, const deUint32 queueIdx)
	{
		return Queue(familyIdx, m_queues[familyIdx].queue[queueIdx]);
	}

	// Capability flags of the given family.
	VkQueueFlags getQueueFamilyFlags (const deUint32 familyIdx)
	{
		return m_queues[familyIdx].flags;
	}

	// Number of queues retrieved from the given family (0..2).
	deUint32 queueFamilyCount (const deUint32 familyIdx)
	{
		return (deUint32) m_queues[familyIdx].queue.size();
	}

	// Number of queue families on the device.
	deUint32 familyCount (void) const
	{
		return (deUint32) m_queues.size();
	}

	// Total number of queue handles across all families.
	deUint32 totalQueueCount (void)
	{
		deUint32	count	= 0;

		for (deUint32 familyIdx = 0; familyIdx < familyCount(); familyIdx++)
		{
			count	+= queueFamilyCount(familyIdx);
		}

		return count;
	}

	VkDevice getDevice (void) const
	{
		return *m_logicalDevice;
	}

	const DeviceInterface& getDeviceInterface (void) const
	{
		return *m_deviceDriver;
	}

	Allocator& getAllocator (void)
	{
		return *m_allocator;
	}

	// Returns the shared instance, creating it on first use.
	// NOTE(review): not thread-safe; assumed to be called from a single test
	// thread — confirm against the framework's execution model.
	static SharedPtr<MultiQueues> getInstance(const Context& context, SynchronizationType type, bool timelineSemaphore)
	{
		if (!m_multiQueues)
			m_multiQueues = SharedPtr<MultiQueues>(new MultiQueues(context, type, timelineSemaphore));

		return m_multiQueues;
	}
	// Drops the singleton reference (device is destroyed when the last
	// outstanding SharedPtr goes away).
	static void destroy()
	{
		m_multiQueues.clear();
	}

private:
	Move<VkDevice>					m_logicalDevice;
	MovePtr<DeviceDriver>			m_deviceDriver;
	MovePtr<Allocator>				m_allocator;
	std::map<deUint32, QueueData>	m_queues;		// keyed by queue family index
	deUint32						m_queueCount;	// see NOTE in addQueueIndex()

	static SharedPtr<MultiQueues>	m_multiQueues;
};
SharedPtr<MultiQueues>				MultiQueues::m_multiQueues;
324 
// Records the memory barrier separating the write operation from the read
// operation on 'cmdBuffer'.  When the families differ and the resource uses
// VK_SHARING_MODE_EXCLUSIVE, the barrier carries a queue family ownership
// transfer and is recorded on both queues: the release half on the write
// queue (secondQueue == false) and the acquire half on the read queue
// (secondQueue == true).  On each half the stage/access masks of the opposite
// side are neutralized (TOP_OF_PIPE/0 on the acquire side's source,
// BOTTOM_OF_PIPE/0 on the release side's destination).
void createBarrierMultiQueue (SynchronizationWrapperPtr synchronizationWrapper,
							  const VkCommandBuffer&	cmdBuffer,
							  const SyncInfo&			writeSync,
							  const SyncInfo&			readSync,
							  const Resource&			resource,
							  const deUint32			writeFamily,
							  const deUint32			readFamily,
							  const VkSharingMode		sharingMode,
							  const bool				secondQueue = false)
{
	if (resource.getType() == RESOURCE_TYPE_IMAGE)
	{
		VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
			secondQueue ? 0u : writeSync.accessMask,
			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
			!secondQueue ? 0u : readSync.accessMask,
			writeSync.imageLayout,
			readSync.imageLayout,
			resource.getImage().handle,
			resource.getImage().subresourceRange
		);

		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
		{
			// Ownership transfer: record on both the release and acquire queue.
			imageMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
			imageMemoryBarrier2.dstQueueFamilyIndex = readFamily;

			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
		}
		else if (!secondQueue)
		{
			// No ownership transfer needed: a single barrier on the write
			// queue suffices (also performs any layout transition).
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
			synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
		}
	}
	else
	{
		VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
			secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
			secondQueue ? 0u : writeSync.accessMask,
			!secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
			!secondQueue ? 0u : readSync.accessMask,
			resource.getBuffer().handle,
			resource.getBuffer().offset,
			resource.getBuffer().size
		);

		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
		{
			bufferMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
			bufferMemoryBarrier2.dstQueueFamilyIndex = readFamily;
		}

		// NOTE(review): unlike the image path above, the buffer barrier is
		// recorded unconditionally — including on the acquire side when no
		// ownership transfer occurs.  Presumably harmless (the masks are
		// neutralized), but confirm the asymmetry is intentional.
		VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
		synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
	}
}
384 
// Common base for the multi-queue synchronization test instances: acquires
// the shared MultiQueues device and builds an OperationContext bound to it,
// keeping references to the write/read operation factories for the subclass.
class BaseTestInstance : public TestInstance
{
public:
	BaseTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, bool timelineSemaphore)
		: TestInstance		(context)
		, m_type			(type)
		, m_queues			(MultiQueues::getInstance(context, type, timelineSemaphore))
		, m_opContext		(new OperationContext(context, type, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator(), pipelineCacheData))
		, m_resourceDesc	(resourceDesc)
		, m_writeOp			(writeOp)
		, m_readOp			(readOp)
	{
	}

protected:
	const SynchronizationType			m_type;
	const SharedPtr<MultiQueues>		m_queues;		// shared singleton device + queues
	const UniquePtr<OperationContext>	m_opContext;	// operation context on the custom device
	const ResourceDescription			m_resourceDesc;
	const OperationSupport&				m_writeOp;		// write operation factory (owned by caller)
	const OperationSupport&				m_readOp;		// read operation factory (owned by caller)
};
407 
// Tests write->read dependencies across queue pairs using a single binary
// semaphore: the write queue signals it, the read queue waits on it, and the
// result written by the read operation is compared against the write data.
class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
	BinarySemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		// Run the write/read pass once for every compatible queue pair.
		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			// Fresh resource and operations per pair so results don't leak
			// between iterations.
			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>		readOp			(m_readOp.build (*m_opContext, *resource));

			// One command pool/buffer per role, indexed by QueueType.
			const Move<VkCommandPool>			cmdPool[]		=
			{
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>			ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]	=
			{
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ]),
			};
			// Binary semaphore: signaled by the write submission, waited on
			// by the read submission.
			const Unique<VkSemaphore>			semaphore		(createSemaphore(vk, device));
			VkSemaphoreSubmitInfoKHR			waitSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR			signalSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr			synchronizationWrapper[]
			{
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
				getSynchronizationWrapper(m_type, vk, DE_FALSE),
			};

			synchronizationWrapper[QUEUETYPE_WRITE]->addSubmitInfo(
				0u,
				DE_NULL,
				1u,
				&cmdBufferInfos[QUEUETYPE_WRITE],
				1u,
				&signalSemaphoreSubmitInfo
			);
			synchronizationWrapper[QUEUETYPE_READ]->addSubmitInfo(
				1u,
				&waitSemaphoreSubmitInfo,
				1u,
				&cmdBufferInfos[QUEUETYPE_READ],
				0u,
				DE_NULL
			);

			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
			const SyncInfo					readSync		= readOp->getInSyncInfo();
			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

			// Write queue: record the write, then the release half of the
			// barrier (see createBarrierMultiQueue).
			beginCommandBuffer		(vk, writeCmdBuffer);
			writeOp->recordCommands	(writeCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, writeCmdBuffer);

			// Read queue: acquire half of the barrier first, then the read.
			beginCommandBuffer		(vk, readCmdBuffer);
			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(readCmdBuffer);
			endCommandBuffer		(vk, readCmdBuffer);

			VK_CHECK(synchronizationWrapper[QUEUETYPE_WRITE]->queueSubmit(queuePairs[pairNdx].queueWrite, DE_NULL));
			VK_CHECK(synchronizationWrapper[QUEUETYPE_READ]->queueSubmit(queuePairs[pairNdx].queueRead, DE_NULL));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

				// Indirect-buffer results only guarantee a lower bound on the
				// counter; everything else must match byte-for-byte.
				if (isIndirectBuffer(m_resourceDesc.type))
				{
					const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
					const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

					if (actualValue < expectedValue)
						return tcu::TestStatus::fail("Counter value is smaller than expected");
				}
				else
				{
					if (0 != deMemCmp(expected.data, actual.data, expected.size))
						return tcu::TestStatus::fail("Memory contents don't match");
				}
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};
517 
518 template<typename T>
makeVkSharedPtr(Move<T> move)519 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
520 {
521 	return SharedPtr<Move<T> >(new Move<T>(move));
522 }
523 
// Builds a chain of operations (write -> copies -> read) that touches at
// least every queue of the device once, each submission depending on the
// previous one through increasing points on a single timeline semaphore.
class TimelineSemaphoreTestInstance : public BaseTestInstance
{
public:
	TimelineSemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const SharedPtr<OperationSupport>& writeOp, const SharedPtr<OperationSupport>& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, *writeOp, *readOp, pipelineCacheData, true)
		, m_sharingMode		(sharingMode)
	{
		deUint32				maxQueues		= 0;
		std::vector<deUint32>	queueFamilies;

		if (m_queues->totalQueueCount() < 2)
			TCU_THROW(NotSupportedError, "Not enough queues");

		for (deUint32 familyNdx = 0; familyNdx < m_queues->familyCount(); familyNdx++)
		{
			maxQueues = std::max(m_queues->queueFamilyCount(familyNdx), maxQueues);
			queueFamilies.push_back(familyNdx);
		}

		// Create a chain of operations copying data from one resource
		// to another across at least every single queue of the system
		// at least once. Each of the operation will be executing with
		// a dependency on the previous using timeline points.
		m_opSupports.push_back(writeOp);
		m_opQueues.push_back(m_queues->getDefaultQueue(writeOp->getQueueFlags(*m_opContext)));

		for (deUint32 queueIdx = 0; queueIdx < maxQueues; queueIdx++)
		{
			for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
			{
				// Pick the first copy operation that supports this resource
				// and can run on this family.
				for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++)
				{
					if (isResourceSupported(s_copyOps[copyOpIdx], resourceDesc))
					{
						SharedPtr<OperationSupport>	opSupport	(makeOperationSupport(s_copyOps[copyOpIdx], m_resourceDesc).release());

						if (!checkQueueFlags(opSupport->getQueueFlags(*m_opContext), m_queues->getQueueFamilyFlags(familyIdx)))
							continue;

						m_opSupports.push_back(opSupport);
						// Wrap around when this family has fewer queues than
						// the largest family.
						m_opQueues.push_back(m_queues->getQueue(familyIdx, queueIdx % m_queues->queueFamilyCount(familyIdx)));
						break;
					}
				}
			}
		}

		m_opSupports.push_back(readOp);
		m_opQueues.push_back(m_queues->getDefaultQueue(readOp->getQueueFlags(*m_opContext)));

		// Now create the resources with the usage associated to the
		// operation performed on the resource.
		for (deUint32 opIdx = 0; opIdx < (m_opSupports.size() - 1); opIdx++)
		{
			deUint32 usage = m_opSupports[opIdx]->getOutResourceUsageFlags() | m_opSupports[opIdx + 1]->getInResourceUsageFlags();

			m_resources.push_back(SharedPtr<Resource>(new Resource(*m_opContext, m_resourceDesc, usage, m_sharingMode, queueFamilies)));
		}

		// Finally create the operations using the resources.
		m_ops.push_back(SharedPtr<Operation>(m_opSupports[0]->build(*m_opContext, *m_resources[0]).release()));
		for (deUint32 opIdx = 1; opIdx < (m_opSupports.size() - 1); opIdx++)
			m_ops.push_back(SharedPtr<Operation>(m_opSupports[opIdx]->build(*m_opContext, *m_resources[opIdx - 1], *m_resources[opIdx]).release()));
		m_ops.push_back(SharedPtr<Operation>(m_opSupports[m_opSupports.size() - 1]->build(*m_opContext, *m_resources.back()).release()));
	}

	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&							vk				= m_opContext->getDeviceInterface();
		const VkDevice									device			= m_opContext->getDevice();
		de::Random										rng				(1234);
		const Unique<VkSemaphore>						semaphore		(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE_KHR));
		std::vector<SharedPtr<Move<VkCommandPool> > >	cmdPools;
		std::vector<SharedPtr<Move<VkCommandBuffer> > >	ptrCmdBuffers;
		std::vector<VkCommandBufferSubmitInfoKHR>		cmdBufferInfos;
		std::vector<deUint64>							timelineValues;

		// One command pool per family; buffers for each op come from the pool
		// of the family the op's queue belongs to.
		cmdPools.resize(m_queues->familyCount());
		for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
			cmdPools[familyIdx] = makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, familyIdx));

		ptrCmdBuffers.resize(m_ops.size());
		cmdBufferInfos.resize(m_ops.size());
		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
		{
			// Strictly increasing timeline values with pseudo-random gaps
			// (deterministic: fixed seed above).
			deUint64	increment	= 1 + rng.getUint8();

			ptrCmdBuffers[opIdx] = makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools[m_opQueues[opIdx].family]));
			cmdBufferInfos[opIdx] = makeCommonCommandBufferSubmitInfo(**ptrCmdBuffers[opIdx]);

			timelineValues.push_back(timelineValues.empty() ? increment : (timelineValues.back() + increment));
		}

		for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
		{
			VkCommandBuffer				cmdBuffer = cmdBufferInfos[opIdx].commandBuffer;
			// Each submission waits on the previous op's timeline point and
			// signals its own (the first op waits on nothing).
			VkSemaphoreSubmitInfoKHR	waitSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, (opIdx == 0 ? 0u : timelineValues[opIdx - 1]), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
			VkSemaphoreSubmitInfoKHR	signalSemaphoreSubmitInfo =
				makeCommonSemaphoreSubmitInfo(*semaphore, timelineValues[opIdx], VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
			SynchronizationWrapperPtr	synchronizationWrapper = getSynchronizationWrapper(m_type, vk, DE_TRUE);

			synchronizationWrapper->addSubmitInfo(
				opIdx == 0 ? 0u : 1u,
				&waitSemaphoreSubmitInfo,
				1u,
				&cmdBufferInfos[opIdx],
				1u,
				&signalSemaphoreSubmitInfo,
				opIdx == 0 ? DE_FALSE : DE_TRUE,
				DE_TRUE
			);

			beginCommandBuffer(vk, cmdBuffer);

			// Acquire half of the barrier against the previous op (if any).
			if (opIdx > 0)
			{
				const SyncInfo	writeSync	= m_ops[opIdx - 1]->getOutSyncInfo();
				const SyncInfo	readSync	= m_ops[opIdx]->getInSyncInfo();
				const Resource&	resource	= *m_resources[opIdx - 1].get();

				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx - 1].family, m_opQueues[opIdx].family, m_sharingMode, true);
			}

			m_ops[opIdx]->recordCommands(cmdBuffer);

			// Release half of the barrier towards the next op (if any).
			if (opIdx < (m_ops.size() - 1))
			{
				const SyncInfo	writeSync	= m_ops[opIdx]->getOutSyncInfo();
				const SyncInfo	readSync	= m_ops[opIdx + 1]->getInSyncInfo();
				const Resource&	resource	= *m_resources[opIdx].get();

				createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx].family, m_opQueues[opIdx + 1].family, m_sharingMode);
			}

			endCommandBuffer(vk, cmdBuffer);

			VK_CHECK(synchronizationWrapper->queueSubmit(m_opQueues[opIdx].queue, DE_NULL));
		}


		// Waiting for the last queue is enough for the result: every earlier
		// submission is ordered before it through the timeline semaphore.
		VK_CHECK(vk.queueWaitIdle(m_opQueues.back().queue));

		{
			const Data	expected	= m_ops.front()->getData();
			const Data	actual		= m_ops.back()->getData();

			// Indirect-buffer results only guarantee a lower bound on the
			// counter; everything else must match byte-for-byte.
			if (isIndirectBuffer(m_resourceDesc.type))
			{
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		// Make the validation layers happy.
		for (deUint32 opIdx = 0; opIdx < m_opQueues.size(); opIdx++)
			VK_CHECK(vk.queueWaitIdle(m_opQueues[opIdx].queue));

		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode							m_sharingMode;
	std::vector<SharedPtr<OperationSupport> >	m_opSupports;	// write, copies..., read
	std::vector<SharedPtr<Operation> >			m_ops;			// built from m_opSupports
	std::vector<SharedPtr<Resource> >			m_resources;	// one between each pair of ops
	std::vector<Queue>							m_opQueues;		// queue each op runs on
};
700 
701 class FenceTestInstance : public BaseTestInstance
702 {
703 public:
	// Fence-based variant: same write/read pairing as the binary-semaphore
	// test, but ordering is enforced by submitting the write and waiting for
	// it to finish (fence) before submitting the read.  Binary semaphores are
	// not used here, hence timelineSemaphore == false for the base class.
	FenceTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
		, m_sharingMode		(sharingMode)
	{
	}
709 
iterate(void)710 	tcu::TestStatus	iterate (void)
711 	{
712 		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
713 		const VkDevice					device		= m_opContext->getDevice();
714 		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));
715 
716 		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
717 		{
718 			const UniquePtr<Resource>		resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
719 			const UniquePtr<Operation>		writeOp			(m_writeOp.build(*m_opContext, *resource));
720 			const UniquePtr<Operation>		readOp			(m_readOp.build(*m_opContext, *resource));
721 			const Move<VkCommandPool>		cmdPool[]
722 			{
723 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
724 				createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
725 			};
726 			const Move<VkCommandBuffer>		ptrCmdBuffer[]
727 			{
728 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
729 				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
730 			};
731 			const VkCommandBufferSubmitInfoKHR	cmdBufferInfos[]
732 			{
733 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
734 				makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ])
735 			};
736 			SynchronizationWrapperPtr		synchronizationWrapper[]
737 			{
738 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
739 				getSynchronizationWrapper(m_type, vk, DE_FALSE),
740 			};
741 			const SyncInfo					writeSync		= writeOp->getOutSyncInfo();
742 			const SyncInfo					readSync		= readOp->getInSyncInfo();
743 			VkCommandBuffer					writeCmdBuffer	= cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
744 			VkCommandBuffer					readCmdBuffer	= cmdBufferInfos[QUEUETYPE_READ].commandBuffer;
745 
746 			beginCommandBuffer		(vk, writeCmdBuffer);
747 			writeOp->recordCommands	(writeCmdBuffer);
748 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
749 			endCommandBuffer		(vk, writeCmdBuffer);
750 
751 			submitCommandsAndWait	(synchronizationWrapper[QUEUETYPE_WRITE], vk, device, queuePairs[pairNdx].queueWrite, writeCmdBuffer);
752 
753 			beginCommandBuffer		(vk, readCmdBuffer);
754 			createBarrierMultiQueue	(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
755 			readOp->recordCommands	(readCmdBuffer);
756 			endCommandBuffer		(vk, readCmdBuffer);
757 
758 			submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_READ], vk, device, queuePairs[pairNdx].queueRead, readCmdBuffer);
759 
760 			{
761 				const Data	expected = writeOp->getData();
762 				const Data	actual	 = readOp->getData();
763 
764 				if (isIndirectBuffer(m_resourceDesc.type))
765 				{
766 					const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
767 					const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
768 
769 					if (actualValue < expectedValue)
770 						return tcu::TestStatus::fail("Counter value is smaller than expected");
771 				}
772 				else
773 				{
774 					if (0 != deMemCmp(expected.data, actual.data, expected.size))
775 						return tcu::TestStatus::fail("Memory contents don't match");
776 				}
777 			}
778 		}
779 		return tcu::TestStatus::pass("OK");
780 	}
781 
782 private:
783 	const VkSharingMode	m_sharingMode;
784 };
785 
786 class BaseTestCase : public TestCase
787 {
788 public:
BaseTestCase(tcu::TestContext & testCtx,const std::string & name,const std::string & description,SynchronizationType type,const SyncPrimitive syncPrimitive,const ResourceDescription resourceDesc,const OperationName writeOp,const OperationName readOp,const VkSharingMode sharingMode,PipelineCacheData & pipelineCacheData)789 	BaseTestCase (tcu::TestContext&			testCtx,
790 				  const std::string&		name,
791 				  const std::string&		description,
792 				  SynchronizationType		type,
793 				  const SyncPrimitive		syncPrimitive,
794 				  const ResourceDescription	resourceDesc,
795 				  const OperationName		writeOp,
796 				  const OperationName		readOp,
797 				  const VkSharingMode		sharingMode,
798 				  PipelineCacheData&		pipelineCacheData)
799 		: TestCase				(testCtx, name, description)
800 		, m_type				(type)
801 		, m_resourceDesc		(resourceDesc)
802 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
803 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
804 		, m_syncPrimitive		(syncPrimitive)
805 		, m_sharingMode			(sharingMode)
806 		, m_pipelineCacheData	(pipelineCacheData)
807 	{
808 	}
809 
initPrograms(SourceCollections & programCollection) const810 	void initPrograms (SourceCollections& programCollection) const
811 	{
812 		m_writeOp->initPrograms(programCollection);
813 		m_readOp->initPrograms(programCollection);
814 
815 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
816 		{
817 			for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
818 			{
819 				if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
820 					makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
821 			}
822 		}
823 	}
824 
checkSupport(Context & context) const825 	void checkSupport(Context& context) const
826 	{
827 		if (m_type == SynchronizationType::SYNCHRONIZATION2)
828 			context.requireDeviceFunctionality("VK_KHR_synchronization2");
829 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
830 			context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");
831 
832 		const InstanceInterface&					instance				= context.getInstanceInterface();
833 		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
834 		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
835 		if (m_sharingMode == VK_SHARING_MODE_CONCURRENT && queueFamilyProperties.size() < 2)
836 			TCU_THROW(NotSupportedError, "Concurrent requires more than 1 queue family");
837 
838 		if (!context.getTimelineSemaphoreFeatures().timelineSemaphore)
839 			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
840 
841 		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
842 		{
843 			VkImageFormatProperties	imageFormatProperties;
844 			const deUint32			usage					= m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
845 			const VkResult			formatResult			= instance.getPhysicalDeviceImageFormatProperties(physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage, (VkImageCreateFlags)0, &imageFormatProperties);
846 
847 			if (formatResult != VK_SUCCESS)
848 				TCU_THROW(NotSupportedError, "Image format is not supported");
849 
850 			if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
851 				TCU_THROW(NotSupportedError, "Requested sample count is not supported");
852 		}
853 	}
854 
createInstance(Context & context) const855 	TestInstance* createInstance (Context& context) const
856 	{
857 		switch (m_syncPrimitive)
858 		{
859 			case SYNC_PRIMITIVE_FENCE:
860 				return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
861 			case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
862 				return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
863 			case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
864 				return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData, m_sharingMode);
865 			default:
866 				DE_ASSERT(0);
867 				return DE_NULL;
868 		}
869 	}
870 
871 private:
872 	const SynchronizationType				m_type;
873 	const ResourceDescription				m_resourceDesc;
874 	const SharedPtr<OperationSupport>		m_writeOp;
875 	const SharedPtr<OperationSupport>		m_readOp;
876 	const SyncPrimitive						m_syncPrimitive;
877 	const VkSharingMode						m_sharingMode;
878 	PipelineCacheData&						m_pipelineCacheData;
879 };
880 
// Parameters forwarded by value to the createTests/cleanupGroup callbacks.
struct TestData
{
	SynchronizationType		type;				// Legacy synchronization vs VK_KHR_synchronization2 code path.
	PipelineCacheData*		pipelineCacheData;	// Owned by the caller of createSynchronizedOperationMultiQueueTests.
};
886 
createTests(tcu::TestCaseGroup * group,TestData data)887 void createTests (tcu::TestCaseGroup* group, TestData data)
888 {
889 	tcu::TestContext& testCtx = group->getTestContext();
890 
891 	static const struct
892 	{
893 		const char*		name;
894 		SyncPrimitive	syncPrimitive;
895 		int				numOptions;
896 	} groups[] =
897 	{
898 		{ "fence",				SYNC_PRIMITIVE_FENCE,				1 },
899 		{ "binary_semaphore",	SYNC_PRIMITIVE_BINARY_SEMAPHORE,	1 },
900 		{ "timeline_semaphore",	SYNC_PRIMITIVE_TIMELINE_SEMAPHORE,	1 }
901 	};
902 
903 	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
904 	{
905 		MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));
906 
907 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
908 		for (int readOpNdx  = 0; readOpNdx  < DE_LENGTH_OF_ARRAY(s_readOps);  ++readOpNdx)
909 		{
910 			const OperationName	writeOp		= s_writeOps[writeOpNdx];
911 			const OperationName	readOp		= s_readOps[readOpNdx];
912 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
913 			bool				empty		= true;
914 
915 			MovePtr<tcu::TestCaseGroup> opGroup		(new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));
916 
917 			for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
918 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
919 			{
920 				const ResourceDescription&	resource	= s_resources[resourceNdx];
921 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
922 				{
923 					std::string					name		= getResourceName(resource);
924 					VkSharingMode				sharingMode = VK_SHARING_MODE_EXCLUSIVE;
925 
926 					// queue family sharing mode used for resource
927 					if (optionNdx)
928 					{
929 						name += "_concurrent";
930 						sharingMode = VK_SHARING_MODE_CONCURRENT;
931 					}
932 					else
933 						name += "_exclusive";
934 
935 					opGroup->addChild(new BaseTestCase(testCtx, name, "", data.type, groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *data.pipelineCacheData));
936 					empty = false;
937 				}
938 			}
939 			if (!empty)
940 				synchGroup->addChild(opGroup.release());
941 		}
942 		group->addChild(synchGroup.release());
943 	}
944 }
945 
cleanupGroup(tcu::TestCaseGroup * group,TestData data)946 void cleanupGroup (tcu::TestCaseGroup* group, TestData data)
947 {
948 	DE_UNREF(group);
949 	DE_UNREF(data.pipelineCacheData);
950 	// Destroy singleton object
951 	MultiQueues::destroy();
952 }
953 
954 } // anonymous
955 
createSynchronizedOperationMultiQueueTests(tcu::TestContext & testCtx,SynchronizationType type,PipelineCacheData & pipelineCacheData)956 tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, SynchronizationType type, PipelineCacheData& pipelineCacheData)
957 {
958 	TestData data
959 	{
960 		type,
961 		&pipelineCacheData
962 	};
963 
964 	return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, data, cleanupGroup);
965 }
966 
967 } // synchronization
968 } // vkt
969