• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Synchronization primitive tests with single queue
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktSynchronizationOperationSingleQueueTests.hpp"
25 #include "vkDefs.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vktTestGroupUtil.hpp"
29 #include "vkRef.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkBarrierUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkCmdUtil.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
37 #include "deRandom.hpp"
38 #include "deUniquePtr.hpp"
39 #include "tcuTestLog.hpp"
40 #include "vktSynchronizationUtil.hpp"
41 #include "vktSynchronizationOperation.hpp"
42 #include "vktSynchronizationOperationTestData.hpp"
43 #include "vktSynchronizationOperationResources.hpp"
44 
45 namespace vkt
46 {
47 namespace synchronization
48 {
49 namespace
50 {
51 using namespace vk;
52 using tcu::TestLog;
53 
// Common base for the single-queue synchronization test instances below.
// Builds the operation context, allocates the shared resource, and constructs
// the write/read operations that derived tests record into command buffers.
class BaseTestInstance : public TestInstance
{
public:
	// NOTE: member initialization order is significant -- m_opContext must be
	// constructed before m_resource and the operations, all of which are
	// built against it.
	BaseTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: TestInstance	(context)
		, m_type		(type)
		, m_opContext	(context, type, pipelineCacheData)
		// Resource usage flags must satisfy both the producer (write) and the
		// consumer (read) operation.
		, m_resource	(new Resource(m_opContext, resourceDesc, writeOp.getOutResourceUsageFlags() | readOp.getInResourceUsageFlags()))
		, m_writeOp		(writeOp.build(m_opContext, *m_resource))
		, m_readOp		(readOp.build(m_opContext, *m_resource))
	{
	}

protected:
	SynchronizationType					m_type;			// legacy synchronization vs. VK_KHR_synchronization2 code path
	OperationContext					m_opContext;
	const de::UniquePtr<Resource>		m_resource;		// resource written by m_writeOp and read by m_readOp
	const de::UniquePtr<Operation>		m_writeOp;
	const de::UniquePtr<Operation>		m_readOp;
};
74 
// Tests VkEvent-based synchronization: write and read operations are recorded
// into a single command buffer and ordered with cmdSetEvent + cmdWaitEvents
// carrying a matching memory barrier.
class EventTestInstance : public BaseTestInstance
{
public:
	EventTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: BaseTestInstance		(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
	{
	}

	// Records write -> set event -> wait event (+barrier) -> read, submits the
	// command buffer, then compares the data produced and consumed.
	tcu::TestStatus iterate (void)
	{
		const DeviceInterface&			vk						= m_context.getDeviceInterface();
		const VkDevice					device					= m_context.getDevice();
		const VkQueue					queue					= m_context.getUniversalQueue();
		const deUint32					queueFamilyIndex		= m_context.getUniversalQueueFamilyIndex();
		const Unique<VkCommandPool>		cmdPool					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer				(makeCommandBuffer(vk, device, *cmdPool));
		const Unique<VkEvent>			event					(createEvent(vk, device));
		const SyncInfo					writeSync				= m_writeOp->getOutSyncInfo();
		const SyncInfo					readSync				= m_readOp->getInSyncInfo();
		SynchronizationWrapperPtr		synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE);

		beginCommandBuffer(vk, *cmdBuffer);

		m_writeOp->recordCommands(*cmdBuffer);

		if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
		{
			// Images additionally need a layout transition from the write
			// operation's layout to the read operation's layout.
			const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
				writeSync.imageLayout,							// VkImageLayout					oldLayout
				readSync.imageLayout,							// VkImageLayout					newLayout
				m_resource->getImage().handle,					// VkImage							image
				m_resource->getImage().subresourceRange			// VkImageSubresourceRange			subresourceRange
			);
			// NOTE(review): the trailing DE_TRUE presumably flags the event code
			// path in makeCommonDependencyInfo -- confirm in vktSynchronizationUtil.
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2, DE_TRUE);
			// The same dependency info describes both the set and the wait side.
			synchronizationWrapper->cmdSetEvent(*cmdBuffer, *event, &dependencyInfo);
			synchronizationWrapper->cmdWaitEvents(*cmdBuffer, 1u, &event.get(), &dependencyInfo);
		}
		else
		{
			// Buffer resources only need a memory barrier over the used range.
			const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
				m_resource->getBuffer().handle,					// VkBuffer							buffer
				m_resource->getBuffer().offset,					// VkDeviceSize						offset
				m_resource->getBuffer().size					// VkDeviceSize						size
			);
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2, DE_NULL, DE_TRUE);
			synchronizationWrapper->cmdSetEvent(*cmdBuffer, *event, &dependencyInfo);
			synchronizationWrapper->cmdWaitEvents(*cmdBuffer, 1u, &event.get(), &dependencyInfo);
		}

		m_readOp->recordCommands(*cmdBuffer);

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(synchronizationWrapper, vk, device, queue, *cmdBuffer);

		// Verify that the read operation observed what the write operation produced.
		{
			const Data	expected = m_writeOp->getData();
			const Data	actual	 = m_readOp->getData();

			if (isIndirectBuffer(m_resource->getType()))
			{
				// For indirect buffers only the 32-bit counter is checked; the
				// test only requires it to be at least the expected value.
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				// All other resource types must match byte-for-byte.
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		return tcu::TestStatus::pass("OK");
	}
};
159 
160 class BarrierTestInstance : public BaseTestInstance
161 {
162 public:
BarrierTestInstance(Context & context,SynchronizationType type,const ResourceDescription & resourceDesc,const OperationSupport & writeOp,const OperationSupport & readOp,PipelineCacheData & pipelineCacheData)163 	BarrierTestInstance	(Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
164 		: BaseTestInstance		(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
165 	{
166 	}
167 
iterate(void)168 	tcu::TestStatus iterate (void)
169 	{
170 		const DeviceInterface&			vk						= m_context.getDeviceInterface();
171 		const VkDevice					device					= m_context.getDevice();
172 		const VkQueue					queue					= m_context.getUniversalQueue();
173 		const deUint32					queueFamilyIndex		= m_context.getUniversalQueueFamilyIndex();
174 		const Unique<VkCommandPool>		cmdPool					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
175 		const Move<VkCommandBuffer>		cmdBuffer				(makeCommandBuffer(vk, device, *cmdPool));
176 		const SyncInfo					writeSync				= m_writeOp->getOutSyncInfo();
177 		const SyncInfo					readSync				= m_readOp->getInSyncInfo();
178 		SynchronizationWrapperPtr		synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE);
179 
180 		beginCommandBuffer(vk, *cmdBuffer);
181 
182 		m_writeOp->recordCommands(*cmdBuffer);
183 
184 		if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
185 		{
186 			const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
187 				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
188 				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
189 				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
190 				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
191 				writeSync.imageLayout,							// VkImageLayout					oldLayout
192 				readSync.imageLayout,							// VkImageLayout					newLayout
193 				m_resource->getImage().handle,					// VkImage							image
194 				m_resource->getImage().subresourceRange			// VkImageSubresourceRange			subresourceRange
195 			);
196 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
197 			synchronizationWrapper->cmdPipelineBarrier(*cmdBuffer, &dependencyInfo);
198 		}
199 		else
200 		{
201 			const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
202 				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
203 				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
204 				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
205 				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
206 				m_resource->getBuffer().handle,					// VkBuffer							buffer
207 				m_resource->getBuffer().offset,					// VkDeviceSize						offset
208 				m_resource->getBuffer().size					// VkDeviceSize						size
209 			);
210 			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
211 			synchronizationWrapper->cmdPipelineBarrier(*cmdBuffer, &dependencyInfo);
212 		}
213 
214 		m_readOp->recordCommands(*cmdBuffer);
215 
216 		endCommandBuffer(vk, *cmdBuffer);
217 
218 		submitCommandsAndWait(synchronizationWrapper, vk, device, queue, *cmdBuffer);
219 
220 		{
221 			const Data	expected = m_writeOp->getData();
222 			const Data	actual	 = m_readOp->getData();
223 
224 			if (isIndirectBuffer(m_resource->getType()))
225 			{
226 				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
227 				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];
228 
229 				if (actualValue < expectedValue)
230 					return tcu::TestStatus::fail("Counter value is smaller than expected");
231 			}
232 			else
233 			{
234 				if (0 != deMemCmp(expected.data, actual.data, expected.size))
235 					return tcu::TestStatus::fail("Memory contents don't match");
236 			}
237 		}
238 
239 		return tcu::TestStatus::pass("OK");
240 	}
241 };
242 
// Tests binary-semaphore synchronization: write and read operations are
// recorded into separate command buffers submitted in one batch; the second
// submission waits on a semaphore signaled by the first.
class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
	BinarySemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
	{
	}

	tcu::TestStatus	iterate (void)
	{
		// Indices into the per-submission arrays below.
		enum {WRITE=0, READ, COUNT};
		const DeviceInterface&			vk						= m_context.getDeviceInterface();
		const VkDevice					device					= m_context.getDevice();
		const VkQueue					queue					= m_context.getUniversalQueue();
		const deUint32					queueFamilyIndex		= m_context.getUniversalQueueFamilyIndex();
		const Unique<VkSemaphore>		semaphore				(createSemaphore (vk, device));
		const Unique<VkCommandPool>		cmdPool					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Move<VkCommandBuffer>		ptrCmdBuffer[COUNT]		= {makeCommandBuffer(vk, device, *cmdPool), makeCommandBuffer(vk, device, *cmdPool)};
		VkCommandBuffer					cmdBuffers[COUNT]		= {*ptrCmdBuffer[WRITE], *ptrCmdBuffer[READ]};
		// Wrapper configured for two submit infos (write batch + read batch).
		SynchronizationWrapperPtr		synchronizationWrapper	= getSynchronizationWrapper(m_type, vk, DE_FALSE, 2u);
		const SyncInfo					writeSync				= m_writeOp->getOutSyncInfo();
		const SyncInfo					readSync				= m_readOp->getInSyncInfo();
		// Signal at the end of the write batch, wait at the top of the read batch.
		VkSemaphoreSubmitInfoKHR		signalSemaphoreSubmitInfo =
			makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
		VkSemaphoreSubmitInfoKHR		waitSemaphoreSubmitInfo =
			makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
		VkCommandBufferSubmitInfoKHR	commandBufferSubmitInfo[]
		{
			makeCommonCommandBufferSubmitInfo(cmdBuffers[WRITE]),
			makeCommonCommandBufferSubmitInfo(cmdBuffers[READ])
		};

		// First batch: write command buffer, signals the semaphore.
		synchronizationWrapper->addSubmitInfo(
			0u,
			DE_NULL,
			1u,
			&commandBufferSubmitInfo[WRITE],
			1u,
			&signalSemaphoreSubmitInfo
		);
		// Second batch: read command buffer, waits on the semaphore.
		synchronizationWrapper->addSubmitInfo(
			1u,
			&waitSemaphoreSubmitInfo,
			1u,
			&commandBufferSubmitInfo[READ],
			0u,
			DE_NULL
		);

		beginCommandBuffer(vk, cmdBuffers[WRITE]);

		m_writeOp->recordCommands(cmdBuffers[WRITE]);

		if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
		{
			// Images additionally need a layout transition from the write
			// operation's layout to the read operation's layout.
			const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
				writeSync.imageLayout,							// VkImageLayout					oldLayout
				readSync.imageLayout,							// VkImageLayout					newLayout
				m_resource->getImage().handle,					// VkImage							image
				m_resource->getImage().subresourceRange			// VkImageSubresourceRange			subresourceRange
			);
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
			synchronizationWrapper->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
		}
		else
		{
			// NOTE(review): unlike the other instances this barrier covers the
			// whole buffer (0 / VK_WHOLE_SIZE) rather than the resource's
			// offset/size -- a superset of the used range, so still valid.
			const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
				m_resource->getBuffer().handle,					// VkBuffer							buffer
				0,												// VkDeviceSize						offset
				VK_WHOLE_SIZE									// VkDeviceSize						size
			);
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
			synchronizationWrapper->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
		}

		endCommandBuffer(vk, cmdBuffers[WRITE]);

		beginCommandBuffer(vk, cmdBuffers[READ]);

		m_readOp->recordCommands(cmdBuffers[READ]);

		endCommandBuffer(vk, cmdBuffers[READ]);

		// Submit both batches in one call (no fence) and drain the queue.
		VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
		VK_CHECK(vk.queueWaitIdle(queue));

		// Verify that the read operation observed what the write operation produced.
		{
			const Data	expected = m_writeOp->getData();
			const Data	actual	 = m_readOp->getData();

			if (isIndirectBuffer(m_resource->getType()))
			{
				// For indirect buffers only the 32-bit counter is checked; the
				// test only requires it to be at least the expected value.
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				// All other resource types must match byte-for-byte.
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		return tcu::TestStatus::pass("OK");
	}
};
359 
360 template<typename T>
makeVkSharedPtr(Move<T> move)361 inline de::SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
362 {
363 	return de::SharedPtr<Move<T> >(new Move<T>(move));
364 }
365 
// Tests timeline-semaphore synchronization: a chain of operations
// (write -> N copies -> read), each submitted separately, ordered by
// monotonically increasing timeline points on a single semaphore.
class TimelineSemaphoreTestInstance : public TestInstance
{
public:
	TimelineSemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const de::SharedPtr<OperationSupport>& writeOp, const de::SharedPtr<OperationSupport>& readOp, PipelineCacheData& pipelineCacheData)
		: TestInstance	(context)
		, m_type		(type)
		, m_opContext	(context, type, pipelineCacheData)
	{

		// Create a chain operation copying data from one resource to
		// another, each of the operation will be executing with a
		// dependency on the previous using timeline points.
		m_opSupports.push_back(writeOp);
		for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
		{
			if (isResourceSupported(s_copyOps[copyOpNdx], resourceDesc))
				m_opSupports.push_back(de::SharedPtr<OperationSupport>(makeOperationSupport(s_copyOps[copyOpNdx], resourceDesc).release()));
		}
		m_opSupports.push_back(readOp);

		// One resource between each pair of adjacent operations; its usage
		// flags must satisfy both the producer and the consumer.
		for (deUint32 opNdx = 0; opNdx < (m_opSupports.size() - 1); opNdx++)
		{
			deUint32 usage = m_opSupports[opNdx]->getOutResourceUsageFlags() | m_opSupports[opNdx + 1]->getInResourceUsageFlags();

			m_resources.push_back(de::SharedPtr<Resource>(new Resource(m_opContext, resourceDesc, usage)));
		}

		// First op only writes its resource, the last op only reads, and each
		// copy op in between reads the previous resource and writes the next.
		m_ops.push_back(de::SharedPtr<Operation>(m_opSupports[0]->build(m_opContext, *m_resources[0]).release()));
		for (deUint32 opNdx = 1; opNdx < (m_opSupports.size() - 1); opNdx++)
			m_ops.push_back(de::SharedPtr<Operation>(m_opSupports[opNdx]->build(m_opContext, *m_resources[opNdx - 1], *m_resources[opNdx]).release()));
		m_ops.push_back(de::SharedPtr<Operation>(m_opSupports[m_opSupports.size() - 1]->build(m_opContext, *m_resources.back()).release()));
	}

	// Submits one batch per operation; batch i waits on the timeline value
	// signaled by batch i-1, then verifies the data at the end of the chain.
	tcu::TestStatus	iterate (void)
	{
		const DeviceInterface&									vk						= m_context.getDeviceInterface();
		const VkDevice											device					= m_context.getDevice();
		const VkQueue											queue					= m_context.getUniversalQueue();
		const deUint32											queueFamilyIndex		= m_context.getUniversalQueueFamilyIndex();
		// Fixed seed keeps the timeline-point increments reproducible.
		de::Random												rng						(1234);
		const Unique<VkSemaphore>								semaphore				(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
		const Unique<VkCommandPool>								cmdPool					(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		std::vector<de::SharedPtr<Move<VkCommandBuffer> > >		ptrCmdBuffers;
		std::vector<VkCommandBufferSubmitInfoKHR>				cmdBuffersInfo				(m_ops.size(), makeCommonCommandBufferSubmitInfo(0u));
		std::vector<VkSemaphoreSubmitInfoKHR>					waitSemaphoreSubmitInfos	(m_ops.size(), makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
		std::vector<VkSemaphoreSubmitInfoKHR>					signalSemaphoreSubmitInfos	(m_ops.size(), makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
		SynchronizationWrapperPtr								synchronizationWrapper		= getSynchronizationWrapper(m_type, vk, DE_TRUE, static_cast<deUint32>(m_ops.size()));
		deUint64												increment					= 0u;

		// Allocate one command buffer per operation.
		for (deUint32 opNdx = 0; opNdx < m_ops.size(); opNdx++)
		{
			ptrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPool)));
			cmdBuffersInfo[opNdx].commandBuffer = **(ptrCmdBuffers.back());
		}

		for (deUint32 opNdx = 0; opNdx < m_ops.size(); opNdx++)
		{
			// Strictly increasing timeline value (random step of 1..256).
			increment								+= (1 + rng.getUint8());
			signalSemaphoreSubmitInfos[opNdx].value = increment;
			waitSemaphoreSubmitInfos[opNdx].value	= increment;

			// Batch opNdx signals its own point and (except for the first
			// batch) waits on the point signaled by the previous batch.
			synchronizationWrapper->addSubmitInfo(
				opNdx == 0 ? 0u : 1u,
				opNdx == 0 ? DE_NULL : &waitSemaphoreSubmitInfos[opNdx-1],
				1u,
				&cmdBuffersInfo[opNdx],
				1u,
				&signalSemaphoreSubmitInfos[opNdx],
				opNdx == 0 ? DE_FALSE : DE_TRUE,
				DE_TRUE
			);

			VkCommandBuffer cmdBuffer = cmdBuffersInfo[opNdx].commandBuffer;
			beginCommandBuffer(vk, cmdBuffer);

			if (opNdx > 0)
			{
				// Barrier between the previous op's writes and this op's reads
				// on the resource shared by the two operations.
				const SyncInfo	lastSync	= m_ops[opNdx - 1]->getOutSyncInfo();
				const SyncInfo	currentSync	= m_ops[opNdx]->getInSyncInfo();
				const Resource&	resource	= *m_resources[opNdx - 1].get();

				if (resource.getType() == RESOURCE_TYPE_IMAGE)
				{
					DE_ASSERT(lastSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
					DE_ASSERT(currentSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);

					const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
						lastSync.stageMask,									// VkPipelineStageFlags2KHR			srcStageMask
						lastSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						currentSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
						currentSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						lastSync.imageLayout,								// VkImageLayout					oldLayout
						currentSync.imageLayout,							// VkImageLayout					newLayout
						resource.getImage().handle,							// VkImage							image
						resource.getImage().subresourceRange				// VkImageSubresourceRange			subresourceRange
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
				}
				else
				{
					const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
						lastSync.stageMask,									// VkPipelineStageFlags2KHR			srcStageMask
						lastSync.accessMask,								// VkAccessFlags2KHR				srcAccessMask
						currentSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
						currentSync.accessMask,								// VkAccessFlags2KHR				dstAccessMask
						resource.getBuffer().handle,						// VkBuffer							buffer
						0,													// VkDeviceSize						offset
						VK_WHOLE_SIZE										// VkDeviceSize						size
					);
					VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
					synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
				}
			}

			m_ops[opNdx]->recordCommands(cmdBuffer);

			endCommandBuffer(vk, cmdBuffer);
		}

		// Submit all batches in one call (no fence) and drain the queue.
		VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
		VK_CHECK(vk.queueWaitIdle(queue));

		// Compare the data produced by the first op against what the last op read.
		{
			const Data	expected = m_ops.front()->getData();
			const Data	actual	 = m_ops.back()->getData();

			if (isIndirectBuffer(m_resources[0]->getType()))
			{
				// For indirect buffers only the 32-bit counter is checked; the
				// test only requires it to be at least the expected value.
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				// All other resource types must match byte-for-byte.
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		return tcu::TestStatus::pass("OK");
	}

protected:
	SynchronizationType								m_type;
	OperationContext								m_opContext;
	std::vector<de::SharedPtr<OperationSupport> >	m_opSupports;	// write + supported copies + read
	std::vector<de::SharedPtr<Operation> >			m_ops;			// built operations, one per m_opSupports entry
	std::vector<de::SharedPtr<Resource> >			m_resources;	// resource i links op i to op i+1
};
518 
// Tests fence-based synchronization: the write command buffer is submitted and
// fully waited on (fence) before the read command buffer is submitted.
class FenceTestInstance : public BaseTestInstance
{
public:
	FenceTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: BaseTestInstance	(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
	{
	}

	tcu::TestStatus	iterate (void)
	{
		// Indices into the per-submission arrays below.
		enum {WRITE=0, READ, COUNT};
		const DeviceInterface&			vk								= m_context.getDeviceInterface();
		const VkDevice					device							= m_context.getDevice();
		const VkQueue					queue							= m_context.getUniversalQueue();
		const deUint32					queueFamilyIndex				= m_context.getUniversalQueueFamilyIndex();
		const Unique<VkCommandPool>		cmdPool							(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Move<VkCommandBuffer>		ptrCmdBuffer[COUNT]				= {makeCommandBuffer(vk, device, *cmdPool), makeCommandBuffer(vk, device, *cmdPool)};
		VkCommandBuffer					cmdBuffers[COUNT]				= {*ptrCmdBuffer[WRITE], *ptrCmdBuffer[READ]};
		const SyncInfo					writeSync						= m_writeOp->getOutSyncInfo();
		const SyncInfo					readSync						= m_readOp->getInSyncInfo();
		// One wrapper per submission since each goes through submitCommandsAndWait.
		SynchronizationWrapperPtr		synchronizationWrapper[COUNT]
		{
			getSynchronizationWrapper(m_type, vk, DE_FALSE),
			getSynchronizationWrapper(m_type, vk, DE_FALSE)
		};

		beginCommandBuffer(vk, cmdBuffers[WRITE]);

		m_writeOp->recordCommands(cmdBuffers[WRITE]);

		// Only the image path records a barrier: images need a layout
		// transition to the read operation's layout before the second submit.
		// NOTE(review): the buffer path has no barrier -- it appears to rely on
		// the fence wait in submitCommandsAndWait for ordering/visibility.
		if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
		{
			const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
				writeSync.stageMask,							// VkPipelineStageFlags2KHR			srcStageMask
				writeSync.accessMask,							// VkAccessFlags2KHR				srcAccessMask
				readSync.stageMask,								// VkPipelineStageFlags2KHR			dstStageMask
				readSync.accessMask,							// VkAccessFlags2KHR				dstAccessMask
				writeSync.imageLayout,							// VkImageLayout					oldLayout
				readSync.imageLayout,							// VkImageLayout					newLayout
				m_resource->getImage().handle,					// VkImage							image
				m_resource->getImage().subresourceRange			// VkImageSubresourceRange			subresourceRange
			);
			VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
			synchronizationWrapper[WRITE]->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
		}

		endCommandBuffer(vk, cmdBuffers[WRITE]);

		// First submission completes entirely (fence wait) ...
		submitCommandsAndWait(synchronizationWrapper[WRITE], vk, device, queue, cmdBuffers[WRITE]);

		beginCommandBuffer(vk, cmdBuffers[READ]);

		m_readOp->recordCommands(cmdBuffers[READ]);

		endCommandBuffer(vk, cmdBuffers[READ]);

		// ... before the read submission starts.
		submitCommandsAndWait(synchronizationWrapper[READ], vk, device, queue, cmdBuffers[READ]);

		// Verify that the read operation observed what the write operation produced.
		{
			const Data	expected = m_writeOp->getData();
			const Data	actual	 = m_readOp->getData();

			if (isIndirectBuffer(m_resource->getType()))
			{
				// For indirect buffers only the 32-bit counter is checked; the
				// test only requires it to be at least the expected value.
				const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
				const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

				if (actualValue < expectedValue)
					return tcu::TestStatus::fail("Counter value is smaller than expected");
			}
			else
			{
				// All other resource types must match byte-for-byte.
				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}

		return tcu::TestStatus::pass("OK");
	}
};
599 
600 class SyncTestCase : public TestCase
601 {
602 public:
SyncTestCase(tcu::TestContext & testCtx,const std::string & name,const std::string & description,SynchronizationType type,const SyncPrimitive syncPrimitive,const ResourceDescription resourceDesc,const OperationName writeOp,const OperationName readOp,PipelineCacheData & pipelineCacheData)603 	SyncTestCase	(tcu::TestContext&			testCtx,
604 					 const std::string&			name,
605 					 const std::string&			description,
606 					 SynchronizationType		type,
607 					 const SyncPrimitive		syncPrimitive,
608 					 const ResourceDescription	resourceDesc,
609 					 const OperationName		writeOp,
610 					 const OperationName		readOp,
611 					 PipelineCacheData&			pipelineCacheData)
612 		: TestCase				(testCtx, name, description)
613 		, m_type				(type)
614 		, m_resourceDesc		(resourceDesc)
615 		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc).release())
616 		, m_readOp				(makeOperationSupport(readOp, resourceDesc).release())
617 		, m_syncPrimitive		(syncPrimitive)
618 		, m_pipelineCacheData	(pipelineCacheData)
619 	{
620 	}
621 
initPrograms(SourceCollections & programCollection) const622 	void initPrograms (SourceCollections& programCollection) const
623 	{
624 		m_writeOp->initPrograms(programCollection);
625 		m_readOp->initPrograms(programCollection);
626 
627 		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
628 		{
629 			for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
630 			{
631 				if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
632 					makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
633 			}
634 		}
635 	}
636 
	// Throw NotSupportedError unless the implementation supports everything this
	// case needs: synchronization2 (when selected), events on portability-subset
	// devices, timeline semaphores (when selected), and -- for image resources --
	// the requested format/usage/sample-count combination.
	void checkSupport(Context& context) const
	{
		if (m_type == SynchronizationType::SYNCHRONIZATION2)
			context.requireDeviceFunctionality("VK_KHR_synchronization2");

#ifndef CTS_USES_VULKANSC
		// VK_KHR_portability_subset allows implementations to omit VkEvent support.
		if (SYNC_PRIMITIVE_EVENT == m_syncPrimitive &&
			context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
			!context.getPortabilitySubsetFeatures().events)
		{
			TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Events are not supported by this implementation");
		}
#endif // CTS_USES_VULKANSC

		if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
			!context.getTimelineSemaphoreFeatures().timelineSemaphore)
			TCU_THROW(NotSupportedError, "Timeline semaphore not supported");

		if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
		{
			VkImageFormatProperties	imageFormatProperties;
			// The image must support both the writer's output usage and the reader's input usage.
			const deUint32				usage					= m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
			const InstanceInterface&	instance				= context.getInstanceInterface();
			const VkPhysicalDevice		physicalDevice			= context.getPhysicalDevice();
			const VkResult				formatResult			= instance.getPhysicalDeviceImageFormatProperties(physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage, (VkImageCreateFlags)0, &imageFormatProperties);

			if (formatResult != VK_SUCCESS)
				TCU_THROW(NotSupportedError, "Image format is not supported");

			// sampleCounts is a bitmask; require the exact requested count to be set.
			if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
				TCU_THROW(NotSupportedError, "Requested sample count is not supported");
		}
	}
670 
createInstance(Context & context) const671 	TestInstance* createInstance (Context& context) const
672 	{
673 		switch (m_syncPrimitive)
674 		{
675 			case SYNC_PRIMITIVE_FENCE:
676 				return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
677 			case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
678 				return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
679 			case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
680 				return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData);
681 			case SYNC_PRIMITIVE_BARRIER:
682 				return new BarrierTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
683 			case SYNC_PRIMITIVE_EVENT:
684 				return new EventTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
685 		}
686 
687 		DE_ASSERT(0);
688 		return DE_NULL;
689 	}
690 
private:
	// Legacy vs. VK_KHR_synchronization2 flavor of the barriers/submits (checked in checkSupport).
	SynchronizationType						m_type;
	// Buffer or image the write/read operation pair acts on.
	const ResourceDescription				m_resourceDesc;
	// Operation that writes the resource contents.
	const de::SharedPtr<OperationSupport>	m_writeOp;
	// Operation that reads the resource contents back.
	const de::SharedPtr<OperationSupport>	m_readOp;
	// Primitive (fence/semaphore/barrier/event) ordering the write before the read.
	const SyncPrimitive						m_syncPrimitive;
	// Shared pipeline cache data, reused across test cases.
	PipelineCacheData&						m_pipelineCacheData;
};
699 
// Parameter bundle forwarded through createTestGroup into createTests.
struct TestData
{
	SynchronizationType		type;				// synchronization API flavor the generated cases use
	PipelineCacheData*		pipelineCacheData;	// non-owning pointer to the shared pipeline cache data
};
705 
createTests(tcu::TestCaseGroup * group,TestData data)706 void createTests (tcu::TestCaseGroup* group, TestData data)
707 {
708 	tcu::TestContext& testCtx = group->getTestContext();
709 
710 	static const struct
711 	{
712 		const char*		name;
713 		SyncPrimitive	syncPrimitive;
714 		int				numOptions;
715 	} groups[] =
716 	{
717 		{ "fence",				SYNC_PRIMITIVE_FENCE,				0, },
718 		{ "binary_semaphore",	SYNC_PRIMITIVE_BINARY_SEMAPHORE,	0, },
719 		{ "timeline_semaphore",	SYNC_PRIMITIVE_TIMELINE_SEMAPHORE,	0, },
720 		{ "barrier",			SYNC_PRIMITIVE_BARRIER,				1, },
721 		{ "event",				SYNC_PRIMITIVE_EVENT,				1, },
722 	};
723 
724 	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
725 	{
726 		de::MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));
727 
728 		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
729 		for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
730 		{
731 			const OperationName	writeOp		= s_writeOps[writeOpNdx];
732 			const OperationName	readOp		= s_readOps[readOpNdx];
733 			const std::string	opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
734 			bool				empty		= true;
735 
736 			de::MovePtr<tcu::TestCaseGroup> opGroup	(new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));
737 
738 			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
739 			{
740 				const ResourceDescription&	resource	= s_resources[resourceNdx];
741 				std::string					name		= getResourceName(resource);
742 
743 				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
744 				{
745 					opGroup->addChild(new SyncTestCase(testCtx, name, "", data.type, groups[groupNdx].syncPrimitive, resource, writeOp, readOp, *data.pipelineCacheData));
746 					empty = false;
747 				}
748 			}
749 			if (!empty)
750 				synchGroup->addChild(opGroup.release());
751 		}
752 
753 		group->addChild(synchGroup.release());
754 	}
755 }
756 
757 } // anonymous
758 
createSynchronizedOperationSingleQueueTests(tcu::TestContext & testCtx,SynchronizationType type,PipelineCacheData & pipelineCacheData)759 tcu::TestCaseGroup* createSynchronizedOperationSingleQueueTests (tcu::TestContext& testCtx, SynchronizationType type, PipelineCacheData& pipelineCacheData)
760 {
761 	TestData data
762 	{
763 		type,
764 		&pipelineCacheData
765 	};
766 
767 	return createTestGroup(testCtx, "single_queue", "Synchronization of a memory-modifying operation", createTests, data);
768 }
769 
770 } // synchronization
771 } // vkt
772