/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization primitive tests with multi queue
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationOperationMultiQueueTests.hpp"
#include "vktCustomInstancesDevices.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "vkCmdUtil.hpp"
#include "vkSafetyCriticalUtil.hpp"
#include "deRandom.hpp"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "tcuTestLog.hpp"
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
#include "vktSynchronizationOperationResources.hpp"
#include "vktTestGroupUtil.hpp"
#include "tcuCommandLine.hpp"

#include <set>
#include <unordered_map>
namespace vkt
{

namespace synchronization
{

namespace
{
using namespace vk;
using de::MovePtr;
using de::SharedPtr;
using de::UniquePtr;

enum QueueType
{
    QUEUETYPE_WRITE,
    QUEUETYPE_READ
};

struct QueuePair
{
    QueuePair (const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
        : familyIndexWrite (familyWrite)
        , familyIndexRead  (familyRead)
        , queueWrite       (write)
        , queueRead        (read)
    {}

    deUint32 familyIndexWrite;
    deUint32 familyIndexRead;
    VkQueue  queueWrite;
    VkQueue  queueRead;
};

struct Queue
{
    Queue (const deUint32 familyOp, const VkQueue queueOp)
        : family (familyOp)
        , queue  (queueOp)
    {}

    deUint32 family;
    VkQueue  queue;
};

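// Graphics and compute queues implicitly support transfer operations, so treat
// VK_QUEUE_TRANSFER_BIT as available on any queue that exposes either of those bits.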
bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
{
    if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
        availableFlags |= VK_QUEUE_TRANSFER_BIT;

    return (availableFlags & neededFlags) != 0;
}

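// Singleton that creates one shared logical device exposing up to two queues from
// every available queue family. Test instances reuse it (keyed by synchronization
// type and timeline-semaphore usage) so the device is not recreated per test case.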
class MultiQueues
{
    struct QueueData
    {
        VkQueueFlags         flags;
        std::vector<VkQueue> queue;
    };

    MultiQueues (Context& context, SynchronizationType type, bool timelineSemaphore)
#ifdef CTS_USES_VULKANSC
        : m_instance (createCustomInstanceFromContext(context)),
#else
        :
#endif // CTS_USES_VULKANSC
        m_queueCount (0)
    {
#ifdef CTS_USES_VULKANSC
        const InstanceInterface& instanceDriver = m_instance.getDriver();
        const VkPhysicalDevice   physicalDevice = chooseDevice(instanceDriver, m_instance, context.getTestContext().getCommandLine());
        const VkInstance         instance       = m_instance;
#else
        const InstanceInterface& instanceDriver = context.getInstanceInterface();
        const VkPhysicalDevice   physicalDevice = context.getPhysicalDevice();
        const VkInstance         instance       = context.getInstance();
#endif // CTS_USES_VULKANSC
        const std::vector<VkQueueFamilyProperties> queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

        for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
        {
            addQueueIndex(queuePropertiesNdx,
                          std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
                          queueFamilyProperties[queuePropertiesNdx].queueFlags);
        }

        std::vector<VkDeviceQueueCreateInfo> queueInfos;
        const float queuePriorities[2] = { 1.0f, 1.0f }; // get max 2 queues from one family

        for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const VkDeviceQueueCreateInfo queueInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType             sType;
                DE_NULL,                                        // const void*                 pNext;
                (VkDeviceQueueCreateFlags)0u,                   // VkDeviceQueueCreateFlags    flags;
                it->first,                                      // deUint32                    queueFamilyIndex;
                static_cast<deUint32>(it->second.queue.size()), // deUint32                    queueCount;
                &queuePriorities[0]                             // const float*                pQueuePriorities;
            };
            queueInfos.push_back(queueInfo);
        }

        {
            VkPhysicalDeviceFeatures2                   createPhysicalFeature     { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
            VkPhysicalDeviceTimelineSemaphoreFeatures   timelineSemaphoreFeatures { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
            VkPhysicalDeviceSynchronization2FeaturesKHR synchronization2Features  { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
            void** nextPtr = &createPhysicalFeature.pNext;

            std::vector<const char*> deviceExtensions;
            if (timelineSemaphore)
            {
                if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_timeline_semaphore"))
                    deviceExtensions.push_back("VK_KHR_timeline_semaphore");
                addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
            }
            if (type == SynchronizationType::SYNCHRONIZATION2)
            {
                deviceExtensions.push_back("VK_KHR_synchronization2");
                addToChainVulkanStructure(&nextPtr, synchronization2Features);
            }

            void* pNext = &createPhysicalFeature;
#ifdef CTS_USES_VULKANSC
            VkDeviceObjectReservationCreateInfo memReservationInfo = context.getTestContext().getCommandLine().isSubProcess() ? context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
            memReservationInfo.pNext = pNext;
            pNext = &memReservationInfo;

            VkPhysicalDeviceVulkanSC10Features sc10Features = createDefaultSC10Features();
            sc10Features.pNext = pNext;
            pNext = &sc10Features;

            VkPipelineCacheCreateInfo pcCI;
            std::vector<VkPipelinePoolSize> poolSizes;
            if (context.getTestContext().getCommandLine().isSubProcess())
            {
                if (context.getResourceInterface()->getCacheDataSize() > 0)
                {
                    pcCI =
                    {
                        VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,            // VkStructureType               sType;
                        DE_NULL,                                                 // const void*                   pNext;
                        VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
                            VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags    flags;
                        context.getResourceInterface()->getCacheDataSize(),      // deUintptr                     initialDataSize;
                        context.getResourceInterface()->getCacheData()           // const void*                   pInitialData;
                    };
                    memReservationInfo.pipelineCacheCreateInfoCount = 1;
                    memReservationInfo.pPipelineCacheCreateInfos    = &pcCI;
                }

                poolSizes = context.getResourceInterface()->getPipelinePoolSizes();
                if (!poolSizes.empty())
                {
                    memReservationInfo.pipelinePoolSizeCount = deUint32(poolSizes.size());
                    memReservationInfo.pPipelinePoolSizes    = poolSizes.data();
                }
            }
#endif // CTS_USES_VULKANSC

            const VkDeviceCreateInfo deviceInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                      // VkStructureType                    sType;
                pNext,                                                     // const void*                        pNext;
                0u,                                                        // VkDeviceCreateFlags                flags;
                static_cast<deUint32>(queueInfos.size()),                  // deUint32                           queueCreateInfoCount;
                &queueInfos[0],                                            // const VkDeviceQueueCreateInfo*     pQueueCreateInfos;
                0u,                                                        // deUint32                           enabledLayerCount;
                DE_NULL,                                                   // const char* const*                 ppEnabledLayerNames;
                static_cast<deUint32>(deviceExtensions.size()),            // deUint32                           enabledExtensionCount;
                deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0], // const char* const*                 ppEnabledExtensionNames;
                DE_NULL                                                    // const VkPhysicalDeviceFeatures*    pEnabledFeatures;
            };

            m_logicalDevice = createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(), instance, instanceDriver, physicalDevice, &deviceInfo);
#ifndef CTS_USES_VULKANSC
            m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance, *m_logicalDevice, context.getUsedApiVersion()));
#else
            m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), context.getDeviceVulkanSC10Properties(), context.getDeviceProperties(), context.getUsedApiVersion()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
            m_allocator = MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instanceDriver, physicalDevice)));

            for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
                for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
                    m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
        }
    }

    void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
    {
        QueueData dataToPush;
        dataToPush.flags = flags;
        dataToPush.queue.resize(count);
        m_queues[queueFamilyIndex] = dataToPush;

        m_queueCount++;
    }

public:
    ~MultiQueues ()
    {
    }

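    // Builds every usable (write queue, read queue) pairing: for each family that can
    // execute the write operation and each family that can execute the read operation,
    // pick the first pair of distinct queues and move on to the next family combination.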
    std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead) const
    {
        std::map<deUint32, QueueData> queuesWrite;
        std::map<deUint32, QueueData> queuesRead;
        std::vector<QueuePair>        queuesPairs;

        for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const bool writeQueue = checkQueueFlags(it->second.flags, flagsWrite);
            const bool readQueue  = checkQueueFlags(it->second.flags, flagsRead);

            if (!(writeQueue || readQueue))
                continue;

            if (writeQueue && readQueue)
            {
                queuesWrite[it->first] = it->second;
                queuesRead[it->first]  = it->second;
            }
            else if (writeQueue)
                queuesWrite[it->first] = it->second;
            else if (readQueue)
                queuesRead[it->first] = it->second;
        }

        for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
        for (std::map<deUint32, QueueData>::iterator read = queuesRead.begin(); read != queuesRead.end(); ++read)
        {
            const int writeSize = static_cast<int>(write->second.queue.size());
            const int readSize  = static_cast<int>(read->second.queue.size());

            for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
            for (int readNdx = 0; readNdx < readSize; ++readNdx)
            {
                if (write->second.queue[writeNdx] != read->second.queue[readNdx])
                {
                    queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
                    writeNdx = readNdx = std::max(writeSize, readSize); // exit from both loops
                }
            }
        }

        if (queuesPairs.empty())
            TCU_THROW(NotSupportedError, "Queue not found");

        return queuesPairs;
    }

    Queue getDefaultQueue (const VkQueueFlags flagsOp) const
    {
        for (std::map<deUint32, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            if (checkQueueFlags(it->second.flags, flagsOp))
                return Queue(it->first, it->second.queue[0]);
        }

        TCU_THROW(NotSupportedError, "Queue not found");
    }

    Queue getQueue (const deUint32 familyIdx, const deUint32 queueIdx)
    {
        return Queue(familyIdx, m_queues[familyIdx].queue[queueIdx]);
    }

    VkQueueFlags getQueueFamilyFlags (const deUint32 familyIdx)
    {
        return m_queues[familyIdx].flags;
    }

    deUint32 queueFamilyCount (const deUint32 familyIdx)
    {
        return (deUint32)m_queues[familyIdx].queue.size();
    }

    deUint32 familyCount (void) const
    {
        return (deUint32)m_queues.size();
    }

    deUint32 totalQueueCount (void)
    {
        deUint32 count = 0;

        for (deUint32 familyIdx = 0; familyIdx < familyCount(); familyIdx++)
        {
            count += queueFamilyCount(familyIdx);
        }

        return count;
    }

    VkDevice getDevice (void) const
    {
        return *m_logicalDevice;
    }

    const DeviceInterface& getDeviceInterface (void) const
    {
        return *m_deviceDriver;
    }

    Allocator& getAllocator (void)
    {
        return *m_allocator;
    }

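    // Caches the singleton per configuration: the key packs the synchronization type
    // into the upper bits and the timeline-semaphore flag into bit 0.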
    static SharedPtr<MultiQueues> getInstance (Context& context, SynchronizationType type, bool timelineSemaphore)
    {
        deUint32 index = (deUint32)type << 1 | timelineSemaphore;
        if (!m_multiQueues[index])
            m_multiQueues[index] = SharedPtr<MultiQueues>(new MultiQueues(context, type, timelineSemaphore));

        return m_multiQueues[index];
    }

    static void destroy ()
    {
        m_multiQueues.clear();
    }

private:
#ifdef CTS_USES_VULKANSC
    CustomInstance                                   m_instance;
#endif // CTS_USES_VULKANSC
    Move<VkDevice>                                   m_logicalDevice;
#ifndef CTS_USES_VULKANSC
    de::MovePtr<vk::DeviceDriver>                    m_deviceDriver;
#else
    de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC
    MovePtr<Allocator>                               m_allocator;
    std::map<deUint32, QueueData>                    m_queues;
    deUint32                                         m_queueCount;

    static std::unordered_map<deUint32, SharedPtr<MultiQueues>> m_multiQueues;
};
std::unordered_map<deUint32, SharedPtr<MultiQueues>> MultiQueues::m_multiQueues;

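// Records the pipeline barrier used to hand the resource over between the two queues.
// With VK_SHARING_MODE_EXCLUSIVE and distinct families this is a queue family
// ownership transfer: the barrier (with src/dst family indices set) is recorded twice,
// as a release on the writing queue (secondQueue = false) and as an acquire on the
// reading queue (secondQueue = true), with the other side's stage/access masks
// neutralized on each half. Otherwise a regular execution/memory barrier suffices.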
void createBarrierMultiQueue (SynchronizationWrapperPtr synchronizationWrapper,
                              const VkCommandBuffer&    cmdBuffer,
                              const SyncInfo&           writeSync,
                              const SyncInfo&           readSync,
                              const Resource&           resource,
                              const deUint32            writeFamily,
                              const deUint32            readFamily,
                              const VkSharingMode       sharingMode,
                              const bool                secondQueue = false)
{
    if (resource.getType() == RESOURCE_TYPE_IMAGE)
    {
        VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
            secondQueue  ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)    : writeSync.stageMask,
            secondQueue  ? 0u                                                         : writeSync.accessMask,
            !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
            !secondQueue ? 0u                                                         : readSync.accessMask,
            writeSync.imageLayout,
            readSync.imageLayout,
            resource.getImage().handle,
            resource.getImage().subresourceRange
        );

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            imageMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
            imageMemoryBarrier2.dstQueueFamilyIndex = readFamily;

            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
        }
        else if (!secondQueue)
        {
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
        }
    }
    else
    {
        VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
            secondQueue  ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)    : writeSync.stageMask,
            secondQueue  ? 0u                                                         : writeSync.accessMask,
            !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
            !secondQueue ? 0u                                                         : readSync.accessMask,
            resource.getBuffer().handle,
            resource.getBuffer().offset,
            resource.getBuffer().size
        );

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            bufferMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
            bufferMemoryBarrier2.dstQueueFamilyIndex = readFamily;
        }

        VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
        synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
    }
}

class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, bool timelineSemaphore)
        : TestInstance   (context)
        , m_type         (type)
        , m_queues       (MultiQueues::getInstance(context, type, timelineSemaphore))
        , m_opContext    (new OperationContext(context, type, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator(), pipelineCacheData))
        , m_resourceDesc (resourceDesc)
        , m_writeOp      (writeOp)
        , m_readOp       (readOp)
    {
    }

protected:
    const SynchronizationType         m_type;
    const SharedPtr<MultiQueues>      m_queues;
    const UniquePtr<OperationContext> m_opContext;
    const ResourceDescription         m_resourceDesc;
    const OperationSupport&           m_writeOp;
    const OperationSupport&           m_readOp;
};

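// For every write/read queue pair: the write submission signals a binary semaphore
// that the read submission waits on, and the ownership-transfer barriers recorded in
// the two command buffers make the written data visible to the reading queue.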
class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
    BinarySemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance (context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
        , m_sharingMode    (sharingMode)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const DeviceInterface&       vk         = m_opContext->getDeviceInterface();
        const VkDevice               device     = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs = m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource>  resource (new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
            const UniquePtr<Operation> writeOp  (m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp   (m_readOp.build(*m_opContext, *resource));

            const Move<VkCommandPool> cmdPool[] =
            {
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
            };
            const Move<VkCommandBuffer> ptrCmdBuffer[] =
            {
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
            };
            const VkCommandBufferSubmitInfoKHR cmdBufferInfos[] =
            {
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ]),
            };
            const Unique<VkSemaphore> semaphore (createSemaphore(vk, device));
            VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
            VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
            SynchronizationWrapperPtr synchronizationWrapper[]
            {
                getSynchronizationWrapper(m_type, vk, DE_FALSE),
                getSynchronizationWrapper(m_type, vk, DE_FALSE),
            };

            synchronizationWrapper[QUEUETYPE_WRITE]->addSubmitInfo(
                0u,
                DE_NULL,
                1u,
                &cmdBufferInfos[QUEUETYPE_WRITE],
                1u,
                &signalSemaphoreSubmitInfo
            );
            synchronizationWrapper[QUEUETYPE_READ]->addSubmitInfo(
                1u,
                &waitSemaphoreSubmitInfo,
                1u,
                &cmdBufferInfos[QUEUETYPE_READ],
                0u,
                DE_NULL
            );

            const SyncInfo  writeSync      = writeOp->getOutSyncInfo();
            const SyncInfo  readSync       = readOp->getInSyncInfo();
            VkCommandBuffer writeCmdBuffer = cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
            VkCommandBuffer readCmdBuffer  = cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

            beginCommandBuffer(vk, writeCmdBuffer);
            writeOp->recordCommands(writeCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer(vk, writeCmdBuffer);

            beginCommandBuffer(vk, readCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands(readCmdBuffer);
            endCommandBuffer(vk, readCmdBuffer);

            VK_CHECK(synchronizationWrapper[QUEUETYPE_WRITE]->queueSubmit(queuePairs[pairNdx].queueWrite, DE_NULL));
            VK_CHECK(synchronizationWrapper[QUEUETYPE_READ]->queueSubmit(queuePairs[pairNdx].queueRead, DE_NULL));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

            {
                const Data expected = writeOp->getData();
                const Data actual   = readOp->getData();

#ifdef CTS_USES_VULKANSC
                if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
                {
                    if (isIndirectBuffer(m_resourceDesc.type))
                    {
                        const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
                        const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

                        if (actualValue < expectedValue)
                            return tcu::TestStatus::fail("Counter value is smaller than expected");
                    }
                    else
                    {
                        if (0 != deMemCmp(expected.data, actual.data, expected.size))
                            return tcu::TestStatus::fail("Memory contents don't match");
                    }
                }
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

template<typename T>
inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
{
    return SharedPtr<Move<T> >(new Move<T>(move));
}

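// Builds a chain of operations: the initial write, then a copy on (at least) every
// queue of the device, and the final read. Each link runs on its own queue and waits
// on the previous link's timeline point before signalling its own, so one timeline
// semaphore serializes the whole chain across queues and families.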
class TimelineSemaphoreTestInstance : public BaseTestInstance
{
public:
    TimelineSemaphoreTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const SharedPtr<OperationSupport>& writeOp, const SharedPtr<OperationSupport>& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance (context, type, resourceDesc, *writeOp, *readOp, pipelineCacheData, true)
        , m_sharingMode    (sharingMode)
    {
        deUint32              maxQueues = 0;
        std::vector<deUint32> queueFamilies;

        if (m_queues->totalQueueCount() < 2)
            TCU_THROW(NotSupportedError, "Not enough queues");

        for (deUint32 familyNdx = 0; familyNdx < m_queues->familyCount(); familyNdx++)
        {
            maxQueues = std::max(m_queues->queueFamilyCount(familyNdx), maxQueues);
            queueFamilies.push_back(familyNdx);
        }

        // Create a chain of operations copying data from one resource
        // to another across at least every single queue of the system
        // at least once. Each of the operations will execute with a
        // dependency on the previous one using timeline points.
        m_opSupports.push_back(writeOp);
        m_opQueues.push_back(m_queues->getDefaultQueue(writeOp->getQueueFlags(*m_opContext)));

        for (deUint32 queueIdx = 0; queueIdx < maxQueues; queueIdx++)
        {
            for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
            {
                for (deUint32 copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++)
                {
                    if (isResourceSupported(s_copyOps[copyOpIdx], resourceDesc))
                    {
                        SharedPtr<OperationSupport> opSupport (makeOperationSupport(s_copyOps[copyOpIdx], m_resourceDesc).release());

                        if (!checkQueueFlags(opSupport->getQueueFlags(*m_opContext), m_queues->getQueueFamilyFlags(familyIdx)))
                            continue;

                        m_opSupports.push_back(opSupport);
                        m_opQueues.push_back(m_queues->getQueue(familyIdx, queueIdx % m_queues->queueFamilyCount(familyIdx)));
                        break;
                    }
                }
            }
        }

        m_opSupports.push_back(readOp);
        m_opQueues.push_back(m_queues->getDefaultQueue(readOp->getQueueFlags(*m_opContext)));

        // Now create the resources with the usage associated to the
        // operations performed on each resource.
        for (deUint32 opIdx = 0; opIdx < (m_opSupports.size() - 1); opIdx++)
        {
            deUint32 usage = m_opSupports[opIdx]->getOutResourceUsageFlags() | m_opSupports[opIdx + 1]->getInResourceUsageFlags();

            m_resources.push_back(SharedPtr<Resource>(new Resource(*m_opContext, m_resourceDesc, usage, m_sharingMode, queueFamilies)));
        }

        // Finally create the operations using the resources.
        m_ops.push_back(SharedPtr<Operation>(m_opSupports[0]->build(*m_opContext, *m_resources[0]).release()));
        for (deUint32 opIdx = 1; opIdx < (m_opSupports.size() - 1); opIdx++)
            m_ops.push_back(SharedPtr<Operation>(m_opSupports[opIdx]->build(*m_opContext, *m_resources[opIdx - 1], *m_resources[opIdx]).release()));
        m_ops.push_back(SharedPtr<Operation>(m_opSupports[m_opSupports.size() - 1]->build(*m_opContext, *m_resources.back()).release()));
    }

    tcu::TestStatus iterate (void)
    {
        const DeviceInterface&                          vk        = m_opContext->getDeviceInterface();
        const VkDevice                                  device    = m_opContext->getDevice();
        de::Random                                      rng       (1234);
        const Unique<VkSemaphore>                       semaphore (createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
        std::vector<SharedPtr<Move<VkCommandPool> > >   cmdPools;
        std::vector<SharedPtr<Move<VkCommandBuffer> > > ptrCmdBuffers;
        std::vector<VkCommandBufferSubmitInfoKHR>       cmdBufferInfos;
        std::vector<deUint64>                           timelineValues;

        cmdPools.resize(m_queues->familyCount());
        for (deUint32 familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
            cmdPools[familyIdx] = makeVkSharedPtr(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, familyIdx));

        ptrCmdBuffers.resize(m_ops.size());
        cmdBufferInfos.resize(m_ops.size());
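        // Assign each operation a strictly increasing timeline value by adding a
        // random 1..256 increment to the previous value, so every wait/signal pair
        // is ordered along the single timeline semaphore.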
        for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
        {
            deUint64 increment = 1 + rng.getUint8();

            ptrCmdBuffers[opIdx]  = makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools[m_opQueues[opIdx].family]));
            cmdBufferInfos[opIdx] = makeCommonCommandBufferSubmitInfo(**ptrCmdBuffers[opIdx]);

            timelineValues.push_back(timelineValues.empty() ? increment : (timelineValues.back() + increment));
        }

        for (deUint32 opIdx = 0; opIdx < m_ops.size(); opIdx++)
        {
            VkCommandBuffer cmdBuffer = cmdBufferInfos[opIdx].commandBuffer;
            VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, (opIdx == 0 ? 0u : timelineValues[opIdx - 1]), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
            VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, timelineValues[opIdx], VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
            SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, DE_TRUE);

            synchronizationWrapper->addSubmitInfo(
                opIdx == 0 ? 0u : 1u,
                &waitSemaphoreSubmitInfo,
                1u,
                &cmdBufferInfos[opIdx],
                1u,
                &signalSemaphoreSubmitInfo,
                opIdx == 0 ? DE_FALSE : DE_TRUE,
                DE_TRUE
            );

            beginCommandBuffer(vk, cmdBuffer);

            if (opIdx > 0)
            {
                const SyncInfo  writeSync = m_ops[opIdx - 1]->getOutSyncInfo();
                const SyncInfo  readSync  = m_ops[opIdx]->getInSyncInfo();
                const Resource& resource  = *m_resources[opIdx - 1].get();

                createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx - 1].family, m_opQueues[opIdx].family, m_sharingMode, true);
            }

            m_ops[opIdx]->recordCommands(cmdBuffer);

            if (opIdx < (m_ops.size() - 1))
            {
                const SyncInfo  writeSync = m_ops[opIdx]->getOutSyncInfo();
                const SyncInfo  readSync  = m_ops[opIdx + 1]->getInSyncInfo();
                const Resource& resource  = *m_resources[opIdx].get();

                createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource, m_opQueues[opIdx].family, m_opQueues[opIdx + 1].family, m_sharingMode);
            }

            endCommandBuffer(vk, cmdBuffer);

            VK_CHECK(synchronizationWrapper->queueSubmit(m_opQueues[opIdx].queue, DE_NULL));
        }

        VK_CHECK(vk.queueWaitIdle(m_opQueues.back().queue));

        {
            const Data expected = m_ops.front()->getData();
            const Data actual   = m_ops.back()->getData();

            if (isIndirectBuffer(m_resourceDesc.type))
            {
                const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
                const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        // Make the validation layers happy.
        for (deUint32 opIdx = 0; opIdx < m_opQueues.size(); opIdx++)
            VK_CHECK(vk.queueWaitIdle(m_opQueues[opIdx].queue));

        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode                       m_sharingMode;
    std::vector<SharedPtr<OperationSupport> > m_opSupports;
    std::vector<SharedPtr<Operation> >        m_ops;
    std::vector<SharedPtr<Resource> >         m_resources;
    std::vector<Queue>                        m_opQueues;
};

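// Same write/read pairing as the binary semaphore case, but ordered with fences:
// the write command buffer is submitted and fully waited on (submitCommandsAndWait)
// before the read command buffer is even submitted.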
class FenceTestInstance : public BaseTestInstance
{
public:
    FenceTestInstance (Context& context, SynchronizationType type, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance (context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
        , m_sharingMode    (sharingMode)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const DeviceInterface&       vk         = m_opContext->getDeviceInterface();
        const VkDevice               device     = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs = m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource>  resource (new Resource(*m_opContext, m_resourceDesc, m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
            const UniquePtr<Operation> writeOp  (m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp   (m_readOp.build(*m_opContext, *resource));
            const Move<VkCommandPool> cmdPool[]
            {
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
            };
            const Move<VkCommandBuffer> ptrCmdBuffer[]
            {
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
            };
            const VkCommandBufferSubmitInfoKHR cmdBufferInfos[]
            {
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ])
            };
            SynchronizationWrapperPtr synchronizationWrapper[]
            {
                getSynchronizationWrapper(m_type, vk, DE_FALSE),
                getSynchronizationWrapper(m_type, vk, DE_FALSE),
            };
            const SyncInfo  writeSync      = writeOp->getOutSyncInfo();
            const SyncInfo  readSync       = readOp->getInSyncInfo();
            VkCommandBuffer writeCmdBuffer = cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
            VkCommandBuffer readCmdBuffer  = cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

            beginCommandBuffer(vk, writeCmdBuffer);
            writeOp->recordCommands(writeCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer(vk, writeCmdBuffer);

            submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_WRITE], vk, device, queuePairs[pairNdx].queueWrite, writeCmdBuffer);

            beginCommandBuffer(vk, readCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands(readCmdBuffer);
            endCommandBuffer(vk, readCmdBuffer);

            submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_READ], vk, device, queuePairs[pairNdx].queueRead, readCmdBuffer);

            {
                const Data expected = writeOp->getData();
                const Data actual   = readOp->getData();

#ifdef CTS_USES_VULKANSC
                if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
                {
                    if (isIndirectBuffer(m_resourceDesc.type))
                    {
                        const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
                        const deUint32 actualValue   = reinterpret_cast<const deUint32*>(actual.data)[0];

                        if (actualValue < expectedValue)
                            return tcu::TestStatus::fail("Counter value is smaller than expected");
                    }
                    else
                    {
                        if (0 != deMemCmp(expected.data, actual.data, expected.size))
                            return tcu::TestStatus::fail("Memory contents don't match");
                    }
                }
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

class BaseTestCase : public TestCase
{
public:
    BaseTestCase (tcu::TestContext&         testCtx,
                  const std::string&        name,
                  SynchronizationType       type,
                  const SyncPrimitive       syncPrimitive,
                  const ResourceDescription resourceDesc,
                  const OperationName       writeOp,
                  const OperationName       readOp,
                  const VkSharingMode       sharingMode,
                  PipelineCacheData&        pipelineCacheData)
        : TestCase            (testCtx, name)
        , m_type              (type)
        , m_resourceDesc      (resourceDesc)
        , m_writeOp           (makeOperationSupport(writeOp, resourceDesc).release())
        , m_readOp            (makeOperationSupport(readOp, resourceDesc).release())
        , m_syncPrimitive     (syncPrimitive)
        , m_sharingMode       (sharingMode)
        , m_pipelineCacheData (pipelineCacheData)
    {
    }

    void initPrograms (SourceCollections& programCollection) const
    {
        m_writeOp->initPrograms(programCollection);
        m_readOp->initPrograms(programCollection);

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
        {
            for (deUint32 copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
            {
                if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
                    makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
            }
        }
    }

    void checkSupport (Context& context) const
    {
        if (m_type == SynchronizationType::SYNCHRONIZATION2)
            context.requireDeviceFunctionality("VK_KHR_synchronization2");
        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
            context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");

        const InstanceInterface&                   instance              = context.getInstanceInterface();
        const VkPhysicalDevice                     physicalDevice        = context.getPhysicalDevice();
        const std::vector<VkQueueFamilyProperties> queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
        if (m_sharingMode == VK_SHARING_MODE_CONCURRENT && queueFamilyProperties.size() < 2)
            TCU_THROW(NotSupportedError, "Concurrent requires more than 1 queue family");

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
            !context.getTimelineSemaphoreFeatures().timelineSemaphore)
            TCU_THROW(NotSupportedError, "Timeline semaphore not supported");

        if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
        {
            VkImageFormatProperties imageFormatProperties;
            const deUint32          usage        = m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
            const VkResult          formatResult = instance.getPhysicalDeviceImageFormatProperties(physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage, (VkImageCreateFlags)0, &imageFormatProperties);

            if (formatResult != VK_SUCCESS)
                TCU_THROW(NotSupportedError, "Image format is not supported");

            if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
                TCU_THROW(NotSupportedError, "Requested sample count is not supported");
        }
    }

    TestInstance* createInstance (Context& context) const
    {
        switch (m_syncPrimitive)
        {
            case SYNC_PRIMITIVE_FENCE:
                return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
            case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
                return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
            case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
                return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp, m_pipelineCacheData, m_sharingMode);
            default:
                DE_ASSERT(0);
                return DE_NULL;
        }
    }

private:
    const SynchronizationType         m_type;
    const ResourceDescription         m_resourceDesc;
    const SharedPtr<OperationSupport> m_writeOp;
    const SharedPtr<OperationSupport> m_readOp;
    const SyncPrimitive               m_syncPrimitive;
    const VkSharingMode               m_sharingMode;
    PipelineCacheData&                m_pipelineCacheData;
};

struct TestData
{
    SynchronizationType type;
    PipelineCacheData*  pipelineCacheData;
};

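// Builds the test tree: <sync primitive> / <writeOp>_<readOp> / <resource>_<sharing mode>.
// Each leaf pairs one write operation with one read operation on one resource, in both
// exclusive and concurrent queue family sharing modes.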
void createTests (tcu::TestCaseGroup* group, TestData data)
{
    tcu::TestContext& testCtx = group->getTestContext();

    static const struct
    {
        const char*   name;
        SyncPrimitive syncPrimitive;
        int           numOptions;
    } groups[] =
    {
        { "fence",              SYNC_PRIMITIVE_FENCE,              1 },
        { "binary_semaphore",   SYNC_PRIMITIVE_BINARY_SEMAPHORE,   1 },
        { "timeline_semaphore", SYNC_PRIMITIVE_TIMELINE_SEMAPHORE, 1 }
    };

    for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
    {
        MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name));

        for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
        for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
        {
            const OperationName writeOp     = s_writeOps[writeOpNdx];
            const OperationName readOp      = s_readOps[readOpNdx];
            const std::string   opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
            bool                empty       = true;

            MovePtr<tcu::TestCaseGroup> opGroup (new tcu::TestCaseGroup(testCtx, opGroupName.c_str()));

            for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
            for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
            {
                const ResourceDescription& resource = s_resources[resourceNdx];
                if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
                {
                    std::string   name        = getResourceName(resource);
                    VkSharingMode sharingMode = VK_SHARING_MODE_EXCLUSIVE;

                    // queue family sharing mode used for resource
                    if (optionNdx)
                    {
                        name += "_concurrent";
                        sharingMode = VK_SHARING_MODE_CONCURRENT;
                    }
                    else
                        name += "_exclusive";

                    opGroup->addChild(new BaseTestCase(testCtx, name, data.type, groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *data.pipelineCacheData));
                    empty = false;
                }
            }
            if (!empty)
                synchGroup->addChild(opGroup.release());
        }
        group->addChild(synchGroup.release());
    }
}

void cleanupGroup (tcu::TestCaseGroup* group, TestData data)
{
    DE_UNREF(group);
    DE_UNREF(data.pipelineCacheData);
    // Destroy singleton object
    MultiQueues::destroy();
}

} // anonymous

tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, SynchronizationType type, PipelineCacheData& pipelineCacheData)
{
    TestData data
    {
        type,
        &pipelineCacheData
    };

    // Synchronization of a memory-modifying operation
    return createTestGroup(testCtx, "multi_queue", createTests, data, cleanupGroup);
}

} // synchronization
} // vkt