/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization primitive tests with multi queue
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationOperationMultiQueueTests.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "deUniquePtr.hpp"
#include "tcuTestLog.hpp"
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
#include "vktTestGroupUtil.hpp"

namespace vkt
{
namespace synchronization
{
namespace
{
using namespace vk;
using de::MovePtr;
using de::UniquePtr;

enum QueueType
{
	QUEUETYPE_WRITE,
	QUEUETYPE_READ
};

struct QueuePair
{
	QueuePair (const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
		: familyIndexWrite	(familyWrite)
		, familyIndexRead	(familyRead)
		, queueWrite		(write)
		, queueRead			(read)
	{}

	deUint32	familyIndexWrite;
	deUint32	familyIndexRead;
	VkQueue		queueWrite;
	VkQueue		queueRead;
};

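// Returns true if a queue with "availableFlags" can execute operations that require "neededFlags".
// Per the Vulkan specification, queues supporting graphics or compute operations implicitly support
// transfer operations, so VK_QUEUE_TRANSFER_BIT is added before the check.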
bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
{
	if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
		availableFlags |= VK_QUEUE_TRANSFER_BIT;

	return (availableFlags & neededFlags) != 0;
}

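// Creates a logical device exposing up to two queues from every available queue family and hands
// out write/read queue pairs that match the queue capabilities required by the test operations.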
class MultiQueues
{
	struct QueueData
	{
		VkQueueFlags			flags;
		std::vector<VkQueue>	queue;
	};

public:
	MultiQueues (const Context& context)
	{
		const InstanceInterface&					instance				= context.getInstanceInterface();
		const VkPhysicalDevice						physicalDevice			= context.getPhysicalDevice();
		const std::vector<VkQueueFamilyProperties>	queueFamilyProperties	= getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);

		for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
		{
			addQueueIndex(queuePropertiesNdx,
						  std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
						  queueFamilyProperties[queuePropertiesNdx].queueFlags);
		}

		std::vector<VkDeviceQueueCreateInfo>	queueInfos;
		const float								queuePriorities[2] = { 1.0f, 1.0f };	// get at most two queues from one family

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const VkDeviceQueueCreateInfo queueInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,			// VkStructureType			sType;
				DE_NULL,											// const void*				pNext;
				(VkDeviceQueueCreateFlags)0u,						// VkDeviceQueueCreateFlags	flags;
				it->first,											// deUint32					queueFamilyIndex;
				static_cast<deUint32>(it->second.queue.size()),		// deUint32					queueCount;
				&queuePriorities[0]									// const float*				pQueuePriorities;
			};
			queueInfos.push_back(queueInfo);
		}

		{
			const std::vector<std::string>&	deviceExtensions = context.getDeviceExtensions();
			std::vector<const char*>		charDevExtensions;

			for (size_t ndx = 0; ndx < deviceExtensions.size(); ++ndx)
				charDevExtensions.push_back(deviceExtensions[ndx].c_str());

			const VkDeviceCreateInfo deviceInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,				// VkStructureType					sType;
				DE_NULL,											// const void*						pNext;
				0u,													// VkDeviceCreateFlags				flags;
				static_cast<deUint32>(queueInfos.size()),			// deUint32							queueCreateInfoCount;
				&queueInfos[0],										// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
				0u,													// deUint32							enabledLayerCount;
				DE_NULL,											// const char* const*				ppEnabledLayerNames;
				static_cast<deUint32>(deviceExtensions.size()),		// deUint32							enabledExtensionCount;
				&charDevExtensions[0],								// const char* const*				ppEnabledExtensionNames;
				&context.getDeviceFeatures()						// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
			};

			m_logicalDevice	= createDevice(instance, physicalDevice, &deviceInfo);
			m_deviceDriver	= MovePtr<DeviceDriver>(new DeviceDriver(instance, *m_logicalDevice));
			m_allocator		= MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, physicalDevice)));

			for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
			for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
				m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
		}
	}

	void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
	{
		QueueData dataToPush;
		dataToPush.flags = flags;
		dataToPush.queue.resize(count);
		m_queues[queueFamilyIndex] = dataToPush;
	}

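	// Returns pairs of distinct queues, one pair for each combination of a queue family supporting
	// "flagsWrite" and a queue family supporting "flagsRead". Throws NotSupportedError if no such
	// pair exists.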
	std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead)
	{
		std::map<deUint32, QueueData>	queuesWrite;
		std::map<deUint32, QueueData>	queuesRead;
		std::vector<QueuePair>			queuesPairs;

		for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
		{
			const bool writeQueue	= checkQueueFlags(it->second.flags, flagsWrite);
			const bool readQueue	= checkQueueFlags(it->second.flags, flagsRead);

			if (!(writeQueue || readQueue))
				continue;

			if (writeQueue && readQueue)
			{
				queuesWrite[it->first]	= it->second;
				queuesRead[it->first]	= it->second;
			}
			else if (writeQueue)
				queuesWrite[it->first]	= it->second;
			else if (readQueue)
				queuesRead[it->first]	= it->second;
		}

		for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
		for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
		{
			const int writeSize	= static_cast<int>(write->second.queue.size());
			const int readSize	= static_cast<int>(read->second.queue.size());

			for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
			for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
			{
				if (write->second.queue[writeNdx] != read->second.queue[readNdx])
				{
					queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
					writeNdx = readNdx = std::max(writeSize, readSize);	// exit from the loops
				}
			}
		}

		if (queuesPairs.empty())
			TCU_THROW(NotSupportedError, "Queue not found");

		return queuesPairs;
	}

	VkDevice getDevice (void) const
	{
		return *m_logicalDevice;
	}

	const DeviceInterface& getDeviceInterface (void) const
	{
		return *m_deviceDriver;
	}

	Allocator& getAllocator (void)
	{
		return *m_allocator;
	}

private:
	Move<VkDevice>					m_logicalDevice;
	MovePtr<DeviceDriver>			m_deviceDriver;
	MovePtr<Allocator>				m_allocator;
	std::map<deUint32, QueueData>	m_queues;
};

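// Records the pipeline barrier that synchronizes the write and read operations on "resource".
// With VK_SHARING_MODE_EXCLUSIVE and different queue families, the barrier also performs a queue
// family ownership transfer: it is recorded once on the write (release) queue and once on the
// read (acquire) queue, with "secondQueue" selecting the acquire variant.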
void createBarrierMultiQueue (const DeviceInterface&	vk,
							  const VkCommandBuffer&	cmdBuffer,
							  const SyncInfo&			writeSync,
							  const SyncInfo&			readSync,
							  const Resource&			resource,
							  const deUint32			writeFamily,
							  const deUint32			readFamily,
							  const VkSharingMode		sharingMode,
							  const bool				secondQueue = false)
{
	if (resource.getType() == RESOURCE_TYPE_IMAGE)
	{
		VkImageMemoryBarrier barrier = makeImageMemoryBarrier(writeSync.accessMask, readSync.accessMask,
			writeSync.imageLayout, readSync.imageLayout, resource.getImage().handle, resource.getImage().subresourceRange);

		if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
		{
			barrier.srcQueueFamilyIndex	= writeFamily;
			barrier.dstQueueFamilyIndex	= readFamily;
			if (secondQueue)
			{
				barrier.oldLayout		= barrier.newLayout;
				barrier.srcAccessMask	= barrier.dstAccessMask;
			}
			vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
		}
		else if (!secondQueue)
			vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
	}
	else if ((resource.getType() == RESOURCE_TYPE_BUFFER || isIndirectBuffer(resource.getType())) &&
			 writeFamily != readFamily &&
			 VK_SHARING_MODE_EXCLUSIVE == sharingMode)
	{
		const VkBufferMemoryBarrier barrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
			DE_NULL,									// const void*		pNext;
			writeSync.accessMask,						// VkAccessFlags	srcAccessMask;
			readSync.accessMask,						// VkAccessFlags	dstAccessMask;
			writeFamily,								// deUint32			srcQueueFamilyIndex;
			readFamily,									// deUint32			dstQueueFamilyIndex;
			resource.getBuffer().handle,				// VkBuffer			buffer;
			resource.getBuffer().offset,				// VkDeviceSize		offset;
			resource.getBuffer().size,					// VkDeviceSize		size;
		};
		vk.cmdPipelineBarrier(cmdBuffer, writeSync.stageMask, readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 1u, (const VkBufferMemoryBarrier*)&barrier, 0u, (const VkImageMemoryBarrier*)DE_NULL);
	}
}

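// Base class for the multi-queue test instances: owns the multi-queue device and the operation
// context shared by the write and read operations.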
class BaseTestInstance : public TestInstance
{
public:
	BaseTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
		: TestInstance		(context)
		, m_queues			(new MultiQueues(context))
		, m_opContext		(new OperationContext(context, pipelineCacheData, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator()))
		, m_resourceDesc	(resourceDesc)
		, m_writeOp			(writeOp)
		, m_readOp			(readOp)
	{
	}

protected:
	const UniquePtr<MultiQueues>		m_queues;
	const UniquePtr<OperationContext>	m_opContext;
	const ResourceDescription			m_resourceDesc;
	const OperationSupport&				m_writeOp;
	const OperationSupport&				m_readOp;
};

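// Submits the write and read operations to two different queues and synchronizes them with a
// semaphore that is signaled by the write batch and waited on by the read batch.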
class SemaphoreTestInstance : public BaseTestInstance
{
public:
	SemaphoreTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, resourceDesc, writeOp, readOp, pipelineCacheData)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			const UniquePtr<Resource>	resource	(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
			const UniquePtr<Operation>	writeOp		(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>	readOp		(m_readOp.build (*m_opContext, *resource));

			const Move<VkCommandPool>	cmdPool[]		=
			{
				makeCommandPool(vk, device, queuePairs[pairNdx].familyIndexWrite),
				makeCommandPool(vk, device, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>	ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBuffer		cmdBuffers[]	=
			{
				*ptrCmdBuffer[QUEUETYPE_WRITE],
				*ptrCmdBuffer[QUEUETYPE_READ]
			};
			const VkSemaphoreCreateInfo	semaphoreInfo	=
			{
				VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,	// VkStructureType			sType;
				DE_NULL,									// const void*				pNext;
				0u											// VkSemaphoreCreateFlags	flags;
			};
			const Unique<VkSemaphore>	semaphore		(createSemaphore(vk, device, &semaphoreInfo, DE_NULL));
			const VkPipelineStageFlags	stageBits[]		= { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT };
			const VkSubmitInfo			submitInfo[]	=
			{
				{
					VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType				sType;
					DE_NULL,								// const void*					pNext;
					0u,										// deUint32						waitSemaphoreCount;
					DE_NULL,								// const VkSemaphore*			pWaitSemaphores;
					(const VkPipelineStageFlags*)DE_NULL,	// const VkPipelineStageFlags*	pWaitDstStageMask;
					1u,										// deUint32						commandBufferCount;
					&cmdBuffers[QUEUETYPE_WRITE],			// const VkCommandBuffer*		pCommandBuffers;
					1u,										// deUint32						signalSemaphoreCount;
					&semaphore.get(),						// const VkSemaphore*			pSignalSemaphores;
				},
				{
					VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType				sType;
					DE_NULL,								// const void*					pNext;
					1u,										// deUint32						waitSemaphoreCount;
					&semaphore.get(),						// const VkSemaphore*			pWaitSemaphores;
					stageBits,								// const VkPipelineStageFlags*	pWaitDstStageMask;
					1u,										// deUint32						commandBufferCount;
					&cmdBuffers[QUEUETYPE_READ],			// const VkCommandBuffer*		pCommandBuffers;
					0u,										// deUint32						signalSemaphoreCount;
					DE_NULL,								// const VkSemaphore*			pSignalSemaphores;
				}
			};
			const SyncInfo				writeSync		= writeOp->getSyncInfo();
			const SyncInfo				readSync		= readOp->getSyncInfo();

			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);
			writeOp->recordCommands	(cmdBuffers[QUEUETYPE_WRITE]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);

			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(cmdBuffers[QUEUETYPE_READ]);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);

			VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueWrite, 1u, &submitInfo[QUEUETYPE_WRITE], DE_NULL));
			VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueRead, 1u, &submitInfo[QUEUETYPE_READ], DE_NULL));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
			VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};

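// Submits the write operation on one queue and waits for it to complete (submitCommandsAndWait
// signals and waits on a fence) before submitting the read operation on the other queue.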
class FenceTestInstance : public BaseTestInstance
{
public:
	FenceTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
		: BaseTestInstance	(context, resourceDesc, writeOp, readOp, pipelineCacheData)
		, m_sharingMode		(sharingMode)
	{
	}

	tcu::TestStatus iterate (void)
	{
		const DeviceInterface&			vk			= m_opContext->getDeviceInterface();
		const VkDevice					device		= m_opContext->getDevice();
		const std::vector<QueuePair>	queuePairs	= m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

		for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
		{
			const UniquePtr<Resource>	resource		(new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
			const UniquePtr<Operation>	writeOp			(m_writeOp.build(*m_opContext, *resource));
			const UniquePtr<Operation>	readOp			(m_readOp.build(*m_opContext, *resource));
			const Move<VkCommandPool>	cmdPool[]		=
			{
				makeCommandPool(vk, device, queuePairs[pairNdx].familyIndexWrite),
				makeCommandPool(vk, device, queuePairs[pairNdx].familyIndexRead)
			};
			const Move<VkCommandBuffer>	ptrCmdBuffer[]	=
			{
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
				makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
			};
			const VkCommandBuffer		cmdBuffers[]	=
			{
				*ptrCmdBuffer[QUEUETYPE_WRITE],
				*ptrCmdBuffer[QUEUETYPE_READ]
			};
			const SyncInfo				writeSync		= writeOp->getSyncInfo();
			const SyncInfo				readSync		= readOp->getSyncInfo();

			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);
			writeOp->recordCommands	(cmdBuffers[QUEUETYPE_WRITE]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_WRITE]);

			submitCommandsAndWait	(vk, device, queuePairs[pairNdx].queueWrite, cmdBuffers[QUEUETYPE_WRITE]);

			beginCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);
			createBarrierMultiQueue	(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
			readOp->recordCommands	(cmdBuffers[QUEUETYPE_READ]);
			endCommandBuffer		(vk, cmdBuffers[QUEUETYPE_READ]);

			submitCommandsAndWait	(vk, device, queuePairs[pairNdx].queueRead, cmdBuffers[QUEUETYPE_READ]);

			{
				const Data	expected	= writeOp->getData();
				const Data	actual		= readOp->getData();

				if (0 != deMemCmp(expected.data, actual.data, expected.size))
					return tcu::TestStatus::fail("Memory contents don't match");
			}
		}
		return tcu::TestStatus::pass("OK");
	}

private:
	const VkSharingMode	m_sharingMode;
};

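// Test case wrapper that builds the requested write/read operation support objects and creates
// either a fence-based or a semaphore-based test instance.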
class BaseTestCase : public TestCase
{
public:
	BaseTestCase (tcu::TestContext&			testCtx,
				  const std::string&		name,
				  const std::string&		description,
				  const SyncPrimitive		syncPrimitive,
				  const ResourceDescription	resourceDesc,
				  const OperationName		writeOp,
				  const OperationName		readOp,
				  const VkSharingMode		sharingMode,
				  PipelineCacheData&		pipelineCacheData)
		: TestCase				(testCtx, name, description)
		, m_resourceDesc		(resourceDesc)
		, m_writeOp				(makeOperationSupport(writeOp, resourceDesc))
		, m_readOp				(makeOperationSupport(readOp, resourceDesc))
		, m_syncPrimitive		(syncPrimitive)
		, m_sharingMode			(sharingMode)
		, m_pipelineCacheData	(pipelineCacheData)
	{
	}

	void initPrograms (SourceCollections& programCollection) const
	{
		m_writeOp->initPrograms(programCollection);
		m_readOp->initPrograms(programCollection);
	}

	TestInstance* createInstance (Context& context) const
	{
		switch (m_syncPrimitive)
		{
			case SYNC_PRIMITIVE_FENCE:
				return new FenceTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
			case SYNC_PRIMITIVE_SEMAPHORE:
				return new SemaphoreTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
			default:
				DE_ASSERT(0);
				return DE_NULL;
		}
	}

private:
	const ResourceDescription			m_resourceDesc;
	const UniquePtr<OperationSupport>	m_writeOp;
	const UniquePtr<OperationSupport>	m_readOp;
	const SyncPrimitive					m_syncPrimitive;
	const VkSharingMode					m_sharingMode;
	PipelineCacheData&					m_pipelineCacheData;
};

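// Populates the test group: for each synchronization primitive and each write/read operation pair,
// a test case is added for every supported resource in both exclusive and concurrent sharing modes.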
void createTests (tcu::TestCaseGroup* group, PipelineCacheData* pipelineCacheData)
{
	tcu::TestContext& testCtx = group->getTestContext();

	static const struct
	{
		const char*		name;
		SyncPrimitive	syncPrimitive;
		int				numOptions;
	} groups[] =
	{
		{ "fence",		SYNC_PRIMITIVE_FENCE,		1 },
		{ "semaphore",	SYNC_PRIMITIVE_SEMAPHORE,	1 }
	};

	for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
	{
		MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));

		for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
		for (int readOpNdx  = 0; readOpNdx  < DE_LENGTH_OF_ARRAY(s_readOps);  ++readOpNdx)
		{
			const OperationName	writeOp		= s_writeOps[writeOpNdx];
			const OperationName	readOp		= s_readOps[readOpNdx];
			const std::string	opGroupName	= getOperationName(writeOp) + "_" + getOperationName(readOp);
			bool				empty		= true;

			MovePtr<tcu::TestCaseGroup> opGroup (new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));

			for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
			for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
			{
				const ResourceDescription&	resource	= s_resources[resourceNdx];
				std::string					name		= getResourceName(resource);
				VkSharingMode				sharingMode	= VK_SHARING_MODE_EXCLUSIVE;

				// queue family sharing mode used for the resource
				if (optionNdx)
				{
					name += "_concurrent";
					sharingMode = VK_SHARING_MODE_CONCURRENT;
				}
				else
					name += "_exclusive";

				if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
				{
					opGroup->addChild(new BaseTestCase(testCtx, name, "", groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *pipelineCacheData));
					empty = false;
				}
			}
			if (!empty)
				synchGroup->addChild(opGroup.release());
		}
		group->addChild(synchGroup.release());
	}
}

} // anonymous

tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, PipelineCacheData& pipelineCacheData)
{
	return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, &pipelineCacheData);
}

} // synchronization
} // vkt