/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Concurrent draw tests
 * Tests that create a queue for rendering as well as a queue for
 * compute, trigger work on both queues at the same time, and
 * finally verify that the results are as expected.
 *//*--------------------------------------------------------------------*/

#include "vktDrawConcurrentTests.hpp"

#include "vktCustomInstancesDevices.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktDrawTestCaseUtil.hpp"
#include "../compute/vktComputeTestsUtil.hpp"

#include "vktDrawBaseClass.hpp"

#include "tcuTestLog.hpp"
#include "tcuResource.hpp"
#include "tcuImageCompare.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuRGBA.hpp"

#include "vkDefs.hpp"
#include "vkCmdUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkSafetyCriticalUtil.hpp"
#include "vkBufferWithMemory.hpp"

#include "deRandom.hpp"

using namespace vk;

namespace vkt
{
namespace Draw
{
namespace
{

class ConcurrentDraw : public DrawTestsBaseClass
{
public:
    typedef TestSpecBase TestSpec;

    ConcurrentDraw (Context &context, TestSpec testSpec);
    virtual tcu::TestStatus iterate (void);
};

ConcurrentDraw::ConcurrentDraw (Context &context, TestSpec testSpec)
    : DrawTestsBaseClass(context, testSpec.shaders[glu::SHADERTYPE_VERTEX], testSpec.shaders[glu::SHADERTYPE_FRAGMENT], testSpec.groupParams, testSpec.topology)
{
    m_data.push_back(VertexElementData(tcu::Vec4( 1.0f, -1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), -1));
    m_data.push_back(VertexElementData(tcu::Vec4(-1.0f,  1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), -1));

    int refVertexIndex = 2;

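    // Push 1000 overlapping quads (two triangles, six vertices each) so the draw
    // submission has enough work to overlap with the concurrent compute submission.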
    for (int i = 0; i < 1000; i++)
    {
        m_data.push_back(VertexElementData(tcu::Vec4(-0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
        m_data.push_back(VertexElementData(tcu::Vec4(-0.3f,  0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
        m_data.push_back(VertexElementData(tcu::Vec4( 0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
        m_data.push_back(VertexElementData(tcu::Vec4( 0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
        m_data.push_back(VertexElementData(tcu::Vec4( 0.3f,  0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
        m_data.push_back(VertexElementData(tcu::Vec4(-0.3f,  0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), refVertexIndex++));
    }
    m_data.push_back(VertexElementData(tcu::Vec4(-1.0f, 1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), -1));

    initialize();
}

tcu::TestStatus ConcurrentDraw::iterate (void)
{
    enum
    {
        NO_MATCH_FOUND     = ~((deUint32)0),
        ERROR_NONE         = 0,
        ERROR_WAIT_COMPUTE = 1,
        ERROR_WAIT_DRAW    = 2
    };

    struct Queue
    {
        VkQueue  queue;
        deUint32 queueFamilyIndex;
    };

    const deUint32                       numValues      = 1024;
    const CustomInstance                 instance       (createCustomInstanceFromContext(m_context));
    const InstanceDriver&                instanceDriver (instance.getDriver());
    const VkPhysicalDevice               physicalDevice = chooseDevice(instanceDriver, instance, m_context.getTestContext().getCommandLine());
    const auto                           validation     = m_context.getTestContext().getCommandLine().isValidationEnabled();
    tcu::TestLog&                        log            = m_context.getTestContext().getLog();
    Move<VkDevice>                       computeDevice;
    std::vector<VkQueueFamilyProperties> queueFamilyProperties;
    VkDeviceCreateInfo                   deviceInfo;
    VkPhysicalDeviceFeatures             deviceFeatures;
    const float                          queuePriority  = 1.0f;
    VkDeviceQueueCreateInfo              queueInfos;
    Queue                                computeQueue   = { DE_NULL, (deUint32)NO_MATCH_FOUND };

    // Set up compute

    queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

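    // Pick the first queue family that advertises compute support.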
    for (deUint32 queueNdx = 0; queueNdx < queueFamilyProperties.size(); ++queueNdx)
    {
        if (queueFamilyProperties[queueNdx].queueFlags & VK_QUEUE_COMPUTE_BIT)
        {
            if (computeQueue.queueFamilyIndex == NO_MATCH_FOUND)
                computeQueue.queueFamilyIndex = queueNdx;
        }
    }

    if (computeQueue.queueFamilyIndex == NO_MATCH_FOUND)
        TCU_THROW(NotSupportedError, "Compute queue couldn't be created");

    VkDeviceQueueCreateInfo queueInfo;
    deMemset(&queueInfo, 0, sizeof(queueInfo));

    queueInfo.sType            = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueInfo.pNext            = DE_NULL;
    queueInfo.flags            = (VkDeviceQueueCreateFlags)0u;
    queueInfo.queueFamilyIndex = computeQueue.queueFamilyIndex;
    queueInfo.queueCount       = 1;
    queueInfo.pQueuePriorities = &queuePriority;

    queueInfos = queueInfo;

    deMemset(&deviceInfo, 0, sizeof(deviceInfo));
    instanceDriver.getPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);

    void* pNext = DE_NULL;
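    // On Vulkan SC, object reservation and SC 1.0 feature structures are chained into pNext below.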
#ifdef CTS_USES_VULKANSC
    VkDeviceObjectReservationCreateInfo memReservationInfo = m_context.getTestContext().getCommandLine().isSubProcess() ? m_context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
    memReservationInfo.pNext = pNext;
    pNext = &memReservationInfo;

    VkPhysicalDeviceVulkanSC10Features sc10Features = createDefaultSC10Features();
    sc10Features.pNext = pNext;
    pNext = &sc10Features;

    VkPipelineCacheCreateInfo pcCI;
    std::vector<VkPipelinePoolSize> poolSizes;
    if (m_context.getTestContext().getCommandLine().isSubProcess())
    {
        if (m_context.getResourceInterface()->getCacheDataSize() > 0)
        {
            pcCI =
            {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,             // VkStructureType sType;
                DE_NULL,                                                  // const void* pNext;
                VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
                    VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
                m_context.getResourceInterface()->getCacheDataSize(),     // deUintptr initialDataSize;
                m_context.getResourceInterface()->getCacheData()          // const void* pInitialData;
            };
            memReservationInfo.pipelineCacheCreateInfoCount = 1;
            memReservationInfo.pPipelineCacheCreateInfos    = &pcCI;
        }

        poolSizes = m_context.getResourceInterface()->getPipelinePoolSizes();
        if (!poolSizes.empty())
        {
            memReservationInfo.pipelinePoolSizeCount = deUint32(poolSizes.size());
            memReservationInfo.pPipelinePoolSizes    = poolSizes.data();
        }
    }
#endif // CTS_USES_VULKANSC

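    // Create a dedicated logical device that owns the compute queue; the draw work
    // stays on the context's default device and universal queue.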
    deviceInfo.sType                   = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceInfo.pNext                   = pNext;
    deviceInfo.enabledExtensionCount   = 0u;
    deviceInfo.ppEnabledExtensionNames = DE_NULL;
    deviceInfo.enabledLayerCount       = 0u;
    deviceInfo.ppEnabledLayerNames     = DE_NULL;
    deviceInfo.pEnabledFeatures        = &deviceFeatures;
    deviceInfo.queueCreateInfoCount    = 1;
    deviceInfo.pQueueCreateInfos       = &queueInfos;

    computeDevice = createCustomDevice(validation, m_context.getPlatformInterface(), instance, instanceDriver, physicalDevice, &deviceInfo);

#ifndef CTS_USES_VULKANSC
    de::MovePtr<vk::DeviceDriver> deviceDriver = de::MovePtr<vk::DeviceDriver>(new vk::DeviceDriver(m_context.getPlatformInterface(), instance, *computeDevice));
#else
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(m_context.getPlatformInterface(), instance, *computeDevice, m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties()), vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *computeDevice));
#endif // CTS_USES_VULKANSC
    vk::DeviceInterface& vk = *deviceDriver;

    vk.getDeviceQueue(*computeDevice, computeQueue.queueFamilyIndex, 0, &computeQueue.queue);

    // Create an input/output buffer
    const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(instanceDriver, physicalDevice);

    de::MovePtr<SimpleAllocator> allocator = de::MovePtr<SimpleAllocator>(new SimpleAllocator(vk, *computeDevice, memoryProperties));
    const VkDeviceSize bufferSizeBytes = sizeof(deUint32) * numValues;
    const vk::BufferWithMemory buffer(vk, *computeDevice, *allocator, makeBufferCreateInfo(bufferSizeBytes, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);

    // Fill the buffer with data

    typedef std::vector<deUint32> data_vector_t;
    data_vector_t inputData(numValues);

    {
        de::Random rnd(0x82ce7f);
        const Allocation& bufferAllocation = buffer.getAllocation();
        deUint32* bufferPtr = static_cast<deUint32*>(bufferAllocation.getHostPtr());

        for (deUint32 i = 0; i < numValues; ++i)
        {
            deUint32 val = rnd.getUint32();
            inputData[i] = val;
            *bufferPtr++ = val;
        }

        flushAlloc(vk, *computeDevice, bufferAllocation);
    }

    // Create descriptor set

    const Unique<VkDescriptorSetLayout> descriptorSetLayout(
        DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
            .build(vk, *computeDevice));

    const Unique<VkDescriptorPool> descriptorPool(
        DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
            .build(vk, *computeDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, *computeDevice, *descriptorPool, *descriptorSetLayout));

    const VkDescriptorBufferInfo bufferDescriptorInfo = makeDescriptorBufferInfo(*buffer, 0ull, bufferSizeBytes);
    DescriptorSetUpdateBuilder()
        .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptorInfo)
        .update(vk, *computeDevice);

    // Perform the computation

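    // The compute payload is expected to bitwise-invert every buffer element (the
    // validation pass below checks res == ~inp). A minimal GLSL sketch of such a
    // payload, assuming a single invocation that loops over all 1024 values
    // (consistent with the vkCmdDispatch(1, 1, 1) below), would be:
    //
    //     #version 310 es
    //     layout(local_size_x = 1) in;
    //     layout(binding = 0) buffer InOut { uint values[1024]; };
    //     void main (void)
    //     {
    //         for (uint i = 0u; i < 1024u; ++i)
    //             values[i] = ~values[i];
    //     }
    //
    // The actual shader is built from "vulkan/draw/ConcurrentPayload.comp".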
    const Unique<VkShaderModule>  shaderModule(createShaderModule(vk, *computeDevice, m_context.getBinaryCollection().get("vulkan/draw/ConcurrentPayload.comp"), 0u));

    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, *computeDevice, *descriptorSetLayout));
    const Unique<VkPipeline>       pipeline(makeComputePipeline(vk, *computeDevice, *pipelineLayout, *shaderModule));
    const VkBufferMemoryBarrier    hostWriteBarrier   = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, *buffer, 0ull, bufferSizeBytes);
    const VkBufferMemoryBarrier    shaderWriteBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *buffer, 0ull, bufferSizeBytes);
    const Unique<VkCommandPool>    cmdPool(makeCommandPool(vk, *computeDevice, computeQueue.queueFamilyIndex));
    const Unique<VkCommandBuffer>  computeCommandBuffer(allocateCommandBuffer(vk, *computeDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Compute command buffer

    beginCommandBuffer(vk, *computeCommandBuffer);
    vk.cmdBindPipeline(*computeCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
    vk.cmdBindDescriptorSets(*computeCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
    vk.cmdPipelineBarrier(*computeCommandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &hostWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
    vk.cmdDispatch(*computeCommandBuffer, 1, 1, 1);
    vk.cmdPipelineBarrier(*computeCommandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
    endCommandBuffer(vk, *computeCommandBuffer);

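    // Submit info for the compute work. No semaphores are used, so the compute and
    // draw submissions are independent and free to execute concurrently.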
    const VkSubmitInfo submitInfo =
    {
        VK_STRUCTURE_TYPE_SUBMIT_INFO,        // sType
        DE_NULL,                              // pNext
        0u,                                   // waitSemaphoreCount
        DE_NULL,                              // pWaitSemaphores
        (const VkPipelineStageFlags*)DE_NULL, // pWaitDstStageMask
        1u,                                   // commandBufferCount
        &computeCommandBuffer.get(),          // pCommandBuffers
        0u,                                   // signalSemaphoreCount
        DE_NULL                               // pSignalSemaphores
    };

    // Set up draw

    const VkQueue      drawQueue          = m_context.getUniversalQueue();
    const VkDevice     drawDevice         = m_context.getDevice();
    const VkDeviceSize vertexBufferOffset = 0;
    const VkBuffer     vertexBuffer       = m_vertexBuffer->object();

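    // The draw work is recorded through one of three paths: a secondary command
    // buffer, dynamic rendering directly in the primary command buffer, or a
    // legacy render pass.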
#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useSecondaryCmdBuffer)
    {
        // record secondary command buffer
        if (m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
        {
            beginSecondaryCmdBuffer(m_vk, VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT);
            beginDynamicRender(*m_secCmdBuffer);
        }
        else
            beginSecondaryCmdBuffer(m_vk);

        m_vk.cmdBindVertexBuffers(*m_secCmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        m_vk.cmdBindPipeline(*m_secCmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDraw(*m_secCmdBuffer, 6, 1, 2, 0);

        if (m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            endDynamicRender(*m_secCmdBuffer);

        endCommandBuffer(m_vk, *m_secCmdBuffer);

        // record primary command buffer
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();

        if (!m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            beginDynamicRender(*m_cmdBuffer, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

        m_vk.cmdExecuteCommands(*m_cmdBuffer, 1u, &*m_secCmdBuffer);

        if (!m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            endDynamicRender(*m_cmdBuffer);

        endCommandBuffer(m_vk, *m_cmdBuffer);
    }
    else if (m_groupParams->useDynamicRendering)
    {
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();
        beginDynamicRender(*m_cmdBuffer);

        m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        m_vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDraw(*m_cmdBuffer, 6, 1, 2, 0);

        endDynamicRender(*m_cmdBuffer);
        endCommandBuffer(m_vk, *m_cmdBuffer);
    }
#endif // CTS_USES_VULKANSC

    if (!m_groupParams->useDynamicRendering)
    {
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();
        beginLegacyRender(*m_cmdBuffer);

        m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        m_vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDraw(*m_cmdBuffer, 6, 1, 2, 0);

        endLegacyRender(*m_cmdBuffer);
        endCommandBuffer(m_vk, *m_cmdBuffer);
    }

    const VkCommandBuffer drawCommandBuffer = m_cmdBuffer.get();
    const bool            useDeviceGroups   = false;
    const deUint32        deviceMask        = 1u;
    const Unique<VkFence> drawFence(createFence(vk, drawDevice));

    VkDeviceGroupSubmitInfo deviceGroupSubmitInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, // VkStructureType sType;
        DE_NULL,                                    // const void* pNext;
        0u,                                         // deUint32 waitSemaphoreCount;
        DE_NULL,                                    // const deUint32* pWaitSemaphoreDeviceIndices;
        1u,                                         // deUint32 commandBufferCount;
        &deviceMask,                                // const deUint32* pCommandBufferDeviceMasks;
        0u,                                         // deUint32 signalSemaphoreCount;
        DE_NULL,                                    // const deUint32* pSignalSemaphoreDeviceIndices;
    };

    const VkSubmitInfo drawSubmitInfo =
    {
        VK_STRUCTURE_TYPE_SUBMIT_INFO,                      // VkStructureType sType;
        useDeviceGroups ? &deviceGroupSubmitInfo : DE_NULL, // const void* pNext;
        0u,                                                 // deUint32 waitSemaphoreCount;
        DE_NULL,                                            // const VkSemaphore* pWaitSemaphores;
        (const VkPipelineStageFlags*)DE_NULL,               // const VkPipelineStageFlags* pWaitDstStageMask;
        1u,                                                 // deUint32 commandBufferCount;
        &drawCommandBuffer,                                 // const VkCommandBuffer* pCommandBuffers;
        0u,                                                 // deUint32 signalSemaphoreCount;
        DE_NULL,                                            // const VkSemaphore* pSignalSemaphores;
    };

    const Unique<VkFence> computeFence(createFence(vk, *computeDevice));

    // Submit both compute and draw queues
    VK_CHECK(vk.queueSubmit(computeQueue.queue, 1u, &submitInfo, *computeFence));
    VK_CHECK(vk.queueSubmit(drawQueue, 1u, &drawSubmitInfo, *drawFence));

    int err = ERROR_NONE;

    if (VK_SUCCESS != vk.waitForFences(*computeDevice, 1u, &computeFence.get(), DE_TRUE, ~0ull))
        err = ERROR_WAIT_COMPUTE;

    if (VK_SUCCESS != vk.waitForFences(drawDevice, 1u, &drawFence.get(), DE_TRUE, ~0ull))
        err = ERROR_WAIT_DRAW;

    // Have to wait for all fences before calling fail, or some fence may be left hanging.

#ifdef CTS_USES_VULKANSC
    if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
    {
        if (err == ERROR_WAIT_COMPUTE)
        {
            return tcu::TestStatus::fail("Failed waiting for compute queue fence.");
        }

        if (err == ERROR_WAIT_DRAW)
        {
            return tcu::TestStatus::fail("Failed waiting for draw queue fence.");
        }

        // Validation - compute

        const Allocation& bufferAllocation = buffer.getAllocation();
        invalidateAlloc(vk, *computeDevice, bufferAllocation);
        const deUint32* bufferPtr = static_cast<deUint32*>(bufferAllocation.getHostPtr());

        for (deUint32 ndx = 0; ndx < numValues; ++ndx)
        {
            const deUint32 res = bufferPtr[ndx];
            const deUint32 inp = inputData[ndx];
            const deUint32 ref = ~inp;

            if (res != ref)
            {
                std::ostringstream msg;
                msg << "Comparison failed (compute) for InOut.values[" << ndx << "] ref:" << ref << " res:" << res << " inp:" << inp;
                return tcu::TestStatus::fail(msg.str());
            }
        }
    }

    // Validation - draw

    tcu::Texture2D referenceFrame(mapVkFormat(m_colorAttachmentFormat), (int)(0.5f + static_cast<float>(WIDTH)), (int)(0.5f + static_cast<float>(HEIGHT)));

    referenceFrame.allocLevel(0);

    const deInt32 frameWidth  = referenceFrame.getWidth();
    const deInt32 frameHeight = referenceFrame.getHeight();

    tcu::clear(referenceFrame.getLevel(0), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));

    ReferenceImageCoordinates refCoords;

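    // Build the reference image: map each pixel to normalized device coordinates
    // and paint the pixels covered by the quad blue.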
    for (int y = 0; y < frameHeight; y++)
    {
        const float yCoord = (float)(y / (0.5 * frameHeight)) - 1.0f;

        for (int x = 0; x < frameWidth; x++)
        {
            const float xCoord = (float)(x / (0.5 * frameWidth)) - 1.0f;

            if ((yCoord >= refCoords.bottom &&
                 yCoord <= refCoords.top &&
                 xCoord >= refCoords.left &&
                 xCoord <= refCoords.right))
                referenceFrame.getLevel(0).setPixel(tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f), x, y);
        }
    }

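    // Read back the rendered image and fuzzy-compare it against the reference.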
    const VkOffset3D zeroOffset = { 0, 0, 0 };
    const tcu::ConstPixelBufferAccess renderedFrame = m_colorTargetImage->readSurface(
        drawQueue, m_context.getDefaultAllocator(), VK_IMAGE_LAYOUT_GENERAL, zeroOffset, WIDTH, HEIGHT, VK_IMAGE_ASPECT_COLOR_BIT);

    qpTestResult res = QP_TEST_RESULT_PASS;

    if (!tcu::fuzzyCompare(log, "Result", "Image comparison result",
                           referenceFrame.getLevel(0), renderedFrame, 0.05f,
                           tcu::COMPARE_LOG_RESULT))
    {
        res = QP_TEST_RESULT_FAIL;
    }
    return tcu::TestStatus(res, qpGetTestResultName(res));
}

void checkSupport (Context& context, ConcurrentDraw::TestSpec testSpec)
{
    if (testSpec.groupParams->useDynamicRendering)
        context.requireDeviceFunctionality("VK_KHR_dynamic_rendering");
}

} // anonymous

ConcurrentDrawTests::ConcurrentDrawTests (tcu::TestContext &testCtx, const SharedGroupParams groupParams)
    : TestCaseGroup (testCtx, "concurrent", "concurrent drawing")
    , m_groupParams (groupParams)
{
    /* Left blank on purpose */
}

void ConcurrentDrawTests::init (void)
{
    ConcurrentDraw::TestSpec testSpec
    {
        {
            { glu::SHADERTYPE_VERTEX,   "vulkan/draw/VertexFetch.vert" },
            { glu::SHADERTYPE_FRAGMENT, "vulkan/draw/VertexFetch.frag" },
            { glu::SHADERTYPE_COMPUTE,  "vulkan/draw/ConcurrentPayload.comp" }
        },
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        m_groupParams
    };

    addChild(new InstanceFactory<ConcurrentDraw, FunctionSupport1<ConcurrentDraw::TestSpec>>(m_testCtx, "compute_and_triangle_list", "Draws triangle list while running a compute shader", testSpec, FunctionSupport1<ConcurrentDraw::TestSpec>::Args(checkSupport, testSpec)));
}

} // namespace Draw
} // namespace vkt