/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization tests utilities
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "deStringUtil.hpp"
#include <set>
#include <limits>

namespace vkt
{
namespace synchronization
{
using namespace vk;

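// Allocates a single primary command buffer from the given command pool.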
Move<VkCommandBuffer> makeCommandBuffer (const DeviceInterface& vk, const VkDevice device, const VkCommandPool commandPool)
{
    const VkCommandBufferAllocateInfo info =
    {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
        DE_NULL,                                        // const void* pNext;
        commandPool,                                    // VkCommandPool commandPool;
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                // VkCommandBufferLevel level;
        1u,                                             // deUint32 commandBufferCount;
    };
    return allocateCommandBuffer(vk, device, &info);
}

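// Creates a compute pipeline through a transient VkPipelineCache backed by the shared
// PipelineCacheData, and writes the updated cache contents back after creation.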
Move<VkPipeline> makeComputePipeline (const DeviceInterface&               vk,
                                      const VkDevice                       device,
                                      const VkPipelineLayout               pipelineLayout,
                                      const VkShaderModule                 shaderModule,
                                      const VkSpecializationInfo*          specInfo,
                                      PipelineCacheData&                   pipelineCacheData,
                                      de::SharedPtr<vk::ResourceInterface> resourceInterface)
{
    const VkPipelineShaderStageCreateInfo shaderStageInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                             // const void* pNext;
        (VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags flags;
        VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
        shaderModule,                                        // VkShaderModule module;
        "main",                                              // const char* pName;
        specInfo,                                            // const VkSpecializationInfo* pSpecializationInfo;
    };
    const VkComputePipelineCreateInfo pipelineInfo =
    {
        VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                        // const void* pNext;
        (VkPipelineCreateFlags)0,                       // VkPipelineCreateFlags flags;
        shaderStageInfo,                                // VkPipelineShaderStageCreateInfo stage;
        pipelineLayout,                                 // VkPipelineLayout layout;
        DE_NULL,                                        // VkPipeline basePipelineHandle;
        0,                                              // deInt32 basePipelineIndex;
    };

    {
        const vk::Unique<vk::VkPipelineCache> pipelineCache (pipelineCacheData.createPipelineCache(vk, device, resourceInterface));
        vk::Move<vk::VkPipeline>              pipeline      (createComputePipeline(vk, device, *pipelineCache, &pipelineInfo));

        // Refresh data from cache
        pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);

        return pipeline;
    }
}

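// Returns a VkImageCreateInfo for a single-mip, single-layer image with exclusive
// sharing and undefined initial layout.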
VkImageCreateInfo makeImageCreateInfo (const VkImageType           imageType,
                                       const VkExtent3D&           extent,
                                       const VkFormat              format,
                                       const VkImageUsageFlags     usage,
                                       const VkSampleCountFlagBits samples,
                                       const VkImageTiling         tiling)
{
    return
    {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                             // const void* pNext;
        (VkImageCreateFlags)0,               // VkImageCreateFlags flags;
        imageType,                           // VkImageType imageType;
        format,                              // VkFormat format;
        extent,                              // VkExtent3D extent;
        1u,                                  // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        samples,                             // VkSampleCountFlagBits samples;
        tiling,                              // VkImageTiling tiling;
        usage,                               // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        0u,                                  // uint32_t queueFamilyIndexCount;
        DE_NULL,                             // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };
}

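// Begins the render pass with a zero-sized render area. This is fine when rasterization
// is disabled, since no fragments are ever generated.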
void beginRenderPassWithRasterizationDisabled (const DeviceInterface& vk,
                                               const VkCommandBuffer  commandBuffer,
                                               const VkRenderPass     renderPass,
                                               const VkFramebuffer    framebuffer)
{
    const VkRect2D renderArea = {{ 0, 0 }, { 0, 0 }};

    beginRenderPass(vk, commandBuffer, renderPass, framebuffer, renderArea);
}

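// Compiles the given binary into a shader module, stores the module in the matching
// member so it outlives pipeline creation, and appends a shader stage create info.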
GraphicsPipelineBuilder& GraphicsPipelineBuilder::setShader (const DeviceInterface&      vk,
                                                             const VkDevice              device,
                                                             const VkShaderStageFlagBits stage,
                                                             const ProgramBinary&        binary,
                                                             const VkSpecializationInfo* specInfo)
{
    VkShaderModule module;
    switch (stage)
    {
        case (VK_SHADER_STAGE_VERTEX_BIT):
            DE_ASSERT(m_vertexShaderModule.get() == DE_NULL);
            m_vertexShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
            module = *m_vertexShaderModule;
            break;

        case (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT):
            DE_ASSERT(m_tessControlShaderModule.get() == DE_NULL);
            m_tessControlShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
            module = *m_tessControlShaderModule;
            break;

        case (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT):
            DE_ASSERT(m_tessEvaluationShaderModule.get() == DE_NULL);
            m_tessEvaluationShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
            module = *m_tessEvaluationShaderModule;
            break;

        case (VK_SHADER_STAGE_GEOMETRY_BIT):
            DE_ASSERT(m_geometryShaderModule.get() == DE_NULL);
            m_geometryShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
            module = *m_geometryShaderModule;
            break;

        case (VK_SHADER_STAGE_FRAGMENT_BIT):
            DE_ASSERT(m_fragmentShaderModule.get() == DE_NULL);
            m_fragmentShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
            module = *m_fragmentShaderModule;
            break;

        default:
            DE_FATAL("Invalid shader stage");
            return *this;
    }

    const VkPipelineShaderStageCreateInfo pipelineShaderStageInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                             // const void* pNext;
        (VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags flags;
        stage,                                               // VkShaderStageFlagBits stage;
        module,                                              // VkShaderModule module;
        "main",                                              // const char* pName;
        specInfo,                                            // const VkSpecializationInfo* pSpecializationInfo;
    };

    m_shaderStageFlags |= stage;
    m_shaderStages.push_back(pipelineShaderStageInfo);

    return *this;
}

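// Configures the vertex input state with a single binding (binding 0, per-vertex rate)
// and a single attribute at location 0 with the given format.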
GraphicsPipelineBuilder& GraphicsPipelineBuilder::setVertexInputSingleAttribute (const VkFormat vertexFormat, const deUint32 stride)
{
    const VkVertexInputBindingDescription bindingDesc =
    {
        0u,                          // uint32_t binding;
        stride,                      // uint32_t stride;
        VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
    };
    const VkVertexInputAttributeDescription attributeDesc =
    {
        0u,                          // uint32_t location;
        0u,                          // uint32_t binding;
        vertexFormat,                // VkFormat format;
        0u,                          // uint32_t offset;
    };

    m_vertexInputBindings.clear();
    m_vertexInputBindings.push_back(bindingDesc);

    m_vertexInputAttributes.clear();
    m_vertexInputAttributes.push_back(attributeDesc);

    return *this;
}

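// Returns a pointer to the vector's contents, or DE_NULL for an empty vector
// (avoids taking the address of element 0 of an empty vector).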
template<typename T>
inline const T* dataPointer (const std::vector<T>& vec)
{
    return (vec.size() != 0 ? &vec[0] : DE_NULL);
}

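// Assembles the full graphics pipeline from the accumulated builder state. When no
// fragment shader was set, rasterization is disabled and the viewport, multisample,
// depth/stencil and color blend states are omitted, as the spec allows.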
Move<VkPipeline> GraphicsPipelineBuilder::build (const DeviceInterface&               vk,
                                                 const VkDevice                       device,
                                                 const VkPipelineLayout               pipelineLayout,
                                                 const VkRenderPass                   renderPass,
                                                 PipelineCacheData&                   pipelineCacheData,
                                                 de::SharedPtr<vk::ResourceInterface> resourceInterface)
{
    const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                   // const void* pNext;
        (VkPipelineVertexInputStateCreateFlags)0,                  // VkPipelineVertexInputStateCreateFlags flags;
        static_cast<deUint32>(m_vertexInputBindings.size()),       // uint32_t vertexBindingDescriptionCount;
        dataPointer(m_vertexInputBindings),                        // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
        static_cast<deUint32>(m_vertexInputAttributes.size()),     // uint32_t vertexAttributeDescriptionCount;
        dataPointer(m_vertexInputAttributes),                      // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
    };

    // Tessellation requires patch primitives regardless of the requested topology.
    const VkPrimitiveTopology topology = (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
                                                                                                         : m_primitiveTopology;
    const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                     // const void* pNext;
        (VkPipelineInputAssemblyStateCreateFlags)0,                  // VkPipelineInputAssemblyStateCreateFlags flags;
        topology,                                                    // VkPrimitiveTopology topology;
        VK_FALSE,                                                    // VkBool32 primitiveRestartEnable;
    };

    const VkPipelineTessellationStateCreateInfo pipelineTessellationStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                   // const void* pNext;
        (VkPipelineTessellationStateCreateFlags)0,                 // VkPipelineTessellationStateCreateFlags flags;
        m_patchControlPoints,                                      // uint32_t patchControlPoints;
    };

    const VkViewport viewport = makeViewport(m_renderSize);
    const VkRect2D   scissor  = makeRect2D(m_renderSize);

    const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                               // const void* pNext;
        (VkPipelineViewportStateCreateFlags)0,                 // VkPipelineViewportStateCreateFlags flags;
        1u,                                                    // uint32_t viewportCount;
        &viewport,                                             // const VkViewport* pViewports;
        1u,                                                    // uint32_t scissorCount;
        &scissor,                                              // const VkRect2D* pScissors;
    };

    // Without a fragment shader there is nothing to rasterize.
    const bool isRasterizationDisabled = ((m_shaderStageFlags & VK_SHADER_STAGE_FRAGMENT_BIT) == 0);
    const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                    // const void* pNext;
        (VkPipelineRasterizationStateCreateFlags)0,                 // VkPipelineRasterizationStateCreateFlags flags;
        VK_FALSE,                                                   // VkBool32 depthClampEnable;
        isRasterizationDisabled,                                    // VkBool32 rasterizerDiscardEnable;
        VK_POLYGON_MODE_FILL,                                       // VkPolygonMode polygonMode;
        m_cullModeFlags,                                            // VkCullModeFlags cullMode;
        m_frontFace,                                                // VkFrontFace frontFace;
        VK_FALSE,                                                   // VkBool32 depthBiasEnable;
        0.0f,                                                       // float depthBiasConstantFactor;
        0.0f,                                                       // float depthBiasClamp;
        0.0f,                                                       // float depthBiasSlopeFactor;
        1.0f,                                                       // float lineWidth;
    };

    const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                  // const void* pNext;
        (VkPipelineMultisampleStateCreateFlags)0,                 // VkPipelineMultisampleStateCreateFlags flags;
        VK_SAMPLE_COUNT_1_BIT,                                    // VkSampleCountFlagBits rasterizationSamples;
        VK_FALSE,                                                 // VkBool32 sampleShadingEnable;
        0.0f,                                                     // float minSampleShading;
        DE_NULL,                                                  // const VkSampleMask* pSampleMask;
        VK_FALSE,                                                 // VkBool32 alphaToCoverageEnable;
        VK_FALSE                                                  // VkBool32 alphaToOneEnable;
    };

    const VkStencilOpState stencilOpState = makeStencilOpState(
        VK_STENCIL_OP_KEEP,  // stencil fail
        VK_STENCIL_OP_KEEP,  // depth & stencil pass
        VK_STENCIL_OP_KEEP,  // depth only fail
        VK_COMPARE_OP_NEVER, // compare op
        0u,                  // compare mask
        0u,                  // write mask
        0u);                 // reference

    const VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                    // const void* pNext;
        (VkPipelineDepthStencilStateCreateFlags)0,                  // VkPipelineDepthStencilStateCreateFlags flags;
        VK_FALSE,                                                   // VkBool32 depthTestEnable;
        VK_FALSE,                                                   // VkBool32 depthWriteEnable;
        VK_COMPARE_OP_LESS,                                         // VkCompareOp depthCompareOp;
        VK_FALSE,                                                   // VkBool32 depthBoundsTestEnable;
        VK_FALSE,                                                   // VkBool32 stencilTestEnable;
        stencilOpState,                                             // VkStencilOpState front;
        stencilOpState,                                             // VkStencilOpState back;
        0.0f,                                                       // float minDepthBounds;
        1.0f,                                                       // float maxDepthBounds;
    };

    const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
    const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
    {
        m_blendEnable,             // VkBool32 blendEnable;
        VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcColorBlendFactor;
        VK_BLEND_FACTOR_ONE,       // VkBlendFactor dstColorBlendFactor;
        VK_BLEND_OP_ADD,           // VkBlendOp colorBlendOp;
        VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcAlphaBlendFactor;
        VK_BLEND_FACTOR_ONE,       // VkBlendFactor dstAlphaBlendFactor;
        VK_BLEND_OP_ADD,           // VkBlendOp alphaBlendOp;
        colorComponentsAll,        // VkColorComponentFlags colorWriteMask;
    };

    const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
        DE_NULL,                                                  // const void* pNext;
        (VkPipelineColorBlendStateCreateFlags)0,                  // VkPipelineColorBlendStateCreateFlags flags;
        VK_FALSE,                                                 // VkBool32 logicOpEnable;
        VK_LOGIC_OP_COPY,                                         // VkLogicOp logicOp;
        1u,                                                       // deUint32 attachmentCount;
        &pipelineColorBlendAttachmentState,                       // const VkPipelineColorBlendAttachmentState* pAttachments;
        { 0.0f, 0.0f, 0.0f, 0.0f },                               // float blendConstants[4];
    };

    const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
    {
        VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,                                                            // VkStructureType sType;
        DE_NULL,                                                                                                    // const void* pNext;
        (VkPipelineCreateFlags)0,                                                                                   // VkPipelineCreateFlags flags;
        static_cast<deUint32>(m_shaderStages.size()),                                                               // deUint32 stageCount;
        &m_shaderStages[0],                                                                                         // const VkPipelineShaderStageCreateInfo* pStages;
        &vertexInputStateInfo,                                                                                      // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
        &pipelineInputAssemblyStateInfo,                                                                            // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
        (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? &pipelineTessellationStateInfo : DE_NULL), // const VkPipelineTessellationStateCreateInfo* pTessellationState;
        (isRasterizationDisabled ? DE_NULL : &pipelineViewportStateInfo),                                           // const VkPipelineViewportStateCreateInfo* pViewportState;
        &pipelineRasterizationStateInfo,                                                                            // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
        (isRasterizationDisabled ? DE_NULL : &pipelineMultisampleStateInfo),                                        // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
        (isRasterizationDisabled ? DE_NULL : &pipelineDepthStencilStateInfo),                                       // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
        (isRasterizationDisabled ? DE_NULL : &pipelineColorBlendStateInfo),                                         // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
        DE_NULL,                                                                                                    // const VkPipelineDynamicStateCreateInfo* pDynamicState;
        pipelineLayout,                                                                                             // VkPipelineLayout layout;
        renderPass,                                                                                                 // VkRenderPass renderPass;
        0u,                                                                                                         // deUint32 subpass;
        DE_NULL,                                                                                                    // VkPipeline basePipelineHandle;
        0,                                                                                                          // deInt32 basePipelineIndex;
    };

    {
        const vk::Unique<vk::VkPipelineCache> pipelineCache (pipelineCacheData.createPipelineCache(vk, device, resourceInterface));
        vk::Move<vk::VkPipeline>              pipeline      (createGraphicsPipeline(vk, device, *pipelineCache, &graphicsPipelineInfo));

        // Refresh data from cache
        pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);

        return pipeline;
    }
}

// Uses some structures added by VK_KHR_synchronization2 to fill legacy structures.
// With this approach we don't have to create a branch in each test (one for legacy,
// a second for the new synchronization); this helps to reduce the code of some tests.
class LegacySynchronizationWrapper : public SynchronizationWrapperBase
{
protected:

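    // Data for a single submit is recorded as counts and indices into the member vectors
    // rather than as pointers: the vectors may reallocate while further submits are added,
    // so pointers are resolved only in queueSubmit(). The "PlusOne" indices use 0 to mean
    // "no timeline semaphore values for this submit".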
    struct SubmitInfoData
    {
        deUint32    waitSemaphoreCount;
        std::size_t waitSemaphoreIndex;
        std::size_t waitSemaphoreValueIndexPlusOne;
        deUint32    commandBufferCount;
        deUint32    commandBufferIndex;
        deUint32    signalSemaphoreCount;
        std::size_t signalSemaphoreIndex;
        std::size_t signalSemaphoreValueIndexPlusOne;
    };

    bool isStageFlagAllowed (VkPipelineStageFlags2 stage) const
    {
        // synchronization2 supports more stages than legacy synchronization,
        // so SynchronizationWrapper can only be used for cases that
        // operate on stages also supported by legacy synchronization.
        // NOTE: if a test hits an assertion that uses this method then that
        // test should not use SynchronizationWrapper - it should be synchronization2 exclusive.

        static const std::set<deUint32> allowedStages
        {
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_HOST_BIT,
            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
#ifndef CTS_USES_VULKANSC
            VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
            VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
            VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,
            VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
#endif // CTS_USES_VULKANSC
            VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
#ifndef CTS_USES_VULKANSC
            VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
            VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
            VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
            VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
#endif // CTS_USES_VULKANSC
            VK_PIPELINE_STAGE_NONE_KHR,
        };

        // Flags above the 32-bit range have no legacy equivalent.
        if (stage > static_cast<deUint64>(std::numeric_limits<deUint32>::max()))
            return false;

        return (allowedStages.find(static_cast<deUint32>(stage)) != allowedStages.end());
    }

    bool isAccessFlagAllowed (VkAccessFlags2 access) const
    {
        // synchronization2 supports more access flags than legacy synchronization,
        // so SynchronizationWrapper can only be used for cases that
        // operate on access flags also supported by legacy synchronization.
        // NOTE: if a test hits an assertion that uses this method then that
        // test should not use SynchronizationWrapper - it should be synchronization2 exclusive.

        static const std::set<deUint32> allowedAccessFlags
        {
            VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
            VK_ACCESS_INDEX_READ_BIT,
            VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
            VK_ACCESS_UNIFORM_READ_BIT,
            VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
            VK_ACCESS_SHADER_READ_BIT,
            VK_ACCESS_SHADER_WRITE_BIT,
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            VK_ACCESS_TRANSFER_READ_BIT,
            VK_ACCESS_TRANSFER_WRITE_BIT,
            VK_ACCESS_HOST_READ_BIT,
            VK_ACCESS_HOST_WRITE_BIT,
            VK_ACCESS_MEMORY_READ_BIT,
            VK_ACCESS_MEMORY_WRITE_BIT,
#ifndef CTS_USES_VULKANSC
            VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT,
            VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
            VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
            VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
#endif // CTS_USES_VULKANSC
            VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT,
#ifndef CTS_USES_VULKANSC
            VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR,
            VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
#endif // CTS_USES_VULKANSC
            VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR,
#ifndef CTS_USES_VULKANSC
            VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT,
            VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV,
            VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV,
#endif // CTS_USES_VULKANSC
            VK_ACCESS_NONE_KHR,
        };

        // Flags above the 32-bit range have no legacy equivalent.
        if (access > static_cast<deUint64>(std::numeric_limits<deUint32>::max()))
            return false;

        return (allowedAccessFlags.find(static_cast<deUint32>(access)) != allowedAccessFlags.end());
    }

public:
    LegacySynchronizationWrapper (const DeviceInterface& vk, bool usingTimelineSemaphores, deUint32 submitInfoCount = 1u)
        : SynchronizationWrapperBase (vk)
        , m_submitted                (DE_FALSE)
    {
        m_waitSemaphores.reserve(submitInfoCount);
        m_signalSemaphores.reserve(submitInfoCount);
        m_waitDstStageMasks.reserve(submitInfoCount);
        m_commandBuffers.reserve(submitInfoCount);
        m_submitInfoData.reserve(submitInfoCount);

        // each submit may contribute both wait and signal values
        if (usingTimelineSemaphores)
            m_timelineSemaphoreValues.reserve(2 * submitInfoCount);
    }

    ~LegacySynchronizationWrapper() = default;

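    // Records one submit: timeline values and semaphore/command buffer handles are copied
    // into the member vectors, and a SubmitInfoData entry remembers where each run begins.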
    void addSubmitInfo (deUint32                         waitSemaphoreInfoCount,
                        const VkSemaphoreSubmitInfo*     pWaitSemaphoreInfos,
                        deUint32                         commandBufferInfoCount,
                        const VkCommandBufferSubmitInfo* pCommandBufferInfos,
                        deUint32                         signalSemaphoreInfoCount,
                        const VkSemaphoreSubmitInfo*     pSignalSemaphoreInfos,
                        bool                             usingWaitTimelineSemaphore,
                        bool                             usingSignalTimelineSemaphore) override
    {
        m_submitInfoData.push_back(SubmitInfoData{ waitSemaphoreInfoCount, 0, 0, commandBufferInfoCount, 0u, signalSemaphoreInfoCount, 0, 0 });
        SubmitInfoData& si = m_submitInfoData.back();

        // memorize wait values
        if (usingWaitTimelineSemaphore)
        {
            DE_ASSERT(pWaitSemaphoreInfos);
            si.waitSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
            for (deUint32 i = 0; i < waitSemaphoreInfoCount; ++i)
                m_timelineSemaphoreValues.push_back(pWaitSemaphoreInfos[i].value);
        }

        // memorize signal values
        if (usingSignalTimelineSemaphore)
        {
            DE_ASSERT(pSignalSemaphoreInfos);
            si.signalSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
            for (deUint32 i = 0; i < signalSemaphoreInfoCount; ++i)
                m_timelineSemaphoreValues.push_back(pSignalSemaphoreInfos[i].value);
        }

        // construct list of semaphores that we need to wait on
        if (waitSemaphoreInfoCount)
        {
            si.waitSemaphoreIndex = m_waitSemaphores.size();
            for (deUint32 i = 0; i < waitSemaphoreInfoCount; ++i)
            {
                DE_ASSERT(isStageFlagAllowed(pWaitSemaphoreInfos[i].stageMask));
                m_waitSemaphores.push_back(pWaitSemaphoreInfos[i].semaphore);
                m_waitDstStageMasks.push_back(static_cast<VkPipelineStageFlags>(pWaitSemaphoreInfos[i].stageMask));
            }
        }

        // construct list of command buffers
        if (commandBufferInfoCount)
        {
            si.commandBufferIndex = static_cast<deUint32>(m_commandBuffers.size());
            for (deUint32 i = 0; i < commandBufferInfoCount; ++i)
                m_commandBuffers.push_back(pCommandBufferInfos[i].commandBuffer);
        }

        // construct list of semaphores that will be signaled
        if (signalSemaphoreInfoCount)
        {
            si.signalSemaphoreIndex = m_signalSemaphores.size();
            for (deUint32 i = 0; i < signalSemaphoreInfoCount; ++i)
                m_signalSemaphores.push_back(pSignalSemaphoreInfos[i].semaphore);
        }
    }

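    // Translates a VkDependencyInfo into a legacy vkCmdPipelineBarrier call: the per-barrier
    // stage masks are OR-ed into single src/dst masks, and each *Barrier2 structure is
    // narrowed to its legacy counterpart.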
    void cmdPipelineBarrier (VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo) const override
    {
        DE_ASSERT(pDependencyInfo);

#ifndef CTS_USES_VULKANSC
        VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_NONE;
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_NONE;
#else
        VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_NONE_KHR;
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_NONE_KHR;
#endif // CTS_USES_VULKANSC
        deUint32               memoryBarrierCount       = pDependencyInfo->memoryBarrierCount;
        VkMemoryBarrier*       pMemoryBarriers          = DE_NULL;
        deUint32               bufferMemoryBarrierCount = pDependencyInfo->bufferMemoryBarrierCount;
        VkBufferMemoryBarrier* pBufferMemoryBarriers    = DE_NULL;
        deUint32               imageMemoryBarrierCount  = pDependencyInfo->imageMemoryBarrierCount;
        VkImageMemoryBarrier*  pImageMemoryBarriers     = DE_NULL;

        // translate VkMemoryBarrier2 to VkMemoryBarrier
        std::vector<VkMemoryBarrier> memoryBarriers;
        if (memoryBarrierCount)
        {
            memoryBarriers.reserve(memoryBarrierCount);
            for (deUint32 i = 0; i < memoryBarrierCount; ++i)
            {
                const VkMemoryBarrier2& pMemoryBarrier = pDependencyInfo->pMemoryBarriers[i];

                DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.srcStageMask));
                DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.dstStageMask));
                DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.dstAccessMask));

                srcStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.srcStageMask);
                dstStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.dstStageMask);
                memoryBarriers.push_back(makeMemoryBarrier(
                    static_cast<VkAccessFlags>(pMemoryBarrier.srcAccessMask),
                    static_cast<VkAccessFlags>(pMemoryBarrier.dstAccessMask)
                ));
            }
            pMemoryBarriers = &memoryBarriers[0];
        }

        // translate VkBufferMemoryBarrier2 to VkBufferMemoryBarrier
        std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
        if (bufferMemoryBarrierCount)
        {
            bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
            for (deUint32 i = 0; i < bufferMemoryBarrierCount; ++i)
            {
                const VkBufferMemoryBarrier2& pBufferMemoryBarrier = pDependencyInfo->pBufferMemoryBarriers[i];

                DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.srcStageMask));
                DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.dstStageMask));
                DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.dstAccessMask));

                srcStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.srcStageMask);
                dstStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.dstStageMask);
                bufferMemoryBarriers.push_back(makeBufferMemoryBarrier(
                    static_cast<VkAccessFlags>(pBufferMemoryBarrier.srcAccessMask),
                    static_cast<VkAccessFlags>(pBufferMemoryBarrier.dstAccessMask),
                    pBufferMemoryBarrier.buffer,
                    pBufferMemoryBarrier.offset,
                    pBufferMemoryBarrier.size,
                    pBufferMemoryBarrier.srcQueueFamilyIndex,
                    pBufferMemoryBarrier.dstQueueFamilyIndex
                ));
            }
            pBufferMemoryBarriers = &bufferMemoryBarriers[0];
        }

        // translate VkImageMemoryBarrier2 to VkImageMemoryBarrier
        std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
        if (imageMemoryBarrierCount)
        {
            imageMemoryBarriers.reserve(imageMemoryBarrierCount);
            for (deUint32 i = 0; i < imageMemoryBarrierCount; ++i)
            {
                const VkImageMemoryBarrier2& pImageMemoryBarrier = pDependencyInfo->pImageMemoryBarriers[i];

                DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.srcStageMask));
                DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.dstStageMask));
                DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.dstAccessMask));

                srcStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.srcStageMask);
                dstStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.dstStageMask);
                imageMemoryBarriers.push_back(makeImageMemoryBarrier(
                    static_cast<VkAccessFlags>(pImageMemoryBarrier.srcAccessMask),
                    static_cast<VkAccessFlags>(pImageMemoryBarrier.dstAccessMask),
                    pImageMemoryBarrier.oldLayout,
                    pImageMemoryBarrier.newLayout,
                    pImageMemoryBarrier.image,
                    pImageMemoryBarrier.subresourceRange,
                    pImageMemoryBarrier.srcQueueFamilyIndex,
                    pImageMemoryBarrier.dstQueueFamilyIndex
                ));
            }
            pImageMemoryBarriers = &imageMemoryBarriers[0];
        }

        m_vk.cmdPipelineBarrier(
            commandBuffer,
            srcStageMask,
            dstStageMask,
            (VkDependencyFlags)0,
            memoryBarrierCount,
            pMemoryBarriers,
            bufferMemoryBarrierCount,
            pBufferMemoryBarriers,
            imageMemoryBarrierCount,
            pImageMemoryBarriers
        );
    }

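    // Legacy vkCmdSetEvent takes only a stage mask, so the srcStageMask of the first
    // barrier in the dependency info is used (image barriers take precedence, then
    // buffer barriers, then memory barriers).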
    void cmdSetEvent (VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo) const override
    {
        DE_ASSERT(pDependencyInfo);

#ifndef CTS_USES_VULKANSC
        VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
#else
        VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
#endif // CTS_USES_VULKANSC
        if (pDependencyInfo->pMemoryBarriers)
            srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
        if (pDependencyInfo->pBufferMemoryBarriers)
            srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
        if (pDependencyInfo->pImageMemoryBarriers)
            srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;

        DE_ASSERT(isStageFlagAllowed(srcStageMask));
        m_vk.cmdSetEvent(commandBuffer, event, static_cast<VkPipelineStageFlags>(srcStageMask));
    }

    void cmdResetEvent (VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 flag) const override
    {
        DE_ASSERT(isStageFlagAllowed(flag));
        VkPipelineStageFlags legacyStageMask = static_cast<VkPipelineStageFlags>(flag);
        m_vk.cmdResetEvent(commandBuffer, event, legacyStageMask);
    }

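    // Translates a VkDependencyInfo into a legacy vkCmdWaitEvents call; the src/dst stage
    // masks are taken from the first barrier of each kind (the last kind present wins).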
    void cmdWaitEvents (VkCommandBuffer commandBuffer, deUint32 eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfo) const override
    {
        DE_ASSERT(pDependencyInfo);

#ifndef CTS_USES_VULKANSC
        VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
        VkPipelineStageFlags2 dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
#else
        VkPipelineStageFlags2 srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
        VkPipelineStageFlags2 dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR;
#endif // CTS_USES_VULKANSC
        deUint32                           memoryBarrierCount       = pDependencyInfo->memoryBarrierCount;
        deUint32                           bufferMemoryBarrierCount = pDependencyInfo->bufferMemoryBarrierCount;
        deUint32                           imageMemoryBarrierCount  = pDependencyInfo->imageMemoryBarrierCount;
        VkMemoryBarrier*                   pMemoryBarriers          = DE_NULL;
        VkBufferMemoryBarrier*             pBufferMemoryBarriers    = DE_NULL;
        VkImageMemoryBarrier*              pImageMemoryBarriers     = DE_NULL;
        std::vector<VkMemoryBarrier>       memoryBarriers;
        std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
        std::vector<VkImageMemoryBarrier>  imageMemoryBarriers;

        if (pDependencyInfo->pMemoryBarriers)
        {
            srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
            dstStageMask = pDependencyInfo->pMemoryBarriers[0].dstStageMask;

            memoryBarriers.reserve(memoryBarrierCount);
            for (deUint32 i = 0; i < memoryBarrierCount; ++i)
            {
                const VkMemoryBarrier2& mb = pDependencyInfo->pMemoryBarriers[i];
                DE_ASSERT(isAccessFlagAllowed(mb.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(mb.dstAccessMask));
                memoryBarriers.push_back(
                    makeMemoryBarrier(
                        static_cast<VkAccessFlags>(mb.srcAccessMask),
                        static_cast<VkAccessFlags>(mb.dstAccessMask)
                    )
                );
            }
            pMemoryBarriers = &memoryBarriers[0];
        }
        if (pDependencyInfo->pBufferMemoryBarriers)
        {
            srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
            dstStageMask = pDependencyInfo->pBufferMemoryBarriers[0].dstStageMask;

            bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
            for (deUint32 i = 0; i < bufferMemoryBarrierCount; ++i)
            {
                const VkBufferMemoryBarrier2& bmb = pDependencyInfo->pBufferMemoryBarriers[i];
                DE_ASSERT(isAccessFlagAllowed(bmb.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(bmb.dstAccessMask));
                bufferMemoryBarriers.push_back(
                    makeBufferMemoryBarrier(
                        static_cast<VkAccessFlags>(bmb.srcAccessMask),
                        static_cast<VkAccessFlags>(bmb.dstAccessMask),
                        bmb.buffer,
                        bmb.offset,
                        bmb.size,
                        bmb.srcQueueFamilyIndex,
                        bmb.dstQueueFamilyIndex
                    )
                );
            }
            pBufferMemoryBarriers = &bufferMemoryBarriers[0];
        }
        if (pDependencyInfo->pImageMemoryBarriers)
        {
            srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;
            dstStageMask = pDependencyInfo->pImageMemoryBarriers[0].dstStageMask;

            imageMemoryBarriers.reserve(imageMemoryBarrierCount);
            for (deUint32 i = 0; i < imageMemoryBarrierCount; ++i)
            {
                const VkImageMemoryBarrier2& imb = pDependencyInfo->pImageMemoryBarriers[i];
                DE_ASSERT(isAccessFlagAllowed(imb.srcAccessMask));
                DE_ASSERT(isAccessFlagAllowed(imb.dstAccessMask));
                imageMemoryBarriers.push_back(
                    makeImageMemoryBarrier(
                        static_cast<VkAccessFlags>(imb.srcAccessMask),
                        static_cast<VkAccessFlags>(imb.dstAccessMask),
                        imb.oldLayout,
                        imb.newLayout,
                        imb.image,
                        imb.subresourceRange,
                        imb.srcQueueFamilyIndex,
                        imb.dstQueueFamilyIndex
                    )
                );
            }
            pImageMemoryBarriers = &imageMemoryBarriers[0];
        }

        DE_ASSERT(isStageFlagAllowed(srcStageMask));
        DE_ASSERT(isStageFlagAllowed(dstStageMask));
        m_vk.cmdWaitEvents(commandBuffer, eventCount, pEvents,
            static_cast<VkPipelineStageFlags>(srcStageMask), static_cast<VkPipelineStageFlags>(dstStageMask),
            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }

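    // Builds the actual VkSubmitInfo array: the indices stored by addSubmitInfo are
    // resolved to pointers now that the member vectors can no longer reallocate.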
    VkResult queueSubmit (VkQueue queue, VkFence fence) override
    {
        // make sure submit info was added
        DE_ASSERT(!m_submitInfoData.empty());

        // make sure a separate LegacySynchronizationWrapper is created per single submit
        DE_ASSERT(!m_submitted);

        std::vector<VkSubmitInfo> submitInfo(m_submitInfoData.size(), { VK_STRUCTURE_TYPE_SUBMIT_INFO, DE_NULL, 0u, DE_NULL, DE_NULL, 0u, DE_NULL, 0u, DE_NULL });

        std::vector<VkTimelineSemaphoreSubmitInfo> timelineSemaphoreSubmitInfo;
        timelineSemaphoreSubmitInfo.reserve(m_submitInfoData.size());

        // translate indices from m_submitInfoData to pointers and construct VkSubmitInfo
        for (deUint32 i = 0; i < m_submitInfoData.size(); ++i)
        {
            auto&         data = m_submitInfoData[i];
            VkSubmitInfo& si   = submitInfo[i];

            si.waitSemaphoreCount   = data.waitSemaphoreCount;
            si.commandBufferCount   = data.commandBufferCount;
            si.signalSemaphoreCount = data.signalSemaphoreCount;

            if (data.waitSemaphoreValueIndexPlusOne || data.signalSemaphoreValueIndexPlusOne)
            {
                deUint64* pWaitSemaphoreValues = DE_NULL;
                if (data.waitSemaphoreValueIndexPlusOne)
                    pWaitSemaphoreValues = &m_timelineSemaphoreValues[data.waitSemaphoreValueIndexPlusOne - 1];

                deUint64* pSignalSemaphoreValues = DE_NULL;
                if (data.signalSemaphoreValueIndexPlusOne)
                    pSignalSemaphoreValues = &m_timelineSemaphoreValues[data.signalSemaphoreValueIndexPlusOne - 1];

                timelineSemaphoreSubmitInfo.push_back({
                    VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, // VkStructureType sType;
                    DE_NULL,                                          // const void* pNext;
                    data.waitSemaphoreCount,                          // deUint32 waitSemaphoreValueCount
                    pWaitSemaphoreValues,                             // const deUint64* pWaitSemaphoreValues
                    data.signalSemaphoreCount,                        // deUint32 signalSemaphoreValueCount
                    pSignalSemaphoreValues                            // const deUint64* pSignalSemaphoreValues
                });
                si.pNext = &timelineSemaphoreSubmitInfo.back();
            }

            if (data.waitSemaphoreCount)
            {
                si.pWaitSemaphores   = &m_waitSemaphores[data.waitSemaphoreIndex];
                si.pWaitDstStageMask = &m_waitDstStageMasks[data.waitSemaphoreIndex];
            }

            if (data.commandBufferCount)
                si.pCommandBuffers = &m_commandBuffers[data.commandBufferIndex];

            if (data.signalSemaphoreCount)
                si.pSignalSemaphores = &m_signalSemaphores[data.signalSemaphoreIndex];
        }

        m_submitted = DE_TRUE;
        return m_vk.queueSubmit(queue, static_cast<deUint32>(submitInfo.size()), &submitInfo[0], fence);
    }

protected:

    std::vector<VkSemaphore>          m_waitSemaphores;
    std::vector<VkSemaphore>          m_signalSemaphores;
    std::vector<VkPipelineStageFlags> m_waitDstStageMasks;
    std::vector<VkCommandBuffer>      m_commandBuffers;
    std::vector<SubmitInfoData>       m_submitInfoData;
    std::vector<deUint64>             m_timelineSemaphoreValues;
    bool                              m_submitted;
};

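// Thin wrapper that forwards directly to the VK_KHR_synchronization2 entry points
// (core names in regular CTS builds, *KHR aliases when building for Vulkan SC).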
class Synchronization2Wrapper : public SynchronizationWrapperBase
{
public:
    Synchronization2Wrapper (const DeviceInterface& vk, deUint32 submitInfoCount)
        : SynchronizationWrapperBase(vk)
    {
        m_submitInfo.reserve(submitInfoCount);
    }

    ~Synchronization2Wrapper() = default;

    void addSubmitInfo (deUint32                         waitSemaphoreInfoCount,
                        const VkSemaphoreSubmitInfo*     pWaitSemaphoreInfos,
                        deUint32                         commandBufferInfoCount,
                        const VkCommandBufferSubmitInfo* pCommandBufferInfos,
                        deUint32                         signalSemaphoreInfoCount,
                        const VkSemaphoreSubmitInfo*     pSignalSemaphoreInfos,
                        bool                             usingWaitTimelineSemaphore,
                        bool                             usingSignalTimelineSemaphore) override
    {
        DE_UNREF(usingWaitTimelineSemaphore);
        DE_UNREF(usingSignalTimelineSemaphore);

        m_submitInfo.push_back(VkSubmitInfo2{
#ifndef CTS_USES_VULKANSC
            VK_STRUCTURE_TYPE_SUBMIT_INFO_2,     // VkStructureType sType
#else
            VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR, // VkStructureType sType
#endif // CTS_USES_VULKANSC
            DE_NULL,                             // const void* pNext
            0u,                                  // VkSubmitFlags flags
            waitSemaphoreInfoCount,              // deUint32 waitSemaphoreInfoCount
            pWaitSemaphoreInfos,                 // const VkSemaphoreSubmitInfo* pWaitSemaphoreInfos
            commandBufferInfoCount,              // deUint32 commandBufferInfoCount
            pCommandBufferInfos,                 // const VkCommandBufferSubmitInfo* pCommandBufferInfos
            signalSemaphoreInfoCount,            // deUint32 signalSemaphoreInfoCount
            pSignalSemaphoreInfos                // const VkSemaphoreSubmitInfo* pSignalSemaphoreInfos
        });
    }

    void cmdPipelineBarrier (VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo) const override
    {
#ifndef CTS_USES_VULKANSC
        m_vk.cmdPipelineBarrier2(commandBuffer, pDependencyInfo);
#else
        m_vk.cmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo);
#endif // CTS_USES_VULKANSC
    }

    void cmdSetEvent (VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo) const override
    {
#ifndef CTS_USES_VULKANSC
        m_vk.cmdSetEvent2(commandBuffer, event, pDependencyInfo);
#else
        m_vk.cmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
#endif // CTS_USES_VULKANSC
    }

    void cmdWaitEvents (VkCommandBuffer commandBuffer, deUint32 eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfo) const override
    {
#ifndef CTS_USES_VULKANSC
        m_vk.cmdWaitEvents2(commandBuffer, eventCount, pEvents, pDependencyInfo);
#else
        m_vk.cmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfo);
#endif // CTS_USES_VULKANSC
    }

    void cmdResetEvent (VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 flag) const override
    {
#ifndef CTS_USES_VULKANSC
        m_vk.cmdResetEvent2(commandBuffer, event, flag);
#else
        m_vk.cmdResetEvent2KHR(commandBuffer, event, flag);
#endif // CTS_USES_VULKANSC
    }

    VkResult queueSubmit (VkQueue queue, VkFence fence) override
    {
#ifndef CTS_USES_VULKANSC
        return m_vk.queueSubmit2(queue, static_cast<deUint32>(m_submitInfo.size()), &m_submitInfo[0], fence);
#else
        return m_vk.queueSubmit2KHR(queue, static_cast<deUint32>(m_submitInfo.size()), &m_submitInfo[0], fence);
#endif // CTS_USES_VULKANSC
    }

protected:

    std::vector<VkSubmitInfo2> m_submitInfo;
};

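// Factory helper: returns the wrapper matching the requested synchronization type so tests
// can drive barriers, events and submits through one interface. Typical use (sketch):
//   SynchronizationWrapperPtr sw = getSynchronizationWrapper(type, vk, false, 1u);
//   submitCommandsAndWait(sw, vk, device, queue, cmdBuffer);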
SynchronizationWrapperPtr getSynchronizationWrapper (SynchronizationType    type,
                                                     const DeviceInterface& vk,
                                                     bool                   usingTimelineSemaphores,
                                                     deUint32               submitInfoCount)
{
    return (type == SynchronizationType::LEGACY)
        ? SynchronizationWrapperPtr(new LegacySynchronizationWrapper(vk, usingTimelineSemaphores, submitInfoCount))
        : SynchronizationWrapperPtr(new Synchronization2Wrapper(vk, submitInfoCount));
}

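// Submits a single command buffer with no semaphore waits or signals, then blocks until
// the fence signals.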
void submitCommandsAndWait (SynchronizationWrapperPtr synchronizationWrapper,
                            const DeviceInterface&    vk,
                            const VkDevice            device,
                            const VkQueue             queue,
                            const VkCommandBuffer     cmdBuffer)
{
    VkCommandBufferSubmitInfo commandBufferInfo = makeCommonCommandBufferSubmitInfo(cmdBuffer);

    synchronizationWrapper->addSubmitInfo(
        0u,                 // deUint32 waitSemaphoreInfoCount
        DE_NULL,            // const VkSemaphoreSubmitInfo* pWaitSemaphoreInfos
        1u,                 // deUint32 commandBufferInfoCount
        &commandBufferInfo, // const VkCommandBufferSubmitInfo* pCommandBufferInfos
        0u,                 // deUint32 signalSemaphoreInfoCount
        DE_NULL             // const VkSemaphoreSubmitInfo* pSignalSemaphoreInfos
    );

    const Unique<VkFence> fence(createFence(vk, device));
    VK_CHECK(synchronizationWrapper->queueSubmit(queue, *fence));
    VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
}


void requireFeatures (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const FeatureFlags flags)
{
    const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice);

    if (((flags & FEATURE_TESSELLATION_SHADER) != 0) && !features.tessellationShader)
        throw tcu::NotSupportedError("Tessellation shader not supported");

    if (((flags & FEATURE_GEOMETRY_SHADER) != 0) && !features.geometryShader)
        throw tcu::NotSupportedError("Geometry shader not supported");

    if (((flags & FEATURE_SHADER_FLOAT_64) != 0) && !features.shaderFloat64)
        throw tcu::NotSupportedError("Double-precision floats not supported");

    if (((flags & FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS) != 0) && !features.vertexPipelineStoresAndAtomics)
        throw tcu::NotSupportedError("SSBO and image writes not supported in vertex pipeline");

    if (((flags & FEATURE_FRAGMENT_STORES_AND_ATOMICS) != 0) && !features.fragmentStoresAndAtomics)
        throw tcu::NotSupportedError("SSBO and image writes not supported in fragment shader");

    if (((flags & FEATURE_SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE) != 0) && !features.shaderTessellationAndGeometryPointSize)
        throw tcu::NotSupportedError("Tessellation and geometry shaders don't support PointSize built-in");
}


void requireStorageImageSupport (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat fmt, const VkImageTiling tiling)
{
    const VkFormatProperties p        = getPhysicalDeviceFormatProperties(vki, physDevice, fmt);
    const auto&              features = ((tiling == VK_IMAGE_TILING_LINEAR) ? p.linearTilingFeatures : p.optimalTilingFeatures);

    if ((features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) == 0)
        throw tcu::NotSupportedError("Storage image format not supported");
}


std::string getResourceName (const ResourceDescription& resource)
{
    std::ostringstream str;

    if ((resource.type == RESOURCE_TYPE_BUFFER) ||
        (resource.type == RESOURCE_TYPE_INDEX_BUFFER))
    {
        str << "buffer_" << resource.size.x();
    }
    else if (resource.type == RESOURCE_TYPE_IMAGE)
    {
        str << "image_" << resource.size.x()
            << (resource.size.y() > 0 ? "x" + de::toString(resource.size.y()) : "")
            << (resource.size.z() > 0 ? "x" + de::toString(resource.size.z()) : "")
            << "_" << de::toLower(getFormatName(resource.imageFormat)).substr(10); // skip the "vk_format_" prefix
    }
    else if (isIndirectBuffer(resource.type))
        str << "indirect_buffer";
    else
        DE_ASSERT(0);

    return str.str();
}


bool isIndirectBuffer (const ResourceType type)
{
    switch (type)
    {
        case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
        case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
        case RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH:
            return true;

        default:
            return false;
    }
}


VkCommandBufferSubmitInfo makeCommonCommandBufferSubmitInfo (const VkCommandBuffer cmdBuf)
{
    return
    {
#ifndef CTS_USES_VULKANSC
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,     // VkStructureType sType
#else
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR, // VkStructureType sType
#endif // CTS_USES_VULKANSC
        DE_NULL,                                          // const void* pNext
        cmdBuf,                                           // VkCommandBuffer commandBuffer
        0u                                                // uint32_t deviceMask
    };
}


VkSemaphoreSubmitInfo makeCommonSemaphoreSubmitInfo (VkSemaphore semaphore, deUint64 value, VkPipelineStageFlags2 stageMask)
{
    return
    {
#ifndef CTS_USES_VULKANSC
        VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,     // VkStructureType sType
#else
        VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR, // VkStructureType sType
#endif // CTS_USES_VULKANSC
        DE_NULL,                                     // const void* pNext
        semaphore,                                   // VkSemaphore semaphore
        value,                                       // deUint64 value
        stageMask,                                   // VkPipelineStageFlags2 stageMask
        0u                                           // deUint32 deviceIndex
    };
}


VkDependencyInfo makeCommonDependencyInfo (const VkMemoryBarrier2* pMemoryBarrier, const VkBufferMemoryBarrier2* pBufferMemoryBarrier, const VkImageMemoryBarrier2* pImageMemoryBarrier,
                                           bool eventDependency)
{
    return
    {
#ifndef CTS_USES_VULKANSC
        VK_STRUCTURE_TYPE_DEPENDENCY_INFO,     // VkStructureType sType
#else
        VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType
#endif // CTS_USES_VULKANSC
        DE_NULL,                               // const void* pNext
        eventDependency ? (VkDependencyFlags)0u : (VkDependencyFlags)VK_DEPENDENCY_BY_REGION_BIT, // VkDependencyFlags dependencyFlags
        !!pMemoryBarrier,                      // deUint32 memoryBarrierCount
        pMemoryBarrier,                        // const VkMemoryBarrier2KHR* pMemoryBarriers
        !!pBufferMemoryBarrier,                // deUint32 bufferMemoryBarrierCount
        pBufferMemoryBarrier,                  // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers
        !!pImageMemoryBarrier,                 // deUint32 imageMemoryBarrierCount
        pImageMemoryBarrier                    // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers
    };
}

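// PipelineCacheData accesses its stored cache payload under m_lock, so a single instance
// can be shared by code building pipelines concurrently.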
PipelineCacheData::PipelineCacheData (void)
{
}

PipelineCacheData::~PipelineCacheData (void)
{
}

vk::Move<VkPipelineCache> PipelineCacheData::createPipelineCache (const vk::DeviceInterface& vk, const vk::VkDevice device, de::SharedPtr<vk::ResourceInterface> resourceInterface) const
{
#ifndef CTS_USES_VULKANSC
    DE_UNREF(resourceInterface);
#endif
    const de::ScopedLock dataLock (m_lock);
    const struct vk::VkPipelineCacheCreateInfo params =
    {
        vk::VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
        DE_NULL,
#ifndef CTS_USES_VULKANSC
        (vk::VkPipelineCacheCreateFlags)0,
        (deUintptr)m_data.size(),
        (m_data.empty() ? DE_NULL : &m_data[0])
#else
        VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
        VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,
        resourceInterface->getCacheDataSize(), // deUintptr initialDataSize;
        resourceInterface->getCacheData()      // const void* pInitialData;
#endif // CTS_USES_VULKANSC
    };

    return vk::createPipelineCache(vk, device, &params);
}


void PipelineCacheData::setFromPipelineCache (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineCache pipelineCache)
{
    const de::ScopedLock dataLock (m_lock);

#ifndef CTS_USES_VULKANSC
    deUintptr dataSize = 0;
    VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, DE_NULL));

    m_data.resize(dataSize);

    if (dataSize > 0)
        VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, &m_data[0]));
#else
    DE_UNREF(vk);
    DE_UNREF(device);
    DE_UNREF(pipelineCache);
#endif
}
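
// Helpers that pick either the default test context device/queue or the video device
// when one is provided.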
vk::VkDevice getSyncDevice (de::MovePtr<VideoDevice>& device, Context& context)
{
    if (device == DE_NULL)
        return context.getDevice();
    else
        return device->getDeviceSupportingQueue();
}

const vk::DeviceInterface& getSyncDeviceInterface (de::MovePtr<VideoDevice>& device, Context& context)
{
    if (device == DE_NULL)
        return context.getDeviceInterface();
    else
        return device->getDeviceDriver();
}

deUint32 getSyncQueueFamilyIndex (de::MovePtr<VideoDevice>& device, Context& context)
{
    if (device == DE_NULL)
        return context.getUniversalQueueFamilyIndex();
    else
        return device->getQueueFamilyVideo();
}

vk::VkQueue getSyncQueue (de::MovePtr<VideoDevice>& device, Context& context)
{
    if (device == DE_NULL)
        return context.getUniversalQueue();
    else
        return getDeviceQueue(device->getDeviceDriver(), device->getDeviceSupportingQueue(), device->getQueueFamilyVideo(), 0u);
}

} // synchronization
} // vkt