1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Synchronization tests utilities
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktSynchronizationUtil.hpp"
25 #include "vkTypeUtil.hpp"
26 #include "vkCmdUtil.hpp"
27 #include "vkBarrierUtil.hpp"
28 #include "deStringUtil.hpp"
29 #include <set>
30
31 namespace vkt
32 {
33 namespace synchronization
34 {
35 using namespace vk;
36
makeCommandBuffer(const DeviceInterface & vk,const VkDevice device,const VkCommandPool commandPool)37 Move<VkCommandBuffer> makeCommandBuffer (const DeviceInterface& vk, const VkDevice device, const VkCommandPool commandPool)
38 {
39 const VkCommandBufferAllocateInfo info =
40 {
41 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
42 DE_NULL, // const void* pNext;
43 commandPool, // VkCommandPool commandPool;
44 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // VkCommandBufferLevel level;
45 1u, // deUint32 commandBufferCount;
46 };
47 return allocateCommandBuffer(vk, device, &info);
48 }
49
makeComputePipeline(const DeviceInterface & vk,const VkDevice device,const VkPipelineLayout pipelineLayout,const VkShaderModule shaderModule,const VkSpecializationInfo * specInfo,PipelineCacheData & pipelineCacheData)50 Move<VkPipeline> makeComputePipeline (const DeviceInterface& vk,
51 const VkDevice device,
52 const VkPipelineLayout pipelineLayout,
53 const VkShaderModule shaderModule,
54 const VkSpecializationInfo* specInfo,
55 PipelineCacheData& pipelineCacheData)
56 {
57 const VkPipelineShaderStageCreateInfo shaderStageInfo =
58 {
59 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
60 DE_NULL, // const void* pNext;
61 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
62 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
63 shaderModule, // VkShaderModule module;
64 "main", // const char* pName;
65 specInfo, // const VkSpecializationInfo* pSpecializationInfo;
66 };
67 const VkComputePipelineCreateInfo pipelineInfo =
68 {
69 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
70 DE_NULL, // const void* pNext;
71 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
72 shaderStageInfo, // VkPipelineShaderStageCreateInfo stage;
73 pipelineLayout, // VkPipelineLayout layout;
74 DE_NULL, // VkPipeline basePipelineHandle;
75 0, // deInt32 basePipelineIndex;
76 };
77
78 {
79 const vk::Unique<vk::VkPipelineCache> pipelineCache (pipelineCacheData.createPipelineCache(vk, device));
80 vk::Move<vk::VkPipeline> pipeline (createComputePipeline(vk, device, *pipelineCache, &pipelineInfo));
81
82 // Refresh data from cache
83 pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);
84
85 return pipeline;
86 }
87 }
88
makeImageCreateInfo(const VkImageType imageType,const VkExtent3D & extent,const VkFormat format,const VkImageUsageFlags usage,const VkSampleCountFlagBits samples)89 VkImageCreateInfo makeImageCreateInfo (const VkImageType imageType,
90 const VkExtent3D& extent,
91 const VkFormat format,
92 const VkImageUsageFlags usage,
93 const VkSampleCountFlagBits samples)
94 {
95 return
96 {
97 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
98 DE_NULL, // const void* pNext;
99 (VkImageCreateFlags)0, // VkImageCreateFlags flags;
100 imageType, // VkImageType imageType;
101 format, // VkFormat format;
102 extent, // VkExtent3D extent;
103 1u, // uint32_t mipLevels;
104 1u, // uint32_t arrayLayers;
105 samples, // VkSampleCountFlagBits samples;
106 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
107 usage, // VkImageUsageFlags usage;
108 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
109 0u, // uint32_t queueFamilyIndexCount;
110 DE_NULL, // const uint32_t* pQueueFamilyIndices;
111 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
112 };
113 }
114
beginRenderPassWithRasterizationDisabled(const DeviceInterface & vk,const VkCommandBuffer commandBuffer,const VkRenderPass renderPass,const VkFramebuffer framebuffer)115 void beginRenderPassWithRasterizationDisabled (const DeviceInterface& vk,
116 const VkCommandBuffer commandBuffer,
117 const VkRenderPass renderPass,
118 const VkFramebuffer framebuffer)
119 {
120 const VkRect2D renderArea = {{ 0, 0 }, { 0, 0 }};
121
122 beginRenderPass(vk, commandBuffer, renderPass, framebuffer, renderArea);
123 }
124
setShader(const DeviceInterface & vk,const VkDevice device,const VkShaderStageFlagBits stage,const ProgramBinary & binary,const VkSpecializationInfo * specInfo)125 GraphicsPipelineBuilder& GraphicsPipelineBuilder::setShader (const DeviceInterface& vk,
126 const VkDevice device,
127 const VkShaderStageFlagBits stage,
128 const ProgramBinary& binary,
129 const VkSpecializationInfo* specInfo)
130 {
131 VkShaderModule module;
132 switch (stage)
133 {
134 case (VK_SHADER_STAGE_VERTEX_BIT):
135 DE_ASSERT(m_vertexShaderModule.get() == DE_NULL);
136 m_vertexShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
137 module = *m_vertexShaderModule;
138 break;
139
140 case (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT):
141 DE_ASSERT(m_tessControlShaderModule.get() == DE_NULL);
142 m_tessControlShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
143 module = *m_tessControlShaderModule;
144 break;
145
146 case (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT):
147 DE_ASSERT(m_tessEvaluationShaderModule.get() == DE_NULL);
148 m_tessEvaluationShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
149 module = *m_tessEvaluationShaderModule;
150 break;
151
152 case (VK_SHADER_STAGE_GEOMETRY_BIT):
153 DE_ASSERT(m_geometryShaderModule.get() == DE_NULL);
154 m_geometryShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
155 module = *m_geometryShaderModule;
156 break;
157
158 case (VK_SHADER_STAGE_FRAGMENT_BIT):
159 DE_ASSERT(m_fragmentShaderModule.get() == DE_NULL);
160 m_fragmentShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
161 module = *m_fragmentShaderModule;
162 break;
163
164 default:
165 DE_FATAL("Invalid shader stage");
166 return *this;
167 }
168
169 const VkPipelineShaderStageCreateInfo pipelineShaderStageInfo =
170 {
171 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
172 DE_NULL, // const void* pNext;
173 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
174 stage, // VkShaderStageFlagBits stage;
175 module, // VkShaderModule module;
176 "main", // const char* pName;
177 specInfo, // const VkSpecializationInfo* pSpecializationInfo;
178 };
179
180 m_shaderStageFlags |= stage;
181 m_shaderStages.push_back(pipelineShaderStageInfo);
182
183 return *this;
184 }
185
setVertexInputSingleAttribute(const VkFormat vertexFormat,const deUint32 stride)186 GraphicsPipelineBuilder& GraphicsPipelineBuilder::setVertexInputSingleAttribute (const VkFormat vertexFormat, const deUint32 stride)
187 {
188 const VkVertexInputBindingDescription bindingDesc =
189 {
190 0u, // uint32_t binding;
191 stride, // uint32_t stride;
192 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
193 };
194 const VkVertexInputAttributeDescription attributeDesc =
195 {
196 0u, // uint32_t location;
197 0u, // uint32_t binding;
198 vertexFormat, // VkFormat format;
199 0u, // uint32_t offset;
200 };
201
202 m_vertexInputBindings.clear();
203 m_vertexInputBindings.push_back(bindingDesc);
204
205 m_vertexInputAttributes.clear();
206 m_vertexInputAttributes.push_back(attributeDesc);
207
208 return *this;
209 }
210
211 template<typename T>
dataPointer(const std::vector<T> & vec)212 inline const T* dataPointer (const std::vector<T>& vec)
213 {
214 return (vec.size() != 0 ? &vec[0] : DE_NULL);
215 }
216
Move<VkPipeline> GraphicsPipelineBuilder::build (const DeviceInterface& vk,
												 const VkDevice device,
												 const VkPipelineLayout pipelineLayout,
												 const VkRenderPass renderPass,
												 PipelineCacheData& pipelineCacheData)
{
	// Create a graphics pipeline from all the state accumulated in this
	// builder (shader stages, vertex input, topology, patch control points,
	// render size, cull mode, front face, blend enable). The pipeline is
	// created through a VkPipelineCache backed by pipelineCacheData, and the
	// cache contents are written back afterwards so later builds can reuse them.

	const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
		static_cast<deUint32>(m_vertexInputBindings.size()), // uint32_t vertexBindingDescriptionCount;
		dataPointer(m_vertexInputBindings), // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
		static_cast<deUint32>(m_vertexInputAttributes.size()), // uint32_t vertexAttributeDescriptionCount;
		dataPointer(m_vertexInputAttributes), // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
	};

	// Tessellation requires patch-list topology, overriding whatever topology
	// was requested via the builder.
	const VkPrimitiveTopology topology = (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
										 : m_primitiveTopology;
	const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
		topology, // VkPrimitiveTopology topology;
		VK_FALSE, // VkBool32 primitiveRestartEnable;
	};

	// Only referenced from the create info when a tess. control stage is present.
	const VkPipelineTessellationStateCreateInfo pipelineTessellationStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineTessellationStateCreateFlags)0, // VkPipelineTessellationStateCreateFlags flags;
		m_patchControlPoints, // uint32_t patchControlPoints;
	};

	// Single static viewport/scissor covering the configured render size.
	const VkViewport viewport = makeViewport(m_renderSize);
	const VkRect2D scissor = makeRect2D(m_renderSize);

	const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
		1u, // uint32_t viewportCount;
		&viewport, // const VkViewport* pViewports;
		1u, // uint32_t scissorCount;
		&scissor, // const VkRect2D* pScissors;
	};

	// No fragment shader means rasterization is discarded; the dependent
	// state blocks are then left out of the create info below.
	const bool isRasterizationDisabled = ((m_shaderStageFlags & VK_SHADER_STAGE_FRAGMENT_BIT) == 0);
	const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
		VK_FALSE, // VkBool32 depthClampEnable;
		isRasterizationDisabled, // VkBool32 rasterizerDiscardEnable;
		VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
		m_cullModeFlags, // VkCullModeFlags cullMode;
		m_frontFace, // VkFrontFace frontFace;
		VK_FALSE, // VkBool32 depthBiasEnable;
		0.0f, // float depthBiasConstantFactor;
		0.0f, // float depthBiasClamp;
		0.0f, // float depthBiasSlopeFactor;
		1.0f, // float lineWidth;
	};

	const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
		VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
		VK_FALSE, // VkBool32 sampleShadingEnable;
		0.0f, // float minSampleShading;
		DE_NULL, // const VkSampleMask* pSampleMask;
		VK_FALSE, // VkBool32 alphaToCoverageEnable;
		VK_FALSE // VkBool32 alphaToOneEnable;
	};

	// Stencil is disabled below; this op state is an inert placeholder.
	const VkStencilOpState stencilOpState = makeStencilOpState(
		VK_STENCIL_OP_KEEP, // stencil fail
		VK_STENCIL_OP_KEEP, // depth & stencil pass
		VK_STENCIL_OP_KEEP, // depth only fail
		VK_COMPARE_OP_NEVER, // compare op
		0u, // compare mask
		0u, // write mask
		0u); // reference

	const VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
		VK_FALSE, // VkBool32 depthTestEnable;
		VK_FALSE, // VkBool32 depthWriteEnable;
		VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
		VK_FALSE, // VkBool32 depthBoundsTestEnable;
		VK_FALSE, // VkBool32 stencilTestEnable;
		stencilOpState, // VkStencilOpState front;
		stencilOpState, // VkStencilOpState back;
		0.0f, // float minDepthBounds;
		1.0f, // float maxDepthBounds;
	};

	const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
	// Additive src-alpha blending when m_blendEnable is set, all channels written.
	const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
	{
		m_blendEnable, // VkBool32 blendEnable;
		VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcColorBlendFactor;
		VK_BLEND_FACTOR_ONE, // VkBlendFactor dstColorBlendFactor;
		VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
		VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcAlphaBlendFactor;
		VK_BLEND_FACTOR_ONE, // VkBlendFactor dstAlphaBlendFactor;
		VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
		colorComponentsAll, // VkColorComponentFlags colorWriteMask;
	};

	const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
		VK_FALSE, // VkBool32 logicOpEnable;
		VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
		1u, // deUint32 attachmentCount;
		&pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
		{ 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
	};

	const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
	{
		VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		(VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
		static_cast<deUint32>(m_shaderStages.size()), // deUint32 stageCount;
		&m_shaderStages[0], // const VkPipelineShaderStageCreateInfo* pStages;
		&vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
		&pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
		(m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? &pipelineTessellationStateInfo : DE_NULL), // const VkPipelineTessellationStateCreateInfo* pTessellationState;
		(isRasterizationDisabled ? DE_NULL : &pipelineViewportStateInfo), // const VkPipelineViewportStateCreateInfo* pViewportState;
		&pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
		(isRasterizationDisabled ? DE_NULL : &pipelineMultisampleStateInfo), // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
		(isRasterizationDisabled ? DE_NULL : &pipelineDepthStencilStateInfo), // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
		(isRasterizationDisabled ? DE_NULL : &pipelineColorBlendStateInfo), // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
		DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
		pipelineLayout, // VkPipelineLayout layout;
		renderPass, // VkRenderPass renderPass;
		0u, // deUint32 subpass;
		DE_NULL, // VkPipeline basePipelineHandle;
		0, // deInt32 basePipelineIndex;
	};

	{
		const vk::Unique<vk::VkPipelineCache> pipelineCache (pipelineCacheData.createPipelineCache(vk, device));
		vk::Move<vk::VkPipeline> pipeline (createGraphicsPipeline(vk, device, *pipelineCache, &graphicsPipelineInfo));

		// Refresh data from cache so it can be reused by subsequent builds.
		pipelineCacheData.setFromPipelineCache(vk, device, *pipelineCache);

		return pipeline;
	}
}
381
// Uses some structures added by VK_KHR_synchronization2 to fill legacy structures.
// With this approach we don't have to create a branch in each test (one for legacy
// and one for the new synchronization); this helps to reduce the code of some tests.
385 class LegacySynchronizationWrapper : public SynchronizationWrapperBase
386 {
387 protected:
388
	// Bookkeeping for one submit: counts plus start indices into the flat
	// m_waitSemaphores / m_commandBuffers / m_signalSemaphores /
	// m_timelineSemaphoreValues arrays filled by addSubmitInfo().
	struct SubmitInfoData
	{
		deUint32	waitSemaphoreCount;
		std::size_t	waitSemaphoreIndex;					// first index into wait-semaphore/stage-mask arrays
		std::size_t	waitSemaphoreValueIndexPlusOne;		// 1-based index into m_timelineSemaphoreValues; 0 = no timeline wait values
		deUint32	commandBufferCount;
		deUint32	commandBufferIndex;					// first index into m_commandBuffers
		deUint32	signalSemaphoreCount;
		std::size_t	signalSemaphoreIndex;				// first index into m_signalSemaphores
		std::size_t	signalSemaphoreValueIndexPlusOne;	// 1-based index into m_timelineSemaphoreValues; 0 = no timeline signal values
	};
400
isStageFlagAllowed(VkPipelineStageFlags2KHR stage) const401 bool isStageFlagAllowed(VkPipelineStageFlags2KHR stage) const
402 {
403 // synchronization2 suports more stages then legacy synchronization
404 // and so SynchronizationWrapper can only be used for cases that
405 // operate on stages also supported by legacy synchronization
406 // NOTE: if some tests hits assertion that uses this method then this
407 // test should not use synchronizationWrapper - it should be synchronization2 exclusive
408
409 static const std::set<deUint32> allowedStages
410 {
411 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
412 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
413 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
414 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
415 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
416 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
417 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
418 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
419 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
420 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
421 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
422 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
423 VK_PIPELINE_STAGE_TRANSFER_BIT,
424 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
425 VK_PIPELINE_STAGE_HOST_BIT,
426 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
427 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
428 VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
429 VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
430 VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,
431 VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
432 VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
433 VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
434 VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
435 VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
436 VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
437 VK_PIPELINE_STAGE_NONE_KHR,
438 };
439
440 if (stage > static_cast<deUint64>(std::numeric_limits<deUint32>::max()))
441 return false;
442
443 return (allowedStages.find(static_cast<deUint32>(stage)) != allowedStages.end());
444 }
445
isAccessFlagAllowed(VkAccessFlags2KHR access) const446 bool isAccessFlagAllowed(VkAccessFlags2KHR access) const
447 {
448 // synchronization2 suports more access flags then legacy synchronization
449 // and so SynchronizationWrapper can only be used for cases that
450 // operate on access flags also supported by legacy synchronization
451 // NOTE: if some tests hits assertion that uses this method then this
452 // test should not use synchronizationWrapper - it should be synchronization2 exclusive
453
454 static const std::set<deUint32> allowedAccessFlags
455 {
456 VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
457 VK_ACCESS_INDEX_READ_BIT,
458 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
459 VK_ACCESS_UNIFORM_READ_BIT,
460 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
461 VK_ACCESS_SHADER_READ_BIT,
462 VK_ACCESS_SHADER_WRITE_BIT,
463 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
464 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
465 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
466 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
467 VK_ACCESS_TRANSFER_READ_BIT,
468 VK_ACCESS_TRANSFER_WRITE_BIT,
469 VK_ACCESS_HOST_READ_BIT,
470 VK_ACCESS_HOST_WRITE_BIT,
471 VK_ACCESS_MEMORY_READ_BIT,
472 VK_ACCESS_MEMORY_WRITE_BIT,
473 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT,
474 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
475 VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
476 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
477 VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT,
478 VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR,
479 VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
480 VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV ,
481 VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT,
482 VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV,
483 VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV,
484 VK_ACCESS_NONE_KHR,
485 };
486
487 if (access > static_cast<deUint64>(std::numeric_limits<deUint32>::max()))
488 return false;
489
490 return (allowedAccessFlags.find(static_cast<deUint32>(access)) != allowedAccessFlags.end());
491 }
492
493 public:
	// submitInfoCount is only a capacity hint: it sizes the internal arrays for
	// the expected number of addSubmitInfo() calls. When timeline semaphores are
	// used, room is reserved for up to one wait and one signal value per submit.
	LegacySynchronizationWrapper(const DeviceInterface& vk, bool usingTimelineSemaphores, deUint32 submitInfoCount = 1u)
		: SynchronizationWrapperBase	(vk)
		, m_submited					(DE_FALSE)
	{
		m_waitSemaphores.reserve(submitInfoCount);
		m_signalSemaphores.reserve(submitInfoCount);
		m_waitDstStageMasks.reserve(submitInfoCount);
		m_commandBuffers.reserve(submitInfoCount);
		m_submitInfoData.reserve(submitInfoCount);

		if (usingTimelineSemaphores)
			m_timelineSemaphoreValues.reserve(2 * submitInfoCount);
	}

	~LegacySynchronizationWrapper() = default;
509
	// Record one submit's worth of synchronization2-style data by flattening it
	// into the legacy-friendly member arrays. Each SubmitInfoData stores counts
	// plus start indices into those arrays; timeline values use a 1-based index
	// where 0 means "no timeline values recorded for this submit".
	void addSubmitInfo(deUint32								waitSemaphoreInfoCount,
					   const VkSemaphoreSubmitInfoKHR*		pWaitSemaphoreInfos,
					   deUint32								commandBufferInfoCount,
					   const VkCommandBufferSubmitInfoKHR*	pCommandBufferInfos,
					   deUint32								signalSemaphoreInfoCount,
					   const VkSemaphoreSubmitInfoKHR*		pSignalSemaphoreInfos,
					   bool									usingWaitTimelineSemaphore,
					   bool									usingSignalTimelineSemaphore) override
	{
		// Indices are patched in below; counts are final.
		m_submitInfoData.push_back(SubmitInfoData{ waitSemaphoreInfoCount, 0, 0, commandBufferInfoCount, 0u, signalSemaphoreInfoCount, 0, 0 });
		SubmitInfoData& si = m_submitInfoData.back();

		// memorize wait values (index is stored +1 so 0 can mean "none")
		if (usingWaitTimelineSemaphore)
		{
			DE_ASSERT(pWaitSemaphoreInfos);
			si.waitSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
			for (deUint32 i = 0; i < waitSemaphoreInfoCount; ++i)
				m_timelineSemaphoreValues.push_back(pWaitSemaphoreInfos[i].value);
		}

		// memorize signal values (index is stored +1 so 0 can mean "none")
		if (usingSignalTimelineSemaphore)
		{
			DE_ASSERT(pSignalSemaphoreInfos);
			si.signalSemaphoreValueIndexPlusOne = m_timelineSemaphoreValues.size() + 1;
			for (deUint32 i = 0; i < signalSemaphoreInfoCount; ++i)
				m_timelineSemaphoreValues.push_back(pSignalSemaphoreInfos[i].value);
		}

		// construct list of semaphores that we need to wait on,
		// narrowing each 64-bit stage mask to the legacy 32-bit type
		if (waitSemaphoreInfoCount)
		{
			si.waitSemaphoreIndex = m_waitSemaphores.size();
			for (deUint32 i = 0; i < waitSemaphoreInfoCount; ++i)
			{
				DE_ASSERT(isStageFlagAllowed(pWaitSemaphoreInfos[i].stageMask));
				m_waitSemaphores.push_back(pWaitSemaphoreInfos[i].semaphore);
				m_waitDstStageMasks.push_back(static_cast<VkPipelineStageFlags>(pWaitSemaphoreInfos[i].stageMask));
			}
		}

		// construct list of command buffers
		if (commandBufferInfoCount)
		{
			si.commandBufferIndex = static_cast<deUint32>(m_commandBuffers.size());
			for (deUint32 i = 0; i < commandBufferInfoCount; ++i)
				m_commandBuffers.push_back(pCommandBufferInfos[i].commandBuffer);
		}

		// construct list of semaphores that will be signaled
		if (signalSemaphoreInfoCount)
		{
			si.signalSemaphoreIndex = m_signalSemaphores.size();
			for (deUint32 i = 0; i < signalSemaphoreInfoCount; ++i)
				m_signalSemaphores.push_back(pSignalSemaphoreInfos[i].semaphore);
		}
	}
568
	// Translate a synchronization2 VkDependencyInfoKHR into a legacy
	// vkCmdPipelineBarrier call. The legacy call takes single src/dst stage
	// masks, so the per-barrier stage masks are OR-ed together. All stage and
	// access masks must be representable in the legacy 32-bit types
	// (asserted via isStageFlagAllowed / isAccessFlagAllowed).
	void cmdPipelineBarrier(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo) const override
	{
		DE_ASSERT(pDependencyInfo);

		VkPipelineStageFlags	srcStageMask				= VK_PIPELINE_STAGE_NONE_KHR;
		VkPipelineStageFlags	dstStageMask				= VK_PIPELINE_STAGE_NONE_KHR;
		deUint32				memoryBarrierCount			= pDependencyInfo->memoryBarrierCount;
		VkMemoryBarrier*		pMemoryBarriers				= DE_NULL;
		deUint32				bufferMemoryBarrierCount	= pDependencyInfo->bufferMemoryBarrierCount;
		VkBufferMemoryBarrier*	pBufferMemoryBarriers		= DE_NULL;
		deUint32				imageMemoryBarrierCount		= pDependencyInfo->imageMemoryBarrierCount;
		VkImageMemoryBarrier*	pImageMemoryBarriers		= DE_NULL;

		// translate VkMemoryBarrier2KHR to VkMemoryBarrier
		// (the vector must outlive the cmdPipelineBarrier call below, which
		// is why it lives at function scope rather than inside the if)
		std::vector<VkMemoryBarrier> memoryBarriers;
		if (memoryBarrierCount)
		{
			memoryBarriers.reserve(memoryBarrierCount);
			for (deUint32 i = 0; i < memoryBarrierCount; ++i)
			{
				const VkMemoryBarrier2KHR& pMemoryBarrier = pDependencyInfo->pMemoryBarriers[i];

				DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.srcStageMask));
				DE_ASSERT(isStageFlagAllowed(pMemoryBarrier.dstStageMask));
				DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.srcAccessMask));
				DE_ASSERT(isAccessFlagAllowed(pMemoryBarrier.dstAccessMask));

				srcStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.srcStageMask);
				dstStageMask |= static_cast<VkPipelineStageFlags>(pMemoryBarrier.dstStageMask);
				memoryBarriers.push_back(makeMemoryBarrier(
					static_cast<VkAccessFlags>(pMemoryBarrier.srcAccessMask),
					static_cast<VkAccessFlags>(pMemoryBarrier.dstAccessMask)
				));
			}
			pMemoryBarriers = &memoryBarriers[0];
		}

		// translate VkBufferMemoryBarrier2KHR to VkBufferMemoryBarrier
		std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
		if (bufferMemoryBarrierCount)
		{
			bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
			for (deUint32 i = 0; i < bufferMemoryBarrierCount; ++i)
			{
				const VkBufferMemoryBarrier2KHR& pBufferMemoryBarrier = pDependencyInfo->pBufferMemoryBarriers[i];

				DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.srcStageMask));
				DE_ASSERT(isStageFlagAllowed(pBufferMemoryBarrier.dstStageMask));
				DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.srcAccessMask));
				DE_ASSERT(isAccessFlagAllowed(pBufferMemoryBarrier.dstAccessMask));

				srcStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.srcStageMask);
				dstStageMask |= static_cast<VkPipelineStageFlags>(pBufferMemoryBarrier.dstStageMask);
				bufferMemoryBarriers.push_back(makeBufferMemoryBarrier(
					static_cast<VkAccessFlags>(pBufferMemoryBarrier.srcAccessMask),
					static_cast<VkAccessFlags>(pBufferMemoryBarrier.dstAccessMask),
					pBufferMemoryBarrier.buffer,
					pBufferMemoryBarrier.offset,
					pBufferMemoryBarrier.size,
					pBufferMemoryBarrier.srcQueueFamilyIndex,
					pBufferMemoryBarrier.dstQueueFamilyIndex
				));
			}
			pBufferMemoryBarriers = &bufferMemoryBarriers[0];
		}

		// translate VkImageMemoryBarrier2KHR to VkImageMemoryBarrier
		std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
		if (imageMemoryBarrierCount)
		{
			imageMemoryBarriers.reserve(imageMemoryBarrierCount);
			for (deUint32 i = 0; i < imageMemoryBarrierCount; ++i)
			{
				const VkImageMemoryBarrier2KHR& pImageMemoryBarrier = pDependencyInfo->pImageMemoryBarriers[i];

				DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.srcStageMask));
				DE_ASSERT(isStageFlagAllowed(pImageMemoryBarrier.dstStageMask));
				DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.srcAccessMask));
				DE_ASSERT(isAccessFlagAllowed(pImageMemoryBarrier.dstAccessMask));

				srcStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.srcStageMask);
				dstStageMask |= static_cast<VkPipelineStageFlags>(pImageMemoryBarrier.dstStageMask);
				imageMemoryBarriers.push_back(makeImageMemoryBarrier(
					static_cast<VkAccessFlags>(pImageMemoryBarrier.srcAccessMask),
					static_cast<VkAccessFlags>(pImageMemoryBarrier.dstAccessMask),
					pImageMemoryBarrier.oldLayout,
					pImageMemoryBarrier.newLayout,
					pImageMemoryBarrier.image,
					pImageMemoryBarrier.subresourceRange,
					pImageMemoryBarrier.srcQueueFamilyIndex,
					pImageMemoryBarrier.dstQueueFamilyIndex
				));
			}
			pImageMemoryBarriers = &imageMemoryBarriers[0];
		}

		m_vk.cmdPipelineBarrier(
			commandBuffer,
			srcStageMask,
			dstStageMask,
			(VkDependencyFlags)0,
			memoryBarrierCount,
			pMemoryBarriers,
			bufferMemoryBarrierCount,
			pBufferMemoryBarriers,
			imageMemoryBarrierCount,
			pImageMemoryBarriers
		);
	}
678
cmdSetEvent(VkCommandBuffer commandBuffer,VkEvent event,const VkDependencyInfoKHR * pDependencyInfo) const679 void cmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo) const override
680 {
681 DE_ASSERT(pDependencyInfo);
682
683 VkPipelineStageFlags2KHR srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
684 if (pDependencyInfo->pMemoryBarriers)
685 srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
686 if (pDependencyInfo->pBufferMemoryBarriers)
687 srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
688 if (pDependencyInfo->pImageMemoryBarriers)
689 srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;
690
691 DE_ASSERT(isStageFlagAllowed(srcStageMask));
692 m_vk.cmdSetEvent(commandBuffer, event, static_cast<VkPipelineStageFlags>(srcStageMask));
693 }
694
cmdResetEvent(VkCommandBuffer commandBuffer,VkEvent event,VkPipelineStageFlags2KHR flag) const695 void cmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR flag) const override
696 {
697 DE_ASSERT(isStageFlagAllowed(flag));
698 VkPipelineStageFlags legacyStageMask = static_cast<VkPipelineStageFlags>(flag);
699 m_vk.cmdResetEvent(commandBuffer, event, legacyStageMask);
700 }
701
cmdWaitEvents(VkCommandBuffer commandBuffer,deUint32 eventCount,const VkEvent * pEvents,const VkDependencyInfoKHR * pDependencyInfo) const702 void cmdWaitEvents(VkCommandBuffer commandBuffer, deUint32 eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfo) const override
703 {
704 DE_ASSERT(pDependencyInfo);
705
706 VkPipelineStageFlags2KHR srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR;
707 VkPipelineStageFlags2KHR dstStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR;
708 deUint32 memoryBarrierCount = pDependencyInfo->memoryBarrierCount;
709 deUint32 bufferMemoryBarrierCount = pDependencyInfo->bufferMemoryBarrierCount;
710 deUint32 imageMemoryBarrierCount = pDependencyInfo->imageMemoryBarrierCount;
711 VkMemoryBarrier* pMemoryBarriers = DE_NULL;
712 VkBufferMemoryBarrier* pBufferMemoryBarriers = DE_NULL;
713 VkImageMemoryBarrier* pImageMemoryBarriers = DE_NULL;
714 std::vector<VkMemoryBarrier> memoryBarriers;
715 std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
716 std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
717
718 if (pDependencyInfo->pMemoryBarriers)
719 {
720 srcStageMask = pDependencyInfo->pMemoryBarriers[0].srcStageMask;
721 dstStageMask = pDependencyInfo->pMemoryBarriers[0].dstStageMask;
722
723 memoryBarriers.reserve(memoryBarrierCount);
724 for (deUint32 i = 0; i < memoryBarrierCount; ++i)
725 {
726 const VkMemoryBarrier2KHR& mb = pDependencyInfo->pMemoryBarriers[i];
727 DE_ASSERT(isAccessFlagAllowed(mb.srcAccessMask));
728 DE_ASSERT(isAccessFlagAllowed(mb.dstAccessMask));
729 memoryBarriers.push_back(
730 makeMemoryBarrier(
731 static_cast<VkAccessFlags>(mb.srcAccessMask),
732 static_cast<VkAccessFlags>(mb.dstAccessMask)
733 )
734 );
735 }
736 pMemoryBarriers = &memoryBarriers[0];
737 }
738 if (pDependencyInfo->pBufferMemoryBarriers)
739 {
740 srcStageMask = pDependencyInfo->pBufferMemoryBarriers[0].srcStageMask;
741 dstStageMask = pDependencyInfo->pBufferMemoryBarriers[0].dstStageMask;
742
743 bufferMemoryBarriers.reserve(bufferMemoryBarrierCount);
744 for (deUint32 i = 0; i < bufferMemoryBarrierCount; ++i)
745 {
746 const VkBufferMemoryBarrier2KHR& bmb = pDependencyInfo->pBufferMemoryBarriers[i];
747 DE_ASSERT(isAccessFlagAllowed(bmb.srcAccessMask));
748 DE_ASSERT(isAccessFlagAllowed(bmb.dstAccessMask));
749 bufferMemoryBarriers.push_back(
750 makeBufferMemoryBarrier(
751 static_cast<VkAccessFlags>(bmb.srcAccessMask),
752 static_cast<VkAccessFlags>(bmb.dstAccessMask),
753 bmb.buffer,
754 bmb.offset,
755 bmb.size,
756 bmb.srcQueueFamilyIndex,
757 bmb.dstQueueFamilyIndex
758 )
759 );
760 }
761 pBufferMemoryBarriers = &bufferMemoryBarriers[0];
762 }
763 if (pDependencyInfo->pImageMemoryBarriers)
764 {
765 srcStageMask = pDependencyInfo->pImageMemoryBarriers[0].srcStageMask;
766 dstStageMask = pDependencyInfo->pImageMemoryBarriers[0].dstStageMask;
767
768 imageMemoryBarriers.reserve(imageMemoryBarrierCount);
769 for (deUint32 i = 0; i < imageMemoryBarrierCount; ++i)
770 {
771 const VkImageMemoryBarrier2KHR& imb = pDependencyInfo->pImageMemoryBarriers[i];
772 DE_ASSERT(isAccessFlagAllowed(imb.srcAccessMask));
773 DE_ASSERT(isAccessFlagAllowed(imb.dstAccessMask));
774 imageMemoryBarriers.push_back(
775 makeImageMemoryBarrier(
776 static_cast<VkAccessFlags>(imb.srcAccessMask),
777 static_cast<VkAccessFlags>(imb.dstAccessMask),
778 imb.oldLayout,
779 imb.newLayout,
780 imb.image,
781 imb.subresourceRange,
782 imb.srcQueueFamilyIndex,
783 imb.dstQueueFamilyIndex
784 )
785 );
786 }
787 pImageMemoryBarriers = &imageMemoryBarriers[0];
788 }
789
790 DE_ASSERT(isStageFlagAllowed(srcStageMask));
791 DE_ASSERT(isStageFlagAllowed(dstStageMask));
792 m_vk.cmdWaitEvents(commandBuffer, eventCount, pEvents,
793 static_cast<VkPipelineStageFlags>(srcStageMask), static_cast<VkPipelineStageFlags>(dstStageMask),
794 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
795 }
796
	// Builds legacy VkSubmitInfo structures from the recorded SubmitInfoData
	// entries and submits them all in a single vkQueueSubmit call.
	// May only be called once per wrapper instance (enforced by m_submited).
	VkResult queueSubmit(VkQueue queue, VkFence fence) override
	{
		// make sure submit info was added
		DE_ASSERT(!m_submitInfoData.empty());

		// make sure separate LegacySynchronizationWrapper is created per single submit
		DE_ASSERT(!m_submited);

		std::vector<VkSubmitInfo> submitInfo(m_submitInfoData.size(), { VK_STRUCTURE_TYPE_SUBMIT_INFO, DE_NULL, 0u, DE_NULL, DE_NULL, 0u, DE_NULL, 0u, DE_NULL });

		// NOTE: the reserve() below is essential for correctness, not just a
		// performance hint — si.pNext stores addresses of elements of this
		// vector, so it must never reallocate while entries are pushed back.
		std::vector<VkTimelineSemaphoreSubmitInfo> timelineSemaphoreSubmitInfo;
		timelineSemaphoreSubmitInfo.reserve(m_submitInfoData.size());

		// translate indexes from m_submitInfoData to pointers and construct VkSubmitInfo
		for (deUint32 i = 0; i < m_submitInfoData.size(); ++i)
		{
			auto& data = m_submitInfoData[i];
			VkSubmitInfo& si = submitInfo[i];

			si.waitSemaphoreCount = data.waitSemaphoreCount;
			si.commandBufferCount = data.commandBufferCount;
			si.signalSemaphoreCount = data.signalSemaphoreCount;

			// Attach timeline-semaphore values when this submit waits on or
			// signals one; the indexes are stored +1 so that 0 can mean
			// "no timeline value recorded".
			if (data.waitSemaphoreValueIndexPlusOne || data.signalSemaphoreValueIndexPlusOne)
			{
				deUint64* pWaitSemaphoreValues = DE_NULL;
				if (data.waitSemaphoreValueIndexPlusOne)
					pWaitSemaphoreValues = &m_timelineSemaphoreValues[data.waitSemaphoreValueIndexPlusOne - 1];

				deUint64* pSignalSemaphoreValues = DE_NULL;
				if (data.signalSemaphoreValueIndexPlusOne)
					pSignalSemaphoreValues = &m_timelineSemaphoreValues[data.signalSemaphoreValueIndexPlusOne - 1];

				timelineSemaphoreSubmitInfo.push_back({
					VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, // VkStructureType sType;
					DE_NULL, // const void* pNext;
					data.waitSemaphoreCount, // deUint32 waitSemaphoreValueCount
					pWaitSemaphoreValues, // const deUint64* pWaitSemaphoreValues
					data.signalSemaphoreCount, // deUint32 signalSemaphoreValueCount
					pSignalSemaphoreValues // const deUint64* pSignalSemaphoreValues
				});
				si.pNext = &timelineSemaphoreSubmitInfo.back();
			}

			if (data.waitSemaphoreCount)
			{
				si.pWaitSemaphores = &m_waitSemaphores[data.waitSemaphoreIndex];
				si.pWaitDstStageMask = &m_waitDstStageMasks[data.waitSemaphoreIndex];
			}

			if (data.commandBufferCount)
				si.pCommandBuffers = &m_commandBuffers[data.commandBufferIndex];

			if (data.signalSemaphoreCount)
				si.pSignalSemaphores = &m_signalSemaphores[data.signalSemaphoreIndex];
		}

		m_submited = DE_TRUE;
		return m_vk.queueSubmit(queue, static_cast<deUint32>(submitInfo.size()), &submitInfo[0], fence);
	}
857
858 protected:
859
860 std::vector<VkSemaphore> m_waitSemaphores;
861 std::vector<VkSemaphore> m_signalSemaphores;
862 std::vector<VkPipelineStageFlags> m_waitDstStageMasks;
863 std::vector<VkCommandBuffer> m_commandBuffers;
864 std::vector<SubmitInfoData> m_submitInfoData;
865 std::vector<deUint64> m_timelineSemaphoreValues;
866 bool m_submited;
867 };
868
869 class Synchronization2Wrapper : public SynchronizationWrapperBase
870 {
871 public:
Synchronization2Wrapper(const DeviceInterface & vk,deUint32 submitInfoCount)872 Synchronization2Wrapper(const DeviceInterface& vk, deUint32 submitInfoCount)
873 : SynchronizationWrapperBase(vk)
874 {
875 m_submitInfo.reserve(submitInfoCount);
876 }
877
878 ~Synchronization2Wrapper() = default;
879
addSubmitInfo(deUint32 waitSemaphoreInfoCount,const VkSemaphoreSubmitInfoKHR * pWaitSemaphoreInfos,deUint32 commandBufferInfoCount,const VkCommandBufferSubmitInfoKHR * pCommandBufferInfos,deUint32 signalSemaphoreInfoCount,const VkSemaphoreSubmitInfoKHR * pSignalSemaphoreInfos,bool usingWaitTimelineSemaphore,bool usingSignalTimelineSemaphore)880 void addSubmitInfo(deUint32 waitSemaphoreInfoCount,
881 const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos,
882 deUint32 commandBufferInfoCount,
883 const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos,
884 deUint32 signalSemaphoreInfoCount,
885 const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos,
886 bool usingWaitTimelineSemaphore,
887 bool usingSignalTimelineSemaphore) override
888 {
889 DE_UNREF(usingWaitTimelineSemaphore);
890 DE_UNREF(usingSignalTimelineSemaphore);
891
892 m_submitInfo.push_back(VkSubmitInfo2KHR{
893 VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR, // VkStructureType sType
894 DE_NULL, // const void* pNext
895 0u, // VkSubmitFlagsKHR flags
896 waitSemaphoreInfoCount, // deUint32 waitSemaphoreInfoCount
897 pWaitSemaphoreInfos, // const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos
898 commandBufferInfoCount, // deUint32 commandBufferInfoCount
899 pCommandBufferInfos, // const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos
900 signalSemaphoreInfoCount, // deUint32 signalSemaphoreInfoCount
901 pSignalSemaphoreInfos // const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos
902 });
903 }
904
cmdPipelineBarrier(VkCommandBuffer commandBuffer,const VkDependencyInfoKHR * pDependencyInfo) const905 void cmdPipelineBarrier(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo) const override
906 {
907 m_vk.cmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo);
908 }
909
cmdSetEvent(VkCommandBuffer commandBuffer,VkEvent event,const VkDependencyInfoKHR * pDependencyInfo) const910 void cmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo) const override
911 {
912 m_vk.cmdSetEvent2KHR(commandBuffer, event, pDependencyInfo);
913 }
914
cmdWaitEvents(VkCommandBuffer commandBuffer,deUint32 eventCount,const VkEvent * pEvents,const VkDependencyInfoKHR * pDependencyInfo) const915 void cmdWaitEvents(VkCommandBuffer commandBuffer, deUint32 eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfo) const override
916 {
917 m_vk.cmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfo);
918 }
919
cmdResetEvent(VkCommandBuffer commandBuffer,VkEvent event,VkPipelineStageFlags2KHR flag) const920 void cmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR flag) const override
921 {
922 m_vk.cmdResetEvent2KHR(commandBuffer, event, flag);
923 }
924
queueSubmit(VkQueue queue,VkFence fence)925 VkResult queueSubmit(VkQueue queue, VkFence fence) override
926 {
927 return m_vk.queueSubmit2KHR(queue, static_cast<deUint32>(m_submitInfo.size()), &m_submitInfo[0], fence);
928 }
929
930 protected:
931
932 std::vector<VkSubmitInfo2KHR> m_submitInfo;
933 };
934
getSynchronizationWrapper(SynchronizationType type,const DeviceInterface & vk,bool usingTimelineSemaphores,deUint32 submitInfoCount)935 SynchronizationWrapperPtr getSynchronizationWrapper(SynchronizationType type,
936 const DeviceInterface& vk,
937 bool usingTimelineSemaphores,
938 deUint32 submitInfoCount)
939 {
940 return (type == SynchronizationType::LEGACY)
941 ? SynchronizationWrapperPtr(new LegacySynchronizationWrapper(vk, usingTimelineSemaphores, submitInfoCount))
942 : SynchronizationWrapperPtr(new Synchronization2Wrapper(vk, submitInfoCount));
943 }
944
submitCommandsAndWait(SynchronizationWrapperPtr synchronizationWrapper,const DeviceInterface & vk,const VkDevice device,const VkQueue queue,const VkCommandBuffer cmdBuffer)945 void submitCommandsAndWait(SynchronizationWrapperPtr synchronizationWrapper,
946 const DeviceInterface& vk,
947 const VkDevice device,
948 const VkQueue queue,
949 const VkCommandBuffer cmdBuffer)
950 {
951 VkCommandBufferSubmitInfoKHR commandBufferInfoCount = makeCommonCommandBufferSubmitInfo(cmdBuffer);
952
953 synchronizationWrapper->addSubmitInfo(
954 0u, // deUint32 waitSemaphoreInfoCount
955 DE_NULL, // const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos
956 1u, // deUint32 commandBufferInfoCount
957 &commandBufferInfoCount, // const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos
958 0u, // deUint32 signalSemaphoreInfoCount
959 DE_NULL // const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos
960 );
961
962 const Unique<VkFence> fence(createFence(vk, device));
963 VK_CHECK(synchronizationWrapper->queueSubmit(queue, *fence));
964 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
965 }
966
requireFeatures(const InstanceInterface & vki,const VkPhysicalDevice physDevice,const FeatureFlags flags)967 void requireFeatures (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const FeatureFlags flags)
968 {
969 const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice);
970
971 if (((flags & FEATURE_TESSELLATION_SHADER) != 0) && !features.tessellationShader)
972 throw tcu::NotSupportedError("Tessellation shader not supported");
973
974 if (((flags & FEATURE_GEOMETRY_SHADER) != 0) && !features.geometryShader)
975 throw tcu::NotSupportedError("Geometry shader not supported");
976
977 if (((flags & FEATURE_SHADER_FLOAT_64) != 0) && !features.shaderFloat64)
978 throw tcu::NotSupportedError("Double-precision floats not supported");
979
980 if (((flags & FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS) != 0) && !features.vertexPipelineStoresAndAtomics)
981 throw tcu::NotSupportedError("SSBO and image writes not supported in vertex pipeline");
982
983 if (((flags & FEATURE_FRAGMENT_STORES_AND_ATOMICS) != 0) && !features.fragmentStoresAndAtomics)
984 throw tcu::NotSupportedError("SSBO and image writes not supported in fragment shader");
985
986 if (((flags & FEATURE_SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE) != 0) && !features.shaderTessellationAndGeometryPointSize)
987 throw tcu::NotSupportedError("Tessellation and geometry shaders don't support PointSize built-in");
988 }
989
requireStorageImageSupport(const InstanceInterface & vki,const VkPhysicalDevice physDevice,const VkFormat fmt)990 void requireStorageImageSupport(const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat fmt)
991 {
992 const VkFormatProperties p = getPhysicalDeviceFormatProperties(vki, physDevice, fmt);
993 if ((p.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) == 0)
994 throw tcu::NotSupportedError("Storage image format not supported");
995 }
996
getResourceName(const ResourceDescription & resource)997 std::string getResourceName (const ResourceDescription& resource)
998 {
999 std::ostringstream str;
1000
1001 if ((resource.type == RESOURCE_TYPE_BUFFER) ||
1002 (resource.type == RESOURCE_TYPE_INDEX_BUFFER))
1003 {
1004 str << "buffer_" << resource.size.x();
1005 }
1006 else if (resource.type == RESOURCE_TYPE_IMAGE)
1007 {
1008 str << "image_" << resource.size.x()
1009 << (resource.size.y() > 0 ? "x" + de::toString(resource.size.y()) : "")
1010 << (resource.size.z() > 0 ? "x" + de::toString(resource.size.z()) : "")
1011 << "_" << de::toLower(getFormatName(resource.imageFormat)).substr(10);
1012 }
1013 else if (isIndirectBuffer(resource.type))
1014 str << "indirect_buffer";
1015 else
1016 DE_ASSERT(0);
1017
1018 return str.str();
1019 }
1020
isIndirectBuffer(const ResourceType type)1021 bool isIndirectBuffer (const ResourceType type)
1022 {
1023 switch (type)
1024 {
1025 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
1026 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
1027 case RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH:
1028 return true;
1029
1030 default:
1031 return false;
1032 }
1033 }
1034
makeCommonCommandBufferSubmitInfo(const VkCommandBuffer cmdBuf)1035 VkCommandBufferSubmitInfoKHR makeCommonCommandBufferSubmitInfo (const VkCommandBuffer cmdBuf)
1036 {
1037 return
1038 {
1039 VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR, // VkStructureType sType
1040 DE_NULL, // const void* pNext
1041 cmdBuf, // VkCommandBuffer commandBuffer
1042 0u // uint32_t deviceMask
1043 };
1044 }
1045
makeCommonSemaphoreSubmitInfo(VkSemaphore semaphore,deUint64 value,VkPipelineStageFlags2KHR stageMask)1046 VkSemaphoreSubmitInfoKHR makeCommonSemaphoreSubmitInfo(VkSemaphore semaphore, deUint64 value, VkPipelineStageFlags2KHR stageMask)
1047 {
1048 return
1049 {
1050 VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR, // VkStructureType sType
1051 DE_NULL, // const void* pNext
1052 semaphore, // VkSemaphore semaphore
1053 value, // deUint64 value
1054 stageMask, // VkPipelineStageFlags2KHR stageMask
1055 0u // deUint32 deviceIndex
1056 };
1057 }
1058
makeCommonDependencyInfo(const VkMemoryBarrier2KHR * pMemoryBarrier,const VkBufferMemoryBarrier2KHR * pBufferMemoryBarrier,const VkImageMemoryBarrier2KHR * pImageMemoryBarrier)1059 VkDependencyInfoKHR makeCommonDependencyInfo(const VkMemoryBarrier2KHR* pMemoryBarrier, const VkBufferMemoryBarrier2KHR* pBufferMemoryBarrier, const VkImageMemoryBarrier2KHR* pImageMemoryBarrier)
1060 {
1061 return
1062 {
1063 VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType
1064 DE_NULL, // const void* pNext
1065 VK_DEPENDENCY_BY_REGION_BIT, // VkDependencyFlags dependencyFlags
1066 !!pMemoryBarrier, // deUint32 memoryBarrierCount
1067 pMemoryBarrier, // const VkMemoryBarrier2KHR* pMemoryBarriers
1068 !!pBufferMemoryBarrier, // deUint32 bufferMemoryBarrierCount
1069 pBufferMemoryBarrier, // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers
1070 !!pImageMemoryBarrier, // deUint32 imageMemoryBarrierCount
1071 pImageMemoryBarrier // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers
1072 };
1073 };
1074
// Constructs an empty pipeline cache data holder (no cached blob).
PipelineCacheData::PipelineCacheData (void)
{
}
1078
// Trivial destructor; m_data cleans itself up.
PipelineCacheData::~PipelineCacheData (void)
{
}
1082
// Creates a VkPipelineCache seeded with the currently stored blob (or an
// empty cache when no data has been captured yet). Thread-safe via m_lock.
vk::Move<VkPipelineCache> PipelineCacheData::createPipelineCache (const vk::DeviceInterface& vk, const vk::VkDevice device) const
{
	const de::ScopedLock dataLock (m_lock);

	const vk::VkPipelineCacheCreateInfo createInfo =
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,	// sType
		DE_NULL,											// pNext
		(vk::VkPipelineCacheCreateFlags)0,					// flags
		(deUintptr)m_data.size(),							// initialDataSize
		(m_data.empty() ? DE_NULL : &m_data[0])				// pInitialData
	};

	return vk::createPipelineCache(vk, device, &createInfo);
}
1097
setFromPipelineCache(const vk::DeviceInterface & vk,const vk::VkDevice device,const vk::VkPipelineCache pipelineCache)1098 void PipelineCacheData::setFromPipelineCache (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineCache pipelineCache)
1099 {
1100 const de::ScopedLock dataLock (m_lock);
1101 deUintptr dataSize = 0;
1102
1103 VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, DE_NULL));
1104
1105 m_data.resize(dataSize);
1106
1107 if (dataSize > 0)
1108 VK_CHECK(vk.getPipelineCacheData(device, pipelineCache, &dataSize, &m_data[0]));
1109 }
1110
1111 } // synchronization
1112 } // vkt
1113