1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2018 The Khronos Group Inc.
6 * Copyright (c) 2018 Google Inc.
7 * Copyright (c) 2018 ARM Limited.
8 * Copyright (c) 2023 LunarG, Inc.
9 * Copyright (c) 2023 Nintendo
10 *
11 * Licensed under the Apache License, Version 2.0 (the "License");
12 * you may not use this file except in compliance with the License.
13 * You may obtain a copy of the License at
14 *
15 * http://www.apache.org/licenses/LICENSE-2.0
16 *
17 * Unless required by applicable law or agreed to in writing, software
18 * distributed under the License is distributed on an "AS IS" BASIS,
19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 * See the License for the specific language governing permissions and
21 * limitations under the License.
22 *
23 *//*!
24 * \file
25 * \brief Dynamic Offset Tests
26 *//*--------------------------------------------------------------------*/
27
28 #include "vktPipelineDynamicOffsetTests.hpp"
29 #include "vktPipelineClearUtil.hpp"
30 #include "vktPipelineImageUtil.hpp"
31 #include "vktPipelineVertexUtil.hpp"
32 #include "vktPipelineReferenceRenderer.hpp"
33 #include "vkComputePipelineConstructionUtil.hpp"
34 #include "vktTestCase.hpp"
35 #include "vkImageUtil.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkPrograms.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkRef.hpp"
40 #include "vkRefUtil.hpp"
41 #include "vkTypeUtil.hpp"
42 #include "vkCmdUtil.hpp"
43 #include "vkObjUtil.hpp"
44 #include "vkDeviceUtil.hpp"
45 #include "vkBuilderUtil.hpp"
46 #include "tcuImageCompare.hpp"
47 #include "deMemory.h"
48 #include "deUniquePtr.hpp"
49 #include "tcuTestLog.hpp"
50 #include <array>
51 #include <cmath>
52 #include <vector>
53 #include <sstream>
54
55 namespace vkt
56 {
57 namespace pipeline
58 {
59
60 using namespace vk;
61 using namespace std;
62 using de::UniquePtr;
63
64 namespace
65 {
66 typedef de::SharedPtr<Unique<VkBuffer>> VkBufferSp;
67 typedef de::SharedPtr<Allocation> AllocationSp;
68 typedef de::SharedPtr<Unique<VkCommandBuffer>> VkCommandBufferSp;
69 typedef de::SharedPtr<RenderPassWrapper> VkRenderPassSp;
70
// How the buffer bindings are distributed over descriptor sets.
enum class GroupingStrategy
{
	SINGLE_SET	= 0,	// All bindings in a single descriptor set, with consecutive binding numbers.
	MULTISET	= 1,	// One descriptor set per binding; every set uses binding number 0.
	ARRAYS		= 2,	// Two sets: set 0 holds an array of dynamic descriptors, set 1 an array of non-dynamic ones.
};
77
// Configuration for a single dynamic offset test variant.
struct TestParams
{
	PipelineConstructionType	pipelineConstructionType;	// Monolithic pipeline vs. graphics pipeline library construction.
	VkDescriptorType			descriptorType;				// Dynamic descriptor type under test (uniform or storage buffer dynamic).
	deUint32					numCmdBuffers;				// Number of command buffers; one render pass and pipeline per buffer.
	bool						reverseOrder;				// Record command buffers in reverse order (submission order is unchanged).
	deUint32					numDescriptorSetBindings;	// Bind+draw iterations per command buffer (one quad each).
	deUint32					numDynamicBindings;			// Number of bindings using the dynamic descriptor type.
	deUint32					numNonDynamicBindings;		// Number of bindings using the corresponding non-dynamic type.
	GroupingStrategy			groupingStrategy;			// How bindings are split into descriptor sets.
};
89 #ifndef CTS_USES_VULKANSC
createQuads(deUint32 numQuads,float size)90 vector<Vertex4RGBA> createQuads (deUint32 numQuads, float size)
91 {
92 vector<Vertex4RGBA> vertices;
93
94 for (deUint32 quadNdx = 0; quadNdx < numQuads; quadNdx++)
95 {
96 const float xOffset = -0.5f + (float)quadNdx;
97 const tcu::Vec4 color (0.0f);
98 const Vertex4RGBA lowerLeftVertex = {tcu::Vec4(-size + xOffset, -size, 0.0f, 1.0f), color};
99 const Vertex4RGBA lowerRightVertex = {tcu::Vec4(size + xOffset, -size, 0.0f, 1.0f), color};
100 const Vertex4RGBA UpperLeftVertex = {tcu::Vec4(-size + xOffset, size, 0.0f, 1.0f), color};
101 const Vertex4RGBA UpperRightVertex = {tcu::Vec4(size + xOffset, size, 0.0f, 1.0f), color};
102
103 vertices.push_back(lowerLeftVertex);
104 vertices.push_back(lowerRightVertex);
105 vertices.push_back(UpperLeftVertex);
106 vertices.push_back(UpperLeftVertex);
107 vertices.push_back(lowerRightVertex);
108 vertices.push_back(UpperRightVertex);
109 }
110
111 return vertices;
112 }
113 #endif // CTS_USES_VULKANSC
114
// Reference colors read by the shaders through the buffer bindings; each color
// occupies one alignment-padded block in the test buffer (see init()).
static const tcu::Vec4 testColors[] =
{
	tcu::Vec4(0.3f, 0.0f, 0.0f, 1.0f),
	tcu::Vec4(0.0f, 0.3f, 0.0f, 1.0f),
	tcu::Vec4(0.0f, 0.0f, 0.3f, 1.0f),
	tcu::Vec4(0.3f, 0.3f, 0.0f, 1.0f),
	tcu::Vec4(0.0f, 0.3f, 0.3f, 1.0f),
	tcu::Vec4(0.3f, 0.0f, 0.3f, 1.0f)
};
// Byte size of one color entry and the number of entries available.
static constexpr VkDeviceSize	kColorSize		= static_cast<VkDeviceSize>(sizeof(testColors[0]));
static constexpr deUint32		kNumTestColors	= static_cast<deUint32>(DE_LENGTH_OF_ARRAY(testColors));
126
compareVectors(const tcu::Vec4 firstVector,const tcu::Vec4 secondVector,const float tolerance)127 bool compareVectors (const tcu::Vec4 firstVector, const tcu::Vec4 secondVector, const float tolerance)
128 {
129 for (auto i = 0; i < firstVector.SIZE; i++)
130 {
131 if (abs(firstVector[i] - secondVector[i]) > tolerance)
132 return false;
133 }
134
135 return true;
136 }
137
makeImageCreateInfo(const tcu::IVec2 & size,const VkFormat format,const VkImageUsageFlags usage)138 inline VkImageCreateInfo makeImageCreateInfo (const tcu::IVec2& size, const VkFormat format, const VkImageUsageFlags usage)
139 {
140 const VkImageCreateInfo imageParams =
141 {
142 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
143 DE_NULL, // const void* pNext;
144 (VkImageCreateFlags)0, // VkImageCreateFlags flags;
145 VK_IMAGE_TYPE_2D, // VkImageType imageType;
146 format, // VkFormat format;
147 makeExtent3D(size.x(), size.y(), 1), // VkExtent3D extent;
148 1u, // deUint32 mipLevels;
149 1u, // deUint32 arrayLayers;
150 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
151 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
152 usage, // VkImageUsageFlags usage;
153 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
154 0u, // deUint32 queueFamilyIndexCount;
155 DE_NULL, // const deUint32* pQueueFamilyIndices;
156 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
157 };
158
159 return imageParams;
160 }
161
// Common base for the dynamic offset test instances: stores the test parameters
// and a simple device memory allocator used for all images and buffers.
class DynamicOffsetTestInstance : public vkt::TestInstance
{
public:
	DynamicOffsetTestInstance (Context& context, const TestParams& params)
		: vkt::TestInstance	(context)
		, m_params			(params)
		, m_memAlloc		(context.getDeviceInterface(), context.getDevice(), getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
	{}

protected:
	const TestParams	m_params;	// Test configuration (immutable for the instance's lifetime).
	SimpleAllocator		m_memAlloc;	// Allocator backing all device memory in the test.
};
175
// Graphics variant: draws a row of quads, one per bind+draw iteration, with the
// quad color coming from buffer bindings addressed via dynamic offsets.
// NOTE: member declaration order matters — the constructor's init list relies on
// m_renderSize/m_colorFormat/m_vertices being initialized in this order.
class DynamicOffsetGraphicsTestInstance : public DynamicOffsetTestInstance
{
public:
								DynamicOffsetGraphicsTestInstance	(Context& context, const TestParams& params);
	virtual						~DynamicOffsetGraphicsTestInstance	(void);
	void						init								(void);
	virtual tcu::TestStatus		iterate								(void);
	tcu::TestStatus				verifyImage							(void);

private:
	const tcu::UVec2					m_renderSize;			// Render target size in pixels (32x32).
	const VkFormat						m_colorFormat;			// Color attachment format (RGBA8 UNORM).
	VkImageCreateInfo					m_colorImageCreateInfo;	// Kept so the readback knows the image parameters.
	Move<VkImage>						m_colorImage;
	de::MovePtr<Allocation>				m_colorImageAlloc;
	Move<VkImageView>					m_colorAttachmentView;
	vector<VkRenderPassSp>				m_renderPasses;			// One render pass (and framebuffer) per command buffer.
	ShaderWrapper						m_vertexShaderModule;
	ShaderWrapper						m_fragmentShaderModule;
	Move<VkBuffer>						m_vertexBuffer;
	de::MovePtr<Allocation>				m_vertexBufferAlloc;
	Move<VkBuffer>						m_buffer;				// Buffer holding the test colors, read via the descriptors.
	de::MovePtr<Allocation>				m_bufferAlloc;
	vector<Move<VkDescriptorSetLayout>>	m_descriptorSetLayouts;	// One or more layouts, per grouping strategy.
	Move<VkDescriptorPool>				m_descriptorPool;
	vector<Move<VkDescriptorSet>>		m_descriptorSets;
	PipelineLayoutWrapper				m_pipelineLayout;
	vector<GraphicsPipelineWrapper>		m_graphicsPipelines;	// One pipeline per command buffer / render pass.
	Move<VkCommandPool>					m_cmdPool;
	vector<VkCommandBufferSp>			m_cmdBuffers;
	vector<Vertex4RGBA>					m_vertices;				// Quad geometry; colors patched in verifyImage().
};
208 #ifndef CTS_USES_VULKANSC
// One quad is drawn per descriptor set binding in each command buffer, hence
// numDescriptorSetBindings * numCmdBuffers quads in total.
DynamicOffsetGraphicsTestInstance::DynamicOffsetGraphicsTestInstance (Context& context, const TestParams& params)
	: DynamicOffsetTestInstance	(context, params)
	, m_renderSize				(32, 32)
	, m_colorFormat				(VK_FORMAT_R8G8B8A8_UNORM)
	, m_vertices				(createQuads(m_params.numDescriptorSetBindings * m_params.numCmdBuffers, 0.25f))
{
}
216 #endif // CTS_USES_VULKANSC
217
// Creates every Vulkan object the test needs and records the command buffers:
// - a 32x32 color target plus one render pass/framebuffer per command buffer
//   (first pass clears, later passes load the previous contents),
// - a host-visible buffer holding the test colors, one aligned block per color,
// - descriptor set layouts/sets grouped according to m_params.groupingStrategy,
// - one graphics pipeline per command buffer,
// - per command buffer: numDescriptorSetBindings bind+draw iterations, advancing
//   all dynamic offsets by one color block before each draw.
void DynamicOffsetGraphicsTestInstance::init (void)
{
	const VkComponentMapping	componentMappingRGBA		= { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
	const InstanceInterface&	vki							= m_context.getInstanceInterface();
	const DeviceInterface&		vk							= m_context.getDeviceInterface();
	const VkPhysicalDevice		physicalDevice				= m_context.getPhysicalDevice();
	const VkDevice				vkDevice					= m_context.getDevice();
	const deUint32				queueFamilyIndex			= m_context.getUniversalQueueFamilyIndex();
	const deUint32				numBindings					= m_params.numDynamicBindings + m_params.numNonDynamicBindings;
	deUint32					offset						= 0;	// Running dynamic offset, advanced one color block per draw.
	deUint32					quadNdx						= 0;	// Global quad index across all command buffers.
	const VkPhysicalDeviceLimits deviceLimits				= getPhysicalDeviceProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()).limits;
	// Dynamic offsets must be multiples of the per-type minimum alignment, so each
	// color is padded up to one aligned "block" inside the buffer.
	const VkDeviceSize			alignment					= ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ? deviceLimits.minUniformBufferOffsetAlignment : deviceLimits.minStorageBufferOffsetAlignment);
	const VkDeviceSize			extraBytes					= kColorSize % alignment;
	const VkDeviceSize			colorBlockInputSize			= ((extraBytes == 0ull) ? kColorSize : (kColorSize + alignment - extraBytes));
	const VkDeviceSize			bufferSize					= colorBlockInputSize * kNumTestColors;
	const VkDeviceSize			bindingOffset				= bufferSize / numBindings;	// Static base offset between consecutive bindings.
	const VkDescriptorType		nonDynamicDescriptorType	= m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

	vector<VkDescriptorSetLayout>	descriptorSetLayoutsPlain;
	vector<VkDescriptorSet>			descriptorSetsPlain;

	// Create color image
	{
		const VkImageCreateInfo colorImageParams =
		{
			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,									// VkStructureType			sType;
			DE_NULL,																// const void*				pNext;
			0u,																		// VkImageCreateFlags		flags;
			VK_IMAGE_TYPE_2D,														// VkImageType				imageType;
			m_colorFormat,															// VkFormat					format;
			{ m_renderSize.x(), m_renderSize.y(), 1u },								// VkExtent3D				extent;
			1u,																		// deUint32					mipLevels;
			1u,																		// deUint32					arrayLayers;
			VK_SAMPLE_COUNT_1_BIT,													// VkSampleCountFlagBits	samples;
			VK_IMAGE_TILING_OPTIMAL,												// VkImageTiling			tiling;
			VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,	// VkImageUsageFlags		usage;
			VK_SHARING_MODE_EXCLUSIVE,												// VkSharingMode			sharingMode;
			1u,																		// deUint32					queueFamilyIndexCount;
			&queueFamilyIndex,														// const deUint32*			pQueueFamilyIndices;
			VK_IMAGE_LAYOUT_UNDEFINED,												// VkImageLayout			initialLayout;
		};

		m_colorImageCreateInfo	= colorImageParams;
		m_colorImage			= createImage(vk, vkDevice, &m_colorImageCreateInfo);

		// Allocate and bind color image memory
		m_colorImageAlloc		= m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *m_colorImage), MemoryRequirement::Any);
		VK_CHECK(vk.bindImageMemory(vkDevice, *m_colorImage, m_colorImageAlloc->getMemory(), m_colorImageAlloc->getOffset()));
	}

	// Create color attachment view
	{
		const VkImageViewCreateInfo colorAttachmentViewParams =
		{
			VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// VkStructureType				sType;
			DE_NULL,										// const void*					pNext;
			0u,												// VkImageViewCreateFlags		flags;
			*m_colorImage,									// VkImage						image;
			VK_IMAGE_VIEW_TYPE_2D,							// VkImageViewType				viewType;
			m_colorFormat,									// VkFormat						format;
			componentMappingRGBA,							// VkComponentMapping			components;
			{ VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u },	// VkImageSubresourceRange		subresourceRange;
		};

		m_colorAttachmentView = createImageView(vk, vkDevice, &colorAttachmentViewParams);
	}

	// Create render passes
	// NOTE: loadOps/initialLayouts below have two entries, so this assumes
	// numCmdBuffers <= 2 — TODO confirm against the test-creation code.
	for (deUint32 renderPassIdx = 0; renderPassIdx < m_params.numCmdBuffers; renderPassIdx++)
	{
		// The first pass clears the output image, and the second one draws on top of the first pass.
		const VkAttachmentLoadOp loadOps[] =
		{
			VK_ATTACHMENT_LOAD_OP_CLEAR,
			VK_ATTACHMENT_LOAD_OP_LOAD
		};

		const VkImageLayout initialLayouts[] =
		{
			VK_IMAGE_LAYOUT_UNDEFINED,
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
		};

		const VkAttachmentDescription attachmentDescription =
		{
			(VkAttachmentDescriptionFlags)0,			// VkAttachmentDescriptionFlags	flags
			m_colorFormat,								// VkFormat						format
			VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits		samples
			loadOps[renderPassIdx],						// VkAttachmentLoadOp			loadOp
			VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp			storeOp
			VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp			stencilLoadOp
			VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp			stencilStoreOp
			initialLayouts[renderPassIdx],				// VkImageLayout				initialLayout
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout				finalLayout
		};

		const VkAttachmentReference attachmentRef =
		{
			0u,											// deUint32			attachment
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout	layout
		};

		const VkSubpassDescription subpassDescription =
		{
			(VkSubpassDescriptionFlags)0,		// VkSubpassDescriptionFlags	flags
			VK_PIPELINE_BIND_POINT_GRAPHICS,	// VkPipelineBindPoint			pipelineBindPoint
			0u,									// deUint32						inputAttachmentCount
			DE_NULL,							// const VkAttachmentReference*	pInputAttachments
			1u,									// deUint32						colorAttachmentCount
			&attachmentRef,						// const VkAttachmentReference*	pColorAttachments
			DE_NULL,							// const VkAttachmentReference*	pResolveAttachments
			DE_NULL,							// const VkAttachmentReference*	pDepthStencilAttachment
			0u,									// deUint32						preserveAttachmentCount
			DE_NULL								// const deUint32*				pPreserveAttachments
		};

		const VkRenderPassCreateInfo renderPassInfo =
		{
			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,	// VkStructureType					sType
			DE_NULL,									// const void*						pNext
			(VkRenderPassCreateFlags)0,					// VkRenderPassCreateFlags			flags
			1u,											// deUint32							attachmentCount
			&attachmentDescription,						// const VkAttachmentDescription*	pAttachments
			1u,											// deUint32							subpassCount
			&subpassDescription,						// const VkSubpassDescription*		pSubpasses
			0u,											// deUint32							dependencyCount
			DE_NULL										// const VkSubpassDependency*		pDependencies
		};

		m_renderPasses.push_back(VkRenderPassSp(new RenderPassWrapper(m_params.pipelineConstructionType, vk, vkDevice, &renderPassInfo)));

		const VkImageView attachmentBindInfos[] =
		{
			*m_colorAttachmentView
		};

		const VkFramebufferCreateInfo framebufferParams =
		{
			VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,	// VkStructureType				sType;
			DE_NULL,									// const void*					pNext;
			0u,											// VkFramebufferCreateFlags		flags;
			**m_renderPasses[renderPassIdx],			// VkRenderPass					renderPass;
			1u,											// deUint32						attachmentCount;
			attachmentBindInfos,						// const VkImageView*			pAttachments;
			(deUint32)m_renderSize.x(),					// deUint32						width;
			(deUint32)m_renderSize.y(),					// deUint32						height;
			1u											// deUint32						layers;
		};

		m_renderPasses[renderPassIdx]->createFramebuffer(vk, vkDevice, &framebufferParams, *m_colorImage);
	}

	// Create pipeline layout
	{
		// Create descriptor set layouts
		vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;

		for (deUint32 binding = 0; binding < numBindings; binding++)
		{
			// Dynamic bindings come first; the remainder use the non-dynamic type.
			const bool							dynamicDesc					= (binding < m_params.numDynamicBindings);
			const VkDescriptorType				descriptorType				= (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
			// SINGLE_SET uses consecutive binding numbers; the other strategies put
			// each (array of) descriptor(s) at binding 0 of its own set.
			const deUint32						bindingNumber				= (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
			const deUint32						descriptorCount				= ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ? (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) : 1u);
			const VkDescriptorSetLayoutBinding	descriptorSetLayoutBinding	=
			{
				bindingNumber,				// uint32_t				binding;
				descriptorType,				// VkDescriptorType		descriptorType;
				descriptorCount,			// uint32_t				descriptorCount;
				VK_SHADER_STAGE_VERTEX_BIT,	// VkShaderStageFlags	stageFlags;
				DE_NULL						// const VkSampler*		pImmutableSamplers;
			};

			// Skip used descriptors in array mode: one layout binding covers the whole array.
			if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
				binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : numBindings);

			descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
		}

		vector<VkDescriptorSetLayoutCreateInfo> descriptorSetLayoutCreateInfos;

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			// All bindings in a single layout.
			const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
			{
				VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,	// VkStructureType						sType;
				DE_NULL,												// const void*							pNext;
				0u,														// VkDescriptorSetLayoutCreateFlags		flags;
				numBindings,											// uint32_t								bindingCount;
				descriptorSetLayoutBindings.data()						// const VkDescriptorSetLayoutBinding*	pBindings;
			};

			m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
		}
		else
		{
			// MULTISET/ARRAYS: one layout per collected layout binding.
			for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
			{
				const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
				{
					VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,	// VkStructureType						sType;
					DE_NULL,												// const void*							pNext;
					0u,														// VkDescriptorSetLayoutCreateFlags		flags;
					1u,														// uint32_t								bindingCount;
					&descriptorSetLayoutBindings[i]							// const VkDescriptorSetLayoutBinding*	pBindings;
				};

				m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
			}
		}

		// Create pipeline layout
		descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
		for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
			descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();

		const VkPipelineLayoutCreateInfo pipelineLayoutParams =
		{
			VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// VkStructureType				sType;
			DE_NULL,													// const void*					pNext;
			0u,															// VkPipelineLayoutCreateFlags	flags;
			static_cast<deUint32>(descriptorSetLayoutsPlain.size()),	// deUint32						descriptorSetCount;
			descriptorSetLayoutsPlain.data(),							// const VkDescriptorSetLayout*	pSetLayouts;
			0u,															// deUint32						pushConstantRangeCount;
			DE_NULL														// const VkPushConstantRange*	pPushConstantRanges;
		};

		m_pipelineLayout = PipelineLayoutWrapper(m_params.pipelineConstructionType, vk, vkDevice, &pipelineLayoutParams);
	}

	// Create buffer: one padded color block per test color, filled from testColors.
	{
		vector<deUint8> hostBuffer((size_t)bufferSize, 0);
		for (deUint32 colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
			deMemcpy(&hostBuffer[(deUint32)colorBlockInputSize * colorIdx], &testColors[colorIdx], kColorSize);

		const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

		const VkBufferCreateInfo bufferCreateInfo =
		{
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
			DE_NULL,								// const void*			pNext;
			0u,										// VkBufferCreateFlags	flags
			bufferSize,								// VkDeviceSize			size;
			usageFlags,								// VkBufferUsageFlags	usage;
			VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
			1u,										// deUint32				queueFamilyCount;
			&queueFamilyIndex						// const deUint32*		pQueueFamilyIndices;
		};

		m_buffer		= createBuffer(vk, vkDevice, &bufferCreateInfo);
		m_bufferAlloc	= m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
		VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));

		deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
		flushAlloc(vk, vkDevice, *m_bufferAlloc);
	}

	// Create descriptor pool
	{
		DescriptorPoolBuilder poolBuilder;
		poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
		poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
		m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, static_cast<deUint32>(m_descriptorSetLayouts.size()));
	}

	// Create descriptor sets: one per layout created above.
	{
		for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
		{
			const VkDescriptorSetAllocateInfo allocInfo =
			{
				VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,	// VkStructureType					sType;
				DE_NULL,										// const void*						pNext;
				*m_descriptorPool,								// VkDescriptorPool					descriptorPool;
				1u,												// deUint32							setLayoutCount;
				&(m_descriptorSetLayouts[i].get()),				// const VkDescriptorSetLayout*		pSetLayouts;
			};
			m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
		}
	}

	descriptorSetsPlain.resize(m_descriptorSets.size());
	for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
		descriptorSetsPlain[i] = m_descriptorSets[i].get();

	// Update descriptor sets: binding N points at the buffer with a static base
	// offset of bindingOffset * N and a range of one color.
	for (deUint32 binding = 0; binding < numBindings; ++binding)
	{
		const bool						dynamicDesc				= (binding < m_params.numDynamicBindings);
		const VkDescriptorType			descriptorType			= (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
		const VkDescriptorBufferInfo	descriptorBufferInfo	=
		{
			*m_buffer,					// VkBuffer			buffer;
			bindingOffset * binding,	// VkDeviceSize		offset;
			kColorSize					// VkDeviceSize		range;
		};

		// Pick destination set / binding / array element per grouping strategy.
		VkDescriptorSet	bindingSet;
		deUint32		bindingNumber;
		deUint32		dstArrayElement;

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			bindingSet		= m_descriptorSets[0].get();
			bindingNumber	= binding;
			dstArrayElement	= 0u;
		}
		else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
		{
			bindingSet		= m_descriptorSets[binding].get();
			bindingNumber	= 0u;
			dstArrayElement	= 0u;
		}
		else // GroupingStrategy::ARRAYS
		{
			bindingSet		= (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
			bindingNumber	= 0u;
			dstArrayElement	= (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
		}

		const VkWriteDescriptorSet writeDescriptorSet =
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,	// VkStructureType					sType;
			DE_NULL,								// const void*						pNext;
			bindingSet,								// VkDescriptorSet					dstSet;
			bindingNumber,							// uint32_t							dstBinding;
			dstArrayElement,						// uint32_t							dstArrayElement;
			1u,										// uint32_t							descriptorCount;
			descriptorType,							// VkDescriptorType					descriptorType;
			DE_NULL,								// const VkDescriptorImageInfo*		pImageInfo;
			&descriptorBufferInfo,					// const VkDescriptorBufferInfo*	pBufferInfo;
			DE_NULL									// const VkBufferView*				pTexelBufferView;
		};

		vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
	}

	// Create shaders
	{
		m_vertexShaderModule	= ShaderWrapper(vk, vkDevice, m_context.getBinaryCollection().get("vert"), 0u);
		m_fragmentShaderModule	= ShaderWrapper(vk, vkDevice, m_context.getBinaryCollection().get("frag"), 0u);
	}

	// Create pipelines: one per command buffer, each targeting its own render pass.
	m_graphicsPipelines.reserve(m_params.numCmdBuffers);
	for (deUint32 pipelineIdx = 0; pipelineIdx < m_params.numCmdBuffers; pipelineIdx++)
	{
		const VkVertexInputBindingDescription vertexInputBindingDescription =
		{
			0u,								// deUint32					binding;
			sizeof(Vertex4RGBA),			// deUint32					strideInBytes;
			VK_VERTEX_INPUT_RATE_VERTEX		// VkVertexInputStepRate	stepRate;
		};

		const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
		{
			{
				0u,									// deUint32	location;
				0u,									// deUint32	binding;
				VK_FORMAT_R32G32B32A32_SFLOAT,		// VkFormat	format;
				0u									// deUint32	offsetInBytes;
			},
			{
				1u,									// deUint32	location;
				0u,									// deUint32	binding;
				VK_FORMAT_R32G32B32A32_SFLOAT,		// VkFormat	format;
				DE_OFFSET_OF(Vertex4RGBA, color),	// deUint32	offset;
			}
		};

		const VkPipelineVertexInputStateCreateInfo vertexInputStateParams
		{
			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
			DE_NULL,													// const void*								pNext;
			0u,															// vkPipelineVertexInputStateCreateFlags	flags;
			1u,															// deUint32									bindingCount;
			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
			2u,															// deUint32									attributeCount;
			vertexInputAttributeDescriptions							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
		};

		const vector<VkViewport>	viewports	{ makeViewport(m_renderSize) };
		const vector<VkRect2D>		scissors	{ makeRect2D(m_renderSize) };

		m_graphicsPipelines.emplace_back(vki, vk, physicalDevice, vkDevice, m_context.getDeviceExtensions(), m_params.pipelineConstructionType);
		m_graphicsPipelines.back().setMonolithicPipelineLayout(m_pipelineLayout)
								  .setDefaultRasterizationState()
								  .setDefaultDepthStencilState()
								  .setDefaultColorBlendState()
								  .setDefaultMultisampleState()
								  .setupVertexInputState(&vertexInputStateParams)
								  .setupPreRasterizationShaderState(viewports,
																	scissors,
																	m_pipelineLayout,
																	**m_renderPasses[pipelineIdx],
																	0u,
																	m_vertexShaderModule)
								  .setupFragmentShaderState(m_pipelineLayout, **m_renderPasses[pipelineIdx], 0u, m_fragmentShaderModule)
								  .setupFragmentOutputState(**m_renderPasses[pipelineIdx])
								  .buildPipeline();
	}

	// Create vertex buffer
	{
		const VkBufferCreateInfo vertexBufferParams =
		{
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,						// VkStructureType		sType;
			DE_NULL,													// const void*			pNext;
			0u,															// VkBufferCreateFlags	flags;
			(VkDeviceSize)(sizeof(Vertex4RGBA) * m_vertices.size()),	// VkDeviceSize			size;
			VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,							// VkBufferUsageFlags	usage;
			VK_SHARING_MODE_EXCLUSIVE,									// VkSharingMode		sharingMode;
			1u,															// deUint32				queueFamilyCount;
			&queueFamilyIndex											// const deUint32*		pQueueFamilyIndices;
		};

		m_vertexBuffer		= createBuffer(vk, vkDevice, &vertexBufferParams);
		m_vertexBufferAlloc	= m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_vertexBuffer), MemoryRequirement::HostVisible);

		VK_CHECK(vk.bindBufferMemory(vkDevice, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset()));

		// Load vertices into vertex buffer
		deMemcpy(m_vertexBufferAlloc->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
		flushAlloc(vk, vkDevice, *m_vertexBufferAlloc);
	}

	// Create command pool
	m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);

	// Create command buffers
	for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
		m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));

	// Record the command buffers. With reverseOrder the buffers are *recorded*
	// last-to-first, but 'offset'/'quadNdx' still advance in recording order, and
	// submission (see iterate()) stays in index order.
	for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
	{
		const VkClearValue	attachmentClearValue	= defaultClearValue(m_colorFormat);
		const VkDeviceSize	vertexBufferOffset		= 0;
		const deUint32		idx						= m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;

		beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
		m_renderPasses[idx]->begin(vk, **m_cmdBuffers[idx], makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), attachmentClearValue);
		m_graphicsPipelines[idx].bind(**m_cmdBuffers[idx]);
		vk.cmdBindVertexBuffers(**m_cmdBuffers[idx], 0, 1, &m_vertexBuffer.get(), &vertexBufferOffset);

		for (deUint32 i = 0; i < m_params.numDescriptorSetBindings; i++)
		{
			// All dynamic bindings advance together: one extra color block per draw,
			// plus one block per dynamic binding index.
			vector<deUint32> offsets;
			for (deUint32 dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
				offsets.push_back(offset + (deUint32)colorBlockInputSize * dynamicBindingIdx);

			vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, static_cast<deUint32>(descriptorSetsPlain.size()), descriptorSetsPlain.data(), m_params.numDynamicBindings, offsets.data());
			offset += (deUint32)colorBlockInputSize;

			// Draw quad
			vk.cmdDraw(**m_cmdBuffers[idx], 6, 1, 6 * quadNdx, 0);
			quadNdx++;
		}

		m_renderPasses[idx]->end(vk, **m_cmdBuffers[idx]);
		endCommandBuffer(vk, **m_cmdBuffers[idx]);
	}
}
683
// All Vulkan objects are owned via Move<>/MovePtr members and are released automatically.
DynamicOffsetGraphicsTestInstance::~DynamicOffsetGraphicsTestInstance (void)
{
}
687
iterate(void)688 tcu::TestStatus DynamicOffsetGraphicsTestInstance::iterate (void)
689 {
690 init();
691
692 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
693 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(), **m_cmdBuffers[cmdBufferIdx]);
694
695 return verifyImage();
696 }
697
// Computes the expected per-quad colors, rasterizes them with the CPU reference
// renderer, and compares the result against the Vulkan color attachment.
tcu::TestStatus DynamicOffsetGraphicsTestInstance::verifyImage (void)
{
	const tcu::TextureFormat	tcuColorFormat	= mapVkFormat(m_colorFormat);
	const tcu::TextureFormat	tcuDepthFormat	= tcu::TextureFormat();	// No depth attachment in this test.
	const ColorVertexShader		vertexShader;
	const ColorFragmentShader	fragmentShader	(tcuColorFormat, tcuDepthFormat);
	const rr::Program			program			(&vertexShader, &fragmentShader);
	ReferenceRenderer			refRenderer		(m_renderSize.x(), m_renderSize.y(), 1, tcuColorFormat, tcuDepthFormat, &program);
	bool						compareOk		= false;

	// Render reference image
	{
		const deUint32 numBindings		= m_params.numDynamicBindings + m_params.numNonDynamicBindings;
		const deUint32 bindingOffset	= kNumTestColors / numBindings;	// Static per-binding offset, in whole colors.

		// Mirror the offsets used during recording: dynamic binding b at draw
		// quadIdx reads color [quadIdx + b * bindingOffset + b] (static base plus
		// the advancing dynamic offset), while non-dynamic binding b always reads
		// the first color of its static region. The shader is expected to sum all
		// bound colors; alpha is forced to 1.
		for (deUint32 quadIdx = 0; quadIdx < m_vertices.size() / 6; quadIdx++)
			for (deUint32 vertexIdx = 0; vertexIdx < 6; vertexIdx++)
			{
				tcu::Vec4 refColor(0.0f);

				for (deUint32 binding = 0; binding < m_params.numDynamicBindings; binding++)
					refColor += testColors[quadIdx + binding * bindingOffset + binding];
				for (deUint32 binding = 0; binding < m_params.numNonDynamicBindings; binding++)
					refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
				refColor.w() = 1.0f;

				m_vertices[quadIdx * 6 + vertexIdx].color.xyzw() = refColor;
			}

		refRenderer.draw(rr::RenderState(refRenderer.getViewportState(), m_context.getDeviceProperties().limits.subPixelPrecisionBits),
						 rr::PRIMITIVETYPE_TRIANGLES, m_vertices);
	}

	// Compare result with reference image
	{
		de::MovePtr<tcu::TextureLevel> result = readColorAttachment(
			m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(),
			m_context.getUniversalQueueFamilyIndex(), m_memAlloc, *m_colorImage, m_colorFormat, m_renderSize);

		// Allow a small per-channel threshold and 1px position deviation to absorb
		// rasterization differences between the reference and the implementation.
		compareOk = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
															  "IntImageCompare",
															  "Image comparison",
															  refRenderer.getAccess(),
															  result->getAccess(),
															  tcu::UVec4(2, 2, 2, 2),
															  tcu::IVec3(1, 1, 0),
															  true,
															  tcu::COMPARE_LOG_RESULT);
	}

	if (compareOk)
		return tcu::TestStatus::pass("Result image matches reference");
	else
		return tcu::TestStatus::fail("Image mismatch");
}
753 #ifndef CTS_USES_VULKANSC
// Test case wrapper for the graphics dynamic offset tests: compiles the shaders,
// checks pipeline construction support, and creates the test instance.
class DynamicOffsetGraphicsTest : public vkt::TestCase
{
public:
						DynamicOffsetGraphicsTest	(tcu::TestContext&	testContext,
													 const string&		name,
													 const TestParams&	params);
						~DynamicOffsetGraphicsTest	(void);
	void				initPrograms				(SourceCollections& sourceCollections) const;
	TestInstance*		createInstance				(Context& context) const;
	void				checkSupport				(Context& context) const;

protected:
	const TestParams	m_params;	// Forwarded verbatim to the instance.
};
768
// Stores the parameters; all heavy work happens in initPrograms()/createInstance().
DynamicOffsetGraphicsTest::DynamicOffsetGraphicsTest (tcu::TestContext&	testContext,
													  const string&		name,
													  const TestParams&	params)
	: vkt::TestCase	(testContext, name)
	, m_params		(params)
{
}
776
// Nothing to release; members clean up via their own destructors.
DynamicOffsetGraphicsTest::~DynamicOffsetGraphicsTest (void)
{
}
780
createInstance(Context & context) const781 TestInstance* DynamicOffsetGraphicsTest::createInstance (Context& context) const
782 {
783 return new DynamicOffsetGraphicsTestInstance(context, m_params);
784 }
785
checkSupport(Context & context) const786 void DynamicOffsetGraphicsTest::checkSupport(Context& context) const
787 {
788 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), m_params.pipelineConstructionType);
789 }
790
// Generates the vertex and fragment shaders. The vertex shader declares one
// input block per descriptor binding -- laid out according to the grouping
// strategy -- and accumulates all block colors into the vertex color, which
// the fragment shader passes through unmodified.
void DynamicOffsetGraphicsTest::initPrograms (SourceCollections& sourceCollections) const
{
	const deUint32	numBindings		= m_params.numDynamicBindings + m_params.numNonDynamicBindings;
	// Uniform blocks for *_UNIFORM_BUFFER_DYNAMIC, read-only SSBOs otherwise.
	const string	bufferType		= m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "readonly buffer";
	ostringstream	inputBlocks;	// Accumulated block declarations.
	ostringstream	inputSum;		// Accumulated per-binding additions in main().
	string			setAndBinding;
	string			blockSuffix;
	string			accessSuffix;
	bool			dynArrayDecl	= false;	// Dynamic descriptor block array declared?
	bool			nonDynArrayDecl	= false;	// Nondynamic descriptor block array declared?

	for (deUint32 b = 0; b < numBindings; b++)
	{
		// The first numDynamicBindings bindings use the dynamic descriptor type.
		const bool		dynBind	= (b < m_params.numDynamicBindings);
		const string	bStr	= de::toString(b);

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			// All descriptors in set 0, one binding each.
			setAndBinding	= "set = 0, binding = " + bStr;
			blockSuffix		= bStr;
			accessSuffix	= bStr;
		}
		else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
		{
			// One set per descriptor, always binding 0.
			setAndBinding	= "set = " + bStr + ", binding = 0";
			blockSuffix		= bStr;
			accessSuffix	= bStr;
		}
		else // GroupingStrategy::ARRAYS
		{
			// In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
			// nondynamic descriptors.
			setAndBinding	= "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
			blockSuffix		= string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) + "]";
			accessSuffix	= string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
		}

		// In array mode, declare the input block only once per descriptor type.
		bool& arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
		if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
		{
			inputBlocks
				<< "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
				<< "{\n"
				<< "    vec4 color;\n"
				<< "} inputData" << blockSuffix << ";\n"
				;
			arrayDeclFlag = true;
		}

		// But the sum always needs to be added once per descriptor.
		inputSum << "    vtxColor.rgb += inputData" << accessSuffix << ".color.rgb;\n";
	}

	const string	vertexSrc	=
		"#version 450\n"
		"layout(location = 0) in highp vec4 position;\n"
		"layout(location = 1) in highp vec4 color;\n"
		"layout(location = 0) out highp vec4 vtxColor;\n"
		+ inputBlocks.str() +
		"\n"
		"out gl_PerVertex { vec4 gl_Position; };\n"
		"\n"
		"void main()\n"
		"{\n"
		"    gl_Position = position;\n"
		"    vtxColor = vec4(0, 0, 0, 1);\n"
		+ inputSum.str() +
		"}\n";

	// The fragment shader just outputs the interpolated vertex color.
	const string	fragmentSrc	=
		"#version 450\n"
		"layout(location = 0) in highp vec4 vtxColor;\n"
		"layout(location = 0) out highp vec4 fragColor;\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    fragColor = vtxColor;\n"
		"}\n";

	sourceCollections.glslSources.add("vert") << glu::VertexSource(vertexSrc);
	sourceCollections.glslSources.add("frag") << glu::FragmentSource(fragmentSrc);
}
875 #endif // CTS_USES_VULKANSC
// Compute variant of the dynamic-offset tests: each dispatch sums input
// colors read through (dynamic and non-dynamic) buffer descriptors and writes
// the result through a dynamic storage-buffer descriptor; verifyOutput()
// checks the written colors on the host.
class DynamicOffsetComputeTestInstance : public DynamicOffsetTestInstance
{
public:
									DynamicOffsetComputeTestInstance	(Context& context, const TestParams& params);
	virtual							~DynamicOffsetComputeTestInstance	(void);
	void							init								(void);
	virtual tcu::TestStatus			iterate								(void);
	tcu::TestStatus					verifyOutput						(void);

private:
	const deUint32					m_numBindings;		// Dynamic + non-dynamic input bindings.
	const deUint32					m_numOutputColors;	// One output color per recorded dispatch.
	const VkPhysicalDeviceLimits	m_deviceLimits;		// For min*BufferOffsetAlignment.
	Move<VkBuffer>					m_buffer;			// Input buffer holding the test colors.
	de::MovePtr<Allocation>			m_bufferAlloc;
	vector<Move<VkDescriptorSetLayout>>	m_descriptorSetLayouts;
	Move<VkDescriptorPool>			m_descriptorPool;
	vector<Move<VkDescriptorSet>>	m_descriptorSets;
	PipelineLayoutWrapper			m_pipelineLayout;
	ComputePipelineWrapper			m_computePipeline;
	Move<VkBuffer>					m_outputBuffer;		// Host-visible results buffer.
	de::MovePtr<Allocation>			m_outputBufferAlloc;
	Move<VkCommandPool>				m_cmdPool;
	vector<VkCommandBufferSp>		m_cmdBuffers;		// One per params.numCmdBuffers.
};
901
// Cache the total binding count, the number of expected output colors (one
// per dispatch across all command buffers) and the device limits needed for
// dynamic-offset alignment.
DynamicOffsetComputeTestInstance::DynamicOffsetComputeTestInstance (Context& context, const TestParams& params)
	: DynamicOffsetTestInstance	(context, params)
	, m_numBindings				(params.numDynamicBindings + params.numNonDynamicBindings)
	, m_numOutputColors			(params.numCmdBuffers * params.numDescriptorSetBindings)
	, m_deviceLimits			(getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice()).limits)
{
}
909
// Creates every Vulkan object the test needs and pre-records all command
// buffers: input/output buffers, descriptor set layouts (arranged per the
// grouping strategy), pipeline layout, descriptor sets and writes, the
// compute pipeline, and one command buffer per requested submission, each
// recording numDescriptorSetBindings bind+dispatch pairs with advancing
// dynamic offsets.
void DynamicOffsetComputeTestInstance::init (void)
{
	const DeviceInterface&	vk							= m_context.getDeviceInterface();
	const VkDevice			vkDevice					= m_context.getDevice();
	const deUint32			queueFamilyIndex			= m_context.getUniversalQueueFamilyIndex();
	// Required offset alignment for the dynamic input descriptors, which
	// depends on whether they are uniform or storage buffers.
	const VkDeviceSize		inputAlignment				= ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ? m_deviceLimits.minUniformBufferOffsetAlignment : m_deviceLimits.minStorageBufferOffsetAlignment);
	// One color per block, with the block size rounded up to the alignment.
	const VkDeviceSize		inputExtraBytes				= kColorSize % inputAlignment;
	const VkDeviceSize		colorBlockInputSize			= ((inputExtraBytes == 0ull) ? kColorSize : (kColorSize + inputAlignment - inputExtraBytes));
	const deUint32			colorBlockInputSizeU32		= static_cast<deUint32>(colorBlockInputSize);
	// Output blocks are always storage buffers, so round up to the storage
	// buffer offset alignment.
	const VkDeviceSize		outputExtraBytes			= kColorSize % m_deviceLimits.minStorageBufferOffsetAlignment;
	const VkDeviceSize		colorBlockOutputSize		= ((outputExtraBytes == 0ull) ? kColorSize : (kColorSize + m_deviceLimits.minStorageBufferOffsetAlignment - outputExtraBytes));
	const deUint32			colorBlockOutputSizeU32		= static_cast<deUint32>(colorBlockOutputSize);
	const VkDeviceSize		bufferSize					= colorBlockInputSize * kNumTestColors;
	// Base offset between consecutive bindings' data within the input buffer.
	const VkDeviceSize		bindingOffset				= bufferSize / m_numBindings;
	// Non-dynamic bindings use the plain (non-dynamic) descriptor type
	// matching the dynamic one.
	const VkDescriptorType	nonDynamicDescriptorType	= m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
	const VkDeviceSize		outputBufferSize			= colorBlockOutputSize * m_numOutputColors;

	// Raw handle copies of the Move<> wrappers, needed by the create/bind calls.
	vector<VkDescriptorSetLayout>	descriptorSetLayoutsPlain;
	vector<VkDescriptorSet>			descriptorSetsPlain;

	// Create pipeline layout
	{
		// Create descriptor set layouts
		vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;

		for (deUint32 binding = 0; binding < m_numBindings; binding++)
		{
			const bool				dynamicDesc		= (binding < m_params.numDynamicBindings);
			const VkDescriptorType	descriptorType	= (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
			// SINGLE_SET uses one binding number per descriptor; the other
			// strategies always use binding 0 (in separate sets or arrays).
			const deUint32			bindingNumber	= (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
			// In ARRAYS mode a single layout binding describes the whole
			// array of descriptors of one type.
			const deUint32			descriptorCount	= ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ? (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) : 1u);
			const VkDescriptorSetLayoutBinding descriptorSetLayoutBinding =
			{
				bindingNumber,					// uint32_t				binding;
				descriptorType,					// VkDescriptorType		descriptorType;
				descriptorCount,				// uint32_t				descriptorCount;
				VK_SHADER_STAGE_COMPUTE_BIT,	// VkShaderStageFlags	stageFlags;
				DE_NULL							// const VkSampler*		pImmutableSamplers;
			};

			// Skip used descriptors in array mode: the array binding above
			// already covers them, so jump the loop index past its group.
			if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
				binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : m_numBindings);

			descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
		}

		// The output buffer always uses a dynamic storage-buffer descriptor,
		// placed after all input bindings (SINGLE_SET) or in its own set.
		const deUint32 bindingNumberOutput = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? m_numBindings : 0u);
		const VkDescriptorSetLayoutBinding descriptorSetLayoutBindingOutput =
		{
			bindingNumberOutput,						// uint32_t				binding;
			VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,	// VkDescriptorType		descriptorType;
			1u,											// uint32_t				descriptorCount;
			VK_SHADER_STAGE_COMPUTE_BIT,				// VkShaderStageFlags	stageFlags;
			DE_NULL										// const VkSampler*		pImmutableSamplers;
		};

		descriptorSetLayoutBindings.push_back(descriptorSetLayoutBindingOutput);

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			// One layout holding all input bindings plus the output binding.
			const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
			{
				VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,	// VkStructureType						sType;
				DE_NULL,												// const void*							pNext;
				0u,														// VkDescriptorSetLayoutCreateFlags		flags;
				m_numBindings + 1,										// uint32_t								bindingCount;
				descriptorSetLayoutBindings.data()						// const VkDescriptorSetLayoutBinding*	pBindings;
			};

			m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
		}
		else
		{
			// MULTISET/ARRAYS: one layout (hence one set) per recorded binding.
			for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
			{
				const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
				{
					VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,	// VkStructureType						sType;
					DE_NULL,												// const void*							pNext;
					0u,														// VkDescriptorSetLayoutCreateFlags		flags;
					1u,														// uint32_t								bindingCount;
					&descriptorSetLayoutBindings[i]							// const VkDescriptorSetLayoutBinding*	pBindings;
				};

				m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
			}
		}

		// Create pipeline layout
		descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
		for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
			descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();

		const VkPipelineLayoutCreateInfo pipelineLayoutParams =
		{
			VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// VkStructureType				sType;
			DE_NULL,													// const void*					pNext;
			0u,															// VkPipelineLayoutCreateFlags	flags;
			static_cast<deUint32>(descriptorSetLayoutsPlain.size()),	// deUint32						descriptorSetCount;
			descriptorSetLayoutsPlain.data(),							// const VkDescriptorSetLayout*	pSetLayouts;
			0u,															// deUint32						pushConstantRangeCount;
			DE_NULL														// const VkPushDescriptorRange*	pPushDescriptorRanges;
		};

		m_pipelineLayout = PipelineLayoutWrapper(m_params.pipelineConstructionType, vk, vkDevice, &pipelineLayoutParams);
	}

	// Create buffer
	{
		// Fill the host copy with one test color per aligned input block.
		vector<deUint8> hostBuffer((deUint32)bufferSize, 0);
		for (deUint32 colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
			deMemcpy(&hostBuffer[colorBlockInputSizeU32 * colorIdx], &testColors[colorIdx], kColorSize);

		const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

		const VkBufferCreateInfo bufferCreateInfo =
		{
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
			DE_NULL,								// const void*			pNext;
			0u,										// VkBufferCreateFlags	flags
			bufferSize,								// VkDeviceSize			size;
			usageFlags,								// VkBufferUsageFlags	usage;
			VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
			1u,										// deUint32				queueFamilyCount;
			&queueFamilyIndex						// const deUint32*		pQueueFamilyIndices;
		};

		m_buffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
		m_bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
		VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));

		deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
		flushAlloc(vk, vkDevice, *m_bufferAlloc);
	}

	// Create output buffer
	{
		const VkBufferCreateInfo bufferCreateInfo =
		{
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
			DE_NULL,								// const void*			pNext;
			0u,										// VkBufferCreateFlags	flags
			outputBufferSize,						// VkDeviceSize			size;
			VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,		// VkBufferUsageFlags	usage;
			VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
			1u,										// deUint32				queueFamilyCount;
			&queueFamilyIndex						// const deUint32*		pQueueFamilyIndices;
		};

		m_outputBuffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
		m_outputBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_outputBuffer), MemoryRequirement::HostVisible);
		VK_CHECK(vk.bindBufferMemory(vkDevice, *m_outputBuffer, m_outputBufferAlloc->getMemory(), m_outputBufferAlloc->getOffset()));
	}

	// Create descriptor pool
	{
		DescriptorPoolBuilder poolBuilder;
		poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
		poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
		poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1u);
		m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, static_cast<deUint32>(m_descriptorSetLayouts.size()));
	}

	// Create descriptor sets (one per layout created above)
	{
		for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
		{
			const VkDescriptorSetAllocateInfo allocInfo =
			{
				VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,	// VkStructureType					sType;
				DE_NULL,										// const void*						pNext;
				*m_descriptorPool,								// VkDescriptorPool					descriptorPool;
				1u,												// deUint32							setLayoutCount;
				&(m_descriptorSetLayouts[i].get()),				// const VkDescriptorSetLayout*		pSetLayouts;
			};
			m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
		}
	}

	descriptorSetsPlain.resize(m_descriptorSets.size());
	for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
		descriptorSetsPlain[i] = m_descriptorSets[i].get();

	// Update input buffer descriptors
	for (deUint32 binding = 0; binding < m_numBindings; ++binding)
	{
		const bool						dynamicDesc				= (binding < m_params.numDynamicBindings);
		const VkDescriptorType			descriptorType			= dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType;
		// Each binding points at its own region of the input buffer; dynamic
		// bindings will additionally apply the per-dispatch dynamic offset.
		const VkDescriptorBufferInfo	descriptorBufferInfo	=
		{
			*m_buffer,					// VkBuffer			buffer;
			bindingOffset * binding,	// VkDeviceSize		offset;
			kColorSize					// VkDeviceSize		range;
		};

		VkDescriptorSet	bindingSet;
		deUint32		bindingNumber;
		deUint32		dstArrayElement;

		// Translate the logical binding index to the set/binding/array slot
		// matching the layout arrangement chosen above.
		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			bindingSet		= m_descriptorSets[0].get();
			bindingNumber	= binding;
			dstArrayElement	= 0u;
		}
		else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
		{
			bindingSet		= m_descriptorSets[binding].get();
			bindingNumber	= 0u;
			dstArrayElement	= 0u;
		}
		else // GroupingStrategy::ARRAYS
		{
			bindingSet		= (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
			bindingNumber	= 0u;
			dstArrayElement	= (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
		}

		const VkWriteDescriptorSet writeDescriptorSet =
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,	// VkStructureType					sType;
			DE_NULL,								// const void*						pNext;
			bindingSet,								// VkDescriptorSet					dstSet;
			bindingNumber,							// uint32_t							dstBinding;
			dstArrayElement,						// uint32_t							dstArrayElement;
			1u,										// uint32_t							descriptorCount;
			descriptorType,							// VkDescriptorType					descriptorType;
			DE_NULL,								// const VkDescriptorImageInfo*		pImageInfo;
			&descriptorBufferInfo,					// const VkDescriptorBufferInfo*	pBufferInfo;
			DE_NULL									// const VkBufferView*				pTexelBufferView;
		};

		vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
	}

	// Update output buffer descriptor
	{
		// Base offset 0; each dispatch selects its block via a dynamic offset.
		const VkDescriptorBufferInfo descriptorBufferInfo =
		{
			*m_outputBuffer,	// VkBuffer			buffer;
			0u,					// VkDeviceSize		offset;
			kColorSize			// VkDeviceSize		range;
		};

		VkDescriptorSet	bindingSet;
		deUint32		bindingNumber;

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			// Last binding of the single set.
			bindingSet		= m_descriptorSets[0].get();
			bindingNumber	= m_numBindings;
		}
		else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
		{
			bindingSet		= m_descriptorSets.back().get();
			bindingNumber	= 0u;
		}
		else // GroupingStrategy::ARRAYS
		{
			bindingSet		= m_descriptorSets.back().get();
			bindingNumber	= 0u;
		}

		const VkWriteDescriptorSet writeDescriptorSet =
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,		// VkStructureType					sType;
			DE_NULL,									// const void*						pNext;
			bindingSet,									// VkDescriptorSet					dstSet;
			bindingNumber,								// uint32_t							dstBinding;
			0u,											// uint32_t							dstArrayElement;
			1u,											// uint32_t							descriptorCount;
			VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,	// VkDescriptorType					descriptorType;
			DE_NULL,									// const VkDescriptorImageInfo*		pImageInfo;
			&descriptorBufferInfo,						// const VkDescriptorBufferInfo*	pBufferInfo;
			DE_NULL										// const VkBufferView*				pTexelBufferView;
		};

		vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
	}

	// Create pipeline
	{

		m_computePipeline = ComputePipelineWrapper(vk, vkDevice, graphicsToComputeConstructionType(m_params.pipelineConstructionType), m_context.getBinaryCollection().get("compute"));
		m_computePipeline.setDescriptorSetLayouts(m_pipelineLayout.getSetLayoutCount(), m_pipelineLayout.getSetLayouts());
		m_computePipeline.buildPipeline();
	}

	// Create command pool
	m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);

	// Create command buffers
	for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
		m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));

	// Running offsets advance across all command buffers and dispatches so
	// every dispatch reads a new input block and writes a new output block.
	deUint32 inputOffset	= 0u;
	deUint32 outputOffset	= 0u;

	for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
	{
		// In reverse order the offsets are recorded into the command buffers
		// back to front, while submission order (in iterate) stays front to back.
		const deUint32 idx = m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;

		beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
		m_computePipeline.bind(**m_cmdBuffers[idx]);

		for (deUint32 i = 0; i < m_params.numDescriptorSetBindings; i++)
		{
			// Create pipeline barrier: make this dispatch's output write
			// visible to later dispatches and to the host readback.
			const vk::VkBufferMemoryBarrier bufferBarrier =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,					// VkStructureType	sType;
				DE_NULL,														// const void*		pNext;
				vk::VK_ACCESS_SHADER_WRITE_BIT,									// VkAccessFlags	srcAccessMask;
				vk::VK_ACCESS_SHADER_WRITE_BIT | vk::VK_ACCESS_HOST_READ_BIT,	// VkAccessFlags	dstAccessMask;
				VK_QUEUE_FAMILY_IGNORED,										// deUint32			srcQueueFamilyIndex;
				VK_QUEUE_FAMILY_IGNORED,										// deUint32			dstQueueFamilyIndex;
				*m_outputBuffer,												// VkBuffer			buffer;
				outputOffset,													// VkDeviceSize		offset;
				VK_WHOLE_SIZE													// VkDeviceSize		size;
			};

			vector<deUint32> offsets;

			// Offsets for input buffers
			for (deUint32 dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
				offsets.push_back(inputOffset + colorBlockInputSizeU32 * dynamicBindingIdx);
			inputOffset += colorBlockInputSizeU32;

			// Offset for output buffer
			offsets.push_back(outputOffset);
			outputOffset += colorBlockOutputSizeU32;

			vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, static_cast<deUint32>(descriptorSetsPlain.size()), descriptorSetsPlain.data(), (deUint32)offsets.size(), offsets.data());

			// Dispatch
			vk.cmdDispatch(**m_cmdBuffers[idx], 1, 1, 1);

			vk.cmdPipelineBarrier(**m_cmdBuffers[idx], vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | vk::VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &bufferBarrier, 0u, DE_NULL);
		}

		endCommandBuffer(vk, **m_cmdBuffers[idx]);
	}
}
1254
// Nothing to release; Move<>/MovePtr members clean up automatically.
DynamicOffsetComputeTestInstance::~DynamicOffsetComputeTestInstance (void)
{
}
1258
iterate(void)1259 tcu::TestStatus DynamicOffsetComputeTestInstance::iterate (void)
1260 {
1261 init();
1262
1263 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
1264 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(), **m_cmdBuffers[cmdBufferIdx]);
1265
1266 return verifyOutput();
1267 }
1268
verifyOutput(void)1269 tcu::TestStatus DynamicOffsetComputeTestInstance::verifyOutput (void)
1270 {
1271 const deUint32 bindingOffset = kNumTestColors / m_numBindings;
1272 const deUint32 colorBlockOutputSize = static_cast<deUint32>(de::max(kColorSize, m_deviceLimits.minStorageBufferOffsetAlignment));
1273 vector<tcu::Vec4> refColors (m_numOutputColors);
1274 vector<tcu::Vec4> outColors (m_numOutputColors);
1275
1276 for (deUint32 i = 0; i < m_numOutputColors; i++)
1277 {
1278 tcu::Vec4 refColor(0.0f);
1279
1280 for (deUint32 binding = 0; binding < m_params.numDynamicBindings; binding++)
1281 refColor += testColors[i + binding * bindingOffset + binding];
1282 for (deUint32 binding = 0; binding < m_params.numNonDynamicBindings; binding++)
1283 refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
1284 refColor.w() = 1.0f;
1285
1286 refColors[i] = refColor;
1287 }
1288
1289 invalidateAlloc(m_context.getDeviceInterface(), m_context.getDevice(), *m_outputBufferAlloc);
1290
1291 // Grab the output results using offset alignment
1292 for (deUint32 i = 0; i < m_numOutputColors; i++)
1293 outColors[i] = *(tcu::Vec4*)((deUint8*)m_outputBufferAlloc->getHostPtr() + colorBlockOutputSize * i);
1294
1295 // Verify results
1296 for (deUint32 i = 0; i < m_numOutputColors; i++)
1297 if (outColors[i] != refColors[i])
1298 return tcu::TestStatus::fail("Output mismatch");
1299
1300 return tcu::TestStatus::pass("Output matches expected values");
1301 }
1302
// Test case wrapper for the compute dynamic-offset tests: compiles the
// compute shader, checks pipeline-construction support and creates the
// instance that dispatches and verifies results.
class DynamicOffsetComputeTest : public vkt::TestCase
{
public:
							DynamicOffsetComputeTest	(tcu::TestContext&	testContext,
														 const string&		name,
														 const TestParams&	params);
							~DynamicOffsetComputeTest	(void);
	void					initPrograms				(SourceCollections& sourceCollections) const;
	TestInstance*			createInstance				(Context& context) const;
	void					checkSupport				(Context& context) const;

protected:
	// Parameters forwarded verbatim to the test instance.
	const TestParams		m_params;
};
1317
// Store the test parameters; all real work happens in initPrograms() and the
// test instance.
DynamicOffsetComputeTest::DynamicOffsetComputeTest (tcu::TestContext&	testContext,
													const string&		name,
													const TestParams&	params)
	: vkt::TestCase	(testContext, name)
	, m_params		(params)
{
}
1325
// Nothing to release; members clean up via their own destructors.
DynamicOffsetComputeTest::~DynamicOffsetComputeTest (void)
{
}
1329
createInstance(Context & context) const1330 TestInstance* DynamicOffsetComputeTest::createInstance (Context& context) const
1331 {
1332 return new DynamicOffsetComputeTestInstance(context, m_params);
1333 }
1334
checkSupport(Context & context) const1335 void DynamicOffsetComputeTest::checkSupport(Context& context) const
1336 {
1337 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), m_params.pipelineConstructionType);
1338 }
1339
// Generates the compute shader: one input block per descriptor binding (laid
// out per the grouping strategy) plus one write-only output block bound via a
// dynamic storage-buffer descriptor; main() sums all input colors into the
// output color.
void DynamicOffsetComputeTest::initPrograms (SourceCollections& sourceCollections) const
{
	const deUint32	numBindings		= m_params.numDynamicBindings + m_params.numNonDynamicBindings;
	// Uniform blocks for *_UNIFORM_BUFFER_DYNAMIC, SSBOs otherwise.
	const string	bufferType		= m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "buffer";
	ostringstream	inputBlocks;	// Accumulated block declarations.
	ostringstream	inputSum;		// Accumulated per-binding additions in main().
	string			setAndBinding;
	string			blockSuffix;
	string			accessSuffix;
	bool			dynArrayDecl	= false;	// Dynamic descriptor block array declared?
	bool			nonDynArrayDecl	= false;	// Nondynamic descriptor block array declared?
	string			bStr;

	for (deUint32 b = 0; b < numBindings; b++)
	{
		// The first numDynamicBindings bindings use the dynamic descriptor type.
		const bool dynBind = (b < m_params.numDynamicBindings);
		bStr = de::toString(b);

		if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
		{
			// All descriptors in set 0, one binding each.
			setAndBinding	= "set = 0, binding = " + bStr;
			blockSuffix		= bStr;
			accessSuffix	= bStr;
		}
		else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
		{
			// One set per descriptor, always binding 0.
			setAndBinding	= "set = " + bStr + ", binding = 0";
			blockSuffix		= bStr;
			accessSuffix	= bStr;
		}
		else // GroupingStrategy::ARRAYS
		{
			// In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
			// nondynamic descriptors.
			setAndBinding	= "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
			blockSuffix		= string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) + "]";
			accessSuffix	= string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
		}

		// In array mode, declare the input block only once per descriptor type.
		bool& arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
		if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
		{
			inputBlocks
				<< "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
				<< "{\n"
				<< "    vec4 color;\n"
				<< "} inputData" << blockSuffix << ";\n"
				;
			arrayDeclFlag = true;
		}

		// But the sum always needs to be added once per descriptor.
		inputSum << "    outData.color.rgb += inputData" << accessSuffix << ".color.rgb;\n";
	}

	// Place the output block after all input bindings, matching the layout
	// arrangement used when creating the descriptor set layouts.
	bStr = de::toString(numBindings);
	if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
	{
		setAndBinding = "set = 0, binding = " + bStr;
	}
	else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
	{
		setAndBinding = "set = " + bStr + ", binding = 0";
	}
	else // GroupingStrategy::ARRAYS
	{
		// The output buffer goes to a separate set.
		deUint32 usedSets = 0u;
		if (dynArrayDecl)		++usedSets;
		if (nonDynArrayDecl)	++usedSets;

		setAndBinding = "set = " + de::toString(usedSets) + ", binding = 0";
	}

	const string computeSrc =
		"#version 450\n"
		+ inputBlocks.str() +
		"layout(" + setAndBinding + ") writeonly buffer Output\n"
		"{\n"
		"    vec4 color;\n"
		"} outData;\n"
		"\n"
		"void main()\n"
		"{\n"
		"    outData.color = vec4(0, 0, 0, 1);\n"
		+ inputSum.str() +
		"}\n";

	sourceCollections.glslSources.add("compute") << glu::ComputeSource(computeSrc);
}
1431
// Mixed graphics+compute dynamic-offset test instance: combines dynamic
// uniform and storage buffer descriptors used across vertex, fragment and
// compute stages, with configurable individual offsets and ordering.
class DynamicOffsetMixedTestInstance : public vkt::TestInstance
{
public:
							DynamicOffsetMixedTestInstance	(Context&						context,
															 const PipelineConstructionType	pipelineConstructionType,
															 const tcu::IVec2				renderSize,
															 const deUint32					numInstances,
															 const bool						testAllOffsets,
															 const bool						reverseOrder,
															 const bool						runComputeFirst,
															 const deUint32					vertexOffset,
															 const deUint32					sharedUboOffset,
															 const deUint32					fragUboOffset,
															 const deUint32					ssboReadOffset,
															 const deUint32					ssboWriteOffset)
							: vkt::TestInstance				(context)
							, m_pipelineConstructionType	(pipelineConstructionType)
							, m_renderSize					(renderSize)
							, m_numInstances				(numInstances)
							, m_testAllOffsets				(testAllOffsets)
							, m_reverseOrder				(reverseOrder)
							, m_runComputeFirst				(runComputeFirst)
							, m_vertexOffset				(vertexOffset)
							, m_sharedUboOffset				(sharedUboOffset)
							, m_fragUboOffset				(fragUboOffset)
							, m_ssboReadOffset				(ssboReadOffset)
							, m_ssboWriteOffset				(ssboWriteOffset)
							{}

							~DynamicOffsetMixedTestInstance	();

	virtual tcu::TestStatus	iterate							(void);

private:
	// Interleaved position+color vertex layout used by the vertex buffer.
	struct VertexInfo
	{
		tcu::Vec4	position;
		tcu::Vec4	color;
	};

	const VkFormat					OUTPUT_COLOR_FORMAT = VK_FORMAT_R8G8B8A8_UNORM;

	const PipelineConstructionType	m_pipelineConstructionType;
	const tcu::IVec2				m_renderSize;
	const deUint32					m_numInstances;		// Instanced draw count.
	const bool						m_testAllOffsets;	// Test every offset vs. the fixed ones below.
	const bool						m_reverseOrder;		// Reverses descriptor binding order.
	const bool						m_runComputeFirst;	// Compute pass before the graphics pass.
	const deUint32					m_vertexOffset;		// Dynamic offset for the vertex-stage UBO.
	const deUint32					m_sharedUboOffset;	// Dynamic offset for the UBO shared by frag+compute.
	const deUint32					m_fragUboOffset;	// Dynamic offset for the fragment-only UBO.
	const deUint32					m_ssboReadOffset;	// Dynamic offset for the SSBO read by compute.
	const deUint32					m_ssboWriteOffset;	// Dynamic offset for the SSBO written by compute.
};
1486
// Nothing to release; members clean up via their own destructors.
DynamicOffsetMixedTestInstance::~DynamicOffsetMixedTestInstance ()
{
}
1490
iterate(void)1491 tcu::TestStatus DynamicOffsetMixedTestInstance::iterate (void)
1492 {
1493 tcu::TestLog& log = m_context.getTestContext().getLog();
1494 const InstanceInterface& vki = m_context.getInstanceInterface();
1495 const DeviceInterface& vk = m_context.getDeviceInterface();
1496 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
1497 const VkDevice device = m_context.getDevice();
1498 Allocator& allocator = m_context.getDefaultAllocator();
1499 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1500
1501 // Create shaders
1502 const ShaderWrapper vertexShaderModule = ShaderWrapper(vk, device, m_context.getBinaryCollection().get("vert"), 0u);
1503 const ShaderWrapper fragmentShaderModule = ShaderWrapper(vk, device, m_context.getBinaryCollection().get("frag"), 0u);
1504 const ShaderWrapper computeShaderModule = ShaderWrapper(vk, device, m_context.getBinaryCollection().get("comp"), 0u);
1505
1506 const deUint32 vertexBufferBindId = 0u;
1507
1508 // Vertex input state and binding
1509 VkVertexInputBindingDescription bindingDescription
1510 {
1511 vertexBufferBindId, // uint32_t binding;
1512 sizeof(VertexInfo), // uint32_t stride;
1513 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
1514 };
1515
1516 const std::array<VkVertexInputAttributeDescription, 2> vertexAttributeDescs
1517 { {
1518 VkVertexInputAttributeDescription
1519 {
1520 0u, // uint32_t location;
1521 vertexBufferBindId, // uint32_t binding;
1522 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1523 0u // uint32_t offset;
1524 },
1525
1526 VkVertexInputAttributeDescription
1527 {
1528 1u, // uint32_t location;
1529 vertexBufferBindId, // uint32_t binding;
1530 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1531 deUint32(sizeof(float)) * 4u // uint32_t offset;
1532 }
1533 } };
1534
1535 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo
1536 {
1537 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
1538 DE_NULL, // const void* pNext;
1539 0u, // VkPipelineVertexInputStateCreateFlags flags;
1540 1u, // uint32_t vertexBindingDescriptionCount;
1541 &bindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
1542 static_cast<uint32_t>(vertexAttributeDescs.size()), // uint32_t vertexAttributeDescriptionCount;
1543 vertexAttributeDescs.data() // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
1544 };
1545
1546 // Descriptor pool and descriptor set
1547 DescriptorPoolBuilder poolBuilder;
1548
1549 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 3u);
1550 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 2u);
1551
1552 const Move<VkDescriptorPool> descriptorPool = poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1553
1554 DescriptorSetLayoutBuilder layoutBuilderAttachments;
1555 {
1556 if (!m_reverseOrder)
1557 {
1558 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_VERTEX_BIT);
1559 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT);
1560 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, VK_SHADER_STAGE_COMPUTE_BIT);
1561 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT);
1562 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, VK_SHADER_STAGE_COMPUTE_BIT);
1563 }
1564 else
1565 {
1566 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, VK_SHADER_STAGE_COMPUTE_BIT);
1567 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT);
1568 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, VK_SHADER_STAGE_COMPUTE_BIT);
1569 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT);
1570 layoutBuilderAttachments.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_VERTEX_BIT);
1571 }
1572 }
1573
1574 const Move<VkDescriptorSetLayout> descriptorSetLayout = layoutBuilderAttachments.build(vk, device);
1575
1576 const Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vk, device, descriptorPool.get(), descriptorSetLayout.get());
1577
1578 Move<VkImage> colorImage = (makeImage(vk, device, makeImageCreateInfo(m_renderSize, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT)));
1579
1580 // Allocate and bind color image memory
1581 const VkImageSubresourceRange colorSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
1582 const UniquePtr<Allocation> colorImageAlloc (bindImage(vk, device, allocator, *colorImage, MemoryRequirement::Any));
1583 Move<VkImageView> colorImageView = (makeImageView(vk, device, *colorImage, VK_IMAGE_VIEW_TYPE_2D, OUTPUT_COLOR_FORMAT, colorSubresourceRange));
1584
1585 // Create renderpass
1586 const VkAttachmentDescription attachmentDescription =
1587 {
1588 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags
1589 OUTPUT_COLOR_FORMAT, // VkFormat format
1590 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
1591 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp
1592 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp
1593 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp
1594 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp
1595 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout
1596 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout
1597 };
1598
1599 const VkAttachmentReference attachmentReference =
1600 {
1601 0u, // deUint32 attachment
1602 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout
1603 };
1604
1605 const VkSubpassDescription subpassDescription =
1606 {
1607 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
1608 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
1609 0u, // deUint32 inputAttachmentCount
1610 DE_NULL, // const VkAttachmentReference* pInputAttachments
1611 1u, // deUint32 colorAttachmentCount
1612 &attachmentReference, // const VkAttachmentReference* pColorAttachments
1613 DE_NULL, // const VkAttachmentReference* pResolveAttachments
1614 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
1615 0u, // deUint32 preserveAttachmentCount
1616 DE_NULL // const deUint32* pPreserveAttachments
1617 };
1618
1619 const VkRenderPassCreateInfo renderPassInfo =
1620 {
1621 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureTypei sType
1622 DE_NULL, // const void* pNext
1623 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
1624 1u, // deUint32 attachmentCount
1625 &attachmentDescription, // const VkAttachmentDescription* pAttachments
1626 1u, // deUint32 subpassCount
1627 &subpassDescription, // const VkSubpassDescription* pSubpasses
1628 0u, // deUint32 dependencyCount
1629 DE_NULL // const VkSubpassDependency* pDependencies
1630 };
1631
1632 RenderPassWrapper renderPass (m_pipelineConstructionType, vk, device, &renderPassInfo);
1633
1634 // Create framebuffer
1635 const VkImageView attachmentBindInfos[] =
1636 {
1637 *colorImageView
1638 };
1639
1640 const VkFramebufferCreateInfo framebufferCreateInfo =
1641 {
1642 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
1643 DE_NULL, // const void* pNext;
1644 VkFramebufferCreateFlags(0), // VkFramebufferCreateFlags flags;
1645 *renderPass, // VkRenderPass renderPass;
1646 1u, // deUint32 attachmentCount;
1647 attachmentBindInfos, // const VkImageView* pAttachments;
1648 (deUint32)m_renderSize.x(), // deUint32 width;
1649 (deUint32)m_renderSize.y(), // deUint32 height;
1650 1u // deUint32 layers;
1651 };
1652
1653 renderPass.createFramebuffer(vk, device, &framebufferCreateInfo, *colorImage);
1654
1655 // Create pipeline layout
1656 const VkPipelineLayoutCreateInfo pipelineLayoutInfo =
1657 {
1658 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
1659 DE_NULL, // const void* pNext;
1660 0u, // VkPipelineLayoutCreateFlags flags;
1661 1u, // deUint32 descriptorSetCount;
1662 &descriptorSetLayout.get(), // const VkDescriptorSetLayout* pSetLayouts;
1663 0u, // deUint32 pushConstantRangeCount;
1664 DE_NULL // const VkPushDescriptorRange* pPushDescriptorRanges;
1665 };
1666
1667 PipelineLayoutWrapper pipelineLayout (m_pipelineConstructionType, vk, device, &pipelineLayoutInfo);
1668
1669 // Create graphics pipeline
1670 const std::vector<VkViewport> viewports(1, makeViewport(m_renderSize));
1671 const std::vector<VkRect2D> scissors(1, makeRect2D(m_renderSize));
1672
1673 const VkPipelineRasterizationStateCreateInfo rasterizationState =
1674 {
1675 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType
1676 DE_NULL, // const void* pNext
1677 0u, // VkPipelineRasterizationStateCreateFlags flags
1678 VK_FALSE, // VkBool32 depthClampEnable
1679 VK_FALSE, // VkBool32 rasterizerDiscardEnable
1680 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode
1681 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode
1682 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace
1683 VK_FALSE, // VkBool32 depthBiasEnable
1684 0.0f, // float depthBiasConstantFactor
1685 0.0f, // float depthBiasClamp
1686 0.0f, // float depthBiasSlopeFactor
1687 1.0f // float lineWidth
1688 };
1689
1690 GraphicsPipelineWrapper graphicsPipeline (vki, vk, physicalDevice, device, m_context.getDeviceExtensions(), m_pipelineConstructionType);
1691
1692 graphicsPipeline.setDefaultMultisampleState()
1693 .setDefaultColorBlendState()
1694 .setDefaultTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
1695 .setupVertexInputState(&vertexInputStateCreateInfo)
1696 .setupPreRasterizationShaderState((viewports),
1697 scissors,
1698 pipelineLayout,
1699 *renderPass,
1700 0u,
1701 vertexShaderModule,
1702 &rasterizationState)
1703 .setupFragmentShaderState(pipelineLayout,
1704 *renderPass,
1705 0u,
1706 fragmentShaderModule)
1707 .setupFragmentOutputState(*renderPass, 0u)
1708 .setMonolithicPipelineLayout(pipelineLayout)
1709 .buildPipeline();
1710
1711 ComputePipelineWrapper computePipeline (vk, device, graphicsToComputeConstructionType(m_pipelineConstructionType), m_context.getBinaryCollection().get("comp"));
1712 computePipeline.setDescriptorSetLayout(descriptorSetLayout.get());
1713 computePipeline.buildPipeline();
1714
1715 const VkQueue queue = m_context.getUniversalQueue();
1716 const VkPhysicalDeviceLimits deviceLimits = getPhysicalDeviceProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()).limits;
1717
1718 // Create vertex buffer
1719 const deUint32 numVertices = 6;
1720 const VkDeviceSize vertexBufferSizeBytes = 256;
1721 const Unique<VkBuffer> vertexBuffer (makeBuffer(vk, device, vertexBufferSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1722 const de::UniquePtr<Allocation> vertexBufferAlloc (bindBuffer(vk, device, allocator, *vertexBuffer, MemoryRequirement::HostVisible));
1723
1724 const deUint32 instanceSize = (deUint32)std::sqrt(m_numInstances);
1725 const float posIncrement = 1.0f / (float)m_numInstances * float(instanceSize);
1726
1727 // Result image has to be a square and multiple of 16.
1728 DE_ASSERT(instanceSize * instanceSize == m_numInstances && m_numInstances % 16u == 0);
1729
1730 {
1731 tcu::Vec4 vertexColor = tcu::Vec4(0.0f, 0.5f, 0.0f, 1.0f);
1732 VertexInfo* const pVertices = reinterpret_cast<VertexInfo*>(vertexBufferAlloc->getHostPtr());
1733
1734 pVertices[0] = { tcu::Vec4(posIncrement, -posIncrement, 0.0f, 1.0f), vertexColor };
1735 pVertices[1] = { tcu::Vec4(-posIncrement, -posIncrement, 0.0f, 1.0f), vertexColor };
1736 pVertices[2] = { tcu::Vec4(-posIncrement, posIncrement, 0.0f, 1.0f), vertexColor };
1737 pVertices[3] = { tcu::Vec4(-posIncrement, posIncrement, 1.0f, 1.0f), vertexColor };
1738 pVertices[4] = { tcu::Vec4(posIncrement, posIncrement, 1.0f, 1.0f), vertexColor };
1739 pVertices[5] = { tcu::Vec4(posIncrement, -posIncrement, 1.0f, 1.0f), vertexColor };
1740
1741 flushAlloc(vk, device, *vertexBufferAlloc);
1742 }
1743
1744 // Prepare buffers
1745 const vk::VkDeviceSize minUboAlignment = deviceLimits.minUniformBufferOffsetAlignment;
1746 const vk::VkDeviceSize minSsboAlignment = deviceLimits.minStorageBufferOffsetAlignment;
1747 const deUint32 bufferElementSizeVec4 = (deUint32)sizeof(tcu::Vec4);
1748 const deUint32 bufferElementSizeMat4 = (deUint32)sizeof(tcu::Mat4);
1749 deUint32 uboDynamicAlignmentVec4 = bufferElementSizeVec4;
1750 deUint32 uboDynamicAlignmentMat4 = bufferElementSizeMat4;
1751 deUint32 ssboDynamicAlignmentVec4 = bufferElementSizeVec4;
1752 deUint32 ssboDynamicAlignmentMat4 = bufferElementSizeMat4;
1753
1754 if (minUboAlignment > 0)
1755 {
1756 uboDynamicAlignmentVec4 = (uboDynamicAlignmentVec4 + (deUint32)minUboAlignment - 1) & ~((deUint32)minUboAlignment - 1);
1757 uboDynamicAlignmentMat4 = (uboDynamicAlignmentMat4 + (deUint32)minUboAlignment - 1) & ~((deUint32)minUboAlignment - 1);
1758 }
1759 if (minSsboAlignment > 0)
1760 {
1761 ssboDynamicAlignmentVec4 = (ssboDynamicAlignmentVec4 + (deUint32)minSsboAlignment - 1) & ~((deUint32)minSsboAlignment - 1);
1762 ssboDynamicAlignmentMat4 = (ssboDynamicAlignmentMat4 + (deUint32)minSsboAlignment - 1) & ~((deUint32)minSsboAlignment - 1);
1763 }
1764
1765 const deUint32 uboBufferSizeVec4 = m_numInstances * uboDynamicAlignmentVec4;
1766 const deUint32 uboBufferSizeMat4 = m_numInstances * uboDynamicAlignmentMat4;
1767 const deUint32 ssboBufferSizeVec4 = m_numInstances * ssboDynamicAlignmentVec4;
1768 const deUint32 ssboBufferSizeMat4 = m_numInstances * ssboDynamicAlignmentMat4;
1769
1770 const Unique<VkBuffer> uboBufferVertex (makeBuffer(vk, device, uboBufferSizeVec4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1771 const Unique<VkBuffer> uboBufferShared (makeBuffer(vk, device, uboBufferSizeVec4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1772 const Unique<VkBuffer> ssboBufferWrite (makeBuffer(vk, device, ssboBufferSizeVec4, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
1773 const Unique<VkBuffer> uboBufferFrag (makeBuffer(vk, device, uboBufferSizeMat4, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
1774 const Unique<VkBuffer> ssboBufferRead (makeBuffer(vk, device, ssboBufferSizeMat4, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
1775
1776 const UniquePtr<Allocation> uboBufferAllocVertex (bindBuffer(vk, device, allocator, *uboBufferVertex, MemoryRequirement::HostVisible));
1777 const UniquePtr<Allocation> uboBufferAllocShared (bindBuffer(vk, device, allocator, *uboBufferShared, MemoryRequirement::HostVisible));
1778 const UniquePtr<Allocation> ssboBufferAllocWrite (bindBuffer(vk, device, allocator, *ssboBufferWrite, MemoryRequirement::HostVisible));
1779 const UniquePtr<Allocation> uboBufferAllocFrag (bindBuffer(vk, device, allocator, *uboBufferFrag, MemoryRequirement::HostVisible));
1780 const UniquePtr<Allocation> ssboBufferAllocRead (bindBuffer(vk, device, allocator, *ssboBufferRead, MemoryRequirement::HostVisible));
1781
1782 const float colorIncrement = 1.0f / float(m_numInstances);
1783
1784 std::vector<tcu::Vec4> constVertexOffsets;
1785
1786 deUint32 columnCount = 0u;
1787 float columnOffset = posIncrement;
1788 float rowOffset = -1.0f + posIncrement;
1789
1790 for (deUint32 posId = 0; posId < m_numInstances; posId++)
1791 {
1792 constVertexOffsets.push_back(tcu::Vec4(-1.0f + columnOffset, rowOffset, 0.0f, 0.0f));
1793
1794 columnOffset += 2 * posIncrement;
1795 columnCount++;
1796
1797 if (columnCount >= instanceSize)
1798 {
1799 columnCount = 0;
1800 columnOffset = posIncrement;
1801 rowOffset += 2 * posIncrement;
1802 }
1803 }
1804
1805 // Fill buffers
1806 {
1807 char* pPosUboVertex = static_cast<char*>(uboBufferAllocVertex->getHostPtr());
1808 char* pPosUboShared = static_cast<char*>(uboBufferAllocShared->getHostPtr());
1809 char* pPosSsboWrite = static_cast<char*>(ssboBufferAllocWrite->getHostPtr());
1810 char* pPosUboFrag = static_cast<char*>(uboBufferAllocFrag->getHostPtr());
1811 char* pPosSsboRead = static_cast<char*>(ssboBufferAllocRead->getHostPtr());
1812
1813 if (m_testAllOffsets)
1814 {
1815 for (deUint32 posId = 0; posId < m_numInstances; posId++)
1816 {
1817 const float constFragMat[] =
1818 {
1819 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1820 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1821 colorIncrement, colorIncrement, colorIncrement, colorIncrement,
1822 colorIncrement, colorIncrement, colorIncrement, colorIncrement * float(posId + 1u)
1823 };
1824
1825 const float constReadMat[] =
1826 {
1827 1.0f, 0.0f, 1.0f, 0.0f,
1828 0.0f, 1.0f, 0.0f, 1.0f - colorIncrement * float(posId + 1u),
1829 1.0f, 0.0f, 1.0f, 0.17f,
1830 0.0f, 1.0f, 0.0f, 1.0f
1831 };
1832
1833 *((tcu::Vec4*)pPosUboVertex) = constVertexOffsets[posId];
1834 *((tcu::Vec4*)pPosUboShared) = tcu::Vec4(colorIncrement * float(posId + 1u), 0.0f, 0.0f, 1.0f);
1835 *((tcu::Vec4*)pPosSsboWrite) = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1836 *((tcu::Mat4*)pPosUboFrag) = tcu::Mat4(constFragMat);
1837 *((tcu::Mat4*)pPosSsboRead) = tcu::Mat4(constReadMat);
1838 pPosUboVertex += uboDynamicAlignmentVec4;
1839 pPosUboShared += uboDynamicAlignmentVec4;
1840 pPosSsboWrite += ssboDynamicAlignmentVec4;
1841 pPosUboFrag += uboDynamicAlignmentMat4;
1842 pPosSsboRead += ssboDynamicAlignmentMat4;
1843 }
1844 }
1845 else
1846 {
1847 for (deUint32 posId = 0; posId < m_numInstances; posId++)
1848 {
1849 const float constFragMat[] =
1850 {
1851 0.0f, 0.0f, 0.0f, 0.0f,
1852 0.0f, 0.0f, 0.0f, 0.0f,
1853 0.0f, 0.0f, 0.0f, 0.0f,
1854 0.0f, 0.0f, 0.0f, m_fragUboOffset == posId ? 1.0f : 0.0f
1855 };
1856
1857 const float constReadMat[] =
1858 {
1859 0.0f, 0.0f, 0.0f, 0.0f,
1860 0.0f, 0.0f, 0.0f, m_ssboReadOffset == posId ? 0.25f : 0.0f,
1861 0.0f, 0.0f, 0.0f, m_ssboReadOffset == posId ? 0.17f : 0.0f,
1862 0.0f, 0.0f, 0.0f, 0.0f
1863 };
1864
1865 *((tcu::Vec4*)pPosUboVertex) = constVertexOffsets[posId];
1866 *((tcu::Vec4*)pPosUboShared) = m_sharedUboOffset == posId ? tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f) : tcu::Vec4(0);
1867 *((tcu::Vec4*)pPosSsboWrite) = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1868 *((tcu::Mat4*)pPosUboFrag) = tcu::Mat4(constFragMat);
1869 *((tcu::Mat4*)pPosSsboRead) = tcu::Mat4(constReadMat);
1870 pPosUboVertex += uboDynamicAlignmentVec4;
1871 pPosUboShared += uboDynamicAlignmentVec4;
1872 pPosSsboWrite += ssboDynamicAlignmentVec4;
1873 pPosUboFrag += uboDynamicAlignmentMat4;
1874 pPosSsboRead += ssboDynamicAlignmentMat4;
1875 }
1876 }
1877
1878 flushAlloc(vk, device, *uboBufferAllocVertex);
1879 flushAlloc(vk, device, *uboBufferAllocShared);
1880 flushAlloc(vk, device, *ssboBufferAllocWrite);
1881 flushAlloc(vk, device, *uboBufferAllocFrag);
1882 flushAlloc(vk, device, *ssboBufferAllocRead);
1883 }
1884
1885 const vk::VkDescriptorBufferInfo uboInfoVertexVec = makeDescriptorBufferInfo(*uboBufferVertex, 0u, bufferElementSizeVec4);
1886 const vk::VkDescriptorBufferInfo uboInfoVec = makeDescriptorBufferInfo(*uboBufferShared, 0u, bufferElementSizeVec4);
1887 const vk::VkDescriptorBufferInfo ssboInfoVec = makeDescriptorBufferInfo(*ssboBufferWrite, 0u, bufferElementSizeVec4);
1888 const vk::VkDescriptorBufferInfo uboInfoMat = makeDescriptorBufferInfo(*uboBufferFrag, 0u, bufferElementSizeMat4);
1889 const vk::VkDescriptorBufferInfo ssboInfoMat = makeDescriptorBufferInfo(*ssboBufferRead, 0u, bufferElementSizeMat4);
1890
1891 // Update descriptors
1892 DescriptorSetUpdateBuilder builder;
1893
1894 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 4u : 0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoVertexVec);
1895 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 3u : 1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoVec);
1896 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding( 2u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, &ssboInfoVec);
1897 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 1u : 3u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, &uboInfoMat);
1898 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(m_reverseOrder ? 0u : 4u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, &ssboInfoMat);
1899 builder.update(vk, device);
1900
1901 // Command buffer
1902 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1903 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1904
1905 const VkDeviceSize vertexBufferOffset = 0u;
1906
1907 // Render result buffer
1908 const VkDeviceSize colorBufferSizeBytes = tcu::getPixelSize(mapVkFormat(OUTPUT_COLOR_FORMAT)) * static_cast<VkDeviceSize>(m_renderSize.x()) * static_cast<VkDeviceSize>(m_renderSize.y());
1909 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1910 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vk, device, allocator, *colorBuffer, MemoryRequirement::HostVisible));
1911
1912 const VkClearValue clearColorValue = defaultClearValue(OUTPUT_COLOR_FORMAT);
1913
1914 bool runGraphics = !m_runComputeFirst;
1915
1916 for (int i = 0; i < 2; i++)
1917 {
1918 beginCommandBuffer(vk, *cmdBuffer);
1919
1920 if (runGraphics)
1921 {
1922 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), clearColorValue);
1923 graphicsPipeline.bind(*cmdBuffer);
1924 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1925 }
1926 else
1927 {
1928 computePipeline.bind(*cmdBuffer);
1929 }
1930
1931 if (m_testAllOffsets)
1932 {
1933 for (deUint32 instance = 0; instance < m_numInstances; instance++)
1934 {
1935 deUint32 uboOffsetVec4 = uboDynamicAlignmentVec4 * instance;
1936 deUint32 uboOffsetMat4 = uboDynamicAlignmentMat4 * instance;
1937 deUint32 ssboOffsetVec4 = ssboDynamicAlignmentVec4 * instance;
1938 deUint32 ssboOffsetMat4 = ssboDynamicAlignmentMat4 * instance;
1939 std::vector<deUint32> offsets;
1940
1941 offsets.push_back(m_reverseOrder ? ssboOffsetMat4 : uboOffsetVec4);
1942 offsets.push_back(m_reverseOrder ? uboOffsetMat4 : uboOffsetVec4);
1943 offsets.push_back(ssboOffsetVec4);
1944 offsets.push_back(m_reverseOrder ? uboOffsetVec4 : uboOffsetMat4);
1945 offsets.push_back(m_reverseOrder ? uboOffsetVec4 : ssboOffsetMat4);
1946
1947 if (runGraphics)
1948 {
1949 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &descriptorSet.get(), (deUint32)offsets.size(), offsets.data());
1950 vk.cmdDraw(*cmdBuffer, numVertices, 1u, 0u, 0u);
1951 }
1952 else
1953 {
1954 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), (deUint32)offsets.size(), offsets.data());
1955 vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
1956 }
1957 }
1958 }
1959 else
1960 {
1961 std::vector<deUint32> offsets;
1962
1963 offsets.push_back(m_reverseOrder ? ssboDynamicAlignmentMat4 * m_ssboReadOffset : uboDynamicAlignmentVec4 * m_vertexOffset);
1964 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentMat4 * m_fragUboOffset : uboDynamicAlignmentVec4 * m_sharedUboOffset);
1965 offsets.push_back(ssboDynamicAlignmentVec4 * m_ssboWriteOffset);
1966 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentVec4 * m_sharedUboOffset : uboDynamicAlignmentMat4 * m_fragUboOffset);
1967 offsets.push_back(m_reverseOrder ? uboDynamicAlignmentVec4 * m_vertexOffset : ssboDynamicAlignmentMat4 * m_ssboReadOffset);
1968
1969 if (runGraphics)
1970 {
1971 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &descriptorSet.get(), (deUint32)offsets.size(), offsets.data());
1972 vk.cmdDraw(*cmdBuffer, numVertices, 1u, 0u, 0u);
1973 }
1974 else
1975 {
1976 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), (deUint32)offsets.size(), offsets.data());
1977 vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
1978 }
1979 }
1980
1981 if (runGraphics)
1982 {
1983 renderPass.end(vk, *cmdBuffer);
1984 copyImageToBuffer(vk, *cmdBuffer, *colorImage, *colorBuffer, m_renderSize, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
1985 }
1986
1987 runGraphics = !runGraphics;
1988
1989 endCommandBuffer(vk, *cmdBuffer);
1990
1991 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1992 m_context.resetCommandPoolForVKSC(device, *cmdPool);
1993 }
1994
1995 // Check result image
1996 {
1997 tcu::TextureLevel referenceTexture (mapVkFormat(OUTPUT_COLOR_FORMAT), m_renderSize.x(), m_renderSize.y());
1998 const tcu::PixelBufferAccess referenceAccess = referenceTexture.getAccess();
1999 const deUint32 segmentSize = m_renderSize.x() / instanceSize;
2000
2001 // Create reference image
2002 if (m_testAllOffsets)
2003 {
2004 for (int y = 0; y < m_renderSize.y(); ++y)
2005 {
2006 for (int x = 0; x < m_renderSize.x(); ++x)
2007 {
2008 // While running test for all offsets, we create a nice gradient-like color for the pixels.
2009 float colorValue = (float)(y / segmentSize * instanceSize + x / segmentSize + 1u) * colorIncrement;
2010
2011 referenceAccess.setPixel(tcu::Vec4(colorValue, 0.5f, colorValue, 1.0f), x, y);
2012 }
2013 }
2014 }
2015 else
2016 {
2017 // At first we have to find a correct location for the drawn square.
2018 const deUint32 segmentCountPerRow = (deUint32)m_renderSize.x() / segmentSize;
2019 const deUint32 offsetY = m_vertexOffset > segmentCountPerRow ? m_vertexOffset / segmentCountPerRow : 0u;
2020 const deUint32 offsetX = offsetY > 0 ? m_vertexOffset - (segmentCountPerRow * offsetY) : m_vertexOffset;
2021 const deUint32 pixelOffsetY = segmentSize * offsetY;
2022 const deUint32 pixelOffsetX = segmentSize * offsetX;
2023
2024 for (int y = 0; y < m_renderSize.y(); ++y)
2025 {
2026 for (int x = 0; x < m_renderSize.x(); ++x)
2027 {
2028 float colorValueRed = clearColorValue.color.float32[0];
2029 float colorValueGreen = clearColorValue.color.float32[1];
2030 float colorValueBlue = clearColorValue.color.float32[2];
2031
2032 // Next, we fill the correct number of pixels with test color.
2033 if (x >= (int)pixelOffsetX && x < int(pixelOffsetX + segmentSize) && y >= (int)pixelOffsetY && y < int(pixelOffsetY + segmentSize))
2034 {
2035 // While running test only for one offset, the result color for pixel is constant.
2036 colorValueRed = 1.0f;
2037 colorValueGreen = 0.5f;
2038 colorValueBlue = colorValueRed;
2039 }
2040
2041 referenceAccess.setPixel(tcu::Vec4(colorValueRed, colorValueGreen, colorValueBlue, 1.0f), x, y);
2042 }
2043 }
2044 }
2045
2046 invalidateAlloc(vk, device, *colorBufferAlloc);
2047
2048 const tcu::ConstPixelBufferAccess resultPixelAccess(mapVkFormat(OUTPUT_COLOR_FORMAT), m_renderSize.x(), m_renderSize.y(), 1, colorBufferAlloc->getHostPtr());
2049
2050 if (!tcu::floatThresholdCompare(log, "color", "Image compare", referenceAccess, resultPixelAccess, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT))
2051 return tcu::TestStatus::fail("Rendered image is not correct");
2052 }
2053
2054 // Check result buffer values
2055 {
2056 invalidateAlloc(vk, device, *ssboBufferAllocWrite);
2057
2058 std::vector<tcu::Vec4> refColors;
2059 std::vector<tcu::Vec4> outColors;
2060
2061 for (deUint32 i = 0; i < m_numInstances; i++)
2062 {
2063 if (m_testAllOffsets)
2064 {
2065 refColors.push_back(tcu::Vec4(float(i + 1) * colorIncrement, 1.0f - float(i + 1) * colorIncrement, 0.17f, 1.0f));
2066 }
2067 else
2068 {
2069 refColors.push_back(m_ssboWriteOffset == i ? tcu::Vec4(1.0f, 0.25f, 0.17f, 1.0f) : tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
2070 }
2071
2072 outColors.push_back(*(tcu::Vec4*)((deUint8*)ssboBufferAllocWrite->getHostPtr() + ssboDynamicAlignmentVec4 * i));
2073
2074 if (!compareVectors(outColors[i], refColors[i], 0.01f))
2075 {
2076 log << tcu::TestLog::Message << "Reference: " << refColors[i].x() << ", " << refColors[i].y() << ", " << refColors[i].z() << ", " << refColors[i].w() << ", " << tcu::TestLog::EndMessage;
2077 log << tcu::TestLog::Message << "Result : " << outColors[i].x() << ", " << outColors[i].y() << ", " << outColors[i].z() << ", " << outColors[i].w() << ", " << tcu::TestLog::EndMessage;
2078
2079 return tcu::TestStatus::fail("Result value is not correct");
2080 }
2081 }
2082 }
2083
2084 return tcu::TestStatus::pass("Success");
2085 }
2086
// Test case wrapper: builds the shaders and instantiates DynamicOffsetMixedTestInstance.
// Exercises dynamic uniform/storage buffer offsets shared between graphics and compute,
// optionally with reversed descriptor binding order and either pipeline submitted first.
class DynamicOffsetMixedTest : public vkt::TestCase
{
public:
							// When testAllOffsets is true every instance offset is exercised
							// and the individual *Offset parameters are ignored (they default to 0).
							DynamicOffsetMixedTest	(tcu::TestContext&				testContext,
													 const PipelineConstructionType	pipelineConstructionType,
													 const std::string&				name,
													 const tcu::IVec2				renderSize,
													 const deUint32					numInstances,
													 const bool						testAllOffsets,
													 const bool						reverseOrder,
													 const bool						runComputeFirst = false,
													 const deUint32					vertexOffset = 0u,
													 const deUint32					sharedUboOffset = 0u,
													 const deUint32					fragUboOffset = 0u,
													 const deUint32					ssboReadOffset = 0u,
													 const deUint32					ssboWriteOffset = 0u)
								: vkt::TestCase					(testContext, name)
								, m_pipelineConstructionType	(pipelineConstructionType)
								, m_renderSize					(renderSize)
								, m_numInstances				(numInstances)
								, m_testAllOffsets				(testAllOffsets)
								, m_reverseOrder				(reverseOrder)
								, m_runComputeFirst				(runComputeFirst)
								, m_vertexOffset				(vertexOffset)
								, m_sharedUboOffset				(sharedUboOffset)
								, m_fragUboOffset				(fragUboOffset)
								, m_ssboReadOffset				(ssboReadOffset)
								, m_ssboWriteOffset				(ssboWriteOffset)
							{}

							~DynamicOffsetMixedTest	(void);

	// Emits the "vert"/"frag"/"comp" GLSL sources with bindings matching m_reverseOrder.
	void					initPrograms			(SourceCollections& sourceCollections) const;
	// Verifies the requested pipeline construction type is supported.
	void					checkSupport			(vkt::Context& context) const;
	TestInstance			*createInstance			(Context& context) const;
private:
	const PipelineConstructionType	m_pipelineConstructionType;
	const tcu::IVec2				m_renderSize;		// Output image size in pixels (square, multiple of 16).
	const deUint32					m_numInstances;		// Number of dynamic-offset elements (perfect square).
	const bool						m_testAllOffsets;	// Iterate every offset vs. test one fixed offset per buffer.
	const bool						m_reverseOrder;		// Reverse descriptor binding order (4..0 instead of 0..4).
	const bool						m_runComputeFirst;	// Submit the compute dispatch before the graphics pass.
	const deUint32					m_vertexOffset;		// Element index for the vertex-stage UBO.
	const deUint32					m_sharedUboOffset;	// Element index for the UBO shared by fragment and compute.
	const deUint32					m_fragUboOffset;	// Element index for the fragment-only UBO.
	const deUint32					m_ssboReadOffset;	// Element index for the compute-read SSBO.
	const deUint32					m_ssboWriteOffset;	// Element index for the compute-written SSBO.
};
2135
~DynamicOffsetMixedTest(void)2136 DynamicOffsetMixedTest::~DynamicOffsetMixedTest (void)
2137 {
2138 }
2139
initPrograms(SourceCollections & sourceCollections) const2140 void DynamicOffsetMixedTest::initPrograms (SourceCollections& sourceCollections) const
2141 {
2142 // Vertex
2143 {
2144 std::ostringstream src;
2145
2146 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
2147 << "\n"
2148 << "layout(set = 0, binding = " << (m_reverseOrder ? "4" : "0") << ") uniform uboVertexData\n"
2149 << "{\n"
2150 << " vec4 position;\n"
2151 << "} inputPosData;\n"
2152 << "\n"
2153 << "layout(location = 0) in vec4 inPosition;\n"
2154 << "layout(location = 1) in vec4 inColor;\n"
2155 << "layout(location = 0) out vec4 outColor;\n"
2156 << "\n"
2157 << "out gl_PerVertex\n"
2158 << "{\n"
2159 << " vec4 gl_Position;\n"
2160 << "};\n"
2161 << "\n"
2162 << "void main (void)\n"
2163 << "{\n"
2164 << " gl_Position = inPosition + inputPosData.position;\n"
2165 << " outColor = inColor;\n"
2166 << "}\n";
2167
2168 sourceCollections.glslSources.add("vert") << glu::VertexSource(src.str());
2169 }
2170
2171 // Fragment
2172 {
2173 std::ostringstream src;
2174
2175 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
2176 << "\n"
2177 << "layout(set = 0, binding = " << (m_reverseOrder ? "3" : "1") << ") uniform uboSharedData\n"
2178 << "{\n"
2179 << " vec4 color;\n"
2180 << "} inputData0;\n"
2181 << "\n"
2182 << "layout(set = 0, binding = " << (m_reverseOrder ? "1" : "3") << ") uniform uboFragOnly\n"
2183 << "{\n"
2184 << " mat4 color;\n"
2185 << "} inputData1;\n"
2186 << "\n"
2187 << "layout(location = 0) in vec4 inColor;\n"
2188 << "layout(location = 0) out vec4 outColor;\n"
2189 << "\n"
2190 << "void main (void)\n"
2191 << "{\n"
2192 << " outColor = inColor + inputData0.color;\n"
2193 << " outColor.b = inputData1.color[3][3];\n"
2194 << "}\n";
2195
2196 sourceCollections.glslSources.add("frag") << glu::FragmentSource(src.str());
2197 }
2198
2199 // Compute
2200 {
2201 std::ostringstream src;
2202
2203 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
2204 << "\n"
2205 << "layout(set = 0, binding = " << (m_reverseOrder ? "3" : "1") << ") uniform uboSharedData\n"
2206 << "{\n"
2207 << " vec4 color;\n"
2208 << "} inputData;\n"
2209 << "\n"
2210 << "layout(set = 0, binding = 2) writeonly buffer ssboOutput\n"
2211 << "{\n"
2212 << " vec4 color;\n"
2213 << "} outData;\n"
2214 << "\n"
2215 << "layout(set = 0, binding = " << (m_reverseOrder ? "0" : "4") << ") readonly buffer ssboInput\n"
2216 << "{\n"
2217 << " mat4 color;\n"
2218 << "} readData;\n"
2219 << "\n"
2220 << "void main (void)\n"
2221 << "{\n"
2222 << " outData.color = inputData.color;\n"
2223 << " outData.color.g = readData.color[3][1];\n"
2224 << " outData.color.b = readData.color[3][2];\n"
2225 << "}\n";
2226
2227 sourceCollections.glslSources.add("comp") << glu::ComputeSource(src.str());
2228 }
2229 }
2230
checkSupport(vkt::Context & context) const2231 void DynamicOffsetMixedTest::checkSupport (vkt::Context& context) const
2232 {
2233 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), m_pipelineConstructionType);
2234 }
2235
createInstance(Context & context) const2236 TestInstance* DynamicOffsetMixedTest::createInstance (Context& context) const
2237 {
2238 return new DynamicOffsetMixedTestInstance (context,
2239 m_pipelineConstructionType,
2240 m_renderSize,
2241 m_numInstances,
2242 m_testAllOffsets,
2243 m_reverseOrder,
2244 m_runComputeFirst,
2245 m_vertexOffset,
2246 m_sharedUboOffset,
2247 m_fragUboOffset,
2248 m_ssboReadOffset,
2249 m_ssboWriteOffset);
2250 }
2251
2252 } // anonymous
2253
// Builds the root "dynamic_offset" test group. The main hierarchy is:
//   <pipeline type>/<grouping strategy>/<descriptor type>/<num cmd buffers>/
//   <binding order>/<num set bindings>/<num dynamic bindings>/<leaf: num non-dynamic bindings>
// An additional "combined_descriptors" group (monolithic construction only) runs
// mixed graphics+compute cases that share dynamic descriptors between pipelines.
tcu::TestCaseGroup* createDynamicOffsetTests (tcu::TestContext& testCtx, PipelineConstructionType pipelineConstructionType)
{
	// Index 0 drives DynamicOffsetGraphicsTest, index 1 DynamicOffsetComputeTest (see leaf loop below).
	const char* pipelineTypes[] = { "graphics", "compute" };

	// How descriptors are laid out: all in one set, spread over several sets, or as descriptor arrays.
	struct
	{
		const char* name;
		const GroupingStrategy strategy;
	}
	const groupingTypes[] =
	{
		{ "single_set", GroupingStrategy::SINGLE_SET },
		{ "multiset", GroupingStrategy::MULTISET },
		{ "arrays", GroupingStrategy::ARRAYS },
	};

	// Which dynamic descriptor type the tests exercise.
	struct
	{
		const char* name;
		VkDescriptorType type;
	}
	const descriptorTypes[] =
	{
		{ "uniform_buffer", VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC },
		{ "storage_buffer", VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC }
	};

	// Number of command buffers the work is split across.
	struct
	{
		const char* name;
		deUint32 num;
	}
	const numCmdBuffers[] =
	{
		{ "numcmdbuffers_1", 1u },
		{ "numcmdbuffers_2", 2u }
	};

	// Whether the second command buffer binds descriptors in reverse order.
	struct
	{
		const char* name;
		bool reverse;
	}
	const reverseOrders[] =
	{
		{ "reverseorder", true },
		{ "sameorder", false }
	};

	// Number of vkCmdBindDescriptorSets calls per command buffer.
	struct
	{
		const char* name;
		deUint32 num;
	}
	const numDescriptorSetBindings[] =
	{
		{ "numdescriptorsetbindings_1", 1u },
		{ "numdescriptorsetbindings_2", 2u }
	};

	// Number of bindings using a dynamic descriptor.
	struct
	{
		const char* name;
		deUint32 num;
	}
	const numDynamicBindings[] =
	{
		{ "numdynamicbindings_1", 1u },
		{ "numdynamicbindings_2", 2u }
	};

	// Number of additional non-dynamic bindings mixed in alongside the dynamic ones.
	struct
	{
		const char* name;
		deUint32 num;
	}
	const numNonDynamicBindings[] =
	{
		{ "numnondynamicbindings_0", 0u },
		{ "numnondynamicbindings_1", 1u }
	};

	de::MovePtr<tcu::TestCaseGroup> dynamicOffsetTests (new tcu::TestCaseGroup(testCtx, "dynamic_offset"));

	for (deUint32 pipelineTypeIdx = 0; pipelineTypeIdx < DE_LENGTH_OF_ARRAY(pipelineTypes); pipelineTypeIdx++)
	{
		// VK_EXT_graphics_pipeline_library can't be tested with compute pipeline
		if ((pipelineTypeIdx == 1) && (pipelineConstructionType != PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC))
			continue;

		de::MovePtr<tcu::TestCaseGroup> pipelineTypeGroup (new tcu::TestCaseGroup(testCtx, pipelineTypes[pipelineTypeIdx]));

		for (deUint32 groupingTypeIdx = 0; groupingTypeIdx < DE_LENGTH_OF_ARRAY(groupingTypes); ++groupingTypeIdx)
		{
			de::MovePtr<tcu::TestCaseGroup> groupingTypeGroup (new tcu::TestCaseGroup(testCtx, groupingTypes[groupingTypeIdx].name));

			for (deUint32 descriptorTypeIdx = 0; descriptorTypeIdx < DE_LENGTH_OF_ARRAY(descriptorTypes); descriptorTypeIdx++)
			{
				de::MovePtr<tcu::TestCaseGroup> descriptorTypeGroup (new tcu::TestCaseGroup(testCtx, descriptorTypes[descriptorTypeIdx].name));

				for (deUint32 numCmdBuffersIdx = 0; numCmdBuffersIdx < DE_LENGTH_OF_ARRAY(numCmdBuffers); numCmdBuffersIdx++)
				{
					de::MovePtr<tcu::TestCaseGroup> numCmdBuffersGroup (new tcu::TestCaseGroup(testCtx, numCmdBuffers[numCmdBuffersIdx].name));

					for (deUint32 reverseOrderIdx = 0; reverseOrderIdx < DE_LENGTH_OF_ARRAY(reverseOrders); reverseOrderIdx++)
					{
						// Reverse binding order is only meaningful with two command buffers.
						if (numCmdBuffers[numCmdBuffersIdx].num < 2 && reverseOrders[reverseOrderIdx].reverse)
							continue;

						de::MovePtr<tcu::TestCaseGroup> reverseOrderGroup (new tcu::TestCaseGroup(testCtx, reverseOrders[reverseOrderIdx].name));

						for (deUint32 numDescriptorSetBindingsIdx = 0; numDescriptorSetBindingsIdx < DE_LENGTH_OF_ARRAY(numDescriptorSetBindings); numDescriptorSetBindingsIdx++)
						{
							// Multiple set bindings are only combined with a single command buffer.
							if (numCmdBuffers[numCmdBuffersIdx].num > 1 && numDescriptorSetBindings[numDescriptorSetBindingsIdx].num > 1)
								continue;

							de::MovePtr<tcu::TestCaseGroup> numDescriptorSetBindingsGroup (new tcu::TestCaseGroup(testCtx, numDescriptorSetBindings[numDescriptorSetBindingsIdx].name));
							for (deUint32 numDynamicBindingsIdx = 0; numDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numDynamicBindings); numDynamicBindingsIdx++)
							{
								de::MovePtr<tcu::TestCaseGroup> numDynamicBindingsGroup (new tcu::TestCaseGroup(testCtx, numDynamicBindings[numDynamicBindingsIdx].name));

								for (deUint32 numNonDynamicBindingsIdx = 0; numNonDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numNonDynamicBindings); numNonDynamicBindingsIdx++)
								{
									TestParams params
									{
										pipelineConstructionType,
										descriptorTypes[descriptorTypeIdx].type,
										numCmdBuffers[numCmdBuffersIdx].num,
										reverseOrders[reverseOrderIdx].reverse,
										numDescriptorSetBindings[numDescriptorSetBindingsIdx].num,
										numDynamicBindings[numDynamicBindingsIdx].num,
										numNonDynamicBindings[numNonDynamicBindingsIdx].num,
										groupingTypes[groupingTypeIdx].strategy
									};
// Vulkan SC builds only ship the compute variant; regular Vulkan builds pick by pipeline type.
#ifndef CTS_USES_VULKANSC
									if (strcmp(pipelineTypes[pipelineTypeIdx], "graphics") == 0)
										numDynamicBindingsGroup->addChild(new DynamicOffsetGraphicsTest(testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, params));
									else
#endif // CTS_USES_VULKANSC
										numDynamicBindingsGroup->addChild(new DynamicOffsetComputeTest(testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, params));
								}

								numDescriptorSetBindingsGroup->addChild(numDynamicBindingsGroup.release());
							}

							reverseOrderGroup->addChild(numDescriptorSetBindingsGroup.release());
						}

						numCmdBuffersGroup->addChild(reverseOrderGroup.release());
					}

					descriptorTypeGroup->addChild(numCmdBuffersGroup.release());
				}

				groupingTypeGroup->addChild(descriptorTypeGroup.release());
			}

			pipelineTypeGroup->addChild(groupingTypeGroup.release());
		}

		dynamicOffsetTests->addChild(pipelineTypeGroup.release());
	}

	// Dynamic descriptor offset test for combined descriptor sets.
	if (pipelineConstructionType == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC) {
		de::MovePtr<tcu::TestCaseGroup> combinedDescriptorsTests(new tcu::TestCaseGroup(testCtx, "combined_descriptors"));

		// Whether graphics and compute bind their descriptors in opposite orders.
		struct
		{
			const char* name;
			const bool reverseDescriptors;
		}
		const orders[] =
		{
			{ "same_order", false },
			{ "reverse_order", true }
		};

		// offsetCount = number of instances; offsets[] = the five fixed offsets
		// (vertex ubo, shared ubo, fragment ubo, ssbo read, ssbo write) used by the
		// single-offset variants.
		struct
		{
			const char* name;
			const deUint32 offsetCount;
			const deUint32 offsets[5];
		}
		const numOffsets[] =
		{
			{ "16", 16u, { 15u, 7u, 2u, 3u, 5u } },
			{ "64", 64u, { 27u, 22u, 45u, 19u, 59u } },
			{ "256", 256u, { 197u, 244u, 110u, 238u, 88u } }
		};

		// Whether the compute pipeline runs before or after the graphics pipeline.
		struct
		{
			const char* name;
			const bool computeFirst;
		}
		const pipelineOrders[] =
		{
			{ "graphics_first", false },
			{ "compute_first", true }
		};

		// Run tests for all offsets
		{
			de::MovePtr<tcu::TestCaseGroup> allOffsetsGroup(new tcu::TestCaseGroup(testCtx, "all_offsets"));
			de::MovePtr<tcu::TestCaseGroup> singleOffsetGroup(new tcu::TestCaseGroup(testCtx, "single_offset"));

			for (const auto& order : orders)
			{
				for (const auto& offsets : numOffsets)
				{
					for (const auto& pipeline : pipelineOrders)
					{
						// "all_offsets": every instance gets its own dynamic offset.
						allOffsetsGroup->addChild(new DynamicOffsetMixedTest(
							testCtx,
							pipelineConstructionType,
							std::string(order.name) + "_" + std::string(offsets.name) + "_" + pipeline.name,
							tcu::IVec2(32, 32),	// Render size
							offsets.offsetCount,
							true,				// All offsets
							order.reverseDescriptors,
							pipeline.computeFirst));
						// "single_offset": one fixed offset per resource, taken from offsets.offsets[].
						singleOffsetGroup->addChild(new DynamicOffsetMixedTest(
							testCtx,
							pipelineConstructionType,
							std::string(order.name) + "_" + std::string(offsets.name) + "_" + pipeline.name,
							tcu::IVec2(32, 32),	// Render size
							offsets.offsetCount,
							false,				// Single offset only
							order.reverseDescriptors,
							pipeline.computeFirst,
							offsets.offsets[0],	// For vertex ubo
							offsets.offsets[1],	// For shared ubo (fragment & compute)
							offsets.offsets[2],	// For fragment ubo
							offsets.offsets[3],	// For ssbo read only
							offsets.offsets[4]));	// For ssbo write only
					}
				}
			}
			combinedDescriptorsTests->addChild(allOffsetsGroup.release());
			combinedDescriptorsTests->addChild(singleOffsetGroup.release());
		}

		dynamicOffsetTests->addChild(combinedDescriptorsTests.release());
	}

	return dynamicOffsetTests.release();
}
2502
2503 } // pipeline
2504 } // vkt
2505