1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2018 The Khronos Group Inc.
6 * Copyright (c) 2018 Google Inc.
7 * Copyright (c) 2018 ARM Limited.
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file
23 * \brief Dynamic Offset Tests
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktPipelineDynamicOffsetTests.hpp"
27 #include "vktPipelineClearUtil.hpp"
28 #include "vktPipelineImageUtil.hpp"
29 #include "vktPipelineVertexUtil.hpp"
30 #include "vktPipelineReferenceRenderer.hpp"
31 #include "vktTestCase.hpp"
32 #include "vkImageUtil.hpp"
33 #include "vkMemUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkRef.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkTypeUtil.hpp"
39 #include "vkCmdUtil.hpp"
40 #include "vkObjUtil.hpp"
41 #include "vkDeviceUtil.hpp"
42 #include "vkBuilderUtil.hpp"
43 #include "tcuImageCompare.hpp"
44 #include "deMemory.h"
45 #include "deUniquePtr.hpp"
46 #include "tcuTestLog.hpp"
47 #include <vector>
48 #include <sstream>
49
50 namespace vkt
51 {
52 namespace pipeline
53 {
54
55 using namespace vk;
56 using namespace std;
57
58 namespace
59 {
60 typedef de::SharedPtr<Unique<VkBuffer> > VkBufferSp;
61 typedef de::SharedPtr<Allocation> AllocationSp;
62 typedef de::SharedPtr<Unique<VkCommandBuffer> > VkCommandBufferSp;
63 typedef de::SharedPtr<Unique<VkRenderPass> > VkRenderPassSp;
64 typedef de::SharedPtr<Unique<VkFramebuffer> > VkFramebufferSp;
65 typedef de::SharedPtr<Unique<VkPipeline> > VkPipelineSp;
66
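// How buffer descriptors are grouped into descriptor sets:
//  - SINGLE_SET: all descriptors share set 0, each one using its own binding number.
//  - MULTISET:   each descriptor gets its own set, always at binding 0.
//  - ARRAYS:     one set holds an array of dynamic descriptors and another set an
//                array of non-dynamic descriptors, both at binding 0.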
67 enum class GroupingStrategy
68 {
69 SINGLE_SET = 0,
70 MULTISET = 1,
71 ARRAYS = 2,
72 };
73
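// Parameters shared by the graphics and compute variants: which dynamic descriptor
// type is exercised, how many command buffers are recorded (and whether they are
// recorded in reverse order relative to submission), how many times the descriptor
// sets are re-bound with fresh dynamic offsets per command buffer, and how many
// dynamic and non-dynamic buffer bindings are used.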
74 struct TestParams
75 {
76 VkDescriptorType descriptorType;
77 deUint32 numCmdBuffers;
78 bool reverseOrder;
79 deUint32 numDescriptorSetBindings;
80 deUint32 numDynamicBindings;
81 deUint32 numNonDynamicBindings;
82 GroupingStrategy groupingStrategy;
83 };
84
85 vector<Vertex4RGBA> createQuads (deUint32 numQuads, float size)
86 {
87 vector<Vertex4RGBA> vertices;
88
89 for (deUint32 quadNdx = 0; quadNdx < numQuads; quadNdx++)
90 {
91 const float xOffset = -0.5f + (float)quadNdx;
92 const tcu::Vec4 color (0.0f);
93 const Vertex4RGBA lowerLeftVertex = {tcu::Vec4(-size + xOffset, -size, 0.0f, 1.0f), color};
94 const Vertex4RGBA lowerRightVertex = {tcu::Vec4(size + xOffset, -size, 0.0f, 1.0f), color};
95 const Vertex4RGBA upperLeftVertex = {tcu::Vec4(-size + xOffset, size, 0.0f, 1.0f), color};
96 const Vertex4RGBA upperRightVertex = {tcu::Vec4(size + xOffset, size, 0.0f, 1.0f), color};
97
98 vertices.push_back(lowerLeftVertex);
99 vertices.push_back(lowerRightVertex);
100 vertices.push_back(upperLeftVertex);
101 vertices.push_back(upperLeftVertex);
102 vertices.push_back(lowerRightVertex);
103 vertices.push_back(upperRightVertex);
104 }
105
106 return vertices;
107 }
108
109 static const tcu::Vec4 testColors[] =
110 {
111 tcu::Vec4(0.3f, 0.0f, 0.0f, 1.0f),
112 tcu::Vec4(0.0f, 0.3f, 0.0f, 1.0f),
113 tcu::Vec4(0.0f, 0.0f, 0.3f, 1.0f),
114 tcu::Vec4(0.3f, 0.3f, 0.0f, 1.0f),
115 tcu::Vec4(0.0f, 0.3f, 0.3f, 1.0f),
116 tcu::Vec4(0.3f, 0.0f, 0.3f, 1.0f)
117 };
118 static constexpr VkDeviceSize kColorSize = static_cast<VkDeviceSize>(sizeof(testColors[0]));
119 static constexpr deUint32 kNumTestColors = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(testColors));
120
121 class DynamicOffsetTestInstance : public vkt::TestInstance
122 {
123 public:
124 DynamicOffsetTestInstance (Context& context, const TestParams& params)
125 : vkt::TestInstance (context)
126 , m_params (params)
127 , m_memAlloc (context.getDeviceInterface(), context.getDevice(), getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
128 {}
129
130 protected:
131 const TestParams m_params;
132 SimpleAllocator m_memAlloc;
133 };
134
135 class DynamicOffsetGraphicsTestInstance : public DynamicOffsetTestInstance
136 {
137 public:
138 DynamicOffsetGraphicsTestInstance (Context& context, const TestParams& params);
139 virtual ~DynamicOffsetGraphicsTestInstance (void);
140 void init (void);
141 virtual tcu::TestStatus iterate (void);
142 tcu::TestStatus verifyImage (void);
143
144 private:
145 const tcu::UVec2 m_renderSize;
146 const VkFormat m_colorFormat;
147 VkImageCreateInfo m_colorImageCreateInfo;
148 Move<VkImage> m_colorImage;
149 de::MovePtr<Allocation> m_colorImageAlloc;
150 Move<VkImageView> m_colorAttachmentView;
151 vector<VkRenderPassSp> m_renderPasses;
152 vector<VkFramebufferSp> m_framebuffers;
153 Move<VkShaderModule> m_vertexShaderModule;
154 Move<VkShaderModule> m_fragmentShaderModule;
155 Move<VkBuffer> m_vertexBuffer;
156 de::MovePtr<Allocation> m_vertexBufferAlloc;
157 Move<VkBuffer> m_buffer;
158 de::MovePtr<Allocation> m_bufferAlloc;
159 vector<Move<VkDescriptorSetLayout>> m_descriptorSetLayouts;
160 Move<VkDescriptorPool> m_descriptorPool;
161 vector<Move<VkDescriptorSet>> m_descriptorSets;
162 Move<VkPipelineLayout> m_pipelineLayout;
163 vector<VkPipelineSp> m_graphicsPipelines;
164 Move<VkCommandPool> m_cmdPool;
165 vector<VkCommandBufferSp> m_cmdBuffers;
166 vector<Vertex4RGBA> m_vertices;
167 };
168
169 DynamicOffsetGraphicsTestInstance::DynamicOffsetGraphicsTestInstance (Context& context, const TestParams& params)
170 : DynamicOffsetTestInstance (context, params)
171 , m_renderSize (32, 32)
172 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
173 , m_vertices (createQuads(m_params.numDescriptorSetBindings * m_params.numCmdBuffers, 0.25f))
174 {
175 }
176
177 void DynamicOffsetGraphicsTestInstance::init (void)
178 {
179 const VkComponentMapping componentMappingRGBA = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
180 const DeviceInterface& vk = m_context.getDeviceInterface();
181 const VkDevice vkDevice = m_context.getDevice();
182 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
183 const deUint32 numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
184 deUint32 offset = 0;
185 deUint32 quadNdx = 0;
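// Dynamic offsets passed to vkCmdBindDescriptorSets must be multiples of
// minUniformBufferOffsetAlignment (or minStorageBufferOffsetAlignment for storage
// buffers), so each test color is stored in its own block of colorBlockInputSize
// bytes, i.e. kColorSize rounded up to that alignment.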
186 const VkPhysicalDeviceLimits deviceLimits = getPhysicalDeviceProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()).limits;
187 const VkDeviceSize alignment = ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ? deviceLimits.minUniformBufferOffsetAlignment : deviceLimits.minStorageBufferOffsetAlignment);
188 const VkDeviceSize extraBytes = kColorSize % alignment;
189 const VkDeviceSize colorBlockInputSize = ((extraBytes == 0ull) ? kColorSize : (kColorSize + alignment - extraBytes));
190 const VkDeviceSize bufferSize = colorBlockInputSize * kNumTestColors;
191 const VkDeviceSize bindingOffset = bufferSize / numBindings;
192 const VkDescriptorType nonDynamicDescriptorType = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
193
194 vector<VkDescriptorSetLayout> descriptorSetLayoutsPlain;
195 vector<VkDescriptorSet> descriptorSetsPlain;
196
197 // Create color image
198 {
199
200 const VkImageCreateInfo colorImageParams =
201 {
202 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
203 DE_NULL, // const void* pNext;
204 0u, // VkImageCreateFlags flags;
205 VK_IMAGE_TYPE_2D, // VkImageType imageType;
206 m_colorFormat, // VkFormat format;
207 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
208 1u, // deUint32 mipLevels;
209 1u, // deUint32 arrayLayers;
210 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
211 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
212 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
213 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
214 1u, // deUint32 queueFamilyIndexCount;
215 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
216 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
217 };
218
219 m_colorImageCreateInfo = colorImageParams;
220 m_colorImage = createImage(vk, vkDevice, &m_colorImageCreateInfo);
221
222 // Allocate and bind color image memory
223 m_colorImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *m_colorImage), MemoryRequirement::Any);
224 VK_CHECK(vk.bindImageMemory(vkDevice, *m_colorImage, m_colorImageAlloc->getMemory(), m_colorImageAlloc->getOffset()));
225 }
226
227 // Create color attachment view
228 {
229 const VkImageViewCreateInfo colorAttachmentViewParams =
230 {
231 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
232 DE_NULL, // const void* pNext;
233 0u, // VkImageViewCreateFlags flags;
234 *m_colorImage, // VkImage image;
235 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
236 m_colorFormat, // VkFormat format;
237 componentMappingRGBA, // VkComponentMapping components;
238 { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }, // VkImageSubresourceRange subresourceRange;
239 };
240
241 m_colorAttachmentView = createImageView(vk, vkDevice, &colorAttachmentViewParams);
242 }
243
244 // Create render passes
245 for (deUint32 renderPassIdx = 0; renderPassIdx < m_params.numCmdBuffers; renderPassIdx++)
246 {
247 // The first pass clears the output image, and the second one draws on top of the first pass.
248 const VkAttachmentLoadOp loadOps[] =
249 {
250 VK_ATTACHMENT_LOAD_OP_CLEAR,
251 VK_ATTACHMENT_LOAD_OP_LOAD
252 };
253
254 const VkImageLayout initialLayouts[] =
255 {
256 VK_IMAGE_LAYOUT_UNDEFINED,
257 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
258 };
259
260 const VkAttachmentDescription attachmentDescription =
261 {
262 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags
263 m_colorFormat, // VkFormat format
264 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
265 loadOps[renderPassIdx], // VkAttachmentLoadOp loadOp
266 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp
267 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp
268 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp
269 initialLayouts[renderPassIdx], // VkImageLayout initialLayout
270 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout
271 };
272
273 const VkAttachmentReference attachmentRef =
274 {
275 0u, // deUint32 attachment
276 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout
277 };
278
279 const VkSubpassDescription subpassDescription =
280 {
281 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
282 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
283 0u, // deUint32 inputAttachmentCount
284 DE_NULL, // const VkAttachmentReference* pInputAttachments
285 1u, // deUint32 colorAttachmentCount
286 &attachmentRef, // const VkAttachmentReference* pColorAttachments
287 DE_NULL, // const VkAttachmentReference* pResolveAttachments
288 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
289 0u, // deUint32 preserveAttachmentCount
290 DE_NULL // const deUint32* pPreserveAttachments
291 };
292
293 const VkRenderPassCreateInfo renderPassInfo =
294 {
295 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
296 DE_NULL, // const void* pNext
297 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
298 1u, // deUint32 attachmentCount
299 &attachmentDescription, // const VkAttachmentDescription* pAttachments
300 1u, // deUint32 subpassCount
301 &subpassDescription, // const VkSubpassDescription* pSubpasses
302 0u, // deUint32 dependencyCount
303 DE_NULL // const VkSubpassDependency* pDependencies
304 };
305
306 m_renderPasses.push_back(VkRenderPassSp(new Unique<VkRenderPass>(createRenderPass(vk, vkDevice, &renderPassInfo))));
307 }
308
309 // Create framebuffers
310 for (deUint32 framebufferIdx = 0; framebufferIdx < m_params.numCmdBuffers; framebufferIdx++)
311 {
312 const VkImageView attachmentBindInfos[] =
313 {
314 *m_colorAttachmentView
315 };
316
317 const VkFramebufferCreateInfo framebufferParams =
318 {
319 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
320 DE_NULL, // const void* pNext;
321 0u, // VkFramebufferCreateFlags flags;
322 **m_renderPasses[framebufferIdx], // VkRenderPass renderPass;
323 1u, // deUint32 attachmentCount;
324 attachmentBindInfos, // const VkImageView* pAttachments;
325 (deUint32)m_renderSize.x(), // deUint32 width;
326 (deUint32)m_renderSize.y(), // deUint32 height;
327 1u // deUint32 layers;
328 };
329
330 m_framebuffers.push_back(VkFramebufferSp(new Unique<VkFramebuffer>(createFramebuffer(vk, vkDevice, &framebufferParams))));
331 }
332
333 // Create pipeline layout
334 {
335 // Create descriptor set layouts
336 vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;
337
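// Build one VkDescriptorSetLayoutBinding per descriptor, or one per descriptor
// array in ARRAYS mode. Dynamic descriptors always come before non-dynamic ones.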
338 for (deUint32 binding = 0; binding < numBindings; binding++)
339 {
340 const bool dynamicDesc = (binding < m_params.numDynamicBindings);
341 const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
342 const deUint32 bindingNumber = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
343 const deUint32 descriptorCount = ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ? (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) : 1u);
344 const VkDescriptorSetLayoutBinding descriptorSetLayoutBinding =
345 {
346 bindingNumber, // uint32_t binding;
347 descriptorType, // VkDescriptorType descriptorType;
348 descriptorCount, // uint32_t descriptorCount;
349 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlags stageFlags;
350 DE_NULL // const VkSampler* pImmutableSamplers;
351 };
352
353 // In array mode, skip the remaining bindings of this type: they are already covered by the array descriptor created above.
354 if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
355 binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : numBindings);
356
357 descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
358 }
359
360 vector<VkDescriptorSetLayoutCreateInfo> descriptorSetLayoutCreateInfos;
361
362 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
363 {
364 const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
365 {
366 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
367 DE_NULL, // const void* pNext;
368 0u, // VkDescriptorSetLayoutCreateFlags flags;
369 numBindings, // uint32_t bindingCount;
370 descriptorSetLayoutBindings.data() // const VkDescriptorSetLayoutBinding* pBindings;
371 };
372
373 m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
374 }
375 else
376 {
377 for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
378 {
379 const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
380 {
381 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
382 DE_NULL, // const void* pNext;
383 0u, // VkDescriptorSetLayoutCreateFlags flags;
384 1u, // uint32_t bindingCount;
385 &descriptorSetLayoutBindings[i] // const VkDescriptorSetLayoutBinding* pBindings;
386 };
387
388 m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo));
389 }
390 }
391
392 // Create pipeline layout
393 descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
394 for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
395 descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();
396
397 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
398 {
399 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
400 DE_NULL, // const void* pNext;
401 0u, // VkPipelineLayoutCreateFlags flags;
402 static_cast<deUint32>(descriptorSetLayoutsPlain.size()), // deUint32 descriptorSetCount;
403 descriptorSetLayoutsPlain.data(), // const VkDescriptorSetLayout* pSetLayouts;
404 0u, // deUint32 pushConstantRangeCount;
405 DE_NULL // const VkPushConstantRange* pPushConstantRanges;
406 };
407
408 m_pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
409 }
410
411 // Create buffer
412 {
413 vector<deUint8> hostBuffer((size_t)bufferSize, 0);
414 for (deUint32 colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
415 deMemcpy(&hostBuffer[(deUint32)colorBlockInputSize * colorIdx], &testColors[colorIdx], kColorSize);
416
417 const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
418
419 const VkBufferCreateInfo bufferCreateInfo =
420 {
421 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
422 DE_NULL, // const void* pNext;
423 0u, // VkBufferCreateFlags flags
424 bufferSize, // VkDeviceSize size;
425 usageFlags, // VkBufferUsageFlags usage;
426 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
427 1u, // deUint32 queueFamilyCount;
428 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
429 };
430
431 m_buffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
432 m_bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
433 VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));
434
435 deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
436 flushAlloc(vk, vkDevice, *m_bufferAlloc);
437 }
438
439 // Create descriptor pool
440 {
441 DescriptorPoolBuilder poolBuilder;
442 poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
443 poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
444 m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, static_cast<deUint32>(m_descriptorSetLayouts.size()));
445 }
446
447 // Create descriptor sets
448 {
449 for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
450 {
451 const VkDescriptorSetAllocateInfo allocInfo =
452 {
453 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
454 DE_NULL, // const void* pNext;
455 *m_descriptorPool, // VkDescriptorPool descriptorPool;
456 1u, // deUint32 setLayoutCount;
457 &(m_descriptorSetLayouts[i].get()), // const VkDescriptorSetLayout* pSetLayouts;
458 };
459 m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
460 }
461 }
462
463 descriptorSetsPlain.resize(m_descriptorSets.size());
464 for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
465 descriptorSetsPlain[i] = m_descriptorSets[i].get();
466
467 // Update descriptor sets
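// For *_DYNAMIC descriptors the offset written here is only a base offset; the
// offset actually used by the shader is this base plus the dynamic offset passed
// to vkCmdBindDescriptorSets at bind time.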
468 for (deUint32 binding = 0; binding < numBindings; ++binding)
469 {
470 const bool dynamicDesc = (binding < m_params.numDynamicBindings);
471 const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
472 const VkDescriptorBufferInfo descriptorBufferInfo =
473 {
474 *m_buffer, // VkBuffer buffer;
475 bindingOffset * binding, // VkDeviceSize offset;
476 kColorSize // VkDeviceSize range;
477 };
478
479 VkDescriptorSet bindingSet;
480 deUint32 bindingNumber;
481 deUint32 dstArrayElement;
482
483 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
484 {
485 bindingSet = m_descriptorSets[0].get();
486 bindingNumber = binding;
487 dstArrayElement = 0u;
488 }
489 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
490 {
491 bindingSet = m_descriptorSets[binding].get();
492 bindingNumber = 0u;
493 dstArrayElement = 0u;
494 }
495 else // GroupingStrategy::ARRAYS
496 {
497 bindingSet = (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
498 bindingNumber = 0u;
499 dstArrayElement = (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
500 }
501
502 const VkWriteDescriptorSet writeDescriptorSet =
503 {
504 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
505 DE_NULL, // const void* pNext;
506 bindingSet, // VkDescriptorSet dstSet;
507 bindingNumber, // uint32_t dstBinding;
508 dstArrayElement, // uint32_t dstArrayElement;
509 1u, // uint32_t descriptorCount;
510 descriptorType, // VkDescriptorType descriptorType;
511 DE_NULL, // const VkDescriptorImageInfo* pImageInfo;
512 &descriptorBufferInfo, // const VkDescriptorBufferInfo* pBufferInfo;
513 DE_NULL // const VkBufferView* pTexelBufferView;
514 };
515
516 vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
517 }
518
519 // Create shaders
520 {
521 m_vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("vert"), 0u);
522 m_fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("frag"), 0u);
523 }
524
525 // Create pipelines
526 for (deUint32 pipelineIdx = 0; pipelineIdx < m_params.numCmdBuffers; pipelineIdx++)
527 {
528 const VkVertexInputBindingDescription vertexInputBindingDescription =
529 {
530 0u, // deUint32 binding;
531 sizeof(Vertex4RGBA), // deUint32 strideInBytes;
532 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
533 };
534
535 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
536 {
537 {
538 0u, // deUint32 location;
539 0u, // deUint32 binding;
540 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
541 0u // deUint32 offsetInBytes;
542 },
543 {
544 1u, // deUint32 location;
545 0u, // deUint32 binding;
546 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
547 DE_OFFSET_OF(Vertex4RGBA, color), // deUint32 offset;
548 }
549 };
550
551 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
552 {
553 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
554 DE_NULL, // const void* pNext;
555 0u, // VkPipelineVertexInputStateCreateFlags flags;
556 1u, // deUint32 bindingCount;
557 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
558 2u, // deUint32 attributeCount;
559 vertexInputAttributeDescriptions // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
560 };
561
562 const VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
563
564 const vector<VkViewport> viewports (1, makeViewport(m_renderSize));
565 const vector<VkRect2D> scissors (1, makeRect2D(m_renderSize));
566
567 m_graphicsPipelines.push_back(VkPipelineSp(new Unique<VkPipeline>(makeGraphicsPipeline(vk, // const DeviceInterface& vk
568 vkDevice, // const VkDevice device
569 *m_pipelineLayout, // const VkPipelineLayout pipelineLayout
570 *m_vertexShaderModule, // const VkShaderModule vertexShaderModule
571 DE_NULL, // const VkShaderModule tessellationControlShaderModule
572 DE_NULL, // const VkShaderModule tessellationEvalShaderModule
573 DE_NULL, // const VkShaderModule geometryShaderModule
574 *m_fragmentShaderModule, // const VkShaderModule fragmentShaderModule
575 **m_renderPasses[pipelineIdx], // const VkRenderPass renderPass
576 viewports, // const std::vector<VkViewport>& viewports
577 scissors, // const std::vector<VkRect2D>& scissors
578 topology, // const VkPrimitiveTopology topology
579 0u, // const deUint32 subpass
580 0u, // const deUint32 patchControlPoints
581 &vertexInputStateParams)))); // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
582 }
583
584 // Create vertex buffer
585 {
586 const VkBufferCreateInfo vertexBufferParams =
587 {
588 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
589 DE_NULL, // const void* pNext;
590 0u, // VkBufferCreateFlags flags;
591 (VkDeviceSize)(sizeof(Vertex4RGBA) * m_vertices.size()), // VkDeviceSize size;
592 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
593 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
594 1u, // deUint32 queueFamilyCount;
595 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
596 };
597
598 m_vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams);
599 m_vertexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_vertexBuffer), MemoryRequirement::HostVisible);
600
601 VK_CHECK(vk.bindBufferMemory(vkDevice, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset()));
602
603 // Load vertices into vertex buffer
604 deMemcpy(m_vertexBufferAlloc->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
605 flushAlloc(vk, vkDevice, *m_vertexBufferAlloc);
606 }
607
608 // Create command pool
609 m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
610
611 // Create command buffers
612 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
613 m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));
614
615 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
616 {
617 const VkClearValue attachmentClearValue = defaultClearValue(m_colorFormat);
618 const VkDeviceSize vertexBufferOffset = 0;
619 const deUint32 idx = m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;
620
621 beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
622 beginRenderPass(vk, **m_cmdBuffers[idx], **m_renderPasses[idx], **m_framebuffers[idx], makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), attachmentClearValue);
623 vk.cmdBindPipeline(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_GRAPHICS, **m_graphicsPipelines[idx]);
624 vk.cmdBindVertexBuffers(**m_cmdBuffers[idx], 0, 1, &m_vertexBuffer.get(), &vertexBufferOffset);
625
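// Each iteration re-binds the same descriptor sets with dynamic offsets advanced
// by one color block, so every quad reads different colors from the same buffer.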
626 for (deUint32 i = 0; i < m_params.numDescriptorSetBindings; i++)
627 {
628 vector<deUint32> offsets;
629 for (deUint32 dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
630 offsets.push_back(offset + (deUint32)colorBlockInputSize * dynamicBindingIdx);
631
632 vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, static_cast<deUint32>(descriptorSetsPlain.size()), descriptorSetsPlain.data(), m_params.numDynamicBindings, offsets.data());
633 offset += (deUint32)colorBlockInputSize;
634
635 // Draw quad
636 vk.cmdDraw(**m_cmdBuffers[idx], 6, 1, 6 * quadNdx, 0);
637 quadNdx++;
638 }
639
640 endRenderPass(vk, **m_cmdBuffers[idx]);
641 endCommandBuffer(vk, **m_cmdBuffers[idx]);
642 }
643 }
644
645 DynamicOffsetGraphicsTestInstance::~DynamicOffsetGraphicsTestInstance (void)
646 {
647 }
648
649 tcu::TestStatus DynamicOffsetGraphicsTestInstance::iterate (void)
650 {
651 init();
652
653 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
654 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(), **m_cmdBuffers[cmdBufferIdx]);
655
656 return verifyImage();
657 }
658
659 tcu::TestStatus DynamicOffsetGraphicsTestInstance::verifyImage (void)
660 {
661 const tcu::TextureFormat tcuColorFormat = mapVkFormat(m_colorFormat);
662 const tcu::TextureFormat tcuDepthFormat = tcu::TextureFormat();
663 const ColorVertexShader vertexShader;
664 const ColorFragmentShader fragmentShader (tcuColorFormat, tcuDepthFormat);
665 const rr::Program program (&vertexShader, &fragmentShader);
666 ReferenceRenderer refRenderer (m_renderSize.x(), m_renderSize.y(), 1, tcuColorFormat, tcuDepthFormat, &program);
667 bool compareOk = false;
668
669 // Render reference image
670 {
671 const deUint32 numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
672 const deUint32 bindingOffset = kNumTestColors / numBindings;
673
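// Mirror the shader: each dynamic binding reads the color selected by its base
// offset plus the per-draw dynamic offset, while each non-dynamic binding always
// reads the color at its fixed base offset.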
674 for (deUint32 quadIdx = 0; quadIdx < m_vertices.size() / 6; quadIdx++)
675 for (deUint32 vertexIdx = 0; vertexIdx < 6; vertexIdx++)
676 {
677 tcu::Vec4 refColor(0.0f);
678
679 for (deUint32 binding = 0; binding < m_params.numDynamicBindings; binding++)
680 refColor += testColors[quadIdx + binding * bindingOffset + binding];
681 for (deUint32 binding = 0; binding < m_params.numNonDynamicBindings; binding++)
682 refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
683 refColor.w() = 1.0f;
684
685 m_vertices[quadIdx * 6 + vertexIdx].color.xyzw() = refColor;
686 }
687
688 refRenderer.draw(rr::RenderState(refRenderer.getViewportState(), m_context.getDeviceProperties().limits.subPixelPrecisionBits),
689 rr::PRIMITIVETYPE_TRIANGLES, m_vertices);
690 }
691
692 // Compare result with reference image
693 {
694 de::MovePtr<tcu::TextureLevel> result = readColorAttachment(
695 m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(),
696 m_context.getUniversalQueueFamilyIndex(), m_memAlloc, *m_colorImage, m_colorFormat, m_renderSize);
697
698 compareOk = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
699 "IntImageCompare",
700 "Image comparison",
701 refRenderer.getAccess(),
702 result->getAccess(),
703 tcu::UVec4(2, 2, 2, 2),
704 tcu::IVec3(1, 1, 0),
705 true,
706 tcu::COMPARE_LOG_RESULT);
707 }
708
709 if (compareOk)
710 return tcu::TestStatus::pass("Result image matches reference");
711 else
712 return tcu::TestStatus::fail("Image mismatch");
713 }
714
715 class DynamicOffsetGraphicsTest : public vkt::TestCase
716 {
717 public:
718 DynamicOffsetGraphicsTest (tcu::TestContext& testContext,
719 const string& name,
720 const string& description,
721 const TestParams& params);
722 ~DynamicOffsetGraphicsTest (void);
723 void initPrograms (SourceCollections& sourceCollections) const;
724 TestInstance* createInstance (Context& context) const;
725
726 protected:
727 const TestParams m_params;
728 };
729
730 DynamicOffsetGraphicsTest::DynamicOffsetGraphicsTest (tcu::TestContext& testContext,
731 const string& name,
732 const string& description,
733 const TestParams& params)
734 : vkt::TestCase (testContext, name, description)
735 , m_params (params)
736 {
737 }
738
739 DynamicOffsetGraphicsTest::~DynamicOffsetGraphicsTest (void)
740 {
741 }
742
743 TestInstance* DynamicOffsetGraphicsTest::createInstance (Context& context) const
744 {
745 return new DynamicOffsetGraphicsTestInstance(context, m_params);
746 }
747
748 void DynamicOffsetGraphicsTest::initPrograms (SourceCollections& sourceCollections) const
749 {
750 const deUint32 numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
751 const string bufferType = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "readonly buffer";
752 ostringstream inputBlocks;
753 ostringstream inputSum;
754 string setAndBinding;
755 string blockSuffix;
756 string accessSuffix;
757 bool dynArrayDecl = false; // Dynamic descriptor block array declared?
758 bool nonDynArrayDecl = false; // Nondynamic descriptor block array declared?
759
760 for (deUint32 b = 0; b < numBindings; b++)
761 {
762 const bool dynBind = (b < m_params.numDynamicBindings);
763 const string bStr = de::toString(b);
764
765 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
766 {
767 setAndBinding = "set = 0, binding = " + bStr;
768 blockSuffix = bStr;
769 accessSuffix = bStr;
770 }
771 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
772 {
773 setAndBinding = "set = " + bStr + ", binding = 0";
774 blockSuffix = bStr;
775 accessSuffix = bStr;
776 }
777 else // GroupingStrategy::ARRAYS
778 {
779 // In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
780 // nondynamic descriptors.
781 setAndBinding = "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
782 blockSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) + "]";
783 accessSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
784 }
785
786 // In array mode, declare the input block only once per descriptor type.
787 bool& arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
788 if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
789 {
790 inputBlocks
791 << "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
792 << "{\n"
793 << " vec4 color;\n"
794 << "} inputData" << blockSuffix << ";\n"
795 ;
796 arrayDeclFlag = true;
797 }
798
799 // But the sum always needs to be added once per descriptor.
800 inputSum << " vtxColor.rgb += inputData" << accessSuffix << ".color.rgb;\n";
801 }
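// As an illustration, with GroupingStrategy::SINGLE_SET, one dynamic and one
// non-dynamic uniform binding, the loop above generates roughly:
//
//   layout(set = 0, binding = 0) uniform Block0 { vec4 color; } inputData0;
//   layout(set = 0, binding = 1) uniform Block1 { vec4 color; } inputData1;
//
// together with one accumulation statement per descriptor:
//
//   vtxColor.rgb += inputData0.color.rgb;
//   vtxColor.rgb += inputData1.color.rgb;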
802
803 const string vertexSrc =
804 "#version 450\n"
805 "layout(location = 0) in highp vec4 position;\n"
806 "layout(location = 1) in highp vec4 color;\n"
807 "layout(location = 0) out highp vec4 vtxColor;\n"
808 + inputBlocks.str() +
809 "\n"
810 "out gl_PerVertex { vec4 gl_Position; };\n"
811 "\n"
812 "void main()\n"
813 "{\n"
814 " gl_Position = position;\n"
815 " vtxColor = vec4(0, 0, 0, 1);\n"
816 + inputSum.str() +
817 "}\n";
818
819 const string fragmentSrc =
820 "#version 450\n"
821 "layout(location = 0) in highp vec4 vtxColor;\n"
822 "layout(location = 0) out highp vec4 fragColor;\n"
823 "\n"
824 "void main (void)\n"
825 "{\n"
826 " fragColor = vtxColor;\n"
827 "}\n";
828
829 sourceCollections.glslSources.add("vert") << glu::VertexSource(vertexSrc);
830 sourceCollections.glslSources.add("frag") << glu::FragmentSource(fragmentSrc);
831 }
832
833 class DynamicOffsetComputeTestInstance : public DynamicOffsetTestInstance
834 {
835 public:
836 DynamicOffsetComputeTestInstance (Context& context, const TestParams& params);
837 virtual ~DynamicOffsetComputeTestInstance (void);
838 void init (void);
839 virtual tcu::TestStatus iterate (void);
840 tcu::TestStatus verifyOutput (void);
841
842 private:
843 const deUint32 m_numBindings;
844 const deUint32 m_numOutputColors;
845 const VkPhysicalDeviceLimits m_deviceLimits;
846 Move<VkShaderModule> m_computeShaderModule;
847 Move<VkBuffer> m_buffer;
848 de::MovePtr<Allocation> m_bufferAlloc;
849 vector<Move<VkDescriptorSetLayout>> m_descriptorSetLayouts;
850 Move<VkDescriptorPool> m_descriptorPool;
851 vector<Move<VkDescriptorSet>> m_descriptorSets;
852 Move<VkPipelineLayout> m_pipelineLayout;
853 Move<VkPipeline> m_computePipeline;
854 Move<VkBuffer> m_outputBuffer;
855 de::MovePtr<Allocation> m_outputBufferAlloc;
856 Move<VkCommandPool> m_cmdPool;
857 vector<VkCommandBufferSp> m_cmdBuffers;
858 };
859
860 DynamicOffsetComputeTestInstance::DynamicOffsetComputeTestInstance (Context& context, const TestParams& params)
861 : DynamicOffsetTestInstance (context, params)
862 , m_numBindings (params.numDynamicBindings + params.numNonDynamicBindings)
863 , m_numOutputColors (params.numCmdBuffers * params.numDescriptorSetBindings)
864 , m_deviceLimits (getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice()).limits)
865 {
866 }
867
868 void DynamicOffsetComputeTestInstance::init (void)
869 {
870 const DeviceInterface& vk = m_context.getDeviceInterface();
871 const VkDevice vkDevice = m_context.getDevice();
872 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
873 const VkDeviceSize inputAlignment = ((m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ? m_deviceLimits.minUniformBufferOffsetAlignment : m_deviceLimits.minStorageBufferOffsetAlignment);
874 const VkDeviceSize inputExtraBytes = kColorSize % inputAlignment;
875 const VkDeviceSize colorBlockInputSize = ((inputExtraBytes == 0ull) ? kColorSize : (kColorSize + inputAlignment - inputExtraBytes));
876 const deUint32 colorBlockInputSizeU32 = static_cast<deUint32>(colorBlockInputSize);
877 const VkDeviceSize outputExtraBytes = kColorSize % m_deviceLimits.minStorageBufferOffsetAlignment;
878 const VkDeviceSize colorBlockOutputSize = ((outputExtraBytes == 0ull) ? kColorSize : (kColorSize + m_deviceLimits.minStorageBufferOffsetAlignment - outputExtraBytes));
879 const deUint32 colorBlockOutputSizeU32 = static_cast<deUint32>(colorBlockOutputSize);
880 const VkDeviceSize bufferSize = colorBlockInputSize * kNumTestColors;
881 const VkDeviceSize bindingOffset = bufferSize / m_numBindings;
882 const VkDescriptorType nonDynamicDescriptorType = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
883 const VkDeviceSize outputBufferSize = colorBlockOutputSize * m_numOutputColors;
884
885 vector<VkDescriptorSetLayout> descriptorSetLayoutsPlain;
886 vector<VkDescriptorSet> descriptorSetsPlain;
887
888 // Create pipeline layout
889 {
890 // Create descriptor set layouts
891 vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;
892
893 for (deUint32 binding = 0; binding < m_numBindings; binding++)
894 {
895 const bool dynamicDesc = (binding < m_params.numDynamicBindings);
896 const VkDescriptorType descriptorType = (dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType);
897 const deUint32 bindingNumber = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? binding : 0u);
898 const deUint32 descriptorCount = ((m_params.groupingStrategy == GroupingStrategy::ARRAYS) ? (dynamicDesc ? m_params.numDynamicBindings : m_params.numNonDynamicBindings) : 1u);
899 const VkDescriptorSetLayoutBinding descriptorSetLayoutBinding =
900 {
901 bindingNumber, // uint32_t binding;
902 descriptorType, // VkDescriptorType descriptorType;
903 descriptorCount, // uint32_t descriptorCount;
904 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags stageFlags;
905 DE_NULL // const VkSampler* pImmutableSamplers;
906 };
907
908 // In array mode, skip the remaining bindings of this type: they are already covered by the array descriptor created above.
909 if (m_params.groupingStrategy == GroupingStrategy::ARRAYS)
910 binding = (dynamicDesc ? m_params.numDynamicBindings - 1 : m_numBindings);
911
912 descriptorSetLayoutBindings.push_back(descriptorSetLayoutBinding);
913 }
914
915 const deUint32 bindingNumberOutput = (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET ? m_numBindings : 0u);
916 const VkDescriptorSetLayoutBinding descriptorSetLayoutBindingOutput =
917 {
918 bindingNumberOutput, // uint32_t binding;
919 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, // VkDescriptorType descriptorType;
920 1u, // uint32_t descriptorCount;
921 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags stageFlags;
922 DE_NULL // const VkSampler* pImmutableSamplers;
923 };
924
925 descriptorSetLayoutBindings.push_back(descriptorSetLayoutBindingOutput);
926
927 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
928 {
929 const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
930 {
931 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
932 DE_NULL, // const void* pNext;
933 0u, // VkDescriptorSetLayoutCreateFlags flags;
934 m_numBindings + 1, // uint32_t bindingCount;
935 descriptorSetLayoutBindings.data() // const VkDescriptorSetLayoutBinding* pBindings;
936 };
937
938 m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
939 }
940 else
941 {
942 for (size_t i = 0; i < descriptorSetLayoutBindings.size(); ++i)
943 {
944 const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo =
945 {
946 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
947 DE_NULL, // const void* pNext;
948 0u, // VkDescriptorSetLayoutCreateFlags flags;
949 1u, // uint32_t bindingCount;
950 &descriptorSetLayoutBindings[i] // const VkDescriptorSetLayoutBinding* pBindings;
951 };
952
953 m_descriptorSetLayouts.push_back(createDescriptorSetLayout(vk, vkDevice, &descriptorSetLayoutCreateInfo, DE_NULL));
954 }
955 }
956
957 // Create pipeline layout
958 descriptorSetLayoutsPlain.resize(m_descriptorSetLayouts.size());
959 for (size_t i = 0; i < descriptorSetLayoutsPlain.size(); ++i)
960 descriptorSetLayoutsPlain[i] = m_descriptorSetLayouts[i].get();
961
962 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
963 {
964 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
965 DE_NULL, // const void* pNext;
966 0u, // VkPipelineLayoutCreateFlags flags;
967 static_cast<deUint32>(descriptorSetLayoutsPlain.size()), // deUint32 descriptorSetCount;
968 descriptorSetLayoutsPlain.data(), // const VkDescriptorSetLayout* pSetLayouts;
969 0u, // deUint32 pushConstantRangeCount;
970 DE_NULL // const VkPushConstantRange* pPushConstantRanges;
971 };
972
973 m_pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
974 }
975
976 // Create buffer
977 {
978 vector<deUint8> hostBuffer((deUint32)bufferSize, 0);
979 for (deUint32 colorIdx = 0; colorIdx < kNumTestColors; colorIdx++)
980 deMemcpy(&hostBuffer[colorBlockInputSizeU32 * colorIdx], &testColors[colorIdx], kColorSize);
981
982 const VkBufferUsageFlags usageFlags = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
983
984 const VkBufferCreateInfo bufferCreateInfo =
985 {
986 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
987 DE_NULL, // const void* pNext;
988 0u, // VkBufferCreateFlags flags
989 bufferSize, // VkDeviceSize size;
990 usageFlags, // VkBufferUsageFlags usage;
991 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
992 1u, // deUint32 queueFamilyCount;
993 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
994 };
995
996 m_buffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
997 m_bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_buffer), MemoryRequirement::HostVisible);
998 VK_CHECK(vk.bindBufferMemory(vkDevice, *m_buffer, m_bufferAlloc->getMemory(), m_bufferAlloc->getOffset()));
999
1000 deMemcpy(m_bufferAlloc->getHostPtr(), hostBuffer.data(), (size_t)bufferSize);
1001 flushAlloc(vk, vkDevice, *m_bufferAlloc);
1002 }
1003
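// The compute variant writes its result through an extra
// VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC descriptor: each dispatch stores one
// color in its own colorBlockOutputSize-aligned slot of this output buffer.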
1004 // Create output buffer
1005 {
1006 const VkBufferCreateInfo bufferCreateInfo =
1007 {
1008 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1009 DE_NULL, // const void* pNext;
1010 0u, // VkBufferCreateFlags flags
1011 outputBufferSize, // VkDeviceSize size;
1012 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, // VkBufferUsageFlags usage;
1013 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1014 1u, // deUint32 queueFamilyCount;
1015 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
1016 };
1017
1018 m_outputBuffer = createBuffer(vk, vkDevice, &bufferCreateInfo);
1019 m_outputBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_outputBuffer), MemoryRequirement::HostVisible);
1020 VK_CHECK(vk.bindBufferMemory(vkDevice, *m_outputBuffer, m_outputBufferAlloc->getMemory(), m_outputBufferAlloc->getOffset()));
1021 }
1022
1023 // Create descriptor pool
1024 {
1025 DescriptorPoolBuilder poolBuilder;
1026 poolBuilder.addType(m_params.descriptorType, m_params.numDynamicBindings);
1027 poolBuilder.addType(nonDynamicDescriptorType, m_params.numNonDynamicBindings);
1028 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1u);
1029 m_descriptorPool = poolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, static_cast<deUint32>(m_descriptorSetLayouts.size()));
1030 }
1031
1032 // Create descriptor sets
1033 {
1034 for (size_t i = 0; i < m_descriptorSetLayouts.size(); ++i)
1035 {
1036 const VkDescriptorSetAllocateInfo allocInfo =
1037 {
1038 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
1039 DE_NULL, // const void* pNext;
1040 *m_descriptorPool, // VkDescriptorPool descriptorPool;
1041 1u, // deUint32 setLayoutCount;
1042 &(m_descriptorSetLayouts[i].get()), // const VkDescriptorSetLayout* pSetLayouts;
1043 };
1044 m_descriptorSets.push_back(allocateDescriptorSet(vk, vkDevice, &allocInfo));
1045 }
1046 }
1047
1048 descriptorSetsPlain.resize(m_descriptorSets.size());
1049 for (size_t i = 0; i < descriptorSetsPlain.size(); ++i)
1050 descriptorSetsPlain[i] = m_descriptorSets[i].get();
1051
1052 // Update input buffer descriptors
1053 for (deUint32 binding = 0; binding < m_numBindings; ++binding)
1054 {
1055 const bool dynamicDesc = (binding < m_params.numDynamicBindings);
1056 const VkDescriptorType descriptorType = dynamicDesc ? m_params.descriptorType : nonDynamicDescriptorType;
1057 const VkDescriptorBufferInfo descriptorBufferInfo =
1058 {
1059 *m_buffer, // VkBuffer buffer;
1060 bindingOffset * binding, // VkDeviceSize offset;
1061 kColorSize // VkDeviceSize range;
1062 };
1063
1064 VkDescriptorSet bindingSet;
1065 deUint32 bindingNumber;
1066 deUint32 dstArrayElement;
1067
1068 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
1069 {
1070 bindingSet = m_descriptorSets[0].get();
1071 bindingNumber = binding;
1072 dstArrayElement = 0u;
1073 }
1074 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
1075 {
1076 bindingSet = m_descriptorSets[binding].get();
1077 bindingNumber = 0u;
1078 dstArrayElement = 0u;
1079 }
1080 else // GroupingStrategy::ARRAYS
1081 {
1082 bindingSet = (dynamicDesc ? m_descriptorSets[0].get() : m_descriptorSets[1].get());
1083 bindingNumber = 0u;
1084 dstArrayElement = (dynamicDesc ? binding : (binding - m_params.numDynamicBindings));
1085 }
1086
1087 const VkWriteDescriptorSet writeDescriptorSet =
1088 {
1089 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1090 DE_NULL, // const void* pNext;
1091 bindingSet, // VkDescriptorSet dstSet;
1092 bindingNumber, // uint32_t dstBinding;
1093 dstArrayElement, // uint32_t dstArrayElement;
1094 1u, // uint32_t descriptorCount;
1095 descriptorType, // VkDescriptorType descriptorType;
1096 DE_NULL, // const VkDescriptorImageInfo* pImageInfo;
1097 &descriptorBufferInfo, // const VkDescriptorBufferInfo* pBufferInfo;
1098 DE_NULL // const VkBufferView* pTexelBufferView;
1099 };
1100
1101 vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
1102 }
1103
1104 // Update output buffer descriptor
1105 {
1106 const VkDescriptorBufferInfo descriptorBufferInfo =
1107 {
1108 *m_outputBuffer, // VkBuffer buffer;
1109 0u, // VkDeviceSize offset;
1110 kColorSize // VkDeviceSize range;
1111 };
1112
1113 VkDescriptorSet bindingSet;
1114 deUint32 bindingNumber;
1115
1116 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
1117 {
1118 bindingSet = m_descriptorSets[0].get();
1119 bindingNumber = m_numBindings;
1120 }
1121 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
1122 {
1123 bindingSet = m_descriptorSets.back().get();
1124 bindingNumber = 0u;
1125 }
1126 else // GroupingStrategy::ARRAYS
1127 {
1128 bindingSet = m_descriptorSets.back().get();
1129 bindingNumber = 0u;
1130 }
1131
1132 const VkWriteDescriptorSet writeDescriptorSet =
1133 {
1134 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1135 DE_NULL, // const void* pNext;
1136 bindingSet, // VkDescriptorSet dstSet;
1137 bindingNumber, // uint32_t dstBinding;
1138 0u, // uint32_t dstArrayElement;
1139 1u, // uint32_t descriptorCount;
1140 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, // VkDescriptorType descriptorType;
1141 DE_NULL, // const VkDescriptorImageInfo* pImageInfo;
1142 &descriptorBufferInfo, // const VkDescriptorBufferInfo* pBufferInfo;
1143 DE_NULL // const VkBufferView* pTexelBufferView;
1144 };
1145
1146 vk.updateDescriptorSets(vkDevice, 1u, &writeDescriptorSet, 0u, DE_NULL);
1147 }
1148
1149 // Create shader
1150 {
1151 m_computeShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("compute"), 0u);
1152 }
1153
1154 // Create pipeline
1155 {
1156 const VkPipelineShaderStageCreateInfo stageCreateInfo =
1157 {
1158 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1159 DE_NULL, // const void* pNext;
1160 0u, // VkPipelineShaderStageCreateFlags flags;
1161 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
1162 *m_computeShaderModule, // VkShaderModule module;
1163 "main", // const char* pName;
1164 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
1165 };
1166
1167 const VkComputePipelineCreateInfo createInfo =
1168 {
1169 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
1170 DE_NULL, // const void* pNext;
1171 0u, // VkPipelineCreateFlags flags;
1172 stageCreateInfo, // VkPipelineShaderStageCreateInfo stage;
1173 *m_pipelineLayout, // VkPipelineLayout layout;
1174 (VkPipeline)0, // VkPipeline basePipelineHandle;
1175 0u, // int32_t basePipelineIndex;
1176 };
1177
1178 m_computePipeline = createComputePipeline(vk, vkDevice, (vk::VkPipelineCache)0u, &createInfo);
1179 }
1180
1181 // Create command pool
1182 m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1183
1184 // Create command buffers
1185 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
1186 m_cmdBuffers.push_back(VkCommandBufferSp(new Unique<VkCommandBuffer>(allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))));
1187
1188 deUint32 inputOffset = 0u;
1189 deUint32 outputOffset = 0u;
1190
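// Record one dispatch per descriptor set binding. The dynamic offset list holds one
// entry per dynamic input binding followed by one entry for the output buffer; both
// advance by one aligned color block per dispatch.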
1191 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
1192 {
1193 const deUint32 idx = m_params.reverseOrder ? m_params.numCmdBuffers - cmdBufferIdx - 1 : cmdBufferIdx;
1194
1195 beginCommandBuffer(vk, **m_cmdBuffers[idx], 0u);
1196 vk.cmdBindPipeline(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_COMPUTE, *m_computePipeline);
1197
1198 for (deUint32 i = 0; i < m_params.numDescriptorSetBindings; i++)
1199 {
1200 // Create pipeline barrier
1201 const vk::VkBufferMemoryBarrier bufferBarrier =
1202 {
1203 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1204 DE_NULL, // const void* pNext;
1205 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1206 vk::VK_ACCESS_SHADER_WRITE_BIT | vk::VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1207 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1208 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1209 *m_outputBuffer, // VkBuffer buffer;
1210 outputOffset, // VkDeviceSize offset;
1211 VK_WHOLE_SIZE // VkDeviceSize size;
1212 };
1213
1214 vector<deUint32> offsets;
1215
1216 // Offsets for input buffers
1217 for (deUint32 dynamicBindingIdx = 0; dynamicBindingIdx < m_params.numDynamicBindings; dynamicBindingIdx++)
1218 offsets.push_back(inputOffset + colorBlockInputSizeU32 * dynamicBindingIdx);
1219 inputOffset += colorBlockInputSizeU32;
1220
1221 // Offset for output buffer
1222 offsets.push_back(outputOffset);
1223 outputOffset += colorBlockOutputSizeU32;
1224
1225 vk.cmdBindDescriptorSets(**m_cmdBuffers[idx], VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, static_cast<deUint32>(descriptorSetsPlain.size()), descriptorSetsPlain.data(), (deUint32)offsets.size(), offsets.data());
1226
1227 // Dispatch
1228 vk.cmdDispatch(**m_cmdBuffers[idx], 1, 1, 1);
1229
1230 vk.cmdPipelineBarrier(**m_cmdBuffers[idx], vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | vk::VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &bufferBarrier, 0u, DE_NULL);
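// The barrier orders this dispatch's storage writes against the following dispatch
// and makes them available to the host reads done during verification.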
1231 }
1232
1233 endCommandBuffer(vk, **m_cmdBuffers[idx]);
1234 }
1235 }
1236
1237 DynamicOffsetComputeTestInstance::~DynamicOffsetComputeTestInstance (void)
1238 {
1239 }
1240
1241 tcu::TestStatus DynamicOffsetComputeTestInstance::iterate (void)
1242 {
1243 init();
1244
1245 for (deUint32 cmdBufferIdx = 0; cmdBufferIdx < m_params.numCmdBuffers; cmdBufferIdx++)
1246 submitCommandsAndWait(m_context.getDeviceInterface(), m_context.getDevice(), m_context.getUniversalQueue(), **m_cmdBuffers[cmdBufferIdx]);
1247
1248 return verifyOutput();
1249 }
1250
1251 tcu::TestStatus DynamicOffsetComputeTestInstance::verifyOutput (void)
1252 {
1253 const deUint32 bindingOffset = kNumTestColors / m_numBindings;
1254 const deUint32 colorBlockOutputSize = static_cast<deUint32>(de::max(kColorSize, m_deviceLimits.minStorageBufferOffsetAlignment));
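// kColorSize is 16 bytes and minStorageBufferOffsetAlignment is a power of two, so
// the max() above equals kColorSize rounded up to that alignment, i.e. the stride
// used when the dispatches were recorded.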
1255 vector<tcu::Vec4> refColors (m_numOutputColors);
1256 vector<tcu::Vec4> outColors (m_numOutputColors);
1257
1258 for (deUint32 i = 0; i < m_numOutputColors; i++)
1259 {
1260 tcu::Vec4 refColor(0.0f);
1261
1262 for (deUint32 binding = 0; binding < m_params.numDynamicBindings; binding++)
1263 refColor += testColors[i + binding * bindingOffset + binding];
1264 for (deUint32 binding = 0; binding < m_params.numNonDynamicBindings; binding++)
1265 refColor += testColors[(m_params.numDynamicBindings + binding) * bindingOffset];
1266 refColor.w() = 1.0f;
1267
1268 refColors[i] = refColor;
1269 }
1270
1271 invalidateAlloc(m_context.getDeviceInterface(), m_context.getDevice(), *m_outputBufferAlloc);
1272
1273 // Grab the output results using offset alignment
1274 for (deUint32 i = 0; i < m_numOutputColors; i++)
1275 outColors[i] = *(tcu::Vec4*)((deUint8*)m_outputBufferAlloc->getHostPtr() + colorBlockOutputSize * i);
1276
1277 // Verify results
1278 for (deUint32 i = 0; i < m_numOutputColors; i++)
1279 if (outColors[i] != refColors[i])
1280 return tcu::TestStatus::fail("Output mismatch");
1281
1282 return tcu::TestStatus::pass("Output matches expected values");
1283 }
1284
1285 class DynamicOffsetComputeTest : public vkt::TestCase
1286 {
1287 public:
1288 DynamicOffsetComputeTest (tcu::TestContext& testContext,
1289 const string& name,
1290 const string& description,
1291 const TestParams& params);
1292 ~DynamicOffsetComputeTest (void);
1293 void initPrograms (SourceCollections& sourceCollections) const;
1294 TestInstance* createInstance (Context& context) const;
1295
1296 protected:
1297 const TestParams m_params;
1298 };
1299
1300 DynamicOffsetComputeTest::DynamicOffsetComputeTest (tcu::TestContext& testContext,
1301 const string& name,
1302 const string& description,
1303 const TestParams& params)
1304 : vkt::TestCase (testContext, name, description)
1305 , m_params (params)
1306 {
1307 }
1308
1309 DynamicOffsetComputeTest::~DynamicOffsetComputeTest (void)
1310 {
1311 }
1312
1313 TestInstance* DynamicOffsetComputeTest::createInstance (Context& context) const
1314 {
1315 return new DynamicOffsetComputeTestInstance(context, m_params);
1316 }
1317
1318 void DynamicOffsetComputeTest::initPrograms (SourceCollections& sourceCollections) const
1319 {
1320 const deUint32 numBindings = m_params.numDynamicBindings + m_params.numNonDynamicBindings;
1321 const string bufferType = m_params.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ? "uniform" : "buffer";
1322 ostringstream inputBlocks;
1323 ostringstream inputSum;
1324 string setAndBinding;
1325 string blockSuffix;
1326 string accessSuffix;
1327 bool dynArrayDecl = false; // Dynamic descriptor block array declared?
1328 bool nonDynArrayDecl = false; // Nondynamic descriptor block array declared?
1329 string bStr;
1330
1331 for (deUint32 b = 0; b < numBindings; b++)
1332 {
1333 const bool dynBind = (b < m_params.numDynamicBindings);
1334 bStr = de::toString(b);
1335
1336 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
1337 {
1338 setAndBinding = "set = 0, binding = " + bStr;
1339 blockSuffix = bStr;
1340 accessSuffix = bStr;
1341 }
1342 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
1343 {
1344 setAndBinding = "set = " + bStr + ", binding = 0";
1345 blockSuffix = bStr;
1346 accessSuffix = bStr;
1347 }
1348 else // GroupingStrategy::ARRAYS
1349 {
1350 // In array mode, only two sets are declared, one with an array of dynamic descriptors and another one with an array of
1351 // nondynamic descriptors.
1352 setAndBinding = "set = " + string(dynBind ? "0" : "1") + ", binding = 0";
1353 blockSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(m_params.numDynamicBindings) : de::toString(m_params.numNonDynamicBindings)) + "]";
1354 accessSuffix = string(dynBind ? "Dyn" : "NonDyn") + "[" + (dynBind ? de::toString(b) : de::toString(b - m_params.numDynamicBindings)) + "]";
1355 }
1356
1357 // In array mode, declare the input block only once per descriptor type.
1358 bool& arrayDeclFlag = (dynBind ? dynArrayDecl : nonDynArrayDecl);
1359 if (m_params.groupingStrategy != GroupingStrategy::ARRAYS || !arrayDeclFlag)
1360 {
1361 inputBlocks
1362 << "layout(" << setAndBinding << ") " << bufferType << " Block" << bStr << "\n"
1363 << "{\n"
1364 << " vec4 color;\n"
1365 << "} inputData" << blockSuffix << ";\n"
1366 ;
1367 arrayDeclFlag = true;
1368 }
1369
1370 // But the sum always needs to be added once per descriptor.
1371 inputSum << " outData.color.rgb += inputData" << accessSuffix << ".color.rgb;\n";
1372 }
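	// Illustration only: with GroupingStrategy::ARRAYS, a dynamic uniform buffer type,
	// numDynamicBindings = 2 and numNonDynamicBindings = 1, the loop above emits one block
	// declaration per descriptor kind (line breaks condensed here):
	//
	//     layout(set = 0, binding = 0) uniform Block0 { vec4 color; } inputDataDyn[2];
	//     layout(set = 1, binding = 0) uniform Block2 { vec4 color; } inputDataNonDyn[1];
	//
	// and one accumulation statement per descriptor:
	//
	//     outData.color.rgb += inputDataDyn[0].color.rgb;
	//     outData.color.rgb += inputDataDyn[1].color.rgb;
	//     outData.color.rgb += inputDataNonDyn[0].color.rgb;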
1373
1374 bStr = de::toString(numBindings);
1375 if (m_params.groupingStrategy == GroupingStrategy::SINGLE_SET)
1376 {
1377 setAndBinding = "set = 0, binding = " + bStr;
1378 }
1379 else if (m_params.groupingStrategy == GroupingStrategy::MULTISET)
1380 {
1381 setAndBinding = "set = " + bStr + ", binding = 0";
1382 }
1383 else // GroupingStrategy::ARRAYS
1384 {
1385 // The output buffer goes to a separate set.
1386 deUint32 usedSets = 0u;
1387 if (dynArrayDecl) ++usedSets;
1388 if (nonDynArrayDecl) ++usedSets;
1389
1390 setAndBinding = "set = " + de::toString(usedSets) + ", binding = 0";
1391 }
1392
1393 const string computeSrc =
1394 "#version 450\n"
1395 + inputBlocks.str() +
1396 "layout(" + setAndBinding + ") writeonly buffer Output\n"
1397 "{\n"
1398 " vec4 color;\n"
1399 "} outData;\n"
1400 "\n"
1401 "void main()\n"
1402 "{\n"
1403 " outData.color = vec4(0, 0, 0, 1);\n"
1404 + inputSum.str() +
1405 "}\n";
1406
1407 sourceCollections.glslSources.add("compute") << glu::ComputeSource(computeSrc);
1408 }
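// For reference, a hand-expanded example of the source generated above (GroupingStrategy::SINGLE_SET,
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, numDynamicBindings = 1, numNonDynamicBindings = 1);
// whitespace may differ slightly from the actual generated string:
//
//     #version 450
//     layout(set = 0, binding = 0) uniform Block0
//     {
//         vec4 color;
//     } inputData0;
//     layout(set = 0, binding = 1) uniform Block1
//     {
//         vec4 color;
//     } inputData1;
//     layout(set = 0, binding = 2) writeonly buffer Output
//     {
//         vec4 color;
//     } outData;
//
//     void main()
//     {
//         outData.color = vec4(0, 0, 0, 1);
//         outData.color.rgb += inputData0.color.rgb;
//         outData.color.rgb += inputData1.color.rgb;
//     }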
1409
1410 } // anonymous
1411
1412 tcu::TestCaseGroup* createDynamicOffsetTests (tcu::TestContext& testCtx)
1413 {
1414 const char* pipelineTypes[] = { "graphics", "compute" };
1415
1416 struct
1417 {
1418 const char* name;
1419 const GroupingStrategy strategy;
1420 }
1421 const groupingTypes[] =
1422 {
1423 { "single_set", GroupingStrategy::SINGLE_SET },
1424 { "multiset", GroupingStrategy::MULTISET },
1425 { "arrays", GroupingStrategy::ARRAYS },
1426 };
1427
1428 struct
1429 {
1430 const char* name;
1431 VkDescriptorType type;
1432 }
1433 const descriptorTypes[] =
1434 {
1435 { "uniform_buffer", VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC },
1436 { "storage_buffer", VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC }
1437 };
1438
1439 struct
1440 {
1441 const char* name;
1442 deUint32 num;
1443 }
1444 const numCmdBuffers[] =
1445 {
1446 { "numcmdbuffers_1", 1u },
1447 { "numcmdbuffers_2", 2u }
1448 };
1449
1450 struct
1451 {
1452 const char* name;
1453 bool reverse;
1454 }
1455 const reverseOrders[] =
1456 {
1457 { "reverseorder", true },
1458 { "sameorder", false }
1459 };
1460
1461 struct
1462 {
1463 const char* name;
1464 deUint32 num;
1465 }
1466 const numDescriptorSetBindings[] =
1467 {
1468 { "numdescriptorsetbindings_1", 1u },
1469 { "numdescriptorsetbindings_2", 2u }
1470 };
1471
1472 struct
1473 {
1474 const char* name;
1475 deUint32 num;
1476 }
1477 const numDynamicBindings[] =
1478 {
1479 { "numdynamicbindings_1", 1u },
1480 { "numdynamicbindings_2", 2u }
1481 };
1482
1483 struct
1484 {
1485 const char* name;
1486 deUint32 num;
1487 }
1488 const numNonDynamicBindings[] =
1489 {
1490 { "numnondynamicbindings_0", 0u },
1491 { "numnondynamicbindings_1", 1u }
1492 };
1493
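// Rough size of the matrix built below, for orientation only: the two pruning rules in the
// nested loops skip reverse-order cases with a single command buffer and two-binding cases with
// two command buffers, leaving 4 of the 8 (numcmdbuffers, order, numdescriptorsetbindings)
// combinations. With 2 pipeline types, 3 grouping strategies, 2 descriptor types, 2 dynamic
// binding counts and 2 non-dynamic binding counts that is 2 * 3 * 2 * 4 * 2 * 2 = 192 test
// cases, assuming no further filtering is applied elsewhere.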
1494 de::MovePtr<tcu::TestCaseGroup> dynamicOffsetTests (new tcu::TestCaseGroup(testCtx, "dynamic_offset", "Dynamic offset tests"));
1495
1496 for (deUint32 pipelineTypeIdx = 0; pipelineTypeIdx < DE_LENGTH_OF_ARRAY(pipelineTypes); pipelineTypeIdx++)
1497 {
1498 de::MovePtr<tcu::TestCaseGroup> pipelineTypeGroup (new tcu::TestCaseGroup(testCtx, pipelineTypes[pipelineTypeIdx], ""));
1499
1500 for (deUint32 groupingTypeIdx = 0; groupingTypeIdx < DE_LENGTH_OF_ARRAY(groupingTypes); ++groupingTypeIdx)
1501 {
1502 de::MovePtr<tcu::TestCaseGroup> groupingTypeGroup (new tcu::TestCaseGroup(testCtx, groupingTypes[groupingTypeIdx].name, ""));
1503
1504 for (deUint32 descriptorTypeIdx = 0; descriptorTypeIdx < DE_LENGTH_OF_ARRAY(descriptorTypes); descriptorTypeIdx++)
1505 {
1506 de::MovePtr<tcu::TestCaseGroup> descriptorTypeGroup (new tcu::TestCaseGroup(testCtx, descriptorTypes[descriptorTypeIdx].name, ""));
1507
1508 for (deUint32 numCmdBuffersIdx = 0; numCmdBuffersIdx < DE_LENGTH_OF_ARRAY(numCmdBuffers); numCmdBuffersIdx++)
1509 {
1510 de::MovePtr<tcu::TestCaseGroup> numCmdBuffersGroup (new tcu::TestCaseGroup(testCtx, numCmdBuffers[numCmdBuffersIdx].name, ""));
1511
1512 for (deUint32 reverseOrderIdx = 0; reverseOrderIdx < DE_LENGTH_OF_ARRAY(reverseOrders); reverseOrderIdx++)
1513 {
1514 if (numCmdBuffers[numCmdBuffersIdx].num < 2 && reverseOrders[reverseOrderIdx].reverse)
1515 continue;
1516
1517 de::MovePtr<tcu::TestCaseGroup> reverseOrderGroup (new tcu::TestCaseGroup(testCtx, reverseOrders[reverseOrderIdx].name, ""));
1518
1519 for (deUint32 numDescriptorSetBindingsIdx = 0; numDescriptorSetBindingsIdx < DE_LENGTH_OF_ARRAY(numDescriptorSetBindings); numDescriptorSetBindingsIdx++)
1520 {
1521 if (numCmdBuffers[numCmdBuffersIdx].num > 1 && numDescriptorSetBindings[numDescriptorSetBindingsIdx].num > 1)
1522 continue;
1523
1524 de::MovePtr<tcu::TestCaseGroup> numDescriptorSetBindingsGroup (new tcu::TestCaseGroup(testCtx, numDescriptorSetBindings[numDescriptorSetBindingsIdx].name, ""));
1525 for (deUint32 numDynamicBindingsIdx = 0; numDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numDynamicBindings); numDynamicBindingsIdx++)
1526 {
1527 de::MovePtr<tcu::TestCaseGroup> numDynamicBindingsGroup (new tcu::TestCaseGroup(testCtx, numDynamicBindings[numDynamicBindingsIdx].name, ""));
1528
1529 for (deUint32 numNonDynamicBindingsIdx = 0; numNonDynamicBindingsIdx < DE_LENGTH_OF_ARRAY(numNonDynamicBindings); numNonDynamicBindingsIdx++)
1530 {
1531 TestParams params;
1532 params.descriptorType = descriptorTypes[descriptorTypeIdx].type;
1533 params.numCmdBuffers = numCmdBuffers[numCmdBuffersIdx].num;
1534 params.reverseOrder = reverseOrders[reverseOrderIdx].reverse;
1535 params.numDescriptorSetBindings = numDescriptorSetBindings[numDescriptorSetBindingsIdx].num;
1536 params.numDynamicBindings = numDynamicBindings[numDynamicBindingsIdx].num;
1537 params.numNonDynamicBindings = numNonDynamicBindings[numNonDynamicBindingsIdx].num;
1538 params.groupingStrategy = groupingTypes[groupingTypeIdx].strategy;
1539
1540 if (strcmp(pipelineTypes[pipelineTypeIdx], "graphics") == 0)
1541 numDynamicBindingsGroup->addChild(new DynamicOffsetGraphicsTest(testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, "", params));
1542 else
1543 numDynamicBindingsGroup->addChild(new DynamicOffsetComputeTest(testCtx, numNonDynamicBindings[numNonDynamicBindingsIdx].name, "", params));
1544 }
1545
1546 numDescriptorSetBindingsGroup->addChild(numDynamicBindingsGroup.release());
1547 }
1548
1549 reverseOrderGroup->addChild(numDescriptorSetBindingsGroup.release());
1550 }
1551
1552 numCmdBuffersGroup->addChild(reverseOrderGroup.release());
1553 }
1554
1555 descriptorTypeGroup->addChild(numCmdBuffersGroup.release());
1556 }
1557
1558 groupingTypeGroup->addChild(descriptorTypeGroup.release());
1559 }
1560
1561 pipelineTypeGroup->addChild(groupingTypeGroup.release());
1562 }
1563
1564 dynamicOffsetTests->addChild(pipelineTypeGroup.release());
1565 }
1566
1567 return dynamicOffsetTests.release();
1568 }
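// Example of a resulting group path (the fully qualified test name additionally depends on how
// the caller registers this group, which is outside this file):
//
//     dynamic_offset.compute.arrays.storage_buffer.numcmdbuffers_2.reverseorder.
//         numdescriptorsetbindings_1.numdynamicbindings_2.numnondynamicbindings_1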
1569
1570 } // pipeline
1571 } // vkt
1572