/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Google LLC.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief SSBO corner case tests.
 *//*--------------------------------------------------------------------*/
#include "deRandom.hpp"

#include "vktSSBOCornerCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"

#include <sstream> // std::ostringstream is used when assembling the shader source below.
#include <string>

namespace vkt
{
namespace ssbo
{
using std::string;
using std::vector;

namespace
{
class CornerCase : public TestCase
{
public:
    CornerCase (tcu::TestContext &testCtx, const char *name)
        : TestCase (testCtx, name)
    {
        init();
    }
    virtual void delayedInit (void);
    virtual void initPrograms (vk::SourceCollections &programCollection) const;
    virtual TestInstance* createInstance (Context &context) const;

protected:
    string m_computeShaderSrc;
    const int m_testSize = 589; // Minimum value of m_testSize that reproduces the crash this test was written for.
};

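// Builds the GLSL source for the corner-case compute shader: it reads loopCount
// ivec4 elements through a buffer_reference pointer and compares each against a
// pseudo-random constant, chaining the results with a bitwise AND.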
string useCornerCaseShader (int loopCount)
{
    std::ostringstream src;
    de::Random rnd(1);

    src <<
        "#version 310 es\n"
        "#extension GL_EXT_buffer_reference : enable\n"
        "layout(std430, buffer_reference) buffer BlockA\n"
        "{\n"
        "    highp ivec4 a[];\n"
        "};\n"
        // ac_numIrrelevant is never read back, but it is needed so that the compiler doesn't optimize the comparisons away.
76 "layout(std140, binding = 0) buffer AcBlock { highp uint ac_numIrrelevant; };\n"
77 "\n"
78 "layout (push_constant, std430) uniform PC {\n"
79 " BlockA blockA;\n"
80 "};\n"
81 "\n"
82 "bool compare_ivec4(highp ivec4 a, highp ivec4 b) { return a == b; }\n"
83 "\n"
84 "void main (void)\n"
85 "{\n"
86 " int allOk = int(true);\n";
87
88 for (int i = 0; i < loopCount; i++)
89 {
90 src << " allOk = allOk & int(compare_ivec4((blockA.a[" << i << "]), ivec4("
91 << rnd.getInt(-9,9) << ", "
92 << rnd.getInt(-9,9) << ", "
93 << rnd.getInt(-9,9) << ", "
94 << rnd.getInt(-9,9) << ")));\n";
95 }
96
97 src <<
98 " if (allOk != int(false))\n"
99 " {\n"
100 " ac_numIrrelevant++;\n"
101 " }\n"
102 "}\n";
103
104 return src.str();
105 }
106
struct Buffer
{
    deUint32 buffer;
    int      size;

    Buffer (deUint32 buffer_, int size_) : buffer(buffer_), size(size_) {}
    Buffer (void) : buffer(0), size(0) {}
};

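// Allocates memory for the given buffer with the requested memory requirements,
// binds it, and optionally reports the actual allocation size back to the caller.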
de::MovePtr<vk::Allocation> allocateAndBindMemory (Context &context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs, vk::VkDeviceSize* allocationSize = DE_NULL)
{
    const vk::DeviceInterface&     vkd     = context.getDeviceInterface();
    const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
    de::MovePtr<vk::Allocation>    memory  = context.getDefaultAllocator().allocate(bufReqs, memReqs);

    vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
    if (allocationSize)
    {
        *allocationSize = bufReqs.size;
    }

    return memory;
}

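// Creates an exclusive-mode buffer of the given size and usage on the universal queue family.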
vk::Move<vk::VkBuffer> createBuffer (Context &context, vk::VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
{
    const vk::VkDevice         vkDevice         = context.getDevice();
    const vk::DeviceInterface& vk               = context.getDeviceInterface();
    const deUint32             queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    const vk::VkBufferCreateInfo bufferInfo =
    {
        vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType       sType;
        DE_NULL,                                  // const void*           pNext;
        0u,                                       // VkBufferCreateFlags   flags;
        bufferSize,                               // VkDeviceSize          size;
        usageFlags,                               // VkBufferUsageFlags    usage;
        vk::VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode         sharingMode;
        1u,                                       // deUint32              queueFamilyIndexCount;
        &queueFamilyIndex                         // const deUint32*       pQueueFamilyIndices;
    };

    return vk::createBuffer(vk, vkDevice, &bufferInfo);
}
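
// Runs the generated compute shader once; the test passes as long as pipeline
// creation and the dispatch complete without crashing.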
class SSBOCornerCaseInstance : public TestInstance
{
public:
    SSBOCornerCaseInstance (Context& context, int testSize);
    virtual ~SSBOCornerCaseInstance (void);
    virtual tcu::TestStatus iterate (void);

private:
    int m_testSize;
};

SSBOCornerCaseInstance::SSBOCornerCaseInstance (Context& context, int testSize)
    : TestInstance (context)
    , m_testSize   (testSize)
{
}

SSBOCornerCaseInstance::~SSBOCornerCaseInstance (void)
{
}

tcu::TestStatus SSBOCornerCaseInstance::iterate (void)
{
    const vk::DeviceInterface& vk               = m_context.getDeviceInterface();
    const vk::VkDevice         device           = m_context.getDevice();
    const vk::VkQueue          queue            = m_context.getUniversalQueue();
    const deUint32             queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

    vk::Move<vk::VkBuffer>      buffer;
    de::MovePtr<vk::Allocation> alloc;

    // Create descriptor set
    const deUint32                acBufferSize = 4;
    vk::Move<vk::VkBuffer>        acBuffer      (createBuffer(m_context, acBufferSize, vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    vk::VkDeviceSize              acMemorySize  = 0;
    de::UniquePtr<vk::Allocation> acBufferAlloc (allocateAndBindMemory(m_context, *acBuffer, vk::MemoryRequirement::HostVisible, &acMemorySize));

    deMemset(acBufferAlloc->getHostPtr(), 0, acBufferSize);
    flushMappedMemoryRange(vk, device, acBufferAlloc->getMemory(), acBufferAlloc->getOffset(), acMemorySize);

    vk::DescriptorSetLayoutBuilder setLayoutBuilder;
    vk::DescriptorPoolBuilder      poolBuilder;

    setLayoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
    poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2);

    const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout (setLayoutBuilder.build(vk, device));
    const vk::Unique<vk::VkDescriptorPool>      descriptorPool      (poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const vk::VkDescriptorSetAllocateInfo allocInfo =
    {
        vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        DE_NULL,
        *descriptorPool,
        1u,
        &descriptorSetLayout.get(),
    };

    const vk::Unique<vk::VkDescriptorSet> descriptorSet  (allocateDescriptorSet(vk, device, &allocInfo));
    const vk::VkDescriptorBufferInfo      descriptorInfo = makeDescriptorBufferInfo(*acBuffer, 0ull, acBufferSize);

    vk::DescriptorSetUpdateBuilder setUpdateBuilder;
    vk::VkDescriptorBufferInfo     descriptor;

    setUpdateBuilder
        .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfo);

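    // The main SSBO is not bound through the descriptor set; the shader reaches it via a
    // buffer_reference pointer, so the buffer needs the device-address usage flag and,
    // where VK_KHR_buffer_device_address is supported, device-address memory as well.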
    vk::VkFlags usageFlags          = vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
    bool        memoryDeviceAddress = false;

    if (m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
        memoryDeviceAddress = true;

    // Upload base buffers
    const int bufferSize = 64 * m_testSize;
    {
        vk::VkPhysicalDeviceProperties properties;
        m_context.getInstanceInterface().getPhysicalDeviceProperties(m_context.getPhysicalDevice(), &properties);

        DE_ASSERT(bufferSize > 0);

        buffer     = createBuffer(m_context, bufferSize, usageFlags);
        alloc      = allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible | (memoryDeviceAddress ? vk::MemoryRequirement::DeviceAddress : vk::MemoryRequirement::Any));
        descriptor = makeDescriptorBufferInfo(*buffer, 0, bufferSize);
    }

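    // Query the device address of the main buffer; it is handed to the shader through
    // the push constant block and dereferenced there as BlockA.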
    vk::VkBufferDeviceAddressInfo info
    {
        vk::VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType  sType;
        DE_NULL,                                          // const void*      pNext;
        descriptor.buffer                                 // VkBuffer         buffer;
    };

    vk::VkDeviceAddress addr = vk.getBufferDeviceAddress(device, &info);

    setUpdateBuilder.update(vk, device);

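    // A single push constant range carries the 8-byte buffer device address.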
    const vk::VkPushConstantRange pushConstRange =
    {
        vk::VK_SHADER_STAGE_COMPUTE_BIT,        // VkShaderStageFlags  stageFlags
        0,                                      // deUint32            offset
        (deUint32)(sizeof(vk::VkDeviceAddress)) // deUint32            size
    };

    // Must fit within the 128-byte minimum maxPushConstantsSize guaranteed by the spec.
    DE_ASSERT(pushConstRange.size <= 128);

    const vk::VkPipelineLayoutCreateInfo pipelineLayoutParams =
    {
        vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType                 sType;
        DE_NULL,                                           // const void*                     pNext;
        (vk::VkPipelineLayoutCreateFlags)0,                // VkPipelineLayoutCreateFlags     flags;
        1u,                                                // deUint32                        setLayoutCount;
        &*descriptorSetLayout,                             // const VkDescriptorSetLayout*    pSetLayouts;
        1u,                                                // deUint32                        pushConstantRangeCount;
        &pushConstRange,                                   // const VkPushConstantRange*      pPushConstantRanges;
    };
    vk::Move<vk::VkPipelineLayout> pipelineLayout (createPipelineLayout(vk, device, &pipelineLayoutParams));

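    // Build the compute pipeline from the SPIR-V compiled out of the "compute" GLSL source.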
    vk::Move<vk::VkShaderModule> shaderModule (createShaderModule(vk, device, m_context.getBinaryCollection().get("compute"), 0));
    const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
    {
        vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType                     sType;
        DE_NULL,                                                 // const void*                         pNext;
        (vk::VkPipelineShaderStageCreateFlags)0,                 // VkPipelineShaderStageCreateFlags    flags;
        vk::VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits               stage;
        *shaderModule,                                           // VkShaderModule                      module;
        "main",                                                  // const char*                         pName;
        DE_NULL,                                                 // const VkSpecializationInfo*         pSpecializationInfo;
    };
    const vk::VkComputePipelineCreateInfo pipelineCreateInfo =
    {
        vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType                    sType;
        DE_NULL,                                            // const void*                        pNext;
        0,                                                  // VkPipelineCreateFlags              flags;
        pipelineShaderStageParams,                          // VkPipelineShaderStageCreateInfo    stage;
        *pipelineLayout,                                    // VkPipelineLayout                   layout;
        DE_NULL,                                            // VkPipeline                         basePipelineHandle;
        0,                                                  // deInt32                            basePipelineIndex;
    };
    vk::Move<vk::VkPipeline> pipeline (createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));

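    // Record a single 1x1x1 dispatch and wait for it to finish on the universal queue.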
    vk::Move<vk::VkCommandPool>   cmdPool   (createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    vk::Move<vk::VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer, 0u);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

    vk.cmdPushConstants(*cmdBuffer, *pipelineLayout, vk::VK_SHADER_STAGE_COMPUTE_BIT, 0, (deUint32)(sizeof(addr)), &addr);

    vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

    vk.cmdDispatch(*cmdBuffer, 1, 1, 1);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

    // Test always passes if it doesn't cause a crash.
    return tcu::TestStatus::pass("Test did not cause a crash");
}

void CornerCase::initPrograms (vk::SourceCollections& programCollection) const
{
    DE_ASSERT(!m_computeShaderSrc.empty());

    programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
}

TestInstance* CornerCase::createInstance (Context& context) const
{
    if (!context.isBufferDeviceAddressSupported())
        TCU_THROW(NotSupportedError, "Physical storage buffer pointers not supported");
    return new SSBOCornerCaseInstance(context, m_testSize);
}

void CornerCase::delayedInit (void)
{
    m_computeShaderSrc = useCornerCaseShader(m_testSize);
}
} // anonymous

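// Creates the "corner_case" group containing the long bitwise-AND shader case.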
tcu::TestCaseGroup* createSSBOCornerCaseTests (tcu::TestContext& testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> cornerCaseGroup (new tcu::TestCaseGroup(testCtx, "corner_case"));
    cornerCaseGroup->addChild(new CornerCase(testCtx, "long_shader_bitwise_and"));
    return cornerCaseGroup.release();
}
} // ssbo
} // vkt