/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Google LLC.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief SSBO corner case tests.
 *//*--------------------------------------------------------------------*/
#include "deRandom.hpp"

#include "vktSSBOCornerCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"

#include <sstream>
#include <string>

namespace vkt
{
namespace ssbo
{
using std::string;
using std::vector;

namespace
{
class CornerCase : public TestCase
{
public:
    CornerCase (tcu::TestContext &testCtx, const char *name, const char *description)
        : TestCase (testCtx, name, description)
    {
        init();
    }
    virtual void            delayedInit     (void);
    virtual void            initPrograms    (vk::SourceCollections &programCollection) const;
    virtual TestInstance*   createInstance  (Context &context) const;

protected:
    string      m_computeShaderSrc;
    const int   m_testSize = 589;   // Minimum value of m_testSize observed to reproduce the crash this test guards against.
};

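// Builds the source of a GLSL compute shader whose body is a fully unrolled sequence of
// loopCount buffer_reference comparisons, each against a fixed pseudo-random ivec4
// (de::Random is seeded with 1, so the source is deterministic for a given loopCount).
// For illustration only, the generated body has roughly this shape (the <r*> values
// stand in for the concrete random constants):
//
//     int allOk = int(true);
//     allOk = allOk & int(compare_ivec4((blockA.a[0]), ivec4(<r0>, <r1>, <r2>, <r3>)));
//     allOk = allOk & int(compare_ivec4((blockA.a[1]), ivec4(<r4>, <r5>, <r6>, <r7>)));
//     ...
//     if (allOk != int(false)) { ac_numIrrelevant++; }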
string useCornerCaseShader (int loopCount)
{
    std::ostringstream src;
    de::Random rnd(1);

    src <<
        "#version 310 es\n"
        "#extension GL_EXT_buffer_reference : enable\n"
        "layout(std430, buffer_reference) buffer BlockA\n"
        "{\n"
        "    highp ivec4 a[];\n"
        "};\n"
        // ac_numIrrelevant is not used for anything, but it is needed so that the compiler doesn't optimize everything out.
        "layout(std140, binding = 0) buffer AcBlock { highp uint ac_numIrrelevant; };\n"
        "\n"
        "layout (push_constant, std430) uniform PC {\n"
        "    BlockA blockA;\n"
        "};\n"
        "\n"
        "bool compare_ivec4(highp ivec4 a, highp ivec4 b) { return a == b; }\n"
        "\n"
        "void main (void)\n"
        "{\n"
        "    int allOk = int(true);\n";

    for (int i = 0; i < loopCount; i++)
    {
        src << "    allOk = allOk & int(compare_ivec4((blockA.a[" << i << "]), ivec4("
            << rnd.getInt(-9, 9) << ", "
            << rnd.getInt(-9, 9) << ", "
            << rnd.getInt(-9, 9) << ", "
            << rnd.getInt(-9, 9) << ")));\n";
    }

    src <<
        "    if (allOk != int(false))\n"
        "    {\n"
        "        ac_numIrrelevant++;\n"
        "    }\n"
        "}\n";

    return src.str();
}

struct Buffer
{
    deUint32    buffer;
    int         size;

    Buffer (deUint32 buffer_, int size_) : buffer(buffer_), size(size_) {}
    Buffer (void) : buffer(0), size(0) {}
};

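// Allocates memory matching the buffer's requirements (plus the requested extra
// requirements) from the default allocator and binds it to the buffer before returning.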
de::MovePtr<vk::Allocation> allocateAndBindMemory (Context &context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
{
    const vk::DeviceInterface&      vkd     = context.getDeviceInterface();
    const vk::VkMemoryRequirements  bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
    de::MovePtr<vk::Allocation>     memory  = context.getDefaultAllocator().allocate(bufReqs, memReqs);

    vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());

    return memory;
}

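// Creates an exclusive-sharing-mode buffer of the given size and usage on the
// universal queue family.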
vk::Move<vk::VkBuffer> createBuffer (Context &context, vk::VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
{
    const vk::VkDevice          vkDevice            = context.getDevice();
    const vk::DeviceInterface&  vk                  = context.getDeviceInterface();
    const deUint32              queueFamilyIndex    = context.getUniversalQueueFamilyIndex();

    const vk::VkBufferCreateInfo bufferInfo =
    {
        vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // VkStructureType      sType;
        DE_NULL,                                    // const void*          pNext;
        0u,                                         // VkBufferCreateFlags  flags;
        bufferSize,                                 // VkDeviceSize         size;
        usageFlags,                                 // VkBufferUsageFlags   usage;
        vk::VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode        sharingMode;
        1u,                                         // deUint32             queueFamilyIndexCount;
        &queueFamilyIndex                           // const deUint32*      pQueueFamilyIndices;
    };

    return vk::createBuffer(vk, vkDevice, &bufferInfo);
}
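
// Test instance that builds a compute pipeline around the generated shader and
// dispatches it once.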
class SSBOCornerCaseInstance : public TestInstance
{
public:
                            SSBOCornerCaseInstance  (Context& context, int testSize);
    virtual                 ~SSBOCornerCaseInstance (void);
    virtual tcu::TestStatus iterate                 (void);

private:
    int m_testSize;
};

SSBOCornerCaseInstance::SSBOCornerCaseInstance (Context& context, int testSize)
    : TestInstance  (context)
    , m_testSize    (testSize)
{
}

SSBOCornerCaseInstance::~SSBOCornerCaseInstance (void)
{
}

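// Test flow: create a small host-visible atomic-counter SSBO and bind it through a
// descriptor set, create a large SSBO (64 bytes per unrolled comparison), query its
// device address, hand that address to the shader via push constants, then record and
// submit a single 1x1x1 dispatch. The interesting part is shader compilation and
// execution, not the result, so the buffer contents are never read back.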
tcu::TestStatus SSBOCornerCaseInstance::iterate (void)
{
    const vk::DeviceInterface&  vk                  = m_context.getDeviceInterface();
    const vk::VkDevice          device              = m_context.getDevice();
    const vk::VkQueue           queue               = m_context.getUniversalQueue();
    const deUint32              queueFamilyIndex    = m_context.getUniversalQueueFamilyIndex();

    vk::Move<vk::VkBuffer>      buffer;
    de::MovePtr<vk::Allocation> alloc;

    // Create descriptor set
    const deUint32                  acBufferSize    = 4;
    vk::Move<vk::VkBuffer>          acBuffer        (createBuffer(m_context, acBufferSize, vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    de::UniquePtr<vk::Allocation>   acBufferAlloc   (allocateAndBindMemory(m_context, *acBuffer, vk::MemoryRequirement::HostVisible));

    deMemset(acBufferAlloc->getHostPtr(), 0, acBufferSize);
    flushMappedMemoryRange(vk, device, acBufferAlloc->getMemory(), acBufferAlloc->getOffset(), acBufferSize);

    vk::DescriptorSetLayoutBuilder  setLayoutBuilder;
    vk::DescriptorPoolBuilder       poolBuilder;

    setLayoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
    poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2);

    const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout (setLayoutBuilder.build(vk, device));
    const vk::Unique<vk::VkDescriptorPool>      descriptorPool      (poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const vk::VkDescriptorSetAllocateInfo allocInfo =
    {
        vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        DE_NULL,
        *descriptorPool,
        1u,
        &descriptorSetLayout.get(),
    };

    const vk::Unique<vk::VkDescriptorSet>   descriptorSet   (allocateDescriptorSet(vk, device, &allocInfo));
    const vk::VkDescriptorBufferInfo        descriptorInfo  = makeDescriptorBufferInfo(*acBuffer, 0ull, acBufferSize);

    vk::DescriptorSetUpdateBuilder  setUpdateBuilder;
    vk::VkDescriptorBufferInfo      descriptor;

    setUpdateBuilder
        .writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfo);

    vk::VkFlags usageFlags          = vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | vk::VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
    bool        memoryDeviceAddress = false;

    if (m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
        memoryDeviceAddress = true;

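    // The SSBO is created with SHADER_DEVICE_ADDRESS usage; when VK_KHR_buffer_device_address
    // is available, the allocation additionally requests MemoryRequirement::DeviceAddress so
    // the memory is allocated with the device-address allocate flag that path requires.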
    // Upload base buffers
    const int bufferSize = 64 * m_testSize;
    {
        vk::VkPhysicalDeviceProperties properties;
        m_context.getInstanceInterface().getPhysicalDeviceProperties(m_context.getPhysicalDevice(), &properties);

        DE_ASSERT(bufferSize > 0);

        buffer      = createBuffer(m_context, bufferSize, usageFlags);
        alloc       = allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible | (memoryDeviceAddress ? vk::MemoryRequirement::DeviceAddress : vk::MemoryRequirement::Any));
        descriptor  = makeDescriptorBufferInfo(*buffer, 0, bufferSize);
    }

    // Query the buffer device address and push it to the shader via push constants
    const bool useKHR = m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address");

    vk::VkBufferDeviceAddressInfo info =
    {
        vk::VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,   // VkStructureType  sType;
        DE_NULL,                                            // const void*      pNext;
        0,                                                  // VkBuffer         buffer;
    };

    info.buffer = descriptor.buffer;

    vk::VkDeviceAddress addr;
    if (useKHR)
        addr = vk.getBufferDeviceAddress(device, &info);
    else
        addr = vk.getBufferDeviceAddressEXT(device, &info);

    setUpdateBuilder.update(vk, device);

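    // The push constant block carries only the 8-byte buffer device address that backs
    // the buffer_reference block in the shader.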
    const vk::VkPushConstantRange pushConstRange =
    {
        vk::VK_SHADER_STAGE_COMPUTE_BIT,            // VkShaderStageFlags   stageFlags
        0,                                          // deUint32             offset
        (deUint32)(sizeof(vk::VkDeviceAddress))     // deUint32             size
    };

    // Must fit within the spec-guaranteed minimum maxPushConstantsSize (128 bytes).
    DE_ASSERT(pushConstRange.size <= 128);

    const vk::VkPipelineLayoutCreateInfo pipelineLayoutParams =
    {
        vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,  // VkStructureType                sType;
        DE_NULL,                                            // const void*                    pNext;
        (vk::VkPipelineLayoutCreateFlags)0,                 // VkPipelineLayoutCreateFlags    flags;
        1u,                                                 // deUint32                       descriptorSetCount;
        &*descriptorSetLayout,                              // const VkDescriptorSetLayout*   pSetLayouts;
        1u,                                                 // deUint32                       pushConstantRangeCount;
        &pushConstRange,                                    // const VkPushConstantRange*     pPushConstantRanges;
    };
    vk::Move<vk::VkPipelineLayout> pipelineLayout (createPipelineLayout(vk, device, &pipelineLayoutParams));

    vk::Move<vk::VkShaderModule> shaderModule (createShaderModule(vk, device, m_context.getBinaryCollection().get("compute"), 0));
    const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
    {
        vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,    // VkStructureType                      sType;
        DE_NULL,                                                    // const void*                          pNext;
        (vk::VkPipelineShaderStageCreateFlags)0,                    // VkPipelineShaderStageCreateFlags     flags;
        vk::VK_SHADER_STAGE_COMPUTE_BIT,                            // VkShaderStageFlagBits                stage;
        *shaderModule,                                              // VkShaderModule                       module;
        "main",                                                     // const char*                          pName;
        DE_NULL,                                                    // const VkSpecializationInfo*          pSpecializationInfo;
    };
    const vk::VkComputePipelineCreateInfo pipelineCreateInfo =
    {
        vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,     // VkStructureType                  sType;
        DE_NULL,                                                // const void*                      pNext;
        0,                                                      // VkPipelineCreateFlags            flags;
        pipelineShaderStageParams,                              // VkPipelineShaderStageCreateInfo  stage;
        *pipelineLayout,                                        // VkPipelineLayout                 layout;
        DE_NULL,                                                // VkPipeline                       basePipelineHandle;
        0,                                                      // deInt32                          basePipelineIndex;
    };
    vk::Move<vk::VkPipeline> pipeline (createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));

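    // Record and submit a single dispatch; submitCommandsAndWait blocks until the queue
    // has finished executing it.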
    vk::Move<vk::VkCommandPool>     cmdPool     (createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    vk::Move<vk::VkCommandBuffer>   cmdBuffer   (allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer, 0u);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

    vk.cmdPushConstants(*cmdBuffer, *pipelineLayout, vk::VK_SHADER_STAGE_COMPUTE_BIT, 0, (deUint32)(sizeof(addr)), &addr);

    vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

    vk.cmdDispatch(*cmdBuffer, 1, 1, 1);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

    // Test always passes if it doesn't cause a crash.
    return tcu::TestStatus::pass("Test did not cause a crash");
}

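// The shader source is produced in delayedInit(), so it must already be non-empty here.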
void CornerCase::initPrograms (vk::SourceCollections& programCollection) const
{
    DE_ASSERT(!m_computeShaderSrc.empty());

    programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
}

TestInstance* CornerCase::createInstance (Context& context) const
{
    if (!context.isBufferDeviceAddressSupported())
        TCU_THROW(NotSupportedError, "Physical storage buffer pointers not supported");

    return new SSBOCornerCaseInstance(context, m_testSize);
}

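// Generates the unrolled compute shader source for the configured test size.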
void CornerCase::delayedInit (void)
{
    m_computeShaderSrc = useCornerCaseShader(m_testSize);
}

} // anonymous

tcu::TestCaseGroup* createSSBOCornerCaseTests (tcu::TestContext& testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> cornerCaseGroup (new tcu::TestCaseGroup(testCtx, "corner_case", "Corner cases"));

    cornerCaseGroup->addChild(new CornerCase(testCtx, "long_shader_bitwise_and", ""));

    return cornerCaseGroup.release();
}

} // ssbo
} // vkt