/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Compute Shader Built-in variable tests.
 *//*--------------------------------------------------------------------*/

#include "vktComputeShaderBuiltinVarTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktComputeTestsUtil.hpp"

#include "vkDefs.hpp"
#include "vkPlatform.hpp"
#include "vkRef.hpp"
#include "vkPrograms.hpp"
#include "vkStrUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBufferWithMemory.hpp"

#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuCommandLine.hpp"

#include "gluShaderUtil.hpp"

#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"

#include <map>
#include <string>
#include <vector>

namespace vkt
{
namespace compute
{
namespace
{

using namespace vk;
using std::string;
using std::vector;
using std::map;
using tcu::TestLog;
using tcu::UVec3;
using tcu::IVec3;

class ComputeBuiltinVarInstance;
class ComputeBuiltinVarCase;

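// One compute program is generated per sub-case, named "compute_<subCaseNdx>".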
static const string s_prefixProgramName = "compute_";

static inline bool compareNumComponents (const UVec3& a, const UVec3& b, const int numComps)
{
    DE_ASSERT(numComps == 1 || numComps == 3);
    return numComps == 3 ? tcu::allEqual(a, b) : a.x() == b.x();
}

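// Read numComps consecutive 32-bit values from the mapped result buffer into a UVec3.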
static inline UVec3 readResultVec (const deUint32* ptr, const int numComps)
{
    UVec3 res;
    for (int ndx = 0; ndx < numComps; ndx++)
        res[ndx] = ptr[ndx];
    return res;
}

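// Helper for logging either a full uvec3 or only its first component, mirroring compareNumComponents().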
struct LogComps
{
    const UVec3&    v;
    int             numComps;

    LogComps (const UVec3& v_, int numComps_) : v(v_), numComps(numComps_) {}
};

static inline std::ostream& operator<< (std::ostream& str, const LogComps& c)
{
    DE_ASSERT(c.numComps == 1 || c.numComps == 3);
    return c.numComps == 3 ? str << c.v : str << c.v.x();
}

class SubCase
{
public:
    // Use getters instead of public const members, because SubCase must be assignable
    // in order to be stored in a vector.

    const UVec3&    localSize       (void) const { return m_localSize; }
    const UVec3&    numWorkGroups   (void) const { return m_numWorkGroups; }

    SubCase (void) {}
    SubCase (const UVec3& localSize_, const UVec3& numWorkGroups_)
        : m_localSize       (localSize_)
        , m_numWorkGroups   (numWorkGroups_) {}

private:
    UVec3   m_localSize;
    UVec3   m_numWorkGroups;
};


class ComputeBuiltinVarInstance : public vkt::TestInstance
{
public:
    ComputeBuiltinVarInstance (Context&                     context,
                               const vector<SubCase>&       subCases,
                               const glu::DataType          varType,
                               const ComputeBuiltinVarCase* builtinVarCase);

    virtual tcu::TestStatus iterate (void);

private:
    const VkDevice                  m_device;
    const DeviceInterface&          m_vki;
    const VkQueue                   m_queue;
    const deUint32                  m_queueFamilyIndex;
    vector<SubCase>                 m_subCases;
    const ComputeBuiltinVarCase*    m_builtin_var_case;
    int                             m_subCaseNdx;
    const glu::DataType             m_varType;
};

class ComputeBuiltinVarCase : public vkt::TestCase
{
public:
    ComputeBuiltinVarCase (tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent);
    ~ComputeBuiltinVarCase (void);

    TestInstance* createInstance (Context& context) const
    {
        return new ComputeBuiltinVarInstance(context, m_subCases, m_varType, this);
    }

    virtual void    initPrograms        (SourceCollections& programCollection) const;
    virtual UVec3   computeReference    (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const = 0;

protected:
    string          genBuiltinVarSource (const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const;
    vector<SubCase> m_subCases;

private:
    deUint32 getProgram (const tcu::UVec3& localSize);

    const string        m_varName;
    const glu::DataType m_varType;
    int                 m_subCaseNdx;
    bool                m_readByComponent;

    ComputeBuiltinVarCase (const ComputeBuiltinVarCase& other);
    ComputeBuiltinVarCase& operator= (const ComputeBuiltinVarCase& other);
};

ComputeBuiltinVarCase::ComputeBuiltinVarCase (tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent)
    : TestCase          (context, name + (readByComponent ? "_component" : ""), varName)
    , m_varName         (varName)
    , m_varType         (varType)
    , m_subCaseNdx      (0)
    , m_readByComponent (readByComponent)
{
}

ComputeBuiltinVarCase::~ComputeBuiltinVarCase (void)
{
    ComputeBuiltinVarCase::deinit();
}

void ComputeBuiltinVarCase::initPrograms (SourceCollections& programCollection) const
{
    for (std::size_t i = 0; i < m_subCases.size(); i++)
    {
        const SubCase&      subCase = m_subCases[i];
        std::ostringstream  name;
        name << s_prefixProgramName << i;
        programCollection.glslSources.add(name.str()) << glu::ComputeSource(genBuiltinVarSource(m_varName, m_varType, subCase.localSize(), m_readByComponent).c_str());
    }
}

string ComputeBuiltinVarCase::genBuiltinVarSource (const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const
{
    std::ostringstream src;

    src << "#version 310 es\n"
        << "layout (local_size_x = " << localSize.x() << ", local_size_y = " << localSize.y() << ", local_size_z = " << localSize.z() << ") in;\n";

    // For the gl_WorkGroupSize case, force it to be specialized so that
    // Glslang can't just bypass the read of the builtin variable.
    // We will not override these spec constants.
    src << "layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;\n";

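    // The Stride UBO is filled in iterate() with u_stride.x = globalSize.x * globalSize.y and
    // u_stride.y = globalSize.x, so the shader can linearize gl_GlobalInvocationID into a
    // result array index.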
    src << "layout(set = 0, binding = 0) uniform Stride\n"
        << "{\n"
        << " uvec2 u_stride;\n"
        << "}stride;\n"
        << "layout(set = 0, binding = 1, std430) buffer Output\n"
        << "{\n"
        << " " << glu::getDataTypeName(varType) << " result[];\n"
        << "} sb_out;\n"
        << "\n"
        << "void main (void)\n"
        << "{\n"
        << " highp uint offset = stride.u_stride.x*gl_GlobalInvocationID.z + stride.u_stride.y*gl_GlobalInvocationID.y + gl_GlobalInvocationID.x;\n";

    if (readByComponent && varType != glu::TYPE_UINT)
    {
        switch (varType)
        {
            case glu::TYPE_UINT_VEC4:
                src << " sb_out.result[offset].w = " << varName << ".w;\n";
                // Fall through
            case glu::TYPE_UINT_VEC3:
                src << " sb_out.result[offset].z = " << varName << ".z;\n";
                // Fall through
            case glu::TYPE_UINT_VEC2:
                src << " sb_out.result[offset].y = " << varName << ".y;\n"
                    << " sb_out.result[offset].x = " << varName << ".x;\n";
                break;
            default:
                DE_FATAL("Illegal data type");
                break;
        }
    }
    else
    {
        src << " sb_out.result[offset] = " << varName << ";\n";
    }
    src << "}\n";

    return src.str();
}

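// For reference, the generated GLSL for gl_GlobalInvocationID with a (2, 3, 4) local size and a
// whole-vector read looks roughly like this (illustrative sketch, not verbatim generator output):
//
//     #version 310 es
//     layout (local_size_x = 2, local_size_y = 3, local_size_z = 4) in;
//     layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;
//     layout(set = 0, binding = 0) uniform Stride { uvec2 u_stride; } stride;
//     layout(set = 0, binding = 1, std430) buffer Output { uvec3 result[]; } sb_out;
//
//     void main (void)
//     {
//         highp uint offset = stride.u_stride.x*gl_GlobalInvocationID.z
//                           + stride.u_stride.y*gl_GlobalInvocationID.y
//                           + gl_GlobalInvocationID.x;
//         sb_out.result[offset] = gl_GlobalInvocationID;
//     }
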
class NumWorkGroupsCase : public ComputeBuiltinVarCase
{
public:
    NumWorkGroupsCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "num_work_groups", "gl_NumWorkGroups", glu::TYPE_UINT_VEC3, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
    }

    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(workGroupSize);
        DE_UNREF(workGroupID);
        DE_UNREF(localInvocationID);
        return numWorkGroups;
    }
};

class WorkGroupSizeCase : public ComputeBuiltinVarCase
{
public:
    WorkGroupSizeCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "work_group_size", "gl_WorkGroupSize", glu::TYPE_UINT_VEC3, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
        m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
        m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
    }

    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(numWorkGroups);
        DE_UNREF(workGroupID);
        DE_UNREF(localInvocationID);
        return workGroupSize;
    }
};

//-----------------------------------------------------------------------
class WorkGroupIDCase : public ComputeBuiltinVarCase
{
public:
    WorkGroupIDCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "work_group_id", "gl_WorkGroupID", glu::TYPE_UINT_VEC3, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
    }

    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(numWorkGroups);
        DE_UNREF(workGroupSize);
        DE_UNREF(localInvocationID);
        return workGroupID;
    }
};

class LocalInvocationIDCase : public ComputeBuiltinVarCase
{
public:
    LocalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "local_invocation_id", "gl_LocalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
        m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
        m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
    }

    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(numWorkGroups);
        DE_UNREF(workGroupSize);
        DE_UNREF(workGroupID);
        return localInvocationID;
    }
};

class GlobalInvocationIDCase : public ComputeBuiltinVarCase
{
public:
    GlobalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "global_invocation_id", "gl_GlobalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
    }

    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(numWorkGroups);
        return workGroupID * workGroupSize + localInvocationID;
    }
};

class LocalInvocationIndexCase : public ComputeBuiltinVarCase
{
public:
    LocalInvocationIndexCase (tcu::TestContext& context, bool readByComponent)
        : ComputeBuiltinVarCase(context, "local_invocation_index", "gl_LocalInvocationIndex", glu::TYPE_UINT, readByComponent)
    {
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
        m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
        m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
    }

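    // gl_LocalInvocationIndex is the scalar z*sizeX*sizeY + y*sizeX + x; it is returned in the
    // x component of the reference vector and compared with numComps == 1.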
    UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
    {
        DE_UNREF(workGroupID);
        DE_UNREF(numWorkGroups);
        return UVec3(localInvocationID.z()*workGroupSize.x()*workGroupSize.y() + localInvocationID.y()*workGroupSize.x() + localInvocationID.x(), 0, 0);
    }
};

ComputeBuiltinVarInstance::ComputeBuiltinVarInstance (Context&                      context,
                                                      const vector<SubCase>&        subCases,
                                                      const glu::DataType           varType,
                                                      const ComputeBuiltinVarCase*  builtinVarCase)
    : vkt::TestInstance     (context)
    , m_device              (m_context.getDevice())
    , m_vki                 (m_context.getDeviceInterface())
    , m_queue               (context.getUniversalQueue())
    , m_queueFamilyIndex    (context.getUniversalQueueFamilyIndex())
    , m_subCases            (subCases)
    , m_builtin_var_case    (builtinVarCase)
    , m_subCaseNdx          (0)
    , m_varType             (varType)
{
}

tcu::TestStatus ComputeBuiltinVarInstance::iterate (void)
{
    std::ostringstream program_name;
    program_name << s_prefixProgramName << m_subCaseNdx;

    const SubCase&      subCase             = m_subCases[m_subCaseNdx];
    const tcu::UVec3    globalSize          = subCase.localSize()*subCase.numWorkGroups();
    const tcu::UVec2    stride              (globalSize[0] * globalSize[1], globalSize[0]);
    const deUint32      sizeOfUniformBuffer = sizeof(stride);
    const int           numScalars          = glu::getDataTypeScalarSize(m_varType);
    const deUint32      numInvocations      = subCase.localSize()[0] * subCase.localSize()[1] * subCase.localSize()[2] * subCase.numWorkGroups()[0] * subCase.numWorkGroups()[1] * subCase.numWorkGroups()[2];

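    // Size of one result array element in the std430 buffer; note that an array of uvec3 has the
    // same 16-byte stride as uvec4 under std430 rules.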
    deUint32 resultBufferStride = 0;
    switch (m_varType)
    {
        case glu::TYPE_UINT:
            resultBufferStride = sizeof(deUint32);
            break;
        case glu::TYPE_UINT_VEC2:
            resultBufferStride = sizeof(tcu::UVec2);
            break;
        case glu::TYPE_UINT_VEC3:
        case glu::TYPE_UINT_VEC4:
            resultBufferStride = sizeof(tcu::UVec4);
            break;
        default:
            DE_FATAL("Illegal data type");
    }

    const deUint32 resultBufferSize = numInvocations * resultBufferStride;

    // Create the uniform (stride) buffer and the result buffer
    vk::BufferWithMemory uniformBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(sizeOfUniformBuffer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT), MemoryRequirement::HostVisible);
    vk::BufferWithMemory resultBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(resultBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);

    {
        const Allocation& alloc = uniformBuffer.getAllocation();
        memcpy(alloc.getHostPtr(), &stride, sizeOfUniformBuffer);
        flushAlloc(m_vki, m_device, alloc);
    }

    // Create descriptorSetLayout
    const Unique<VkDescriptorSetLayout> descriptorSetLayout(
        DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
        .build(m_vki, m_device));

    const Unique<VkShaderModule>    shaderModule(createShaderModule(m_vki, m_device, m_context.getBinaryCollection().get(program_name.str()), 0u));
    const Unique<VkPipelineLayout>  pipelineLayout(makePipelineLayout(m_vki, m_device, *descriptorSetLayout));
    const Unique<VkPipeline>        pipeline(makeComputePipeline(m_vki, m_device, *pipelineLayout, *shaderModule));

    const Unique<VkDescriptorPool> descriptorPool(
        DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
        .build(m_vki, m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

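    // Barrier that makes the shader's writes to the result buffer visible to host reads after submission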
    const VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, resultBufferSize);

    const Unique<VkCommandPool>     cmdPool(makeCommandPool(m_vki, m_device, m_queueFamilyIndex));
    const Unique<VkCommandBuffer>   cmdBuffer(allocateCommandBuffer(m_vki, m_device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Start recording commands
    beginCommandBuffer(m_vki, *cmdBuffer);

    m_vki.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

    // Create descriptor set
    const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(m_vki, m_device, *descriptorPool, *descriptorSetLayout));

    const VkDescriptorBufferInfo resultDescriptorInfo  = makeDescriptorBufferInfo(*resultBuffer, 0ull, resultBufferSize);
    const VkDescriptorBufferInfo uniformDescriptorInfo = makeDescriptorBufferInfo(*uniformBuffer, 0ull, sizeOfUniformBuffer);

    DescriptorSetUpdateBuilder()
        .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &uniformDescriptorInfo)
        .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo)
        .update(m_vki, m_device);

    m_vki.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

    // Dispatch compute command
    m_vki.cmdDispatch(*cmdBuffer, subCase.numWorkGroups()[0], subCase.numWorkGroups()[1], subCase.numWorkGroups()[2]);

    m_vki.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                             0, (const VkMemoryBarrier*)DE_NULL,
                             1, &bufferBarrier,
                             0, (const VkImageMemoryBarrier*)DE_NULL);

    // End recording commands
    endCommandBuffer(m_vki, *cmdBuffer);

    // Submit the command buffer and wait for it to finish executing
    submitCommandsAndWait(m_vki, m_device, m_queue, *cmdBuffer);

    const Allocation& resultAlloc = resultBuffer.getAllocation();
    invalidateAlloc(m_vki, m_device, resultAlloc);

    const deUint8* ptr = reinterpret_cast<deUint8*>(resultAlloc.getHostPtr());

    int         numFailed       = 0;
    const int   maxLogPrints    = 10;

    tcu::TestContext& testCtx = m_context.getTestContext();

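    // Under Vulkan SC the workload is only executed in the sub-process, so verification is skipped
    // in the main process; regular Vulkan builds always verify.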
#ifdef CTS_USES_VULKANSC
    if (testCtx.getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
    {
        for (deUint32 groupZ = 0; groupZ < subCase.numWorkGroups().z(); groupZ++)
        for (deUint32 groupY = 0; groupY < subCase.numWorkGroups().y(); groupY++)
        for (deUint32 groupX = 0; groupX < subCase.numWorkGroups().x(); groupX++)
        for (deUint32 localZ = 0; localZ < subCase.localSize().z(); localZ++)
        for (deUint32 localY = 0; localY < subCase.localSize().y(); localY++)
        for (deUint32 localX = 0; localX < subCase.localSize().x(); localX++)
        {
            const UVec3     refGroupID  (groupX, groupY, groupZ);
            const UVec3     refLocalID  (localX, localY, localZ);
            const UVec3     refGlobalID = refGroupID * subCase.localSize() + refLocalID;

            const deUint32  refOffset   = stride.x()*refGlobalID.z() + stride.y()*refGlobalID.y() + refGlobalID.x();

            const UVec3     refValue    = m_builtin_var_case->computeReference(subCase.numWorkGroups(), subCase.localSize(), refGroupID, refLocalID);

            const deUint32* resPtr      = (const deUint32*)(ptr + refOffset * resultBufferStride);
            const UVec3     resValue    = readResultVec(resPtr, numScalars);

            if (!compareNumComponents(refValue, resValue, numScalars))
            {
                if (numFailed < maxLogPrints)
                    testCtx.getLog()
                        << TestLog::Message
                        << "ERROR: comparison failed at offset " << refOffset
                        << ": expected " << LogComps(refValue, numScalars)
                        << ", got " << LogComps(resValue, numScalars)
                        << TestLog::EndMessage;
                else if (numFailed == maxLogPrints)
                    testCtx.getLog() << TestLog::Message << "..." << TestLog::EndMessage;

                numFailed += 1;
            }
        }
    }

    testCtx.getLog() << TestLog::Message << (numInvocations - numFailed) << " / " << numInvocations << " values passed" << TestLog::EndMessage;

    if (numFailed > 0)
        return tcu::TestStatus::fail("Comparison failed");

    m_subCaseNdx += 1;
    return (m_subCaseNdx < (int)m_subCases.size()) ? tcu::TestStatus::incomplete() : tcu::TestStatus::pass("Comparison succeeded");
}

class ComputeShaderBuiltinVarTests : public tcu::TestCaseGroup
{
public:
    ComputeShaderBuiltinVarTests (tcu::TestContext& context);

    void init (void);

private:
    ComputeShaderBuiltinVarTests (const ComputeShaderBuiltinVarTests& other);
    ComputeShaderBuiltinVarTests& operator= (const ComputeShaderBuiltinVarTests& other);
};

ComputeShaderBuiltinVarTests::ComputeShaderBuiltinVarTests (tcu::TestContext& context)
    : TestCaseGroup(context, "builtin_var", "Shader builtin var tests")
{
}

void ComputeShaderBuiltinVarTests::init (void)
{
    // Builtin variables with vector values should be read whole and by component.
    for (int i = 0; i < 2; i++)
    {
        const bool readByComponent = (i != 0);
        addChild(new NumWorkGroupsCase(this->getTestContext(), readByComponent));
        addChild(new WorkGroupSizeCase(this->getTestContext(), readByComponent));
        addChild(new WorkGroupIDCase(this->getTestContext(), readByComponent));
        addChild(new LocalInvocationIDCase(this->getTestContext(), readByComponent));
        addChild(new GlobalInvocationIDCase(this->getTestContext(), readByComponent));
    }
    // Local invocation index is already just a scalar.
    addChild(new LocalInvocationIndexCase(this->getTestContext(), false));
}

} // anonymous

tcu::TestCaseGroup* createComputeShaderBuiltinVarTests (tcu::TestContext& testCtx)
{
    return new ComputeShaderBuiltinVarTests(testCtx);
}

} // compute
} // vkt