1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory storage buffer tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemStorageBufferTests.hpp"
26
27 #include "deRandom.hpp"
28 #include "deStringUtil.hpp"
29 #include "tcuTestLog.hpp"
30 #include "tcuVector.hpp"
31 #include "tcuStringTemplate.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vktTestCase.hpp"
35 #include "vktTestGroupUtil.hpp"
36 #include "vkTypeUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkCmdUtil.hpp"
39
40 #include "vktProtectedMemBufferValidator.hpp"
41 #include "vktProtectedMemUtils.hpp"
42 #include "vktProtectedMemContext.hpp"
43
44 namespace vkt
45 {
46 namespace ProtectedMem
47 {
48
49 namespace
50 {
51
// Dimensions of the color attachment rendered to by the fragment-shader test path.
enum {
	RENDER_HEIGHT	= 128,
	RENDER_WIDTH	= 128,
};
56
// Number of randomized data sets generated for each "random" test group.
enum {
	RANDOM_TEST_COUNT = 10,
};
60
// Kind of storage-buffer access exercised by a test (selects the shader template).
enum SSBOTestType {
	SSBO_READ,		// shader copies one protected SSBO into another
	SSBO_WRITE,		// shader writes a uniform value into a protected SSBO
	SSBO_ATOMIC		// shader performs an atomic operation on a protected SSBO
};
66
// Atomic operation variant used by SSBO_ATOMIC tests; values are sequential
// from 0 and their order matches the name table in getSSBOAtomicTypeString().
enum SSBOAtomicType {
	ATOMIC_ADD,
	ATOMIC_MIN,
	ATOMIC_MAX,
	ATOMIC_AND,
	ATOMIC_OR,
	ATOMIC_XOR,
	ATOMIC_EXCHANGE,
	ATOMIC_COMPSWAP
};
77
78
getSSBOTestDescription(SSBOTestType type)79 const char* getSSBOTestDescription (SSBOTestType type)
80 {
81 switch (type) {
82 case SSBO_READ: return "Test for read storage buffer on protected memory.";
83 case SSBO_WRITE: return "Test for write storage buffer on protected memory.";
84 case SSBO_ATOMIC: return "Test for atomic storage buffer on protected memory.";
85 default: DE_FATAL("Invalid SSBO test type"); return "";
86 }
87 }
88
getSSBOTypeString(SSBOTestType type)89 const char* getSSBOTypeString (SSBOTestType type)
90 {
91 switch (type) {
92 case SSBO_READ: return "read";
93 case SSBO_WRITE: return "write";
94 case SSBO_ATOMIC: return "atomic";
95 default: DE_FATAL("Invalid SSBO test type"); return "";
96 }
97 }
98
getSSBOAtomicTypeString(SSBOAtomicType type)99 const char* getSSBOAtomicTypeString (SSBOAtomicType type)
100 {
101 switch (type)
102 {
103 case ATOMIC_ADD: return "add";
104 case ATOMIC_MIN: return "min";
105 case ATOMIC_MAX: return "max";
106 case ATOMIC_AND: return "and";
107 case ATOMIC_OR: return "or";
108 case ATOMIC_XOR: return "xor";
109 case ATOMIC_EXCHANGE: return "exchange";
110 case ATOMIC_COMPSWAP: return "compswap";
111 default: DE_FATAL("Invalid SSBO atomic operation type"); return "";
112 }
113 }
114
addBufferCopyCmd(const vk::DeviceInterface & vk,vk::VkCommandBuffer cmdBuffer,deUint32 queueFamilyIndex,vk::VkBuffer srcBuffer,vk::VkBuffer dstBuffer,deUint32 copySize,bool dstFragment)115 void static addBufferCopyCmd (const vk::DeviceInterface& vk,
116 vk::VkCommandBuffer cmdBuffer,
117 deUint32 queueFamilyIndex,
118 vk::VkBuffer srcBuffer,
119 vk::VkBuffer dstBuffer,
120 deUint32 copySize,
121 bool dstFragment)
122 {
123 const vk::VkBufferMemoryBarrier dstWriteStartBarrier =
124 {
125 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
126 DE_NULL, // const void* pNext
127 vk::VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask
128 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags dstAccessMask
129 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
130 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
131 srcBuffer, // VkBuffer buffer
132 0u, // VkDeviceSize offset
133 VK_WHOLE_SIZE, // VkDeviceSize size
134 };
135
136 vk.cmdPipelineBarrier(cmdBuffer,
137 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
138 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // dstStageMask
139 (vk::VkDependencyFlags)0,
140 0, (const vk::VkMemoryBarrier*)DE_NULL,
141 1, &dstWriteStartBarrier,
142 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
143
144 const vk::VkBufferCopy copyRegion =
145 {
146 0, // VkDeviceSize srcOffset
147 0, // VkDeviceSize dstOffset
148 copySize // VkDeviceSize size
149 };
150 vk.cmdCopyBuffer(cmdBuffer, srcBuffer, dstBuffer, 1, ©Region);
151
152 const vk::VkBufferMemoryBarrier dstWriteEndBarrier =
153 {
154 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
155 DE_NULL, // const void* pNext
156 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags srcAccessMask
157 vk::VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask
158 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
159 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
160 dstBuffer, // VkBuffer buffer
161 0u, // VkDeviceSize offset
162 VK_WHOLE_SIZE, // VkDeviceSize size
163 };
164
165 vk.cmdPipelineBarrier(cmdBuffer,
166 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // srcStageMask
167 dstFragment ? vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT :
168 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, // dstStageMask
169 (vk::VkDependencyFlags)0,
170 0, (const vk::VkMemoryBarrier*)DE_NULL,
171 1, &dstWriteEndBarrier,
172 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
173
174 }
175
getProtectionMode(const vk::VkPipelineCreateFlags flags)176 ProtectionMode getProtectionMode(const vk::VkPipelineCreateFlags flags) {
177 #ifndef CTS_USES_VULKANSC
178 if ((flags & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0) {
179 return ProtectionMode::PROTECTION_DISABLED;
180 }
181 #endif
182 DE_UNREF(flags);
183 return ProtectionMode::PROTECTION_ENABLED;
184 }
185
// Instance side of a storage-buffer test: builds the Vulkan objects for one
// SSBO read/write/atomic case on the protected-memory device, runs it through
// either the fragment or the compute path, and checks the result buffer with
// the supplied validator.
template<typename T>
class StorageBufferTestInstance : public ProtectedTestInstance
{
public:
								StorageBufferTestInstance	(Context&							ctx,
															 const SSBOTestType					testType,
															 const glu::ShaderType				shaderType,
															 const tcu::UVec4					testInput,
															 const BufferValidator<T>&			validator,
															 const bool							pipelineProtectedAccess,
															 const vk::VkPipelineCreateFlags	pipelineFlags);
	virtual tcu::TestStatus		iterate						(void);

private:
	// One execution path per supported shader stage; iterate() dispatches on m_shaderType.
	tcu::TestStatus				executeFragmentTest			(void);
	tcu::TestStatus				executeComputeTest			(void);

	const SSBOTestType				m_testType;
	const glu::ShaderType			m_shaderType;
	const tcu::UVec4				m_testInput;		// uploaded into the host-visible uniform buffer
	const BufferValidator<T>&		m_validator;		// reference to the validator owned by the test case
	const vk::VkFormat				m_imageFormat;		// color-attachment format for the fragment path
	const vk::VkPipelineCreateFlags	m_pipelineFlags;
	const ProtectionMode			m_protectionMode;	// derived from m_pipelineFlags via getProtectionMode()
};
211
// Case side of a storage-buffer test: stores all parameters, generates the
// shader sources (initPrograms) and creates StorageBufferTestInstance objects.
template<typename T>
class StorageBufferTestCase : public TestCase
{
public:
							StorageBufferTestCase	(tcu::TestContext&			testctx,
													 const SSBOTestType			testType,
													 const glu::ShaderType		shaderType,
													 const char*				name,
													 const tcu::UVec4			testInput,
													 ValidationDataStorage<T>	validationData,
													 vk::VkFormat				format,
													 bool						pipelineProtectedAccess,
													 vk::VkPipelineCreateFlags	pipelineFlags,
													 const std::string&			extraShader = "")	// extra GLSL spliced into the atomic shader template
								: TestCase					(testctx, name, getSSBOTestDescription(testType))
								, m_testType				(testType)
								, m_shaderType				(shaderType)
								, m_testInput				(testInput)
								, m_validator				(validationData, format)
								, m_pipelineProtectedAccess	(pipelineProtectedAccess)
								, m_pipelineFlags			(pipelineFlags)
								, m_extraShader				(extraShader)
								, m_protectionMode			(getProtectionMode(m_pipelineFlags))
							{
							}
	virtual TestInstance*	createInstance			(Context& ctx) const
							{
								return new StorageBufferTestInstance<T>(ctx, m_testType, m_shaderType, m_testInput, m_validator, m_pipelineProtectedAccess, m_pipelineFlags);
							}
	virtual void			initPrograms			(vk::SourceCollections& programCollection) const;
	virtual void			checkSupport			(Context& context) const
							{
								checkProtectedQueueSupport(context);
							}

	virtual					~StorageBufferTestCase	(void) {}

private:
	const SSBOTestType				m_testType;
	const glu::ShaderType			m_shaderType;
	const tcu::UVec4				m_testInput;
	const BufferValidator<T>		m_validator;		// owns the expected values; instances hold a reference to it
	const bool						m_pipelineProtectedAccess;
	const vk::VkPipelineCreateFlags	m_pipelineFlags;
	const std::string				m_extraShader;		// atomic-call snippet for SSBO_ATOMIC tests (empty otherwise)
	const ProtectionMode			m_protectionMode;
};
259
// When pipelineProtectedAccess is set, VK_EXT_pipeline_protected_access is
// passed to ProtectedTestInstance as an extra device extension to enable.
template<typename T>
StorageBufferTestInstance<T>::StorageBufferTestInstance	(Context&							ctx,
														 const SSBOTestType					testType,
														 const glu::ShaderType				shaderType,
														 const tcu::UVec4					testInput,
														 const BufferValidator<T>&			validator,
														 const bool							pipelineProtectedAccess,
														 const vk::VkPipelineCreateFlags	pipelineFlags)
	: ProtectedTestInstance	(ctx, pipelineProtectedAccess ? std::vector<std::string>({ "VK_EXT_pipeline_protected_access" }) : std::vector<std::string>())
	, m_testType			(testType)
	, m_shaderType			(shaderType)
	, m_testInput			(testInput)
	, m_validator			(validator)
	, m_imageFormat			(vk::VK_FORMAT_R8G8B8A8_UNORM)	// fixed color-attachment format for the fragment path
	, m_pipelineFlags		(pipelineFlags)
	, m_protectionMode		(getProtectionMode(m_pipelineFlags))
{
}
278
// Builds the GLSL sources: a fixed vertex shader (fragment path only) plus
// one "TestShader" generated from a template chosen by m_testType and then
// specialized for either a fragment or a compute entry point.
template<typename T>
void StorageBufferTestCase<T>::initPrograms (vk::SourceCollections& programCollection) const
{
	// Draws 4 points; vIndex forwards gl_VertexIndex to the fragment shader,
	// where it serves as the invocation id for the atomic tests.
	const char* vertexShader =
		"#version 450\n"
		"layout(location=0) out vec4 vIndex;\n"
		"void main() {\n"
		"    vec2 pos[4] = vec2[4]( vec2(-0.7, 0.7), vec2(0.7, 0.7), vec2(0.0, -0.7), vec2(-0.7, -0.7) );\n"
		"    vIndex = vec4(gl_VertexIndex);\n"
		"    gl_PointSize = 1.0;\n"
		"    gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n"
		"}";

	// SSBO_READ: copies the source SSBO (binding 2) into the result SSBO (binding 0).
	// set = 0, location = 0 -> buffer ProtectedTestBuffer (uvec4)
	// set = 0, location = 2 -> buffer ProtectedTestBufferSource (uvec4)
	const char* readShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uvec4 protectedTestResultBuffer;\n"
		"};\n"
		"\n"
		"layout(set=0, binding=2, std140) buffer ProtectedTestBufferSource\n"
		"{\n"
		"    highp uvec4 protectedTestBufferSource;\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    protectedTestResultBuffer = protectedTestBufferSource;\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// SSBO_WRITE: writes the uniform input (binding 1) into the result SSBO (binding 0).
	// set = 0, location = 0 -> buffer ProtectedTestBuffer (uvec4)
	// set = 0, location = 1 -> uniform Data (uvec4)
	const char* writeShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uvec4 protectedTestResultBuffer;\n"
		"};\n"
		"\n"
		"layout(set=0, binding=1, std140) uniform Data\n"
		"{\n"
		"    highp uvec4 testInput;\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    protectedTestResultBuffer = testInput;\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// SSBO_ATOMIC: per-invocation atomic op on element i of the result SSBO,
	// where i comes from ${INVOCATION_ID} and the op from ${ATOMIC_FUNCTION_CALL}.
	// set = 0, location = 0 -> buffer ProtectedTestBuffer (uint [4])
	const char* atomicTestShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std430) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uint protectedTestResultBuffer[4];\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    uint i = uint(${INVOCATION_ID});\n"
		"    ${ATOMIC_FUNCTION_CALL}\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// Select the template and fill in the test-type specific parameters.
	const char* shaderTemplateStr;
	std::map<std::string, std::string> shaderParam;
	switch (m_testType) {
		case SSBO_READ: shaderTemplateStr = readShaderTemplateStr; break;
		case SSBO_WRITE: shaderTemplateStr = writeShaderTemplateStr; break;
		case SSBO_ATOMIC: {
			shaderTemplateStr = atomicTestShaderTemplateStr;
			shaderParam["ATOMIC_FUNCTION_CALL"] = m_extraShader;	// e.g. "atomicAdd(protectedTestResultBuffer[i], ...);"
			break;
		}
		default: DE_FATAL("Incorrect SSBO test type"); return;
	}

	if (m_shaderType == glu::SHADERTYPE_FRAGMENT)
	{
		// Fragment specialization: write a fixed color so the pass has an output,
		// and derive the invocation id from the interpolated vertex index.
		shaderParam["INPUT_DECLARATION"] = "layout(location=0) out mediump vec4 o_color;\n"
										   "layout(location=0) in vec4 vIndex;\n";
		shaderParam["FRAGMENT_OUTPUT"] = "o_color = vec4( 0.0, 0.4, 1.0, 1.0 );\n";
		shaderParam["INVOCATION_ID"] = "vIndex.x";

		programCollection.glslSources.add("vert") << glu::VertexSource(vertexShader);
		programCollection.glslSources.add("TestShader") << glu::FragmentSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
	}
	else if (m_shaderType == glu::SHADERTYPE_COMPUTE)
	{
		// Compute specialization: 1x1x1 workgroups, invocation id from the dispatch.
		shaderParam["INPUT_DECLARATION"] = "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
		shaderParam["FRAGMENT_OUTPUT"] = "";
		shaderParam["INVOCATION_ID"] = "gl_GlobalInvocationID.x";
		programCollection.glslSources.add("TestShader") << glu::ComputeSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
	}
	else
		DE_FATAL("Incorrect shader type");

	// The validator contributes its own verification shader(s).
	m_validator.initPrograms(programCollection);
}
388
// Graphics path: renders 4 points with the generated fragment shader bound to
// the protected storage buffers, then validates the result buffer (binding 0).
template<typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeFragmentTest(void)
{
	ProtectedContext&				ctx					(m_protectedContext);
	const vk::DeviceInterface&		vk					= ctx.getDeviceInterface();
	const vk::VkDevice				device				= ctx.getDevice();
	const vk::VkQueue				queue				= ctx.getQueue();
	const deUint32					queueFamilyIndex	= ctx.getQueueFamilyIndex();

	// Host-visible, unprotected buffer carrying the test input: bound as the
	// uniform (binding 1) and used as transfer source for read/atomic tests.
	const deUint32						testUniformSize	= sizeof(m_testInput);
	de::UniquePtr<vk::BufferWithMemory>	testUniform		(makeBuffer(ctx,
															PROTECTION_DISABLED,
															queueFamilyIndex,
															testUniformSize,
															vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
															vk::MemoryRequirement::HostVisible));

	// Set the test input uniform data
	{
		deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
		vk::flushAlloc(vk, device, testUniform->getAllocation());
	}

	// With NO_PROTECTED_ACCESS pipelines the SSBOs must not require protected
	// memory; otherwise they must be protected.
	const vk::MemoryRequirement* memoryRequirement = &vk::MemoryRequirement::Any;
	if (m_protectionMode == PROTECTION_ENABLED) {
		memoryRequirement = &vk::MemoryRequirement::Protected;
	}

	// Result SSBO (binding 0) and source SSBO (binding 2).
	const deUint32						testBufferSize		= sizeof(ValidationDataStorage<T>);
	de::MovePtr<vk::BufferWithMemory>	testBuffer			(makeBuffer(ctx,
															m_protectionMode,
															queueFamilyIndex,
															testBufferSize,
															vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
															*memoryRequirement));
	de::MovePtr<vk::BufferWithMemory>	testBufferSource	(makeBuffer(ctx,
															m_protectionMode,
															queueFamilyIndex,
															testBufferSize,
															vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
															*memoryRequirement));

	vk::Move<vk::VkShaderModule>	vertexShader	(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0));
	vk::Unique<vk::VkShaderModule>	testShader		(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

	// Create descriptors: binding 0 = result SSBO, 1 = input UBO, 2 = source SSBO
	// (must match the layout declared in the shader templates).
	vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder()
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.build(vk, device));
	vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
	vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

	// Update descriptor set information
	{
		vk::VkDescriptorBufferInfo descTestBuffer		= makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
		vk::VkDescriptorBufferInfo descTestUniform		= makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
		vk::VkDescriptorBufferInfo descTestBufferSource	= makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

		vk::DescriptorSetUpdateBuilder()
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
			.update(vk, device);
	}

	// Create output image
	de::MovePtr<vk::ImageWithMemory>	colorImage		(createImage2D(ctx, m_protectionMode, queueFamilyIndex,
														RENDER_WIDTH, RENDER_HEIGHT,
														m_imageFormat,
														vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
	vk::Unique<vk::VkImageView>			colorImageView	(createImageView(ctx, **colorImage, m_imageFormat));
	vk::Unique<vk::VkRenderPass>		renderPass		(createRenderPass(ctx, m_imageFormat));
	vk::Unique<vk::VkFramebuffer>		framebuffer		(createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));

	// Build pipeline
	vk::Unique<vk::VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));
	vk::Unique<vk::VkCommandPool>		cmdPool			(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
	vk::Unique<vk::VkCommandBuffer>		cmdBuffer		(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Create pipeline
	vk::Unique<vk::VkPipeline>			graphicsPipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass,
														*vertexShader, *testShader,
														std::vector<vk::VkVertexInputBindingDescription>(),
														std::vector<vk::VkVertexInputAttributeDescription>(),
														tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
														vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
														m_pipelineFlags));

	beginCommandBuffer(vk, *cmdBuffer);

	// Read/atomic variants need the input uploaded into a device-side SSBO
	// first: reads consume the source buffer (binding 2), atomics operate in
	// place on the result buffer (binding 0).
	if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
	{
		vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
		addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, true);
	}

	// Start image barrier: UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL before rendering.
	{
		const vk::VkImageMemoryBarrier startImgBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,										// pNext
			0,												// srcAccessMask
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// dstAccessMask
			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
			queueFamilyIndex,								// srcQueueFamilyIndex
			queueFamilyIndex,								// dstQueueFamilyIndex
			**colorImage,									// image
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,											// baseMipLevel
				1u,											// mipLevels
				0u,											// baseArraySlice
				1u,											// subresourceRange
			}
		};

		vk.cmdPipelineBarrier(*cmdBuffer,
							  vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,				// srcStageMask
							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,	// dstStageMask
							  (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &startImgBarrier);
	}

	beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f));
	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);

	// 4 point primitives -- matches the positions array in the vertex shader.
	vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
	endRenderPass(vk, *cmdBuffer);

	// Transition the attachment for later sampling/readback.
	{
		const vk::VkImageMemoryBarrier endImgBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,										// pNext
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// srcAccessMask
			vk::VK_ACCESS_SHADER_READ_BIT,					// dstAccessMask
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// oldLayout
			vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,	// newLayout
			queueFamilyIndex,								// srcQueueFamilyIndex
			queueFamilyIndex,								// dstQueueFamilyIndex
			**colorImage,									// image
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,											// baseMipLevel
				1u,											// mipLevels
				0u,											// baseArraySlice
				1u,											// subresourceRange
			}
		};
		vk.cmdPipelineBarrier(*cmdBuffer,
							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,	// srcStageMask
							  vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,				// dstStageMask
							  (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &endImgBarrier);
	}

	endCommandBuffer(vk, *cmdBuffer);

	// Execute Draw (queueSubmit waits on the fence with an effectively infinite timeout).
	// NOTE(review): resetting a freshly created, unsignaled fence is redundant but harmless.
	{
		const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
		VK_CHECK(vk.resetFences(device, 1, &fence.get()));
		VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Log inputs
	ctx.getTestContext().getLog()
		<< tcu::TestLog::Message << "Input values: \n"
		<< "1: " << m_testInput << "\n"
		<< tcu::TestLog::EndMessage;

	// Validate buffer
	if (m_validator.validateBuffer(ctx, **testBuffer))
		return tcu::TestStatus::pass("Everything went OK");
	else
		return tcu::TestStatus::fail("Something went really wrong");
}
582
// Compute path: dispatches the generated compute shader against the protected
// storage buffers (4 invocations for atomics, 1 otherwise), then validates the
// result buffer (binding 0).
template<typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeComputeTest(void)
{
	ProtectedContext&				ctx					(m_protectedContext);
	const vk::DeviceInterface&		vk					= ctx.getDeviceInterface();
	const vk::VkDevice				device				= ctx.getDevice();
	const vk::VkQueue				queue				= ctx.getQueue();
	const deUint32					queueFamilyIndex	= ctx.getQueueFamilyIndex();

	// Host-visible, unprotected buffer carrying the test input: bound as the
	// uniform (binding 1) and used as transfer source for read/atomic tests.
	const deUint32						testUniformSize	= sizeof(m_testInput);
	de::UniquePtr<vk::BufferWithMemory>	testUniform		(makeBuffer(ctx,
															PROTECTION_DISABLED,
															queueFamilyIndex,
															testUniformSize,
															vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
															vk::MemoryRequirement::HostVisible));

	// Set the test input uniform data
	{
		deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
		vk::flushAlloc(vk, device, testUniform->getAllocation());
	}

	// With NO_PROTECTED_ACCESS pipelines the SSBOs must not require protected
	// memory; otherwise they must be protected.
	const vk::MemoryRequirement* memoryRequirement = &vk::MemoryRequirement::Any;
	if (m_protectionMode == PROTECTION_ENABLED) {
		memoryRequirement = &vk::MemoryRequirement::Protected;
	}

	// Result SSBO (binding 0) and source SSBO (binding 2).
	const deUint32						testBufferSize		= sizeof(ValidationDataStorage<T>);
	de::MovePtr<vk::BufferWithMemory>	testBuffer			(makeBuffer(ctx,
															m_protectionMode,
															queueFamilyIndex,
															testBufferSize,
															vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
															*memoryRequirement));
	de::MovePtr<vk::BufferWithMemory>	testBufferSource	(makeBuffer(ctx,
															m_protectionMode,
															queueFamilyIndex,
															testBufferSize,
															vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
															*memoryRequirement));

	vk::Unique<vk::VkShaderModule>	testShader	(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

	// Create descriptors: binding 0 = result SSBO, 1 = input UBO, 2 = source SSBO
	// (must match the layout declared in the shader templates).
	vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder()
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device));
	vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
	vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

	// Update descriptor set information
	{
		vk::VkDescriptorBufferInfo descTestBuffer		= makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
		vk::VkDescriptorBufferInfo descTestUniform		= makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
		vk::VkDescriptorBufferInfo descTestBufferSource	= makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

		vk::DescriptorSetUpdateBuilder()
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
			.update(vk, device);
	}

	// Build and execute test
	{
		const vk::Unique<vk::VkFence>		fence			(vk::createFence(vk, device));
		vk::Unique<vk::VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));
		vk::Unique<vk::VkPipeline>			SSBOPipeline	(makeComputePipeline(vk, device, *pipelineLayout, m_pipelineFlags, *testShader, (vk::VkPipelineShaderStageCreateFlags)0u));
		vk::Unique<vk::VkCommandPool>		cmdPool			(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
		vk::Unique<vk::VkCommandBuffer>		cmdBuffer		(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// Atomic tests run one invocation per element of the 4-element result array.
		deUint32							dispatchCount	= (m_testType == SSBO_ATOMIC) ? 4u : 1u;

		beginCommandBuffer(vk, *cmdBuffer);

		// Read/atomic variants need the input uploaded into a device-side SSBO
		// first: reads consume the source buffer (binding 2), atomics operate
		// in place on the result buffer (binding 0).
		if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
		{
			vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
			addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, false);
		}

		vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *SSBOPipeline);
		vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);

		vk.cmdDispatch(*cmdBuffer, dispatchCount, 1u, 1u);

		endCommandBuffer(vk, *cmdBuffer);
		// queueSubmit waits on the fence with an effectively infinite timeout.
		VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Log inputs
	ctx.getTestContext().getLog()
		<< tcu::TestLog::Message << "Input values: \n"
		<< "1: " << m_testInput << "\n"
		<< tcu::TestLog::EndMessage;

	// Validate buffer
	if (m_validator.validateBuffer(ctx, **testBuffer))
		return tcu::TestStatus::pass("Everything went OK");
	else
		return tcu::TestStatus::fail("Something went really wrong");
}
693
694 template<typename T>
iterate(void)695 tcu::TestStatus StorageBufferTestInstance<T>::iterate(void)
696 {
697 switch (m_shaderType)
698 {
699 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest();
700 case glu::SHADERTYPE_COMPUTE: return executeComputeTest();
701 default:
702 DE_FATAL("Incorrect shader type"); return tcu::TestStatus::fail("");
703 }
704 }
705
createSpecifiedStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,SSBOTestType testType,const glu::ShaderType shaderType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)706 tcu::TestCaseGroup* createSpecifiedStorageBufferTests (tcu::TestContext& testCtx,
707 const std::string groupName,
708 SSBOTestType testType,
709 const glu::ShaderType shaderType,
710 const ValidationDataStorage<tcu::UVec4> testData[],
711 size_t testCount,
712 bool pipelineProtectedAccess,
713 vk::VkPipelineCreateFlags pipelineFlags)
714 {
715 const std::string testTypeStr = getSSBOTypeString(testType);
716 const std::string description = "Storage buffer " + testTypeStr + " tests";
717 de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, groupName.c_str(), description.c_str()));
718
719 for (size_t ndx = 0; ndx < testCount; ++ndx)
720 {
721 const std::string name = testTypeStr + "_" + de::toString(ndx + 1);
722 testGroup->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, testType, shaderType, name.c_str(), testData[ndx].values, testData[ndx], vk::VK_FORMAT_R32G32B32A32_UINT, pipelineProtectedAccess, pipelineFlags));
723 }
724
725 return testGroup.release();
726 }
727
createRandomizedBufferTests(tcu::TestContext & testCtx,SSBOTestType testType,const glu::ShaderType shaderType,size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)728 tcu::TestCaseGroup* createRandomizedBufferTests (tcu::TestContext& testCtx, SSBOTestType testType, const glu::ShaderType shaderType, size_t testCount, bool pipelineProtectedAccess, vk::VkPipelineCreateFlags pipelineFlags)
729 {
730 de::Random rnd (testCtx.getCommandLine().getBaseSeed());
731 std::vector<ValidationDataStorage<tcu::UVec4> > testData;
732 testData.resize(testCount);
733
734 for (size_t ndx = 0; ndx < testCount; ++ndx)
735 for (deUint32 compIdx = 0; compIdx < 4; ++compIdx)
736 testData[ndx].values[compIdx] = rnd.getUint32();
737
738 return createSpecifiedStorageBufferTests(testCtx, "random", testType, shaderType, testData.data(), testData.size(), pipelineProtectedAccess, pipelineFlags);
739 }
740
// Variants of the VK_EXT_pipeline_protected_access usage: "default" runs
// without the extension, "protected_access" enables it (not built for
// Vulkan SC, where the extension does not exist).
struct
{
	bool		pipelineProtectedAccess;
	const char*	name;
} protectedAccess[] =
{
	{ false, "default"},
#ifndef CTS_USES_VULKANSC
	{ true, "protected_access"},
#endif
};
// Pipeline create-flag variants; the non-zero flags require the
// VK_EXT_pipeline_protected_access extension and are skipped for the
// "default" protectedAccess variant (see createRWStorageBufferTests).
struct
{
	vk::VkPipelineCreateFlags	pipelineFlags;
	const char*					name;
} flags[] =
{
	{ (vk::VkPipelineCreateFlagBits)0u, "none"},
#ifndef CTS_USES_VULKANSC
	{ vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
	{ vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, "no_protected_access"},
#endif
};
765
createRWStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,const std::string groupDescription,SSBOTestType testType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount)766 tcu::TestCaseGroup* createRWStorageBufferTests (tcu::TestContext& testCtx,
767 const std::string groupName,
768 const std::string groupDescription,
769 SSBOTestType testType,
770 const ValidationDataStorage<tcu::UVec4> testData[],
771 size_t testCount)
772 {
773 de::MovePtr<tcu::TestCaseGroup> ssboRWTestGroup (new tcu::TestCaseGroup(testCtx, groupName.c_str(), groupDescription.c_str()));
774
775 glu::ShaderType shaderTypes[] = {
776 glu::SHADERTYPE_FRAGMENT,
777 glu::SHADERTYPE_COMPUTE
778 };
779
780 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
781 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name, ""));
782 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
783 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name, ""));
784 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u) continue;
785
786 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
787 {
788 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
789 const std::string shaderName = glu::getShaderTypeName(shaderType);
790 const std::string shaderGroupDesc = "Storage buffer tests for shader type: " + shaderName;
791 de::MovePtr<tcu::TestCaseGroup> testShaderGroup(new tcu::TestCaseGroup(testCtx, shaderName.c_str(), shaderGroupDesc.c_str()));
792
793 testShaderGroup->addChild(createSpecifiedStorageBufferTests(testCtx, "static", testType, shaderType, testData, testCount, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
794 testShaderGroup->addChild(createRandomizedBufferTests(testCtx, testType, shaderType, RANDOM_TEST_COUNT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
795 flagsGroup->addChild(testShaderGroup.release());
796 }
797 protectedAccessGroup->addChild(flagsGroup.release());
798 }
799 ssboRWTestGroup->addChild(protectedAccessGroup.release());
800 }
801
802 return ssboRWTestGroup.release();
803 }
804
calculateAtomicOpData(SSBOAtomicType type,const tcu::UVec4 & inputValue,const deUint32 atomicArg,std::string & atomicCall,tcu::UVec4 & refValue,const deUint32 swapNdx=0)805 void calculateAtomicOpData (SSBOAtomicType type,
806 const tcu::UVec4& inputValue,
807 const deUint32 atomicArg,
808 std::string& atomicCall,
809 tcu::UVec4& refValue,
810 const deUint32 swapNdx = 0)
811 {
812 switch (type)
813 {
814 case ATOMIC_ADD:
815 {
816 refValue = inputValue + tcu::UVec4(atomicArg);
817 atomicCall = "atomicAdd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
818 break;
819 }
820 case ATOMIC_MIN:
821 {
822 refValue = tcu::UVec4(std::min(inputValue.x(), atomicArg), std::min(inputValue.y(), atomicArg), std::min(inputValue.z(), atomicArg), std::min(inputValue.w(), atomicArg));
823 atomicCall = "atomicMin(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
824 break;
825 }
826 case ATOMIC_MAX:
827 {
828 refValue = tcu::UVec4(std::max(inputValue.x(), atomicArg), std::max(inputValue.y(), atomicArg), std::max(inputValue.z(), atomicArg), std::max(inputValue.w(), atomicArg));
829 atomicCall = "atomicMax(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
830 break;
831 }
832 case ATOMIC_AND:
833 {
834 refValue = tcu::UVec4(inputValue.x() & atomicArg, inputValue.y() & atomicArg, inputValue.z() & atomicArg, inputValue.w() & atomicArg);
835 atomicCall = "atomicAnd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
836 break;
837 }
838 case ATOMIC_OR:
839 {
840 refValue = tcu::UVec4(inputValue.x() | atomicArg, inputValue.y() | atomicArg, inputValue.z() | atomicArg, inputValue.w() | atomicArg);
841 atomicCall = "atomicOr(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
842 break;
843 }
844 case ATOMIC_XOR:
845 {
846 refValue = tcu::UVec4(inputValue.x() ^ atomicArg, inputValue.y() ^ atomicArg, inputValue.z() ^ atomicArg, inputValue.w() ^ atomicArg);
847 atomicCall = "atomicXor(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
848 break;
849 }
850 case ATOMIC_EXCHANGE:
851 {
852 refValue = tcu::UVec4(atomicArg);
853 atomicCall = "atomicExchange(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
854 break;
855 }
856 case ATOMIC_COMPSWAP:
857 {
858 int selectedNdx = swapNdx % 4;
859 deUint32 selectedChange = inputValue[selectedNdx];
860
861 refValue = inputValue;
862 refValue[selectedNdx] = atomicArg;
863 atomicCall = "atomicCompSwap(protectedTestResultBuffer[i], " + de::toString(selectedChange) + "u, " + de::toString(atomicArg) + "u);";
864 break;
865 }
866 default: DE_FATAL("Incorrect atomic function type"); break;
867 }
868
869 }
870
871 } // anonymous
872
createReadStorageBufferTests(tcu::TestContext & testCtx)873 tcu::TestCaseGroup* createReadStorageBufferTests (tcu::TestContext& testCtx)
874 {
875 const ValidationDataStorage<tcu::UVec4> testData[] = {
876 { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
877 { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
878 { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
879 };
880
881 return createRWStorageBufferTests(testCtx, "ssbo_read", "Storage Buffer Read Tests", SSBO_READ, testData, DE_LENGTH_OF_ARRAY(testData));
882 }
883
createWriteStorageBufferTests(tcu::TestContext & testCtx)884 tcu::TestCaseGroup* createWriteStorageBufferTests (tcu::TestContext& testCtx)
885 {
886 const ValidationDataStorage<tcu::UVec4> testData[] = {
887 { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
888 { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
889 { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
890 };
891
892 return createRWStorageBufferTests(testCtx, "ssbo_write", "Storage Buffer Write Tests", SSBO_WRITE, testData, DE_LENGTH_OF_ARRAY(testData));
893 }
894
createAtomicStorageBufferTests(tcu::TestContext & testCtx)895 tcu::TestCaseGroup* createAtomicStorageBufferTests (tcu::TestContext& testCtx)
896 {
897 struct {
898 const tcu::UVec4 input;
899 const deUint32 atomicArg;
900 const deUint32 swapNdx;
901 } testData[] = {
902 { tcu::UVec4(0u, 1u, 2u, 3u), 10u, 0u },
903 { tcu::UVec4(10u, 20u, 30u, 40u), 3u, 2u },
904 { tcu::UVec4(800u, 400u, 230u, 999u), 50u, 3u },
905 { tcu::UVec4(100800u, 233400u, 22230u, 77999u), 800u, 1u },
906 };
907
908 SSBOAtomicType testTypes[] = {
909 ATOMIC_ADD,
910 ATOMIC_MIN,
911 ATOMIC_MAX,
912 ATOMIC_AND,
913 ATOMIC_OR,
914 ATOMIC_XOR,
915 ATOMIC_EXCHANGE,
916 ATOMIC_COMPSWAP
917 };
918
919 glu::ShaderType shaderTypes[] = {
920 glu::SHADERTYPE_FRAGMENT,
921 glu::SHADERTYPE_COMPUTE
922 };
923
924 de::Random rnd (testCtx.getCommandLine().getBaseSeed());
925 de::MovePtr<tcu::TestCaseGroup> ssboAtomicTests (new tcu::TestCaseGroup(testCtx, "ssbo_atomic", "Storage Buffer Atomic Tests"));
926
927 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
928 {
929 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
930 const std::string shaderName = glu::getShaderTypeName(shaderType);
931 const std::string shaderDesc = "Storage Buffer Atomic Tests for shader type: " + shaderName;
932 de::MovePtr<tcu::TestCaseGroup> atomicShaderGroup (new tcu::TestCaseGroup(testCtx, shaderName.c_str(), shaderDesc.c_str()));
933
934 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
935 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name, ""));
936 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
937 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name, ""));
938 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u) continue;
939
940 for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(testTypes); ++typeNdx)
941 {
942 SSBOAtomicType atomicType = testTypes[typeNdx];
943 const std::string atomicTypeStr = getSSBOAtomicTypeString(atomicType);
944 const std::string atomicDesc = "Storage Buffer Atomic Tests: " + atomicTypeStr;
945
946 de::MovePtr<tcu::TestCaseGroup> staticTests(new tcu::TestCaseGroup(testCtx, "static", (atomicDesc + " with static input").c_str()));
947 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(testData); ++ndx)
948 {
949 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
950 const tcu::UVec4& inputValue = testData[ndx].input;
951 const deUint32& atomicArg = testData[ndx].atomicArg;
952 std::string atomicCall;
953 tcu::UVec4 refValue;
954
955 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, testData[ndx].swapNdx);
956
957 ValidationDataStorage<tcu::UVec4> validationData = { refValue };
958 staticTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, vk::VK_FORMAT_R32G32B32A32_UINT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags, atomicCall));
959 }
960
961 de::MovePtr<tcu::TestCaseGroup> randomTests(new tcu::TestCaseGroup(testCtx, "random", (atomicDesc + " with random input").c_str()));
962 for (int ndx = 0; ndx < RANDOM_TEST_COUNT; ndx++)
963 {
964 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
965 deUint32 atomicArg = rnd.getUint16();
966 tcu::UVec4 inputValue;
967 tcu::UVec4 refValue;
968 std::string atomicCall;
969
970 for (int i = 0; i < 4; i++)
971 inputValue[i] = rnd.getUint16();
972
973 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, ndx);
974
975 ValidationDataStorage<tcu::UVec4> validationData = { refValue };
976 randomTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, vk::VK_FORMAT_R32G32B32A32_UINT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags, atomicCall));
977
978 }
979
980 de::MovePtr<tcu::TestCaseGroup> atomicTests(new tcu::TestCaseGroup(testCtx, atomicTypeStr.c_str(), atomicDesc.c_str()));
981 atomicTests->addChild(staticTests.release());
982 atomicTests->addChild(randomTests.release());
983 flagsGroup->addChild(atomicTests.release());
984 }
985 protectedAccessGroup->addChild(flagsGroup.release());
986 }
987 atomicShaderGroup->addChild(protectedAccessGroup.release());
988 }
989 ssboAtomicTests->addChild(atomicShaderGroup.release());
990 }
991
992 return ssboAtomicTests.release();
993 }
994
995 } // ProtectedMem
996 } // vkt
997