1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory storage buffer tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemStorageBufferTests.hpp"
26
27 #include "deRandom.hpp"
28 #include "deStringUtil.hpp"
29 #include "tcuTestLog.hpp"
30 #include "tcuVector.hpp"
31 #include "tcuStringTemplate.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vktTestCase.hpp"
35 #include "vktTestGroupUtil.hpp"
36 #include "vkTypeUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkCmdUtil.hpp"
39
40 #include "vktProtectedMemBufferValidator.hpp"
41 #include "vktProtectedMemUtils.hpp"
42 #include "vktProtectedMemContext.hpp"
43
44 namespace vkt
45 {
46 namespace ProtectedMem
47 {
48
49 namespace
50 {
51
// Dimensions of the color attachment used by the fragment-shader test path.
enum {
	RENDER_HEIGHT = 128,
	RENDER_WIDTH = 128,
};

// Number of randomized test cases added per "random" test group.
enum {
	RANDOM_TEST_COUNT = 10,
};

// Kind of storage-buffer access a test case exercises.
enum SSBOTestType {
	SSBO_READ,		// Shader copies a source SSBO into the result SSBO
	SSBO_WRITE,		// Shader writes a uniform input value into the result SSBO
	SSBO_ATOMIC		// Shader performs an atomic operation on the result SSBO
};

// GLSL atomic operation used by SSBO_ATOMIC test cases.
enum SSBOAtomicType {
	ATOMIC_ADD,
	ATOMIC_MIN,
	ATOMIC_MAX,
	ATOMIC_AND,
	ATOMIC_OR,
	ATOMIC_XOR,
	ATOMIC_EXCHANGE,
	ATOMIC_COMPSWAP
};
77
getSSBOTypeString(SSBOTestType type)78 const char* getSSBOTypeString (SSBOTestType type)
79 {
80 switch (type) {
81 case SSBO_READ: return "read";
82 case SSBO_WRITE: return "write";
83 case SSBO_ATOMIC: return "atomic";
84 default: DE_FATAL("Invalid SSBO test type"); return "";
85 }
86 }
87
getSSBOAtomicTypeString(SSBOAtomicType type)88 const char* getSSBOAtomicTypeString (SSBOAtomicType type)
89 {
90 switch (type)
91 {
92 case ATOMIC_ADD: return "add";
93 case ATOMIC_MIN: return "min";
94 case ATOMIC_MAX: return "max";
95 case ATOMIC_AND: return "and";
96 case ATOMIC_OR: return "or";
97 case ATOMIC_XOR: return "xor";
98 case ATOMIC_EXCHANGE: return "exchange";
99 case ATOMIC_COMPSWAP: return "compswap";
100 default: DE_FATAL("Invalid SSBO atomic operation type"); return "";
101 }
102 }
103
addBufferCopyCmd(const vk::DeviceInterface & vk,vk::VkCommandBuffer cmdBuffer,deUint32 queueFamilyIndex,vk::VkBuffer srcBuffer,vk::VkBuffer dstBuffer,deUint32 copySize,bool dstFragment)104 void static addBufferCopyCmd (const vk::DeviceInterface& vk,
105 vk::VkCommandBuffer cmdBuffer,
106 deUint32 queueFamilyIndex,
107 vk::VkBuffer srcBuffer,
108 vk::VkBuffer dstBuffer,
109 deUint32 copySize,
110 bool dstFragment)
111 {
112 const vk::VkBufferMemoryBarrier dstWriteStartBarrier =
113 {
114 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
115 DE_NULL, // const void* pNext
116 vk::VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask
117 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags dstAccessMask
118 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
119 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
120 srcBuffer, // VkBuffer buffer
121 0u, // VkDeviceSize offset
122 VK_WHOLE_SIZE, // VkDeviceSize size
123 };
124
125 vk.cmdPipelineBarrier(cmdBuffer,
126 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
127 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // dstStageMask
128 (vk::VkDependencyFlags)0,
129 0, (const vk::VkMemoryBarrier*)DE_NULL,
130 1, &dstWriteStartBarrier,
131 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
132
133 const vk::VkBufferCopy copyRegion =
134 {
135 0, // VkDeviceSize srcOffset
136 0, // VkDeviceSize dstOffset
137 copySize // VkDeviceSize size
138 };
139 vk.cmdCopyBuffer(cmdBuffer, srcBuffer, dstBuffer, 1, ©Region);
140
141 const vk::VkBufferMemoryBarrier dstWriteEndBarrier =
142 {
143 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
144 DE_NULL, // const void* pNext
145 vk::VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags srcAccessMask
146 vk::VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask
147 queueFamilyIndex, // uint32_t srcQueueFamilyIndex
148 queueFamilyIndex, // uint32_t dstQueueFamilyIndex
149 dstBuffer, // VkBuffer buffer
150 0u, // VkDeviceSize offset
151 VK_WHOLE_SIZE, // VkDeviceSize size
152 };
153
154 vk.cmdPipelineBarrier(cmdBuffer,
155 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, // srcStageMask
156 dstFragment ? vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT :
157 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, // dstStageMask
158 (vk::VkDependencyFlags)0,
159 0, (const vk::VkMemoryBarrier*)DE_NULL,
160 1, &dstWriteEndBarrier,
161 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
162
163 }
164
getProtectionMode(const vk::VkPipelineCreateFlags flags)165 ProtectionMode getProtectionMode(const vk::VkPipelineCreateFlags flags) {
166 #ifndef CTS_USES_VULKANSC
167 if ((flags & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0) {
168 return ProtectionMode::PROTECTION_DISABLED;
169 }
170 #endif
171 DE_UNREF(flags);
172 return ProtectionMode::PROTECTION_ENABLED;
173 }
174
// Test instance that executes one protected-memory SSBO test: it creates the
// (optionally protected) buffers, runs the test shader through either a draw
// or a dispatch, and validates the protected result buffer.
template<typename T>
class StorageBufferTestInstance : public ProtectedTestInstance
{
public:
	StorageBufferTestInstance (Context& ctx,
							   const SSBOTestType testType,
							   const glu::ShaderType shaderType,
							   const tcu::UVec4 testInput,
							   const BufferValidator<T>& validator,
							   const bool pipelineProtectedAccess,
							   const vk::VkPipelineCreateFlags pipelineFlags);
	virtual tcu::TestStatus iterate (void);

private:
	// Graphics path: renders points whose fragment shader performs the SSBO access.
	tcu::TestStatus executeFragmentTest (void);
	// Compute path: a compute shader performs the SSBO access.
	tcu::TestStatus executeComputeTest (void);

	const SSBOTestType				m_testType;			// Read / write / atomic
	const glu::ShaderType			m_shaderType;		// Fragment or compute
	const tcu::UVec4				m_testInput;		// Input value uploaded via the uniform buffer
	const BufferValidator<T>&		m_validator;		// Validates the protected result buffer
	const vk::VkFormat				m_imageFormat;		// Color attachment format for the fragment path
	const vk::VkPipelineCreateFlags	m_pipelineFlags;	// Flags forwarded to pipeline creation
	const ProtectionMode			m_protectionMode;	// Derived from m_pipelineFlags
};
200
// Test case for protected-memory SSBO tests: builds the shader programs
// (initPrograms) and creates a StorageBufferTestInstance to run them.
template<typename T>
class StorageBufferTestCase : public TestCase
{
public:
	StorageBufferTestCase (tcu::TestContext& testctx,
						   const SSBOTestType testType,
						   const glu::ShaderType shaderType,
						   const char* name,
						   const tcu::UVec4 testInput,
						   ValidationDataStorage<T> validationData,
						   vk::VkFormat format,
						   bool pipelineProtectedAccess,
						   vk::VkPipelineCreateFlags pipelineFlags,
						   const std::string& extraShader = "")
		: TestCase					(testctx, name)
		, m_testType				(testType)
		, m_shaderType				(shaderType)
		, m_testInput				(testInput)
		, m_validator				(validationData, format)
		, m_pipelineProtectedAccess	(pipelineProtectedAccess)
		, m_pipelineFlags			(pipelineFlags)
		, m_extraShader				(extraShader)
		, m_protectionMode			(getProtectionMode(m_pipelineFlags))
	{
	}
	virtual TestInstance* createInstance (Context& ctx) const
	{
		return new StorageBufferTestInstance<T>(ctx, m_testType, m_shaderType, m_testInput, m_validator, m_pipelineProtectedAccess, m_pipelineFlags);
	}
	virtual void initPrograms (vk::SourceCollections& programCollection) const;
	virtual void checkSupport (Context& context) const
	{
		// Every variant needs a protected-capable queue.
		checkProtectedQueueSupport(context);
	}

	virtual ~StorageBufferTestCase (void) {}

private:
	const SSBOTestType				m_testType;					// Read / write / atomic
	const glu::ShaderType			m_shaderType;				// Fragment or compute
	const tcu::UVec4				m_testInput;				// Input value for the shader
	const BufferValidator<T>		m_validator;				// Holds expected results and reference format
	const bool						m_pipelineProtectedAccess;	// Enable VK_EXT_pipeline_protected_access
	const vk::VkPipelineCreateFlags	m_pipelineFlags;			// Flags forwarded to pipeline creation
	const std::string				m_extraShader;				// Atomic-call GLSL snippet for SSBO_ATOMIC tests
	const ProtectionMode			m_protectionMode;			// Derived from m_pipelineFlags
};
248
template<typename T>
StorageBufferTestInstance<T>::StorageBufferTestInstance (Context& ctx,
														 const SSBOTestType testType,
														 const glu::ShaderType shaderType,
														 const tcu::UVec4 testInput,
														 const BufferValidator<T>& validator,
														 const bool pipelineProtectedAccess,
														 const vk::VkPipelineCreateFlags pipelineFlags)
	// VK_EXT_pipeline_protected_access is only requested for variants that use it.
	: ProtectedTestInstance	(ctx, pipelineProtectedAccess ? std::vector<std::string>({ "VK_EXT_pipeline_protected_access" }) : std::vector<std::string>())
	, m_testType			(testType)
	, m_shaderType			(shaderType)
	, m_testInput			(testInput)
	, m_validator			(validator)
	, m_imageFormat			(vk::VK_FORMAT_R8G8B8A8_UNORM)
	, m_pipelineFlags		(pipelineFlags)
	, m_protectionMode		(getProtectionMode(m_pipelineFlags))
{
}
267
// Builds the GLSL sources for the selected test type: specializes the matching
// shader template for fragment or compute execution, then adds the validator's
// own programs.
template<typename T>
void StorageBufferTestCase<T>::initPrograms (vk::SourceCollections& programCollection) const
{
	// Pass-through vertex shader drawing four points; gl_VertexIndex is
	// forwarded in vIndex so the fragment shader can derive an invocation index.
	const char* vertexShader =
		"#version 450\n"
		"layout(location=0) out vec4 vIndex;\n"
		"void main() {\n"
		"    vec2 pos[4] = vec2[4]( vec2(-0.7, 0.7), vec2(0.7, 0.7), vec2(0.0, -0.7), vec2(-0.7, -0.7) );\n"
		"    vIndex = vec4(gl_VertexIndex);\n"
		"    gl_PointSize = 1.0;\n"
		"    gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n"
		"}";

	// set = 0, binding = 0 -> buffer ProtectedTestBuffer (uvec4)
	// set = 0, binding = 2 -> buffer ProtectedTestBufferSource (uvec4)
	const char* readShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uvec4 protectedTestResultBuffer;\n"
		"};\n"
		"\n"
		"layout(set=0, binding=2, std140) buffer ProtectedTestBufferSource\n"
		"{\n"
		"    highp uvec4 protectedTestBufferSource;\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    protectedTestResultBuffer = protectedTestBufferSource;\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// set = 0, binding = 0 -> buffer ProtectedTestBuffer (uvec4)
	// set = 0, binding = 1 -> uniform Data (uvec4)
	const char* writeShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std140) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uvec4 protectedTestResultBuffer;\n"
		"};\n"
		"\n"
		"layout(set=0, binding=1, std140) uniform Data\n"
		"{\n"
		"    highp uvec4 testInput;\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    protectedTestResultBuffer = testInput;\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// set = 0, binding = 0 -> buffer ProtectedTestBuffer (uint [4])
	const char* atomicTestShaderTemplateStr =
		"#version 450\n"
		"${INPUT_DECLARATION}\n"
		"\n"
		"layout(set=0, binding=0, std430) buffer ProtectedTestBuffer\n"
		"{\n"
		"    highp uint protectedTestResultBuffer[4];\n"
		"};\n"
		"\n"
		"void main (void)\n"
		"{\n"
		"    uint i = uint(${INVOCATION_ID});\n"
		"    ${ATOMIC_FUNCTION_CALL}\n"
		"    ${FRAGMENT_OUTPUT}\n"
		"}\n";

	// Select the template matching the test type; the atomic variant splices in
	// the operation-specific call supplied via m_extraShader.
	const char* shaderTemplateStr;
	std::map<std::string, std::string> shaderParam;
	switch (m_testType) {
		case SSBO_READ:		shaderTemplateStr = readShaderTemplateStr; break;
		case SSBO_WRITE:	shaderTemplateStr = writeShaderTemplateStr; break;
		case SSBO_ATOMIC:	{
			shaderTemplateStr = atomicTestShaderTemplateStr;
			shaderParam["ATOMIC_FUNCTION_CALL"] = m_extraShader;
			break;
		}
		default: DE_FATAL("Incorrect SSBO test type"); return;
	}

	if (m_shaderType == glu::SHADERTYPE_FRAGMENT)
	{
		// Fragment path: index comes from the interpolated vertex index, and a
		// fixed color is written so the draw has an observable output.
		shaderParam["INPUT_DECLARATION"]	= "layout(location=0) out mediump vec4 o_color;\n"
											  "layout(location=0) in vec4 vIndex;\n";
		shaderParam["FRAGMENT_OUTPUT"]		= "o_color = vec4( 0.0, 0.4, 1.0, 1.0 );\n";
		shaderParam["INVOCATION_ID"]		= "vIndex.x";

		programCollection.glslSources.add("vert") << glu::VertexSource(vertexShader);
		programCollection.glslSources.add("TestShader") << glu::FragmentSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
	}
	else if (m_shaderType == glu::SHADERTYPE_COMPUTE)
	{
		// Compute path: one invocation per workgroup; index is the global invocation id.
		shaderParam["INPUT_DECLARATION"]	= "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
		shaderParam["FRAGMENT_OUTPUT"]		= "";
		shaderParam["INVOCATION_ID"]		= "gl_GlobalInvocationID.x";
		programCollection.glslSources.add("TestShader") << glu::ComputeSource(tcu::StringTemplate(shaderTemplateStr).specialize(shaderParam));
	}
	else
		DE_FATAL("Incorrect shader type");

	// The validator contributes its own verification programs.
	m_validator.initPrograms(programCollection);
}
377
// Graphics path: seeds the (optionally protected) SSBOs from a host-visible
// staging buffer, renders four points whose fragment shader performs the
// tested SSBO access, then validates the protected result buffer.
template<typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeFragmentTest(void)
{
	ProtectedContext&			ctx					(m_protectedContext);
	const vk::DeviceInterface&	vk					= ctx.getDeviceInterface();
	const vk::VkDevice			device				= ctx.getDevice();
	const vk::VkQueue			queue				= ctx.getQueue();
	const deUint32				queueFamilyIndex	= ctx.getQueueFamilyIndex();

	// Unprotected, host-visible buffer holding the input; used as a uniform
	// (binding 1) and as the transfer source that seeds the protected buffers.
	const deUint32 testUniformSize = sizeof(m_testInput);
	de::UniquePtr<vk::BufferWithMemory>	testUniform		(makeBuffer(ctx,
																	PROTECTION_DISABLED,
																	queueFamilyIndex,
																	testUniformSize,
																	vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
																		| vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
																	vk::MemoryRequirement::HostVisible));

	// Set the test input uniform data
	{
		deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
		vk::flushAlloc(vk, device, testUniform->getAllocation());
	}

	// Protected memory is only required when the pipeline runs with protection enabled.
	const vk::MemoryRequirement* memoryRequirement = &vk::MemoryRequirement::Any;
	if (m_protectionMode == PROTECTION_ENABLED) {
		memoryRequirement = &vk::MemoryRequirement::Protected;
	}

	// Result buffer (shader binding 0) and source buffer (shader binding 2).
	const deUint32 testBufferSize = sizeof(ValidationDataStorage<T>);
	de::MovePtr<vk::BufferWithMemory>	testBuffer			(makeBuffer(ctx,
																		m_protectionMode,
																		queueFamilyIndex,
																		testBufferSize,
																		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																			| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
																		*memoryRequirement));
	de::MovePtr<vk::BufferWithMemory>	testBufferSource	(makeBuffer(ctx,
																		m_protectionMode,
																		queueFamilyIndex,
																		testBufferSize,
																		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																			| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
																		*memoryRequirement));

	vk::Move<vk::VkShaderModule>	vertexShader	(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0));
	vk::Unique<vk::VkShaderModule>	testShader		(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

	// Create descriptors: binding 0 = result SSBO, binding 1 = input UBO, binding 2 = source SSBO.
	vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder()
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_ALL)
		.build(vk, device));
	vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
	vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

	// Update descriptor set information
	{
		vk::VkDescriptorBufferInfo descTestBuffer		= makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
		vk::VkDescriptorBufferInfo descTestUniform		= makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
		vk::VkDescriptorBufferInfo descTestBufferSource	= makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

		vk::DescriptorSetUpdateBuilder()
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
			.update(vk, device);
	}

	// Create output image
	de::MovePtr<vk::ImageWithMemory>	colorImage		(createImage2D(ctx, m_protectionMode, queueFamilyIndex,
																	   RENDER_WIDTH, RENDER_HEIGHT,
																	   m_imageFormat,
																	   vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
	vk::Unique<vk::VkImageView>			colorImageView	(createImageView(ctx, **colorImage, m_imageFormat));
	vk::Unique<vk::VkRenderPass>		renderPass		(createRenderPass(ctx, m_imageFormat));
	vk::Unique<vk::VkFramebuffer>		framebuffer		(createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));

	// Build pipeline
	vk::Unique<vk::VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));
	vk::Unique<vk::VkCommandPool>		cmdPool			(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
	vk::Unique<vk::VkCommandBuffer>		cmdBuffer		(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Create pipeline (no vertex input: positions come from a shader-side array).
	vk::Unique<vk::VkPipeline> graphicsPipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass,
																	  *vertexShader, *testShader,
																	  std::vector<vk::VkVertexInputBindingDescription>(),
																	  std::vector<vk::VkVertexInputAttributeDescription>(),
																	  tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
																	  vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
																	  m_pipelineFlags));

	beginCommandBuffer(vk, *cmdBuffer);

	// Read tests seed the source buffer; atomic tests seed the result buffer itself.
	if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
	{
		vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
		addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, true);
	}

	// Start image barrier: transition the color image for attachment writes.
	{
		const vk::VkImageMemoryBarrier startImgBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,										// pNext
			0,												// srcAccessMask
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// dstAccessMask
			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
			queueFamilyIndex,								// srcQueueFamilyIndex
			queueFamilyIndex,								// dstQueueFamilyIndex
			**colorImage,									// image
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,											// baseMipLevel
				1u,											// mipLevels
				0u,											// baseArraySlice
				1u,											// subresourceRange
			}
		};

		vk.cmdPipelineBarrier(*cmdBuffer,
							  vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,			// srcStageMask
							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,// dstStageMask
							  (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &startImgBarrier);
	}

	// Draw four points; each fragment invocation executes the SSBO operation.
	beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f));
	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);

	vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
	endRenderPass(vk, *cmdBuffer);

	// End image barrier: make the rendered image readable by later shader stages.
	{
		const vk::VkImageMemoryBarrier endImgBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,										// pNext
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// srcAccessMask
			vk::VK_ACCESS_SHADER_READ_BIT,					// dstAccessMask
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// oldLayout
			vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,	// newLayout
			queueFamilyIndex,								// srcQueueFamilyIndex
			queueFamilyIndex,								// dstQueueFamilyIndex
			**colorImage,									// image
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,											// baseMipLevel
				1u,											// mipLevels
				0u,											// baseArraySlice
				1u,											// subresourceRange
			}
		};
		vk.cmdPipelineBarrier(*cmdBuffer,
							  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,// srcStageMask
							  vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,			// dstStageMask
							  (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &endImgBarrier);
	}

	endCommandBuffer(vk, *cmdBuffer);

	// Execute Draw
	{
		const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
		VK_CHECK(vk.resetFences(device, 1, &fence.get()));
		VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Log inputs
	ctx.getTestContext().getLog()
			<< tcu::TestLog::Message << "Input values: \n"
			<< "1: " << m_testInput << "\n"
			<< tcu::TestLog::EndMessage;

	// Validate buffer
	if (m_validator.validateBuffer(ctx, **testBuffer))
		return tcu::TestStatus::pass("Everything went OK");
	else
		return tcu::TestStatus::fail("Something went really wrong");
}
571
// Compute path: seeds the (optionally protected) SSBOs from a host-visible
// staging buffer, dispatches the compute test shader (4 invocations for the
// atomic tests, 1 otherwise), then validates the protected result buffer.
template<typename T>
tcu::TestStatus StorageBufferTestInstance<T>::executeComputeTest(void)
{
	ProtectedContext&			ctx					(m_protectedContext);
	const vk::DeviceInterface&	vk					= ctx.getDeviceInterface();
	const vk::VkDevice			device				= ctx.getDevice();
	const vk::VkQueue			queue				= ctx.getQueue();
	const deUint32				queueFamilyIndex	= ctx.getQueueFamilyIndex();

	// Unprotected, host-visible buffer holding the input; used as a uniform
	// (binding 1) and as the transfer source that seeds the protected buffers.
	const deUint32 testUniformSize = sizeof(m_testInput);
	de::UniquePtr<vk::BufferWithMemory>	testUniform		(makeBuffer(ctx,
																	PROTECTION_DISABLED,
																	queueFamilyIndex,
																	testUniformSize,
																	vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
																		| vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
																	vk::MemoryRequirement::HostVisible));

	// Set the test input uniform data
	{
		deMemcpy(testUniform->getAllocation().getHostPtr(), &m_testInput, testUniformSize);
		vk::flushAlloc(vk, device, testUniform->getAllocation());
	}

	// Protected memory is only required when the pipeline runs with protection enabled.
	const vk::MemoryRequirement* memoryRequirement = &vk::MemoryRequirement::Any;
	if (m_protectionMode == PROTECTION_ENABLED) {
		memoryRequirement = &vk::MemoryRequirement::Protected;
	}

	// Result buffer (shader binding 0) and source buffer (shader binding 2).
	const deUint32 testBufferSize = sizeof(ValidationDataStorage<T>);
	de::MovePtr<vk::BufferWithMemory>	testBuffer			(makeBuffer(ctx,
																		m_protectionMode,
																		queueFamilyIndex,
																		testBufferSize,
																		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																			| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
																		*memoryRequirement));
	de::MovePtr<vk::BufferWithMemory>	testBufferSource	(makeBuffer(ctx,
																		m_protectionMode,
																		queueFamilyIndex,
																		testBufferSize,
																		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
																			| vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
																		*memoryRequirement));

	vk::Unique<vk::VkShaderModule> testShader (vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("TestShader"), 0));

	// Create descriptors: binding 0 = result SSBO, binding 1 = input UBO, binding 2 = source SSBO.
	vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(vk::DescriptorSetLayoutBuilder()
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device));
	vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u)
		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
		.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
	vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

	// Update descriptor set information
	{
		vk::VkDescriptorBufferInfo descTestBuffer		= makeDescriptorBufferInfo(**testBuffer, 0, testBufferSize);
		vk::VkDescriptorBufferInfo descTestUniform		= makeDescriptorBufferInfo(**testUniform, 0, testUniformSize);
		vk::VkDescriptorBufferInfo descTestBufferSource	= makeDescriptorBufferInfo(**testBufferSource, 0, testBufferSize);

		vk::DescriptorSetUpdateBuilder()
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBuffer)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descTestUniform)
			.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descTestBufferSource)
			.update(vk, device);
	}

	// Build and execute test
	{
		const vk::Unique<vk::VkFence>		fence			(vk::createFence(vk, device));
		vk::Unique<vk::VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));
		vk::Unique<vk::VkPipeline>			SSBOPipeline	(makeComputePipeline(vk, device, *pipelineLayout, m_pipelineFlags, nullptr, *testShader, (vk::VkPipelineShaderStageCreateFlags)0u));
		vk::Unique<vk::VkCommandPool>		cmdPool			(makeCommandPool(vk, device, m_protectionMode, queueFamilyIndex));
		vk::Unique<vk::VkCommandBuffer>		cmdBuffer		(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// Atomic tests dispatch one invocation per element of the uint[4] result buffer.
		deUint32							dispatchCount	= (m_testType == SSBO_ATOMIC) ? 4u : 1u;

		beginCommandBuffer(vk, *cmdBuffer);

		// Read tests seed the source buffer; atomic tests seed the result buffer itself.
		if (m_testType == SSBO_READ || m_testType == SSBO_ATOMIC)
		{
			vk::VkBuffer targetBuffer = (m_testType == SSBO_ATOMIC) ? **testBuffer : **testBufferSource;
			addBufferCopyCmd(vk, *cmdBuffer, queueFamilyIndex, **testUniform, targetBuffer, testUniformSize, false);
		}

		vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *SSBOPipeline);
		vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);

		vk.cmdDispatch(*cmdBuffer, dispatchCount, 1u, 1u);

		endCommandBuffer(vk, *cmdBuffer);
		VK_CHECK(queueSubmit(ctx, m_protectionMode, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Log inputs
	ctx.getTestContext().getLog()
			<< tcu::TestLog::Message << "Input values: \n"
			<< "1: " << m_testInput << "\n"
			<< tcu::TestLog::EndMessage;

	// Validate buffer
	if (m_validator.validateBuffer(ctx, **testBuffer))
		return tcu::TestStatus::pass("Everything went OK");
	else
		return tcu::TestStatus::fail("Something went really wrong");
}
682
683 template<typename T>
iterate(void)684 tcu::TestStatus StorageBufferTestInstance<T>::iterate(void)
685 {
686 switch (m_shaderType)
687 {
688 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest();
689 case glu::SHADERTYPE_COMPUTE: return executeComputeTest();
690 default:
691 DE_FATAL("Incorrect shader type"); return tcu::TestStatus::fail("");
692 }
693 }
694
createSpecifiedStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,SSBOTestType testType,const glu::ShaderType shaderType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)695 tcu::TestCaseGroup* createSpecifiedStorageBufferTests (tcu::TestContext& testCtx,
696 const std::string groupName,
697 SSBOTestType testType,
698 const glu::ShaderType shaderType,
699 const ValidationDataStorage<tcu::UVec4> testData[],
700 size_t testCount,
701 bool pipelineProtectedAccess,
702 vk::VkPipelineCreateFlags pipelineFlags)
703 {
704 const std::string testTypeStr = getSSBOTypeString(testType);
705 const std::string description = "Storage buffer " + testTypeStr + " tests";
706 de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, groupName.c_str()));
707
708 for (size_t ndx = 0; ndx < testCount; ++ndx)
709 {
710 const std::string name = testTypeStr + "_" + de::toString(ndx + 1);
711 testGroup->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, testType, shaderType, name.c_str(), testData[ndx].values, testData[ndx], vk::VK_FORMAT_R32G32B32A32_UINT, pipelineProtectedAccess, pipelineFlags));
712 }
713
714 return testGroup.release();
715 }
716
createRandomizedBufferTests(tcu::TestContext & testCtx,SSBOTestType testType,const glu::ShaderType shaderType,size_t testCount,bool pipelineProtectedAccess,vk::VkPipelineCreateFlags pipelineFlags)717 tcu::TestCaseGroup* createRandomizedBufferTests (tcu::TestContext& testCtx, SSBOTestType testType, const glu::ShaderType shaderType, size_t testCount, bool pipelineProtectedAccess, vk::VkPipelineCreateFlags pipelineFlags)
718 {
719 de::Random rnd (testCtx.getCommandLine().getBaseSeed());
720 std::vector<ValidationDataStorage<tcu::UVec4> > testData;
721 testData.resize(testCount);
722
723 for (size_t ndx = 0; ndx < testCount; ++ndx)
724 for (deUint32 compIdx = 0; compIdx < 4; ++compIdx)
725 testData[ndx].values[compIdx] = rnd.getUint32();
726
727 return createSpecifiedStorageBufferTests(testCtx, "random", testType, shaderType, testData.data(), testData.size(), pipelineProtectedAccess, pipelineFlags);
728 }
729
// Protected-access variants: the default path and, outside Vulkan SC, the
// path that enables VK_EXT_pipeline_protected_access.
struct
{
	bool pipelineProtectedAccess;
	const char* name;
} protectedAccess[] =
{
	{ false, "default"},
#ifndef CTS_USES_VULKANSC
	{ true, "protected_access"},
#endif
};
// Pipeline create-flag variants; the EXT flags are only used together with the
// protected-access variant above (see createRWStorageBufferTests).
struct
{
	vk::VkPipelineCreateFlags pipelineFlags;
	const char* name;
} flags[] =
{
	{ (vk::VkPipelineCreateFlagBits)0u, "none"},
#ifndef CTS_USES_VULKANSC
	{ vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
	{ vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, "no_protected_access"},
#endif
};
754
createRWStorageBufferTests(tcu::TestContext & testCtx,const std::string groupName,SSBOTestType testType,const ValidationDataStorage<tcu::UVec4> testData[],size_t testCount)755 tcu::TestCaseGroup* createRWStorageBufferTests (tcu::TestContext& testCtx,
756 const std::string groupName,
757 SSBOTestType testType,
758 const ValidationDataStorage<tcu::UVec4> testData[],
759 size_t testCount)
760 {
761 de::MovePtr<tcu::TestCaseGroup> ssboRWTestGroup (new tcu::TestCaseGroup(testCtx, groupName.c_str()));
762
763 glu::ShaderType shaderTypes[] = {
764 glu::SHADERTYPE_FRAGMENT,
765 glu::SHADERTYPE_COMPUTE
766 };
767
768 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
769 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
770 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
771 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
772 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u) continue;
773
774 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
775 {
776 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
777 const std::string shaderName = glu::getShaderTypeName(shaderType);
778 const std::string shaderGroupDesc = "Storage buffer tests for shader type: " + shaderName;
779 de::MovePtr<tcu::TestCaseGroup> testShaderGroup(new tcu::TestCaseGroup(testCtx, shaderName.c_str()));
780
781 testShaderGroup->addChild(createSpecifiedStorageBufferTests(testCtx, "static", testType, shaderType, testData, testCount, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
782 testShaderGroup->addChild(createRandomizedBufferTests(testCtx, testType, shaderType, RANDOM_TEST_COUNT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags));
783 flagsGroup->addChild(testShaderGroup.release());
784 }
785 protectedAccessGroup->addChild(flagsGroup.release());
786 }
787 ssboRWTestGroup->addChild(protectedAccessGroup.release());
788 }
789
790 return ssboRWTestGroup.release();
791 }
792
calculateAtomicOpData(SSBOAtomicType type,const tcu::UVec4 & inputValue,const deUint32 atomicArg,std::string & atomicCall,tcu::UVec4 & refValue,const deUint32 swapNdx=0)793 void calculateAtomicOpData (SSBOAtomicType type,
794 const tcu::UVec4& inputValue,
795 const deUint32 atomicArg,
796 std::string& atomicCall,
797 tcu::UVec4& refValue,
798 const deUint32 swapNdx = 0)
799 {
800 switch (type)
801 {
802 case ATOMIC_ADD:
803 {
804 refValue = inputValue + tcu::UVec4(atomicArg);
805 atomicCall = "atomicAdd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
806 break;
807 }
808 case ATOMIC_MIN:
809 {
810 refValue = tcu::UVec4(std::min(inputValue.x(), atomicArg), std::min(inputValue.y(), atomicArg), std::min(inputValue.z(), atomicArg), std::min(inputValue.w(), atomicArg));
811 atomicCall = "atomicMin(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
812 break;
813 }
814 case ATOMIC_MAX:
815 {
816 refValue = tcu::UVec4(std::max(inputValue.x(), atomicArg), std::max(inputValue.y(), atomicArg), std::max(inputValue.z(), atomicArg), std::max(inputValue.w(), atomicArg));
817 atomicCall = "atomicMax(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
818 break;
819 }
820 case ATOMIC_AND:
821 {
822 refValue = tcu::UVec4(inputValue.x() & atomicArg, inputValue.y() & atomicArg, inputValue.z() & atomicArg, inputValue.w() & atomicArg);
823 atomicCall = "atomicAnd(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
824 break;
825 }
826 case ATOMIC_OR:
827 {
828 refValue = tcu::UVec4(inputValue.x() | atomicArg, inputValue.y() | atomicArg, inputValue.z() | atomicArg, inputValue.w() | atomicArg);
829 atomicCall = "atomicOr(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
830 break;
831 }
832 case ATOMIC_XOR:
833 {
834 refValue = tcu::UVec4(inputValue.x() ^ atomicArg, inputValue.y() ^ atomicArg, inputValue.z() ^ atomicArg, inputValue.w() ^ atomicArg);
835 atomicCall = "atomicXor(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
836 break;
837 }
838 case ATOMIC_EXCHANGE:
839 {
840 refValue = tcu::UVec4(atomicArg);
841 atomicCall = "atomicExchange(protectedTestResultBuffer[i], " + de::toString(atomicArg) + "u);";
842 break;
843 }
844 case ATOMIC_COMPSWAP:
845 {
846 int selectedNdx = swapNdx % 4;
847 deUint32 selectedChange = inputValue[selectedNdx];
848
849 refValue = inputValue;
850 refValue[selectedNdx] = atomicArg;
851 atomicCall = "atomicCompSwap(protectedTestResultBuffer[i], " + de::toString(selectedChange) + "u, " + de::toString(atomicArg) + "u);";
852 break;
853 }
854 default: DE_FATAL("Incorrect atomic function type"); break;
855 }
856
857 }
858
859 } // anonymous
860
createReadStorageBufferTests(tcu::TestContext & testCtx)861 tcu::TestCaseGroup* createReadStorageBufferTests (tcu::TestContext& testCtx)
862 {
863 const ValidationDataStorage<tcu::UVec4> testData[] = {
864 { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
865 { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
866 { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
867 };
868
869 // Storage Buffer Read Tests
870 return createRWStorageBufferTests(testCtx, "ssbo_read", SSBO_READ, testData, DE_LENGTH_OF_ARRAY(testData));
871 }
872
createWriteStorageBufferTests(tcu::TestContext & testCtx)873 tcu::TestCaseGroup* createWriteStorageBufferTests (tcu::TestContext& testCtx)
874 {
875 const ValidationDataStorage<tcu::UVec4> testData[] = {
876 { tcu::UVec4(0u, 0u, 0u, 0u) }, { tcu::UVec4(1u, 0u, 0u, 0u) },
877 { tcu::UVec4(0u, 1u, 0u, 0u) }, { tcu::UVec4(0u, 0u, 1u, 0u) },
878 { tcu::UVec4(0u, 0u, 0u, 1u) }, { tcu::UVec4(1u, 1u, 1u, 1u) }
879 };
880
881 // Storage Buffer Write Tests
882 return createRWStorageBufferTests(testCtx, "ssbo_write", SSBO_WRITE, testData, DE_LENGTH_OF_ARRAY(testData));
883 }
884
createAtomicStorageBufferTests(tcu::TestContext & testCtx)885 tcu::TestCaseGroup* createAtomicStorageBufferTests (tcu::TestContext& testCtx)
886 {
887 struct {
888 const tcu::UVec4 input;
889 const deUint32 atomicArg;
890 const deUint32 swapNdx;
891 } testData[] = {
892 { tcu::UVec4(0u, 1u, 2u, 3u), 10u, 0u },
893 { tcu::UVec4(10u, 20u, 30u, 40u), 3u, 2u },
894 { tcu::UVec4(800u, 400u, 230u, 999u), 50u, 3u },
895 { tcu::UVec4(100800u, 233400u, 22230u, 77999u), 800u, 1u },
896 };
897
898 SSBOAtomicType testTypes[] = {
899 ATOMIC_ADD,
900 ATOMIC_MIN,
901 ATOMIC_MAX,
902 ATOMIC_AND,
903 ATOMIC_OR,
904 ATOMIC_XOR,
905 ATOMIC_EXCHANGE,
906 ATOMIC_COMPSWAP
907 };
908
909 glu::ShaderType shaderTypes[] = {
910 glu::SHADERTYPE_FRAGMENT,
911 glu::SHADERTYPE_COMPUTE
912 };
913
914 de::Random rnd (testCtx.getCommandLine().getBaseSeed());
915 // Storage Buffer Atomic Tests
916 de::MovePtr<tcu::TestCaseGroup> ssboAtomicTests (new tcu::TestCaseGroup(testCtx, "ssbo_atomic"));
917
918 for (int shaderNdx = 0; shaderNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderNdx)
919 {
920 const glu::ShaderType shaderType = shaderTypes[shaderNdx];
921 const std::string shaderName = glu::getShaderTypeName(shaderType);
922 const std::string shaderDesc = "Storage Buffer Atomic Tests for shader type: " + shaderName;
923 de::MovePtr<tcu::TestCaseGroup> atomicShaderGroup (new tcu::TestCaseGroup(testCtx, shaderName.c_str()));
924
925 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
926 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
927 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
928 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
929 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].pipelineFlags != 0u) continue;
930
931 for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(testTypes); ++typeNdx)
932 {
933 SSBOAtomicType atomicType = testTypes[typeNdx];
934 const std::string atomicTypeStr = getSSBOAtomicTypeString(atomicType);
935 const std::string atomicDesc = "Storage Buffer Atomic Tests: " + atomicTypeStr;
936
937 de::MovePtr<tcu::TestCaseGroup> staticTests(new tcu::TestCaseGroup(testCtx, "static"));
938 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(testData); ++ndx)
939 {
940 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
941 const tcu::UVec4& inputValue = testData[ndx].input;
942 const deUint32& atomicArg = testData[ndx].atomicArg;
943 std::string atomicCall;
944 tcu::UVec4 refValue;
945
946 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, testData[ndx].swapNdx);
947
948 ValidationDataStorage<tcu::UVec4> validationData = { refValue };
949 staticTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, vk::VK_FORMAT_R32G32B32A32_UINT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags, atomicCall));
950 }
951
952 de::MovePtr<tcu::TestCaseGroup> randomTests(new tcu::TestCaseGroup(testCtx, "random"));
953 for (int ndx = 0; ndx < RANDOM_TEST_COUNT; ndx++)
954 {
955 const std::string name = "atomic_" + atomicTypeStr + "_" + de::toString(ndx + 1);
956 deUint32 atomicArg = rnd.getUint16();
957 tcu::UVec4 inputValue;
958 tcu::UVec4 refValue;
959 std::string atomicCall;
960
961 for (int i = 0; i < 4; i++)
962 inputValue[i] = rnd.getUint16();
963
964 calculateAtomicOpData(atomicType, inputValue, atomicArg, atomicCall, refValue, ndx);
965
966 ValidationDataStorage<tcu::UVec4> validationData = { refValue };
967 randomTests->addChild(new StorageBufferTestCase<tcu::UVec4>(testCtx, SSBO_ATOMIC, shaderType, name.c_str(), inputValue, validationData, vk::VK_FORMAT_R32G32B32A32_UINT, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].pipelineFlags, atomicCall));
968
969 }
970
971 de::MovePtr<tcu::TestCaseGroup> atomicTests(new tcu::TestCaseGroup(testCtx, atomicTypeStr.c_str()));
972 atomicTests->addChild(staticTests.release());
973 atomicTests->addChild(randomTests.release());
974 flagsGroup->addChild(atomicTests.release());
975 }
976 protectedAccessGroup->addChild(flagsGroup.release());
977 }
978 atomicShaderGroup->addChild(protectedAccessGroup.release());
979 }
980 ssboAtomicTests->addChild(atomicShaderGroup.release());
981 }
982
983 return ssboAtomicTests.release();
984 }
985
986 } // ProtectedMem
987 } // vkt
988