1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory image access tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemShaderImageAccessTests.hpp"
26
27 #include "vktProtectedMemContext.hpp"
28 #include "vktProtectedMemUtils.hpp"
29 #include "vktProtectedMemImageValidator.hpp"
30 #include "vktTestCase.hpp"
31 #include "vktTestGroupUtil.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkImageUtil.hpp"
37 #include "vkCmdUtil.hpp"
38
39 #include "tcuTestLog.hpp"
40 #include "tcuVector.hpp"
41 #include "tcuTextureUtil.hpp"
42 #include "tcuStringTemplate.hpp"
43
44 #include "gluTextureTestUtil.hpp"
45
46 #include "deRandom.hpp"
47
48 namespace vkt
49 {
50 namespace ProtectedMem
51 {
52
53 namespace
54 {
55
56 enum
57 {
58 RENDER_WIDTH = 128,
59 RENDER_HEIGHT = 128,
60 IMAGE_WIDTH = 128,
61 IMAGE_HEIGHT = 128,
62 };
63
64 enum AccessType
65 {
66 ACCESS_TYPE_SAMPLING = 0,
67 ACCESS_TYPE_TEXEL_FETCH,
68 ACCESS_TYPE_IMAGE_LOAD,
69 ACCESS_TYPE_IMAGE_STORE,
70 ACCESS_TYPE_IMAGE_ATOMICS,
71
72 ACCESS_TYPE_LAST
73 };
74
75 enum AtomicOperation
76 {
77 ATOMIC_OPERATION_ADD = 0,
78 ATOMIC_OPERATION_MIN,
79 ATOMIC_OPERATION_MAX,
80 ATOMIC_OPERATION_AND,
81 ATOMIC_OPERATION_OR,
82 ATOMIC_OPERATION_XOR,
83 ATOMIC_OPERATION_EXCHANGE,
84
85 ATOMIC_OPERATION_LAST
86 };
87
88 struct Params
89 {
90 glu::ShaderType shaderType;
91 AccessType accessType;
92 vk::VkFormat imageFormat;
93 AtomicOperation atomicOperation;
94
95 	Params (void)
96 : shaderType (glu::SHADERTYPE_LAST)
97 , accessType (ACCESS_TYPE_LAST)
98 , imageFormat (vk::VK_FORMAT_UNDEFINED)
99 , atomicOperation (ATOMIC_OPERATION_LAST)
100 {}
101
102 	Params (const glu::ShaderType shaderType_,
103 const AccessType accessType_,
104 const vk::VkFormat imageFormat_,
105 const AtomicOperation atomicOperation_ = ATOMIC_OPERATION_LAST)
106 : shaderType (shaderType_)
107 , accessType (accessType_)
108 , imageFormat (imageFormat_)
109 , atomicOperation (atomicOperation_)
110 {}
111 };
112
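//! Derives a deterministic seed from the test parameters, so every case gets distinct but reproducible random test data.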
113 static deUint32 getSeedValue (const Params& params)
114 {
115 return deInt32Hash(params.shaderType) ^ deInt32Hash(params.accessType) ^ deInt32Hash(params.imageFormat) ^ deInt32Hash(params.atomicOperation);
116 }
117
118 static std::string getAtomicOperationCaseName (const AtomicOperation op)
119 {
120 switch (op)
121 {
122 case ATOMIC_OPERATION_ADD: return "add";
123 case ATOMIC_OPERATION_MIN: return "min";
124 case ATOMIC_OPERATION_MAX: return "max";
125 case ATOMIC_OPERATION_AND: return "and";
126 case ATOMIC_OPERATION_OR: return "or";
127 case ATOMIC_OPERATION_XOR: return "xor";
128 case ATOMIC_OPERATION_EXCHANGE: return "exchange";
129 default:
130 DE_FATAL("Impossible");
131 return "";
132 }
133 }
134
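//! Maps an AtomicOperation to the corresponding GLSL imageAtomic* built-in name used in the generated shaders.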
135 static std::string getAtomicOperationShaderFuncName (const AtomicOperation op)
136 {
137 switch (op)
138 {
139 case ATOMIC_OPERATION_ADD: return "imageAtomicAdd";
140 case ATOMIC_OPERATION_MIN: return "imageAtomicMin";
141 case ATOMIC_OPERATION_MAX: return "imageAtomicMax";
142 case ATOMIC_OPERATION_AND: return "imageAtomicAnd";
143 case ATOMIC_OPERATION_OR: return "imageAtomicOr";
144 case ATOMIC_OPERATION_XOR: return "imageAtomicXor";
145 case ATOMIC_OPERATION_EXCHANGE: return "imageAtomicExchange";
146 default:
147 DE_FATAL("Impossible");
148 return "";
149 }
150 }
151
152 //! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
153 static deInt32 computeBinaryAtomicOperationResult (const AtomicOperation op, const deInt32 a, const deInt32 b)
154 {
155 switch (op)
156 {
157 case ATOMIC_OPERATION_ADD: return a + b;
158 case ATOMIC_OPERATION_MIN: return de::min(a, b);
159 case ATOMIC_OPERATION_MAX: return de::max(a, b);
160 case ATOMIC_OPERATION_AND: return a & b;
161 case ATOMIC_OPERATION_OR: return a | b;
162 case ATOMIC_OPERATION_XOR: return a ^ b;
163 case ATOMIC_OPERATION_EXCHANGE: return b;
164 default:
165 DE_FATAL("Impossible");
166 return -1;
167 }
168 }
169
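//! Returns the GLSL format layout qualifier (e.g. "rgba8", "r32i", "r32ui") for the given texture format.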
170 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
171 {
172 const char* orderPart;
173 const char* typePart;
174
175 switch (format.order)
176 {
177 case tcu::TextureFormat::R: orderPart = "r"; break;
178 case tcu::TextureFormat::RG: orderPart = "rg"; break;
179 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
180 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
181
182 default:
183 DE_FATAL("Impossible");
184 orderPart = DE_NULL;
185 }
186
187 switch (format.type)
188 {
189 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
190 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
191
192 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
193 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
194 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
195
196 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
197 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
198 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
199
200 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
201 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
202
203 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
204 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
205
206 default:
207 DE_FATAL("Impossible");
208 typePart = DE_NULL;
209 }
210
211 return std::string() + orderPart + typePart;
212 }
213
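//! Returns the GLSL sampler/image type for the format: "sampler2D"/"image2D", prefixed with "i" or "u" for signed/unsigned integer formats.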
214 static std::string getShaderSamplerOrImageType (const tcu::TextureFormat& format, bool isSampler)
215 {
216 const std::string formatPart = tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
217 tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
218
219 return formatPart + (isSampler ? "sampler2D" : "image2D");
220 }
221
222 class ImageAccessTestInstance : public ProtectedTestInstance
223 {
224 public:
225 ImageAccessTestInstance (Context& ctx,
226 const ImageValidator& validator,
227 const Params& params);
228 virtual tcu::TestStatus iterate (void);
229
230 private:
231 de::MovePtr<tcu::Texture2D> createTestTexture2D (void);
232 void calculateAtomicRef (tcu::Texture2D& texture2D);
233 tcu::TestStatus validateResult (vk::VkImage image,
234 vk::VkImageLayout imageLayout,
235 const tcu::Texture2D& texture2D,
236 const tcu::Sampler& refSampler);
237
238 tcu::TestStatus executeFragmentTest (void);
239 tcu::TestStatus executeComputeTest (void);
240
241 const ImageValidator& m_validator;
242 const Params& m_params;
243 };
244
245 class ImageAccessTestCase : public TestCase
246 {
247 public:
248 	ImageAccessTestCase (tcu::TestContext& testCtx,
249 const std::string& name,
250 const std::string& description,
251 const Params& params)
252 : TestCase (testCtx, name, description)
253 , m_validator (params.imageFormat)
254 , m_params (params)
255 {
256 }
257
258 	virtual ~ImageAccessTestCase (void) {}
259 	virtual TestInstance* createInstance (Context& ctx) const
260 {
261 return new ImageAccessTestInstance(ctx, m_validator, m_params);
262 }
263 virtual void initPrograms (vk::SourceCollections& programCollection) const;
264
265 private:
266 ImageValidator m_validator;
267 Params m_params;
268 };
269
270 void ImageAccessTestCase::initPrograms (vk::SourceCollections& programCollection) const
271 {
272 const tcu::TextureFormat& texFormat = mapVkFormat(m_params.imageFormat);
273 const std::string imageFormat = getShaderImageFormatQualifier(texFormat);
274 const std::string imageType = getShaderSamplerOrImageType(texFormat, false);
275 const std::string samplerType = getShaderSamplerOrImageType(texFormat, true);
276 const std::string colorVecType = isIntFormat(m_params.imageFormat) ? "ivec4" :
277 isUintFormat(m_params.imageFormat) ? "uvec4" : "vec4";
278
279 m_validator.initPrograms(programCollection);
280
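// Generate the test shaders, specializing the ${...} placeholders for the image format under test.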
281 if (m_params.shaderType == glu::SHADERTYPE_FRAGMENT)
282 {
283 {
284 // Vertex shader
285 const char* vert = "#version 450\n"
286 "layout(location = 0) in mediump vec2 a_position;\n"
287 "layout(location = 1) in mediump vec2 a_texCoord;\n"
288 "layout(location = 0) out mediump vec2 v_texCoord;\n"
289 "\n"
290 "void main() {\n"
291 " gl_Position = vec4(a_position, 0.0, 1.0);\n"
292 " v_texCoord = a_texCoord;\n"
293 "}\n";
294
295 programCollection.glslSources.add("vert") << glu::VertexSource(vert);
296 }
297
298 {
299 // Fragment shader
300 std::ostringstream frag;
301 frag << "#version 450\n"
302 "layout(location = 0) in mediump vec2 v_texCoord;\n"
303 "layout(location = 0) out highp ${COLOR_VEC_TYPE} o_color;\n";
304
305 switch (m_params.accessType)
306 {
307 case ACCESS_TYPE_SAMPLING:
308 case ACCESS_TYPE_TEXEL_FETCH:
309 frag << "layout(set = 0, binding = 0) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
310 break;
311 case ACCESS_TYPE_IMAGE_LOAD:
312 frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_image;\n";
313 break;
314 case ACCESS_TYPE_IMAGE_STORE:
315 frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_imageA;\n";
316 frag << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) writeonly uniform highp ${IMAGE_TYPE} u_imageB;\n";
317 break;
318 case ACCESS_TYPE_IMAGE_ATOMICS:
319 frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) coherent uniform highp ${IMAGE_TYPE} u_image;\n";
320 break;
321 default:
322 DE_FATAL("Impossible");
323 break;
324 }
325
326 frag << "\n"
327 "void main() {\n";
328
329 switch (m_params.accessType)
330 {
331 case ACCESS_TYPE_SAMPLING:
332 frag << " o_color = texture(u_sampler, v_texCoord);\n";
333 break;
334 case ACCESS_TYPE_TEXEL_FETCH:
335 frag << " const highp int lod = 0;\n";
336 frag << " o_color = texelFetch(u_sampler, ivec2(v_texCoord), lod);\n";
337 break;
338 case ACCESS_TYPE_IMAGE_LOAD:
339 frag << " o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
340 break;
341 case ACCESS_TYPE_IMAGE_STORE:
342 frag << " o_color = imageLoad(u_imageA, ivec2(v_texCoord));\n";
343 frag << " imageStore(u_imageB, ivec2(v_texCoord), o_color);\n";
344 break;
345 case ACCESS_TYPE_IMAGE_ATOMICS:
346 frag << " int gx = int(v_texCoord.x);\n";
347 frag << " int gy = int(v_texCoord.y);\n";
348 frag << " "
349 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
350 << "(u_image, ivec2(v_texCoord), "
351 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
352 << "(gx*gx + gy*gy));\n";
353 frag << " o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
354 break;
355 default:
356 DE_FATAL("Impossible");
357 break;
358 }
359
360 frag << "}\n";
361
362 std::map<std::string, std::string> fragParams;
363
364 fragParams["IMAGE_FORMAT"] = imageFormat;
365 fragParams["IMAGE_TYPE"] = imageType;
366 fragParams["SAMPLER_TYPE"] = samplerType;
367 fragParams["COLOR_VEC_TYPE"] = colorVecType;
368
369 programCollection.glslSources.add("frag") << glu::FragmentSource(tcu::StringTemplate(frag.str()).specialize(fragParams));
370 }
371 }
372 else if (m_params.shaderType == glu::SHADERTYPE_COMPUTE)
373 {
374 // Compute shader
375 std::ostringstream comp;
376 comp << "#version 450\n"
377 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
378 "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) ${RES_MEM_QUALIFIER} uniform highp ${IMAGE_TYPE} u_resultImage;\n";
379
380 switch (m_params.accessType)
381 {
382 case ACCESS_TYPE_SAMPLING:
383 case ACCESS_TYPE_TEXEL_FETCH:
384 comp << "layout(set = 0, binding = 1) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
385 break;
386 case ACCESS_TYPE_IMAGE_LOAD:
387 case ACCESS_TYPE_IMAGE_STORE:
388 comp << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_srcImage;\n";
389 break;
390 case ACCESS_TYPE_IMAGE_ATOMICS:
391 break;
392 default:
393 DE_FATAL("Impossible");
394 break;
395 }
396
397 comp << "\n"
398 "void main() {\n"
399 " int gx = int(gl_GlobalInvocationID.x);\n"
400 " int gy = int(gl_GlobalInvocationID.y);\n";
401
402 switch (m_params.accessType)
403 {
404 case ACCESS_TYPE_SAMPLING:
405 comp << " ${COLOR_VEC_TYPE} color = texture(u_sampler, vec2(float(gx)/" << de::toString((int)IMAGE_WIDTH) << ", float(gy)/" << de::toString((int)IMAGE_HEIGHT) << "));\n";
406 comp << " imageStore(u_resultImage, ivec2(gx, gy), color);\n";
407 break;
408 case ACCESS_TYPE_TEXEL_FETCH:
409 comp << " const highp int lod = 0;\n";
410 comp << " ${COLOR_VEC_TYPE} color = texelFetch(u_sampler, ivec2(gx, gy), lod);\n";
411 comp << " imageStore(u_resultImage, ivec2(gx, gy), color);\n";
412 break;
413 case ACCESS_TYPE_IMAGE_LOAD:
414 case ACCESS_TYPE_IMAGE_STORE:
415 comp << " ${COLOR_VEC_TYPE} color = imageLoad(u_srcImage, ivec2(gx, gy));\n";
416 comp << " imageStore(u_resultImage, ivec2(gx, gy), color);\n";
417 break;
418 case ACCESS_TYPE_IMAGE_ATOMICS:
419 comp << " "
420 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
421 << "(u_resultImage, ivec2(gx, gy), "
422 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
423 << "(gx*gx + gy*gy));\n";
424 break;
425 default:
426 DE_FATAL("Impossible");
427 break;
428 }
429
430 comp << "}\n";
431
432 std::map<std::string, std::string> compParams;
433
434 compParams["IMAGE_FORMAT"] = imageFormat;
435 compParams["IMAGE_TYPE"] = imageType;
436 compParams["SAMPLER_TYPE"] = samplerType;
437 compParams["COLOR_VEC_TYPE"] = colorVecType;
438 compParams["RES_MEM_QUALIFIER"] = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? "coherent" : "writeonly";
439
440 programCollection.glslSources.add("comp") << glu::ComputeSource(tcu::StringTemplate(comp.str()).specialize(compParams));
441 }
442 else
443 DE_FATAL("Impossible");
444 }
445
446 ImageAccessTestInstance::ImageAccessTestInstance (Context& ctx,
447 const ImageValidator& validator,
448 const Params& params)
449 : ProtectedTestInstance (ctx)
450 , m_validator (validator)
451 , m_params (params)
452 {
453 }
454
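//! Builds the 2D reference texture (base level only) filled with random color tiles.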
455 de::MovePtr<tcu::Texture2D> ImageAccessTestInstance::createTestTexture2D (void)
456 {
457 const tcu::TextureFormat texFmt = mapVkFormat(m_params.imageFormat);
458 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(texFmt);
459 de::MovePtr<tcu::Texture2D> texture2D (new tcu::Texture2D(texFmt, IMAGE_WIDTH, IMAGE_HEIGHT));
460
461 // \note generate only the base level
462 texture2D->allocLevel(0);
463
464 const tcu::PixelBufferAccess& level = texture2D->getLevel(0);
465
466 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
467 {
468 // use a smaller range than the format would allow
469 const float cMin = isIntFormat(m_params.imageFormat) ? -1000.0f : 0.0f;
470 const float cMax = +1000.0f;
471
472 fillWithRandomColorTiles(level, tcu::Vec4(cMin, 0, 0, 0), tcu::Vec4(cMax, 0, 0, 0), getSeedValue(m_params));
473 }
474 else
475 fillWithRandomColorTiles(level, fmtInfo.valueMin, fmtInfo.valueMax, getSeedValue(m_params));
476
477 return texture2D;
478 }
479
480 tcu::TestStatus ImageAccessTestInstance::iterate (void)
481 {
482 switch (m_params.shaderType)
483 {
484 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest();
485 case glu::SHADERTYPE_COMPUTE: return executeComputeTest();
486 default:
487 DE_FATAL("Impossible");
488 return tcu::TestStatus::fail("");
489 }
490 }
491
492 tcu::TestStatus ImageAccessTestInstance::executeComputeTest (void)
493 {
494 ProtectedContext& ctx (m_protectedContext);
495 const vk::DeviceInterface& vk = ctx.getDeviceInterface();
496 const vk::VkDevice device = ctx.getDevice();
497 const vk::VkQueue queue = ctx.getQueue();
498 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex();
499
500 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex));
501
502 de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
503 const tcu::Sampler refSampler = tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
504 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST);
505
506 vk::Unique<vk::VkShaderModule> computeShader (vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("comp"), 0));
507
508 de::MovePtr<vk::ImageWithMemory> imageSrc;
509 de::MovePtr<vk::ImageWithMemory> imageDst;
510 vk::Move<vk::VkSampler> sampler;
511 vk::Move<vk::VkImageView> imageViewSrc;
512 vk::Move<vk::VkImageView> imageViewDst;
513
514 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
515 vk::Move<vk::VkDescriptorPool> descriptorPool;
516 vk::Move<vk::VkDescriptorSet> descriptorSet;
517
518 // Create src and dst images
519 {
520 vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
521 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
522 vk::VK_IMAGE_USAGE_SAMPLED_BIT |
523 vk::VK_IMAGE_USAGE_STORAGE_BIT;
524
525 imageSrc = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
526 IMAGE_WIDTH, IMAGE_HEIGHT,
527 m_params.imageFormat,
528 imageUsageFlags);
529
530 if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
531 {
532 imageDst = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
533 IMAGE_WIDTH, IMAGE_HEIGHT,
534 m_params.imageFormat,
535 imageUsageFlags);
536 }
537 }
538
539 // Upload source image
540 {
541 de::MovePtr<vk::ImageWithMemory> unprotectedImage = createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
542 IMAGE_WIDTH, IMAGE_HEIGHT,
543 m_params.imageFormat,
544 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
545
546 // Upload data to an unprotected image
547 uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
548
549 // Select vkImageLayout based upon accessType
550 vk::VkImageLayout imageSrcLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
551
552 switch (m_params.accessType)
553 {
554 case ACCESS_TYPE_SAMPLING:
555 case ACCESS_TYPE_TEXEL_FETCH:
556 {
557 imageSrcLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
558 break;
559 }
560 case ACCESS_TYPE_IMAGE_LOAD:
561 case ACCESS_TYPE_IMAGE_STORE:
562 case ACCESS_TYPE_IMAGE_ATOMICS:
563 {
564 imageSrcLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
565 break;
566 }
567 default:
568 DE_FATAL("Impossible");
569 break;
570 }
571
572 // Copy unprotected image to protected image
573 copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageSrcLayout, IMAGE_WIDTH, IMAGE_HEIGHT);
574 }
575
576 // Clear dst image
577 if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
578 clearImage(m_protectedContext, **imageDst);
579
580 // Create descriptors
581 {
582 vk::DescriptorSetLayoutBuilder layoutBuilder;
583 vk::DescriptorPoolBuilder poolBuilder;
584
585 switch (m_params.accessType)
586 {
587 case ACCESS_TYPE_SAMPLING:
588 case ACCESS_TYPE_TEXEL_FETCH:
589 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
590 layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_COMPUTE_BIT, DE_NULL);
591 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
592 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
593 break;
594 case ACCESS_TYPE_IMAGE_LOAD:
595 case ACCESS_TYPE_IMAGE_STORE:
596 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
597 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
598 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
599 break;
600 case ACCESS_TYPE_IMAGE_ATOMICS:
601 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
602 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
603 break;
604 default:
605 DE_FATAL("Impossible");
606 break;
607 }
608
609 descriptorSetLayout = layoutBuilder.build(vk, device);
610 descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
611 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
612 }
613
614 // Create pipeline layout
615 vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
616
617 // Create sampler and image views
618 {
619 if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
620 {
621 const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
622 const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);
623
624 sampler = createSampler(vk, device, &samplerParams);
625 }
626
627 imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
628
629 if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
630 imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
631 }
632
633 // Update descriptor set information
634 {
635 vk::DescriptorSetUpdateBuilder updateBuilder;
636
637 switch (m_params.accessType)
638 {
639 case ACCESS_TYPE_SAMPLING:
640 case ACCESS_TYPE_TEXEL_FETCH:
641 {
642 vk::VkDescriptorImageInfo descStorageImgDst = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
643 vk::VkDescriptorImageInfo descSampledImgSrc = makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
644
645 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
646 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImgSrc);
647 break;
648 }
649 case ACCESS_TYPE_IMAGE_LOAD:
650 case ACCESS_TYPE_IMAGE_STORE:
651 {
652 vk::VkDescriptorImageInfo descStorageImgDst = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
653 vk::VkDescriptorImageInfo descStorageImgSrc = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
654
655 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
656 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
657 break;
658 }
659 case ACCESS_TYPE_IMAGE_ATOMICS:
660 {
661 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
662
663 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
664 break;
665 }
666 default:
667 DE_FATAL("Impossible");
668 break;
669 }
670
671 updateBuilder.update(vk, device);
672 }
673
674 // Create validation compute commands & submit
675 {
676 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
677 vk::Unique<vk::VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *computeShader, DE_NULL));
678 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
679
680 beginCommandBuffer(vk, *cmdBuffer);
681
682 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
683 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
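// One workgroup (local size 1x1x1) is dispatched per texel of the IMAGE_WIDTH x IMAGE_HEIGHT image.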
684 vk.cmdDispatch(*cmdBuffer, (deUint32)IMAGE_WIDTH, (deUint32)IMAGE_HEIGHT, 1u);
685 endCommandBuffer(vk, *cmdBuffer);
686
687 VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull));
688 }
689
690 // Calculate reference image
691 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
692 calculateAtomicRef(*texture2D);
693
694 // Validate result
695 {
696 const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc : **imageDst;
697
698 return validateResult(resultImage, vk::VK_IMAGE_LAYOUT_GENERAL, *texture2D, refSampler);
699 }
700 }
701
702 tcu::TestStatus ImageAccessTestInstance::executeFragmentTest (void)
703 {
704 ProtectedContext& ctx (m_protectedContext);
705 const vk::DeviceInterface& vk = ctx.getDeviceInterface();
706 const vk::VkDevice device = ctx.getDevice();
707 const vk::VkQueue queue = ctx.getQueue();
708 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex();
709
710 // Create output image
711 de::MovePtr<vk::ImageWithMemory> colorImage (createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
712 RENDER_WIDTH, RENDER_HEIGHT,
713 m_params.imageFormat,
714 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
715 vk::Unique<vk::VkImageView> colorImageView (createImageView(ctx, **colorImage, m_params.imageFormat));
716
717 vk::Unique<vk::VkRenderPass> renderPass (createRenderPass(ctx, m_params.imageFormat));
718 vk::Unique<vk::VkFramebuffer> framebuffer (createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));
719
720 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex));
721 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
722
723 de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
724 const tcu::Sampler refSampler = tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
725 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST);
726
727 vk::Move<vk::VkShaderModule> vertexShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0);
728 vk::Move<vk::VkShaderModule> fragmentShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("frag"), 0);
729
730 de::MovePtr<vk::ImageWithMemory> imageSrc;
731 de::MovePtr<vk::ImageWithMemory> imageDst;
732 vk::Move<vk::VkSampler> sampler;
733 vk::Move<vk::VkImageView> imageViewSrc;
734 vk::Move<vk::VkImageView> imageViewDst;
735
736 vk::Move<vk::VkPipeline> graphicsPipeline;
737 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
738 vk::Move<vk::VkDescriptorPool> descriptorPool;
739 vk::Move<vk::VkDescriptorSet> descriptorSet;
740
741 // Create src and dst images
742 {
743 vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
744 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
745 vk::VK_IMAGE_USAGE_SAMPLED_BIT;
746
747 switch (m_params.accessType)
748 {
749 case ACCESS_TYPE_IMAGE_LOAD:
750 case ACCESS_TYPE_IMAGE_STORE:
751 case ACCESS_TYPE_IMAGE_ATOMICS:
752 imageUsageFlags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
753 break;
754 default:
755 break;
756 }
757
758 imageSrc = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
759 IMAGE_WIDTH, IMAGE_HEIGHT,
760 m_params.imageFormat,
761 imageUsageFlags);
762
763 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
764 {
765 imageDst = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
766 IMAGE_WIDTH, IMAGE_HEIGHT,
767 m_params.imageFormat,
768 imageUsageFlags);
769 }
770 }
771
772 // Select vkImageLayout based upon accessType
773 vk::VkImageLayout imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
774
775 switch (m_params.accessType)
776 {
777 case ACCESS_TYPE_SAMPLING:
778 case ACCESS_TYPE_TEXEL_FETCH:
779 {
780 imageLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
781 break;
782 }
783 case ACCESS_TYPE_IMAGE_LOAD:
784 case ACCESS_TYPE_IMAGE_STORE:
785 case ACCESS_TYPE_IMAGE_ATOMICS:
786 {
787 imageLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
788 break;
789 }
790 default:
791 DE_FATAL("Impossible");
792 break;
793 }
794
795 // Upload source image
796 {
797 de::MovePtr<vk::ImageWithMemory> unprotectedImage = createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
798 IMAGE_WIDTH, IMAGE_HEIGHT,
799 m_params.imageFormat,
800 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
801
802 // Upload data to an unprotected image
803 uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
804
805 // Copy unprotected image to protected image
806 copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageLayout, IMAGE_WIDTH, IMAGE_HEIGHT);
807 }
808
809 // Clear dst image
810 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
811 clearImage(m_protectedContext, **imageDst);
812
813 // Create descriptors
814 {
815 vk::DescriptorSetLayoutBuilder layoutBuilder;
816 vk::DescriptorPoolBuilder poolBuilder;
817
818 switch (m_params.accessType)
819 {
820 case ACCESS_TYPE_SAMPLING:
821 case ACCESS_TYPE_TEXEL_FETCH:
822 layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
823 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
824 break;
825 case ACCESS_TYPE_IMAGE_LOAD:
826 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
827 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
828 break;
829 case ACCESS_TYPE_IMAGE_STORE:
830 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
831 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
832 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
833 break;
834 case ACCESS_TYPE_IMAGE_ATOMICS:
835 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
836 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
837 break;
838 default:
839 DE_FATAL("Impossible");
840 break;
841 }
842
843 descriptorSetLayout = layoutBuilder.build(vk, device);
844 descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
845 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
846 }
847
848 // Create pipeline layout
849 vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
850
851 // Create sampler and image views
852 {
853 if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
854 {
855 const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
856 const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);
857
858 sampler = createSampler(vk, device, &samplerParams);
859 }
860
861 imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
862
863 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
864 imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
865 }
866
867 // Update descriptor set information
868 {
869 vk::DescriptorSetUpdateBuilder updateBuilder;
870
871 switch (m_params.accessType)
872 {
873 case ACCESS_TYPE_SAMPLING:
874 case ACCESS_TYPE_TEXEL_FETCH:
875 {
876 vk::VkDescriptorImageInfo descSampledImg = makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
877
878 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImg);
879 break;
880 }
881 case ACCESS_TYPE_IMAGE_LOAD:
882 {
883 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
884
885 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
886 break;
887 }
888 case ACCESS_TYPE_IMAGE_STORE:
889 {
890 vk::VkDescriptorImageInfo descStorageImgSrc = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
891 vk::VkDescriptorImageInfo descStorageImgDst = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
892
893 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
894 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
895 break;
896 }
897 case ACCESS_TYPE_IMAGE_ATOMICS:
898 {
899 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
900
901 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
902 break;
903 }
904 default:
905 DE_FATAL("Impossible");
906 break;
907 }
908
909 updateBuilder.update(vk, device);
910 }
911
912 // Create vertex buffer and vertex input descriptors
913 VertexBindings vertexBindings;
914 VertexAttribs vertexAttribs;
915 de::MovePtr<vk::BufferWithMemory> vertexBuffer;
916 {
917 const float positions[] =
918 {
919 -1.0f, -1.0f,
920 -1.0f, +1.0f,
921 +1.0f, -1.0f,
922 +1.0f, +1.0f,
923 };
924
925 std::vector<float> texCoord;
926
927 {
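// Sampling uses normalized [0,1] coordinates; the other access types cast the varying to integer texel
// coordinates in the shader, so the quad spans the image in texels (the -0.1 presumably keeps the cast
// result within the last texel).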
928 const tcu::Vec2 minCoords (0.0f, 0.0f);
929 const tcu::Vec2 maxCoords = m_params.accessType == ACCESS_TYPE_SAMPLING ?
930 tcu::Vec2(1.0f, 1.0f) :
931 tcu::Vec2((float)IMAGE_WIDTH - 0.1f, (float)IMAGE_HEIGHT - 0.1f);
932
933 glu::TextureTestUtil::computeQuadTexCoord2D(texCoord, minCoords, maxCoords);
934 }
935
936 const deUint32 vertexPositionStrideSize = (deUint32)sizeof(tcu::Vec2);
937 const deUint32 vertexTextureStrideSize = (deUint32)sizeof(tcu::Vec2);
938 const deUint32 positionDataSize = 4 * vertexPositionStrideSize;
939 const deUint32 textureCoordDataSize = 4 * vertexTextureStrideSize;
940 const deUint32 vertexBufferSize = positionDataSize + textureCoordDataSize;
941
942 {
943 const vk::VkVertexInputBindingDescription vertexInputBindingDescriptions[2] =
944 {
945 {
946 0u, // deUint32 binding;
947 vertexPositionStrideSize, // deUint32 strideInBytes;
948 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
949 },
950 {
951 1u, // deUint32 binding;
952 vertexTextureStrideSize, // deUint32 strideInBytes;
953 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
954 }
955 };
956 vertexBindings.push_back(vertexInputBindingDescriptions[0]);
957 vertexBindings.push_back(vertexInputBindingDescriptions[1]);
958
959 const vk::VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] =
960 {
961 {
962 0u, // deUint32 location;
963 0u, // deUint32 binding;
964 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
965 0u // deUint32 offsetInBytes;
966 },
967 {
968 1u, // deUint32 location;
969 1u, // deUint32 binding;
970 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
971 positionDataSize // deUint32 offsetInBytes;
972 }
973 };
974 vertexAttribs.push_back(vertexInputAttributeDescriptions[0]);
975 vertexAttribs.push_back(vertexInputAttributeDescriptions[1]);
976 }
977
978 vertexBuffer = makeBuffer(ctx,
979 PROTECTION_DISABLED,
980 queueFamilyIndex,
981 vertexBufferSize,
982 vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
983 vk::MemoryRequirement::HostVisible);
984
985 deMemcpy(vertexBuffer->getAllocation().getHostPtr(), positions, positionDataSize);
986 deMemcpy(reinterpret_cast<deUint8*>(vertexBuffer->getAllocation().getHostPtr()) + positionDataSize, texCoord.data(), textureCoordDataSize);
987 vk::flushMappedMemoryRange(vk, device, vertexBuffer->getAllocation().getMemory(), vertexBuffer->getAllocation().getOffset(), vertexBufferSize);
988 }
989
990 // Create pipeline
991 graphicsPipeline = makeGraphicsPipeline(vk,
992 device,
993 *pipelineLayout,
994 *renderPass,
995 *vertexShader,
996 *fragmentShader,
997 vertexBindings,
998 vertexAttribs,
999 tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
1000 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
1001
1002 // Begin cmd buffer
1003 beginCommandBuffer(vk, *cmdBuffer);
1004
1005 // Start image barrier
1006 {
1007 const vk::VkImageMemoryBarrier startImgBarrier =
1008 {
1009 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1010 DE_NULL, // pNext
1011 0, // srcAccessMask
1012 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
1013 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1014 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1015 queueFamilyIndex, // srcQueueFamilyIndex
1016 queueFamilyIndex, // dstQueueFamilyIndex
1017 **colorImage, // image
1018 {
1019 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1020 0u, // baseMipLevel
1021 1u, // levelCount
1022 0u, // baseArrayLayer
1023 1u, // layerCount
1024 }
1025 };
1026
1027 vk.cmdPipelineBarrier(*cmdBuffer,
1028 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
1029 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
1030 (vk::VkDependencyFlags)0,
1031 0, (const vk::VkMemoryBarrier*)DE_NULL,
1032 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1033 1, &startImgBarrier);
1034 }
1035
1036 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.0f));
1037
1038 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
1039 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1040
1041 {
1042 const vk::VkDeviceSize vertexBufferOffset = 0;
1043
1044 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1045 vk.cmdBindVertexBuffers(*cmdBuffer, 1u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1046 }
1047
1048 vk.cmdDraw(*cmdBuffer, /*vertexCount*/ 4u, 1u, 0u, 1u);
1049
1050 endRenderPass(vk, *cmdBuffer);
1051
1052 {
1053 const vk::VkImageMemoryBarrier endImgBarrier =
1054 {
1055 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1056 DE_NULL, // pNext
1057 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1058 vk::VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
1059 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1060 imageLayout, // newLayout
1061 queueFamilyIndex, // srcQueueFamilyIndex
1062 queueFamilyIndex, // dstQueueFamilyIndex
1063 **colorImage, // image
1064 {
1065 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1066 0u, // baseMipLevel
1067 1u, // levelCount
1068 0u, // baseArrayLayer
1069 1u, // layerCount
1070 }
1071 };
1072 vk.cmdPipelineBarrier(*cmdBuffer,
1073 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
1074 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, // dstStageMask
1075 (vk::VkDependencyFlags)0,
1076 0, (const vk::VkMemoryBarrier*)DE_NULL,
1077 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1078 1, &endImgBarrier);
1079 }
1080
1081 endCommandBuffer(vk, *cmdBuffer);
1082
1083 // Submit command buffer
1084 {
1085 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
1086 VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull));
1087 }
1088
1089 // Calculate reference image
1090 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1091 calculateAtomicRef(*texture2D);
1092
1093 // Validate result
1094 {
1095 const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc :
1096 m_params.accessType == ACCESS_TYPE_IMAGE_STORE ? **imageDst : **colorImage;
1097
1098 return validateResult(resultImage, imageLayout, *texture2D, refSampler);
1099 }
1100 }
1101
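//! Applies the tested atomic operation to every texel of the reference texture, using the same per-texel argument (x*x + y*y) as the shaders.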
1102 void ImageAccessTestInstance::calculateAtomicRef (tcu::Texture2D& texture2D)
1103 {
1104 DE_ASSERT(m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS);
1105
1106 const tcu::PixelBufferAccess& reference = texture2D.getLevel(0);
1107
1108 for (int x = 0; x < reference.getWidth(); ++x)
1109 for (int y = 0; y < reference.getHeight(); ++y)
1110 {
1111 const deInt32 oldX = reference.getPixelInt(x, y).x();
1112 const deInt32 atomicArg = x*x + y*y;
1113 const deInt32 newX = computeBinaryAtomicOperationResult(m_params.atomicOperation, oldX, atomicArg);
1114
1115 reference.setPixel(tcu::IVec4(newX, 0, 0, 0), x, y);
1116 }
1117 }
1118
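//! Samples the reference texture at four random coordinates and asks the image validator to compare the protected result image against those reference values.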
1119 tcu::TestStatus ImageAccessTestInstance::validateResult (vk::VkImage image, vk::VkImageLayout imageLayout, const tcu::Texture2D& texture2D, const tcu::Sampler& refSampler)
1120 {
1121 de::Random rnd (getSeedValue(m_params));
1122 ValidationData refData;
1123
1124 for (int ndx = 0; ndx < 4; ++ndx)
1125 {
1126 const float lod = 0.0f;
1127 const float cx = rnd.getFloat(0.0f, 1.0f);
1128 const float cy = rnd.getFloat(0.0f, 1.0f);
1129
1130 refData.coords[ndx] = tcu::Vec4(cx, cy, 0.0f, 0.0f);
1131 refData.values[ndx] = texture2D.sample(refSampler, cx, cy, lod);
1132 }
1133
1134 if (!m_validator.validateImage(m_protectedContext, refData, image, m_params.imageFormat, imageLayout))
1135 return tcu::TestStatus::fail("Something went really wrong");
1136 else
1137 return tcu::TestStatus::pass("Everything went OK");
1138 }
1139
1140 } // anonymous
1141
1142 tcu::TestCaseGroup* createShaderImageAccessTests (tcu::TestContext& testCtx)
1143 {
1144 de::MovePtr<tcu::TestCaseGroup> accessGroup (new tcu::TestCaseGroup(testCtx, "access", "Shader Image Access Tests"));
1145
1146 static const struct
1147 {
1148 glu::ShaderType type;
1149 const char* name;
1150 const char* desc;
1151 } shaderTypes[] =
1152 {
1153 { glu::SHADERTYPE_FRAGMENT, "fragment", "Image access from fragment shader" },
1154 { glu::SHADERTYPE_COMPUTE, "compute", "Image access from compute shader" },
1155 };
1156
1157 static const struct
1158 {
1159 AccessType type;
1160 const char* name;
1161 const char* desc;
1162 } accessTypes[] =
1163 {
1164 { ACCESS_TYPE_SAMPLING, "sampling", "Sampling test" },
1165 { ACCESS_TYPE_TEXEL_FETCH, "texelfetch", "Texel fetch test" },
1166 { ACCESS_TYPE_IMAGE_LOAD, "imageload", "Image load test" },
1167 { ACCESS_TYPE_IMAGE_STORE, "imagestore", "Image store test" },
1168 { ACCESS_TYPE_IMAGE_ATOMICS, "imageatomics", "Image atomics test" },
1169 };
1170
1171 static const struct
1172 {
1173 vk::VkFormat format;
1174 const char* name;
1175 } formats[] =
1176 {
1177 { vk::VK_FORMAT_R8G8B8A8_UNORM, "rgba8" },
1178 { vk::VK_FORMAT_R32_SINT, "r32i" },
1179 { vk::VK_FORMAT_R32_UINT, "r32ui" },
1180 };
1181
1182 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderTypeNdx)
1183 {
1184 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1185 de::MovePtr<tcu::TestCaseGroup> shaderGroup (new tcu::TestCaseGroup(testCtx, shaderTypes[shaderTypeNdx].name, shaderTypes[shaderTypeNdx].desc));
1186
1187 for (int accessNdx = 0; accessNdx < DE_LENGTH_OF_ARRAY(accessTypes); ++accessNdx)
1188 {
1189 const AccessType accessType = accessTypes[accessNdx].type;
1190
1191 if (shaderType == glu::SHADERTYPE_COMPUTE && accessType == ACCESS_TYPE_IMAGE_STORE) // \note already tested in other tests
1192 continue;
1193
1194 de::MovePtr<tcu::TestCaseGroup> accessTypeGroup (new tcu::TestCaseGroup(testCtx, accessTypes[accessNdx].name, accessTypes[accessNdx].desc));
1195
1196 if (accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1197 {
1198 for (deUint32 atomicOpI = 0; atomicOpI < ATOMIC_OPERATION_LAST; ++atomicOpI)
1199 {
1200 const AtomicOperation atomicOp = (AtomicOperation)atomicOpI;
1201 de::MovePtr<tcu::TestCaseGroup> operationGroup (new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(atomicOp).c_str(), ""));
1202
1203 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1204 {
1205 const vk::VkFormat format = formats[formatNdx].format;
1206
1207 if (format != vk::VK_FORMAT_R32_UINT && format != vk::VK_FORMAT_R32_SINT)
1208 continue;
1209
1210 operationGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format, atomicOp)));
1211 }
1212
1213 accessTypeGroup->addChild(operationGroup.release());
1214 }
1215 }
1216 else
1217 {
1218 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1219 {
1220 const vk::VkFormat format = formats[formatNdx].format;
1221
1222 accessTypeGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format)));
1223 }
1224 }
1225
1226 shaderGroup->addChild(accessTypeGroup.release());
1227 }
1228
1229 accessGroup->addChild(shaderGroup.release());
1230 }
1231
1232 return accessGroup.release();
1233 }
1234
1235 } // ProtectedMem
1236 } // vkt
1237