1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory image access tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemShaderImageAccessTests.hpp"
26
27 #include "vktProtectedMemContext.hpp"
28 #include "vktProtectedMemUtils.hpp"
29 #include "vktProtectedMemImageValidator.hpp"
30 #include "vktTestCase.hpp"
31 #include "vktTestGroupUtil.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkImageUtil.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkObjUtil.hpp"
39
40 #include "tcuTestLog.hpp"
41 #include "tcuVector.hpp"
42 #include "tcuTextureUtil.hpp"
43 #include "tcuStringTemplate.hpp"
44
45 #include "gluTextureTestUtil.hpp"
46
47 #include "deRandom.hpp"
48
49 namespace vkt
50 {
51 namespace ProtectedMem
52 {
53
54 namespace
55 {
56
// Render-target and test-image dimensions in texels. The compute variant
// dispatches one invocation per texel, so these also bound the dispatch size.
enum
{
	RENDER_WIDTH	= 128,
	RENDER_HEIGHT	= 128,
	IMAGE_WIDTH		= 128,
	IMAGE_HEIGHT	= 128,
};
64
// How the shader accesses the test image.
enum AccessType
{
	ACCESS_TYPE_SAMPLING = 0,		// texture() through a combined image sampler
	ACCESS_TYPE_TEXEL_FETCH,		// texelFetch() through a combined image sampler
	ACCESS_TYPE_IMAGE_LOAD,			// imageLoad() from a storage image
	ACCESS_TYPE_IMAGE_STORE,		// imageLoad() + imageStore() between two storage images
	ACCESS_TYPE_IMAGE_ATOMICS,		// imageAtomic*() on a storage image

	ACCESS_TYPE_LAST				// sentinel, not a valid access type
};
75
// Atomic operation exercised when the access type is ACCESS_TYPE_IMAGE_ATOMICS.
enum AtomicOperation
{
	ATOMIC_OPERATION_ADD = 0,
	ATOMIC_OPERATION_MIN,
	ATOMIC_OPERATION_MAX,
	ATOMIC_OPERATION_AND,
	ATOMIC_OPERATION_OR,
	ATOMIC_OPERATION_XOR,
	ATOMIC_OPERATION_EXCHANGE,

	ATOMIC_OPERATION_LAST			// sentinel, not a valid operation
};
88
// Test parametrization: which shader stage performs which kind of image
// access, on what format, and whether protected access is in effect.
struct Params
{
	glu::ShaderType				shaderType;					// Stage doing the access (fragment or compute).
	AccessType					accessType;					// Kind of image access performed in the shader.
	vk::VkFormat				imageFormat;				// Format of the source/result images.
	AtomicOperation				atomicOperation;			// Only meaningful for ACCESS_TYPE_IMAGE_ATOMICS.
	bool						pipelineProtectedAccess;	// Whether VK_EXT_pipeline_protected_access is used.
	vk::VkPipelineCreateFlags	flags;						// Extra pipeline create flags.
	ProtectionMode				protectionMode;				// Derived from flags in the ctor below.

	// Default-constructed params are intentionally invalid (sentinel values).
	Params (void)
		: shaderType				(glu::SHADERTYPE_LAST)
		, accessType				(ACCESS_TYPE_LAST)
		, imageFormat				(vk::VK_FORMAT_UNDEFINED)
		, atomicOperation			(ATOMIC_OPERATION_LAST)
		, pipelineProtectedAccess	(false)
		, flags						((vk::VkPipelineCreateFlags)0u)
		, protectionMode			(PROTECTION_ENABLED)
	{}

	Params (const glu::ShaderType			shaderType_,
			const AccessType				accessType_,
			const vk::VkFormat				imageFormat_,
			const AtomicOperation			atomicOperation_,
			const bool						pipelineProtectedAccess_,
			const vk::VkPipelineCreateFlags	flags_)
		: shaderType				(shaderType_)
		, accessType				(accessType_)
		, imageFormat				(imageFormat_)
		, atomicOperation			(atomicOperation_)
		, pipelineProtectedAccess	(pipelineProtectedAccess_)
		, flags						(flags_)
		, protectionMode			(PROTECTION_ENABLED)
	{
#ifndef CTS_USES_VULKANSC
		// A pipeline created with NO_PROTECTED_ACCESS must not touch protected
		// resources, so the whole test is then run with protection disabled.
		if ((flags_ & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0) {
			protectionMode = PROTECTION_DISABLED;
		}
#endif
	}
};
130
getSeedValue(const Params & params)131 static deUint32 getSeedValue (const Params& params)
132 {
133 return deInt32Hash(params.shaderType) ^ deInt32Hash(params.accessType) ^ deInt32Hash(params.imageFormat) ^ deInt32Hash(params.atomicOperation);
134 }
135
getAtomicOperationCaseName(const AtomicOperation op)136 static std::string getAtomicOperationCaseName (const AtomicOperation op)
137 {
138 switch (op)
139 {
140 case ATOMIC_OPERATION_ADD: return "add";
141 case ATOMIC_OPERATION_MIN: return "min";
142 case ATOMIC_OPERATION_MAX: return "max";
143 case ATOMIC_OPERATION_AND: return "and";
144 case ATOMIC_OPERATION_OR: return "or";
145 case ATOMIC_OPERATION_XOR: return "xor";
146 case ATOMIC_OPERATION_EXCHANGE: return "exchange";
147 default:
148 DE_FATAL("Impossible");
149 return "";
150 }
151 }
152
getAtomicOperationShaderFuncName(const AtomicOperation op)153 static std::string getAtomicOperationShaderFuncName (const AtomicOperation op)
154 {
155 switch (op)
156 {
157 case ATOMIC_OPERATION_ADD: return "imageAtomicAdd";
158 case ATOMIC_OPERATION_MIN: return "imageAtomicMin";
159 case ATOMIC_OPERATION_MAX: return "imageAtomicMax";
160 case ATOMIC_OPERATION_AND: return "imageAtomicAnd";
161 case ATOMIC_OPERATION_OR: return "imageAtomicOr";
162 case ATOMIC_OPERATION_XOR: return "imageAtomicXor";
163 case ATOMIC_OPERATION_EXCHANGE: return "imageAtomicExchange";
164 default:
165 DE_FATAL("Impossible");
166 return "";
167 }
168 }
169
170 //! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
computeBinaryAtomicOperationResult(const AtomicOperation op,const deInt32 a,const deInt32 b)171 static deInt32 computeBinaryAtomicOperationResult (const AtomicOperation op, const deInt32 a, const deInt32 b)
172 {
173 switch (op)
174 {
175 case ATOMIC_OPERATION_ADD: return a + b;
176 case ATOMIC_OPERATION_MIN: return de::min(a, b);
177 case ATOMIC_OPERATION_MAX: return de::max(a, b);
178 case ATOMIC_OPERATION_AND: return a & b;
179 case ATOMIC_OPERATION_OR: return a | b;
180 case ATOMIC_OPERATION_XOR: return a ^ b;
181 case ATOMIC_OPERATION_EXCHANGE: return b;
182 default:
183 DE_FATAL("Impossible");
184 return -1;
185 }
186 }
187
getShaderImageFormatQualifier(const tcu::TextureFormat & format)188 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
189 {
190 const char* orderPart;
191 const char* typePart;
192
193 switch (format.order)
194 {
195 case tcu::TextureFormat::R: orderPart = "r"; break;
196 case tcu::TextureFormat::RG: orderPart = "rg"; break;
197 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
198 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
199
200 default:
201 DE_FATAL("Impossible");
202 orderPart = DE_NULL;
203 }
204
205 switch (format.type)
206 {
207 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
208 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
209
210 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
211 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
212 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
213
214 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
215 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
216 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
217
218 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
219 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
220
221 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
222 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
223
224 default:
225 DE_FATAL("Impossible");
226 typePart = DE_NULL;
227 }
228
229 return std::string() + orderPart + typePart;
230 }
231
getShaderSamplerOrImageType(const tcu::TextureFormat & format,bool isSampler)232 static std::string getShaderSamplerOrImageType (const tcu::TextureFormat& format, bool isSampler)
233 {
234 const std::string formatPart = tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
235 tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
236
237 return formatPart + (isSampler ? "sampler2D" : "image2D");
238 }
239
// Test instance: uploads a random reference texture into a (possibly
// protected) image, performs the parametrized access in a fragment or compute
// shader, and validates the resulting image against the CPU reference.
class ImageAccessTestInstance : public ProtectedTestInstance
{
public:
								ImageAccessTestInstance	(Context&				ctx,
														 const ImageValidator&	validator,
														 const Params&			params);
	virtual tcu::TestStatus		iterate					(void);

private:
	// Builds the random-tile reference texture (base mip level only).
	de::MovePtr<tcu::Texture2D>	createTestTexture2D		(void);
	// Applies the configured atomic operation to the reference on the CPU.
	void						calculateAtomicRef		(tcu::Texture2D&		texture2D);
	// Compares the device image contents against the reference texture.
	tcu::TestStatus				validateResult			(vk::VkImage			image,
														 vk::VkImageLayout		imageLayout,
														 const tcu::Texture2D&	texture2D,
														 const tcu::Sampler&	refSampler);

	tcu::TestStatus				executeFragmentTest		(void);
	tcu::TestStatus				executeComputeTest		(void);

	const ImageValidator&		m_validator;
	const Params				m_params;
};
262
// Test case wrapper: owns the parameters and the image validator, registers
// the shader sources, and creates ImageAccessTestInstance objects.
class ImageAccessTestCase : public TestCase
{
public:
								ImageAccessTestCase	(tcu::TestContext&		testCtx,
													 const std::string&		name,
													 const std::string&		description,
													 const Params&			params)
									: TestCase		(testCtx, name, description)
									, m_validator	(params.imageFormat)
									, m_params		(params)
								{
								}

	virtual						~ImageAccessTestCase	(void) {}
	virtual TestInstance*		createInstance			(Context& ctx) const
								{
									return new ImageAccessTestInstance(ctx, m_validator, m_params);
								}
	virtual void				initPrograms			(vk::SourceCollections& programCollection) const;
	virtual void				checkSupport			(Context& context) const
								{
									// All variants require a protected-capable queue.
									checkProtectedQueueSupport(context);
								}

private:
	ImageValidator				m_validator;
	Params						m_params;
};
291
// Generates the GLSL sources for the test: a pass-through vertex shader plus
// an access-specific fragment shader for the fragment variants, or a single
// compute shader for the compute variants. Shader text is produced from
// string templates specialized with format/type strings derived from
// m_params.imageFormat.
void ImageAccessTestCase::initPrograms (vk::SourceCollections& programCollection) const
{
	const tcu::TextureFormat&	texFormat		= mapVkFormat(m_params.imageFormat);
	const std::string			imageFormat		= getShaderImageFormatQualifier(texFormat);
	const std::string			imageType		= getShaderSamplerOrImageType(texFormat, false);
	const std::string			samplerType		= getShaderSamplerOrImageType(texFormat, true);
	// Output/color vector type must match the sampled/loaded component type.
	const std::string			colorVecType	= isIntFormat(m_params.imageFormat)  ? "ivec4" :
												  isUintFormat(m_params.imageFormat) ? "uvec4" : "vec4";

	// The validator contributes its own programs to the collection.
	m_validator.initPrograms(programCollection);

	if (m_params.shaderType == glu::SHADERTYPE_FRAGMENT)
	{
		{
			// Vertex shader: pass-through position and texture coordinate.
			const char* vert =	"#version 450\n"
								"layout(location = 0) in mediump vec2 a_position;\n"
								"layout(location = 1) in mediump vec2 a_texCoord;\n"
								"layout(location = 0) out mediump vec2 v_texCoord;\n"
								"\n"
								"void main() {\n"
								"    gl_Position = vec4(a_position, 0.0, 1.0);\n"
								"    v_texCoord = a_texCoord;\n"
								"}\n";

			programCollection.glslSources.add("vert") << glu::VertexSource(vert);
		}

		{
			// Fragment shader: declarations and body depend on the access type.
			std::ostringstream frag;
			frag << "#version 450\n"
				 "layout(location = 0) in mediump vec2 v_texCoord;\n"
				 "layout(location = 0) out highp ${COLOR_VEC_TYPE} o_color;\n";

			// Resource declarations.
			switch (m_params.accessType)
			{
				case ACCESS_TYPE_SAMPLING:
				case ACCESS_TYPE_TEXEL_FETCH:
					frag << "layout(set = 0, binding = 0) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
					break;
				case ACCESS_TYPE_IMAGE_LOAD:
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_image;\n";
					break;
				case ACCESS_TYPE_IMAGE_STORE:
					// Two images: read from A, write to B.
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_imageA;\n";
					frag << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) writeonly uniform highp ${IMAGE_TYPE} u_imageB;\n";
					break;
				case ACCESS_TYPE_IMAGE_ATOMICS:
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) coherent uniform highp ${IMAGE_TYPE} u_image;\n";
					break;
				default:
					DE_FATAL("Impossible");
					break;
			}

			frag << "\n"
				 "void main() {\n";

			// Access-specific body; o_color always receives the accessed texel
			// so the result can be validated via the color attachment.
			switch (m_params.accessType)
			{
				case ACCESS_TYPE_SAMPLING:
					frag << "    o_color = texture(u_sampler, v_texCoord);\n";
					break;
				case ACCESS_TYPE_TEXEL_FETCH:
					frag << "    const highp int lod = 0;\n";
					frag << "    o_color = texelFetch(u_sampler, ivec2(v_texCoord), lod);\n";
					break;
				case ACCESS_TYPE_IMAGE_LOAD:
					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
					break;
				case ACCESS_TYPE_IMAGE_STORE:
					frag << "    o_color = imageLoad(u_imageA, ivec2(v_texCoord));\n";
					frag << "    imageStore(u_imageB, ivec2(v_texCoord), o_color);\n";
					break;
				case ACCESS_TYPE_IMAGE_ATOMICS:
					// Atomic operand is derived from the texel position so the
					// CPU reference (calculateAtomicRef) can reproduce it.
					frag << "    int gx = int(v_texCoord.x);\n";
					frag << "    int gy = int(v_texCoord.y);\n";
					frag << "    "
						 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
						 << "(u_image, ivec2(v_texCoord), "
						 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
						 << "(gx*gx + gy*gy));\n";
					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
					break;
				default:
					DE_FATAL("Impossible");
					break;
			}

			frag << "}\n";

			std::map<std::string, std::string> fragParams;

			fragParams["IMAGE_FORMAT"]		= imageFormat;
			fragParams["IMAGE_TYPE"]		= imageType;
			fragParams["SAMPLER_TYPE"]		= samplerType;
			fragParams["COLOR_VEC_TYPE"]	= colorVecType;

			programCollection.glslSources.add("frag") << glu::FragmentSource(tcu::StringTemplate(frag.str()).specialize(fragParams));
		}
	}
	else if (m_params.shaderType == glu::SHADERTYPE_COMPUTE)
	{
		// Compute shader: one invocation per texel writes into u_resultImage
		// (or atomically updates it for the atomics variant).
		std::ostringstream comp;
		comp << "#version 450\n"
			 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
			 "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) ${RES_MEM_QUALIFIER} uniform highp ${IMAGE_TYPE} u_resultImage;\n";

		// Additional source resource (none needed for atomics: it operates
		// directly on the result image).
		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
				comp << "layout(set = 0, binding = 1) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				comp << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_srcImage;\n";
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		comp << "\n"
			 "void main() {\n"
			 "    int gx = int(gl_GlobalInvocationID.x);\n"
			 "    int gy = int(gl_GlobalInvocationID.y);\n";

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
				// Normalized coordinates: sample at the texel center grid.
				comp << "    ${COLOR_VEC_TYPE} color = texture(u_sampler, vec2(float(gx)/" << de::toString((int)IMAGE_WIDTH) << ", float(gy)/" << de::toString((int)IMAGE_HEIGHT) << "));\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_TEXEL_FETCH:
				comp << "    const highp int lod = 0;\n";
				comp << "    ${COLOR_VEC_TYPE} color = texelFetch(u_sampler, ivec2(gx, gy), lod);\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				comp << "    ${COLOR_VEC_TYPE} color = imageLoad(u_srcImage, ivec2(gx, gy));\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				// Same position-derived operand as the fragment variant.
				comp << "    "
					 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
					 << "(u_resultImage, ivec2(gx, gy), "
					 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
					 << "(gx*gx + gy*gy));\n";
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		comp << "}\n";

		std::map<std::string, std::string> compParams;

		compParams["IMAGE_FORMAT"]		= imageFormat;
		compParams["IMAGE_TYPE"]		= imageType;
		compParams["SAMPLER_TYPE"]		= samplerType;
		compParams["COLOR_VEC_TYPE"]	= colorVecType;
		// Atomics need coherent read-modify-write; otherwise result is write-only.
		compParams["RES_MEM_QUALIFIER"]	= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? "coherent" : "writeonly";

		programCollection.glslSources.add("comp") << glu::ComputeSource(tcu::StringTemplate(comp.str()).specialize(compParams));
	}
	else
		DE_FATAL("Impossible");
}
467
ImageAccessTestInstance::ImageAccessTestInstance (Context&				ctx,
												  const ImageValidator&	validator,
												  const Params&			params)
	// VK_EXT_pipeline_protected_access is only requested when the test
	// parametrization actually uses it.
	: ProtectedTestInstance(ctx, params.pipelineProtectedAccess ? std::vector<std::string>({ "VK_EXT_pipeline_protected_access" }) : std::vector<std::string>())
	, m_validator	(validator)
	, m_params		(params)
{
}
476
createTestTexture2D(void)477 de::MovePtr<tcu::Texture2D> ImageAccessTestInstance::createTestTexture2D (void)
478 {
479 const tcu::TextureFormat texFmt = mapVkFormat(m_params.imageFormat);
480 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(texFmt);
481 de::MovePtr<tcu::Texture2D> texture2D (new tcu::Texture2D(texFmt, IMAGE_WIDTH, IMAGE_HEIGHT));
482
483 // \note generate only the base level
484 texture2D->allocLevel(0);
485
486 const tcu::PixelBufferAccess& level = texture2D->getLevel(0);
487
488 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
489 {
490 // use a smaller range than the format would allow
491 const float cMin = isIntFormat(m_params.imageFormat) ? -1000.0f : 0.0f;
492 const float cMax = +1000.0f;
493
494 fillWithRandomColorTiles(level, tcu::Vec4(cMin, 0, 0, 0), tcu::Vec4(cMax, 0, 0, 0), getSeedValue(m_params));
495 }
496 else
497 fillWithRandomColorTiles(level, fmtInfo.valueMin, fmtInfo.valueMax, getSeedValue(m_params));
498
499 return texture2D;
500 }
501
iterate(void)502 tcu::TestStatus ImageAccessTestInstance::iterate (void)
503 {
504 switch (m_params.shaderType)
505 {
506 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest();
507 case glu::SHADERTYPE_COMPUTE: return executeComputeTest();
508 default:
509 DE_FATAL("Impossible");
510 return tcu::TestStatus::fail("");
511 }
512 }
513
// Compute variant: uploads the reference into a protected source image,
// dispatches one invocation per texel performing the parametrized access, and
// validates the resulting image (source image for atomics, destination
// otherwise) against the CPU reference.
tcu::TestStatus ImageAccessTestInstance::executeComputeTest (void)
{
	ProtectedContext&			ctx					(m_protectedContext);
	const vk::DeviceInterface&	vk					= ctx.getDeviceInterface();
	const vk::VkDevice			device				= ctx.getDevice();
	const vk::VkQueue			queue				= ctx.getQueue();
	const deUint32				queueFamilyIndex	= ctx.getQueueFamilyIndex();

	vk::Unique<vk::VkCommandPool>	cmdPool			(makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));

	de::MovePtr<tcu::Texture2D>	texture2D	= createTestTexture2D();
	// Nearest-filtered clamp-to-edge sampler; also used as reference sampler
	// during validation.
	const tcu::Sampler			refSampler	= tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
														   tcu::Sampler::NEAREST, tcu::Sampler::NEAREST,
														   00.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
														   0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);

	vk::Unique<vk::VkShaderModule>	computeShader	(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("comp"), 0));

	de::MovePtr<vk::ImageWithMemory>	imageSrc;
	de::MovePtr<vk::ImageWithMemory>	imageDst;
	vk::Move<vk::VkSampler>				sampler;
	vk::Move<vk::VkImageView>			imageViewSrc;
	vk::Move<vk::VkImageView>			imageViewDst;

	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
	vk::Move<vk::VkDescriptorPool>		descriptorPool;
	vk::Move<vk::VkDescriptorSet>		descriptorSet;

	// Create src and dst images
	{
		vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT	|
												vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT	|
												vk::VK_IMAGE_USAGE_SAMPLED_BIT		|
												vk::VK_IMAGE_USAGE_STORAGE_BIT;

		imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
								 IMAGE_WIDTH, IMAGE_HEIGHT,
								 m_params.imageFormat,
								 imageUsageFlags);

		// Atomics operate in place on the source image; no destination needed.
		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
		{
			imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
									 IMAGE_WIDTH, IMAGE_HEIGHT,
									 m_params.imageFormat,
									 imageUsageFlags);
		}
	}

	// Upload source image
	{
		// Reference data is first uploaded to an unprotected staging image,
		// then copied into the (possibly protected) source image.
		de::MovePtr<vk::ImageWithMemory> unprotectedImage = createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
																		  IMAGE_WIDTH, IMAGE_HEIGHT,
																		  m_params.imageFormat,
																		  vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);

		// Upload data to an unprotected image
		uploadImage(m_protectedContext, **unprotectedImage, *texture2D);

		// Select vkImageLayout based upon accessType
		vk::VkImageLayout imageSrcLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
			{
				// Sampled through a combined image sampler.
				imageSrcLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				break;
			}
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
			case ACCESS_TYPE_IMAGE_ATOMICS:
			{
				// Storage image access requires GENERAL layout.
				imageSrcLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
				break;
			}
			default:
				DE_FATAL("Impossible");
				break;
		}

		// Copy unprotected image to protected image
		copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageSrcLayout, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.protectionMode);
	}

	// Clear dst image
	// NOTE(review): dst is only cleared when protection is enabled — presumably
	// clearImage targets the protected context; confirm for the disabled case.
	if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS && m_params.protectionMode == PROTECTION_ENABLED)
		clearImage(m_protectedContext, **imageDst);

	// Create descriptors
	{
		vk::DescriptorSetLayoutBuilder	layoutBuilder;
		vk::DescriptorPoolBuilder		poolBuilder;

		// Binding 0 is always the result storage image; binding 1 (when
		// present) is the source sampler or source storage image.
		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_COMPUTE_BIT, DE_NULL);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		descriptorSetLayout	= layoutBuilder.build(vk, device);
		descriptorPool		= poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
		descriptorSet		= makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
	}

	// Create pipeline layout
	vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));

	// Create sampler and image views
	{
		if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
		{
			const tcu::TextureFormat		texFormat		= mapVkFormat(m_params.imageFormat);
			const vk::VkSamplerCreateInfo	samplerParams	= vk::mapSampler(refSampler, texFormat);

			sampler = createSampler(vk, device, &samplerParams);
		}

		imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);

		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
			imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
	}

	// Update descriptor set information
	{
		vk::DescriptorSetUpdateBuilder updateBuilder;

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
			{
				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
				vk::VkDescriptorImageInfo	descSampledImgSrc	= makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImgSrc);
				break;
			}
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
			{
				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
				vk::VkDescriptorImageInfo	descStorageImgSrc	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
				break;
			}
			case ACCESS_TYPE_IMAGE_ATOMICS:
			{
				// Atomics read-modify-write the source image directly.
				vk::VkDescriptorImageInfo	descStorageImg		= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
				break;
			}
			default:
				DE_FATAL("Impossible");
				break;
		}

		updateBuilder.update(vk, device);
	}

	// Create validation compute commands & submit
	{
		const vk::Unique<vk::VkFence>	fence		(vk::createFence(vk, device));
		vk::Unique<vk::VkPipeline>		pipeline	(makeComputePipeline(vk, device, *pipelineLayout, m_params.flags, *computeShader, (vk::VkPipelineShaderStageCreateFlags)0u));
		vk::Unique<vk::VkCommandBuffer>	cmdBuffer	(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

		beginCommandBuffer(vk, *cmdBuffer);

		vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
		vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
		// One workgroup (local size 1x1x1) per texel.
		vk.cmdDispatch(*cmdBuffer, (deUint32)IMAGE_WIDTH, (deUint32)IMAGE_HEIGHT, 1u);
		endCommandBuffer(vk, *cmdBuffer);

		// Wait indefinitely for completion.
		VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Calculate reference image
	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
		calculateAtomicRef(*texture2D);

	// Validate result
	{
		const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc : **imageDst;

		return validateResult(resultImage, vk::VK_IMAGE_LAYOUT_GENERAL, *texture2D, refSampler);
	}
}
725
executeFragmentTest(void)726 tcu::TestStatus ImageAccessTestInstance::executeFragmentTest (void)
727 {
728 ProtectedContext& ctx (m_protectedContext);
729 const vk::DeviceInterface& vk = ctx.getDeviceInterface();
730 const vk::VkDevice device = ctx.getDevice();
731 const vk::VkQueue queue = ctx.getQueue();
732 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex();
733
734 // Create output image
735 de::MovePtr<vk::ImageWithMemory> colorImage (createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
736 RENDER_WIDTH, RENDER_HEIGHT,
737 m_params.imageFormat,
738 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
739 vk::Unique<vk::VkImageView> colorImageView (createImageView(ctx, **colorImage, m_params.imageFormat));
740
741 vk::Unique<vk::VkRenderPass> renderPass (createRenderPass(ctx, m_params.imageFormat));
742 vk::Unique<vk::VkFramebuffer> framebuffer (createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));
743
744 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));
745 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
746
747 de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
748 const tcu::Sampler refSampler = tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
749 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST,
750 00.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
751 0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);
752
753 vk::Move<vk::VkShaderModule> vertexShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0);
754 vk::Move<vk::VkShaderModule> fragmentShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("frag"), 0);
755
756 de::MovePtr<vk::ImageWithMemory> imageSrc;
757 de::MovePtr<vk::ImageWithMemory> imageDst;
758 vk::Move<vk::VkSampler> sampler;
759 vk::Move<vk::VkImageView> imageViewSrc;
760 vk::Move<vk::VkImageView> imageViewDst;
761
762 vk::Move<vk::VkPipeline> graphicsPipeline;
763 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
764 vk::Move<vk::VkDescriptorPool> descriptorPool;
765 vk::Move<vk::VkDescriptorSet> descriptorSet;
766
767 // Create src and dst images
768 {
769 vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
770 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
771 vk::VK_IMAGE_USAGE_SAMPLED_BIT;
772
773 switch (m_params.accessType)
774 {
775 case ACCESS_TYPE_IMAGE_LOAD:
776 case ACCESS_TYPE_IMAGE_STORE:
777 case ACCESS_TYPE_IMAGE_ATOMICS:
778 imageUsageFlags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
779 break;
780 default:
781 break;
782 }
783
784 imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
785 IMAGE_WIDTH, IMAGE_HEIGHT,
786 m_params.imageFormat,
787 imageUsageFlags);
788
789 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
790 {
791 imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex,
792 IMAGE_WIDTH, IMAGE_HEIGHT,
793 m_params.imageFormat,
794 imageUsageFlags);
795 }
796 }
797
798 // Select vkImageLayout based upon accessType
799 vk::VkImageLayout imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
800
801 switch (m_params.accessType)
802 {
803 case ACCESS_TYPE_SAMPLING:
804 case ACCESS_TYPE_TEXEL_FETCH:
805 {
806 imageLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
807 break;
808 }
809 case ACCESS_TYPE_IMAGE_LOAD:
810 case ACCESS_TYPE_IMAGE_STORE:
811 case ACCESS_TYPE_IMAGE_ATOMICS:
812 {
813 imageLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
814 break;
815 }
816 default:
817 DE_FATAL("Impossible");
818 break;
819 }
820
821 // Upload source image
822 {
823 de::MovePtr<vk::ImageWithMemory> unprotectedImage = createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
824 IMAGE_WIDTH, IMAGE_HEIGHT,
825 m_params.imageFormat,
826 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
827
828 // Upload data to an unprotected image
829 uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
830
831 // Copy unprotected image to protected image
832 copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageLayout, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.protectionMode);
833 }
834
835 // Clear dst image
836 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE && m_params.protectionMode == PROTECTION_ENABLED)
837 clearImage(m_protectedContext, **imageDst);
838
839 // Create descriptors
840 {
841 vk::DescriptorSetLayoutBuilder layoutBuilder;
842 vk::DescriptorPoolBuilder poolBuilder;
843
844 switch (m_params.accessType)
845 {
846 case ACCESS_TYPE_SAMPLING:
847 case ACCESS_TYPE_TEXEL_FETCH:
848 layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
849 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
850 break;
851 case ACCESS_TYPE_IMAGE_LOAD:
852 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
853 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
854 break;
855 case ACCESS_TYPE_IMAGE_STORE:
856 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
857 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
858 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
859 break;
860 case ACCESS_TYPE_IMAGE_ATOMICS:
861 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
862 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
863 break;
864 default:
865 DE_FATAL("Impossible");
866 break;
867 }
868
869 descriptorSetLayout = layoutBuilder.build(vk, device);
870 descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
871 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
872 }
873
874 // Create pipeline layout
875 vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
876
877 // Create sampler and image views
878 {
879 if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
880 {
881 const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
882 const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);
883
884 sampler = createSampler(vk, device, &samplerParams);
885 }
886
887 imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
888
889 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
890 imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
891 }
892
893 // Update descriptor set information
894 {
895 vk::DescriptorSetUpdateBuilder updateBuilder;
896
897 switch (m_params.accessType)
898 {
899 case ACCESS_TYPE_SAMPLING:
900 case ACCESS_TYPE_TEXEL_FETCH:
901 {
902 vk::VkDescriptorImageInfo descSampledImg = makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
903
904 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImg);
905 break;
906 }
907 case ACCESS_TYPE_IMAGE_LOAD:
908 {
909 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
910
911 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
912 break;
913 }
914 case ACCESS_TYPE_IMAGE_STORE:
915 {
916 vk::VkDescriptorImageInfo descStorageImgSrc = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
917 vk::VkDescriptorImageInfo descStorageImgDst = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
918
919 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
920 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
921 break;
922 }
923 case ACCESS_TYPE_IMAGE_ATOMICS:
924 {
925 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
926
927 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
928 break;
929 }
930 default:
931 DE_FATAL("Impossible");
932 break;
933 }
934
935 updateBuilder.update(vk, device);
936 }
937
938 // Create vertex buffer and vertex input descriptors
939 VertexBindings vertexBindings;
940 VertexAttribs vertexAttribs;
941 de::MovePtr<vk::BufferWithMemory> vertexBuffer;
942 {
943 const float positions[] =
944 {
945 -1.0f, -1.0f,
946 -1.0f, +1.0f,
947 +1.0f, -1.0f,
948 +1.0f, +1.0f,
949 };
950
951 std::vector<float> texCoord;
952
953 {
954 const tcu::Vec2 minCoords (0.0f, 0.0f);
955 const tcu::Vec2 maxCoords = m_params.accessType == ACCESS_TYPE_SAMPLING ?
956 tcu::Vec2(1.0f, 1.0f) :
957 tcu::Vec2((float)IMAGE_WIDTH - 0.1f, (float)IMAGE_HEIGHT - 0.1f);
958
959 glu::TextureTestUtil::computeQuadTexCoord2D(texCoord, minCoords, maxCoords);
960 }
961
962 const deUint32 vertexPositionStrideSize = (deUint32)sizeof(tcu::Vec2);
963 const deUint32 vertexTextureStrideSize = (deUint32)sizeof(tcu::Vec2);
964 const deUint32 positionDataSize = 4 * vertexPositionStrideSize;
965 const deUint32 textureCoordDataSize = 4 * vertexTextureStrideSize;
966 const deUint32 vertexBufferSize = positionDataSize + textureCoordDataSize;
967
968 {
969 const vk::VkVertexInputBindingDescription vertexInputBindingDescriptions[2] =
970 {
971 {
972 0u, // deUint32 binding;
973 vertexPositionStrideSize, // deUint32 strideInBytes;
974 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
975 },
976 {
977 1u, // deUint32 binding;
978 vertexTextureStrideSize, // deUint32 strideInBytes;
979 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
980 }
981 };
982 vertexBindings.push_back(vertexInputBindingDescriptions[0]);
983 vertexBindings.push_back(vertexInputBindingDescriptions[1]);
984
985 const vk::VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] =
986 {
987 {
988 0u, // deUint32 location;
989 0u, // deUint32 binding;
990 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
991 0u // deUint32 offsetInBytes;
992 },
993 {
994 1u, // deUint32 location;
995 1u, // deUint32 binding;
996 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
997 positionDataSize // deUint32 offsetInBytes;
998 }
999 };
1000 vertexAttribs.push_back(vertexInputAttributeDescriptions[0]);
1001 vertexAttribs.push_back(vertexInputAttributeDescriptions[1]);
1002 }
1003
1004 vertexBuffer = makeBuffer(ctx,
1005 PROTECTION_DISABLED,
1006 queueFamilyIndex,
1007 vertexBufferSize,
1008 vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
1009 vk::MemoryRequirement::HostVisible);
1010
1011 deMemcpy(vertexBuffer->getAllocation().getHostPtr(), positions, positionDataSize);
1012 deMemcpy(reinterpret_cast<deUint8*>(vertexBuffer->getAllocation().getHostPtr()) + positionDataSize, texCoord.data(), textureCoordDataSize);
1013 vk::flushAlloc(vk, device, vertexBuffer->getAllocation());
1014 }
1015
1016 // Create pipeline
1017 graphicsPipeline = makeGraphicsPipeline(vk,
1018 device,
1019 *pipelineLayout,
1020 *renderPass,
1021 *vertexShader,
1022 *fragmentShader,
1023 vertexBindings,
1024 vertexAttribs,
1025 tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
1026 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1027 m_params.flags);
1028
1029 // Begin cmd buffer
1030 beginCommandBuffer(vk, *cmdBuffer);
1031
1032 // Start image barrier
1033 {
1034 const vk::VkImageMemoryBarrier startImgBarrier =
1035 {
1036 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1037 DE_NULL, // pNext
1038 0, // srcAccessMask
1039 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
1040 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1041 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1042 queueFamilyIndex, // srcQueueFamilyIndex
1043 queueFamilyIndex, // dstQueueFamilyIndex
1044 **colorImage, // image
1045 {
1046 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1047 0u, // baseMipLevel
1048 1u, // mipLevels
1049 0u, // baseArraySlice
1050 1u, // subresourceRange
1051 }
1052 };
1053
1054 vk.cmdPipelineBarrier(*cmdBuffer,
1055 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
1056 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
1057 (vk::VkDependencyFlags)0,
1058 0, (const vk::VkMemoryBarrier*)DE_NULL,
1059 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1060 1, &startImgBarrier);
1061 }
1062
1063 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.0f));
1064
1065 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
1066 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1067
1068 {
1069 const vk::VkDeviceSize vertexBufferOffset = 0;
1070
1071 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1072 vk.cmdBindVertexBuffers(*cmdBuffer, 1u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1073 }
1074
1075 vk.cmdDraw(*cmdBuffer, /*vertexCount*/ 4u, 1u, 0u, 1u);
1076
1077 endRenderPass(vk, *cmdBuffer);
1078
1079 {
1080 const vk::VkImageMemoryBarrier endImgBarrier =
1081 {
1082 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1083 DE_NULL, // pNext
1084 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1085 vk::VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
1086 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1087 imageLayout, // newLayout
1088 queueFamilyIndex, // srcQueueFamilyIndex
1089 queueFamilyIndex, // dstQueueFamilyIndex
1090 **colorImage, // image
1091 {
1092 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1093 0u, // baseMipLevel
1094 1u, // mipLevels
1095 0u, // baseArraySlice
1096 1u, // subresourceRange
1097 }
1098 };
1099 vk.cmdPipelineBarrier(*cmdBuffer,
1100 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
1101 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, // dstStageMask
1102 (vk::VkDependencyFlags)0,
1103 0, (const vk::VkMemoryBarrier*)DE_NULL,
1104 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1105 1, &endImgBarrier);
1106 }
1107
1108 endCommandBuffer(vk, *cmdBuffer);
1109
1110 // Submit command buffer
1111 {
1112 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
1113 VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
1114 }
1115
1116 // Calculate reference image
1117 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1118 calculateAtomicRef(*texture2D);
1119
1120 // Validate result
1121 {
1122 const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc :
1123 m_params.accessType == ACCESS_TYPE_IMAGE_STORE ? **imageDst : **colorImage;
1124
1125 return validateResult(resultImage, imageLayout, *texture2D, refSampler);
1126 }
1127 }
1128
calculateAtomicRef(tcu::Texture2D & texture2D)1129 void ImageAccessTestInstance::calculateAtomicRef (tcu::Texture2D& texture2D)
1130 {
1131 DE_ASSERT(m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS);
1132
1133 const tcu::PixelBufferAccess& reference = texture2D.getLevel(0);
1134
1135 for (int x = 0; x < reference.getWidth(); ++x)
1136 for (int y = 0; y < reference.getHeight(); ++y)
1137 {
1138 const deInt32 oldX = reference.getPixelInt(x, y).x();
1139 const deInt32 atomicArg = x*x + y*y;
1140 const deInt32 newX = computeBinaryAtomicOperationResult(m_params.atomicOperation, oldX, atomicArg);
1141
1142 reference.setPixel(tcu::IVec4(newX, 0, 0, 0), x, y);
1143 }
1144 }
1145
validateResult(vk::VkImage image,vk::VkImageLayout imageLayout,const tcu::Texture2D & texture2D,const tcu::Sampler & refSampler)1146 tcu::TestStatus ImageAccessTestInstance::validateResult (vk::VkImage image, vk::VkImageLayout imageLayout, const tcu::Texture2D& texture2D, const tcu::Sampler& refSampler)
1147 {
1148 de::Random rnd (getSeedValue(m_params));
1149 ValidationData refData;
1150
1151 for (int ndx = 0; ndx < 4; ++ndx)
1152 {
1153 const float lod = 0.0f;
1154 const float cx = rnd.getFloat(0.0f, 1.0f);
1155 const float cy = rnd.getFloat(0.0f, 1.0f);
1156
1157 refData.coords[ndx] = tcu::Vec4(cx, cy, 0.0f, 0.0f);
1158 refData.values[ndx] = texture2D.sample(refSampler, cx, cy, lod);
1159 }
1160
1161 if (!m_validator.validateImage(m_protectedContext, refData, image, m_params.imageFormat, imageLayout))
1162 return tcu::TestStatus::fail("Something went really wrong");
1163 else
1164 return tcu::TestStatus::pass("Everything went OK");
1165 }
1166
1167 } // anonymous
1168
createShaderImageAccessTests(tcu::TestContext & testCtx)1169 tcu::TestCaseGroup* createShaderImageAccessTests (tcu::TestContext& testCtx)
1170 {
1171 de::MovePtr<tcu::TestCaseGroup> accessGroup (new tcu::TestCaseGroup(testCtx, "access", "Shader Image Access Tests"));
1172
1173 static const struct
1174 {
1175 glu::ShaderType type;
1176 const char* name;
1177 const char* desc;
1178 } shaderTypes[] =
1179 {
1180 { glu::SHADERTYPE_FRAGMENT, "fragment", "Image access from fragment shader" },
1181 { glu::SHADERTYPE_COMPUTE, "compute", "Image access from compute shader" },
1182 };
1183
1184 static const struct
1185 {
1186 AccessType type;
1187 const char* name;
1188 const char* desc;
1189 } accessTypes[] =
1190 {
1191 { ACCESS_TYPE_SAMPLING, "sampling", "Sampling test" },
1192 { ACCESS_TYPE_TEXEL_FETCH, "texelfetch", "Texel fetch test" },
1193 { ACCESS_TYPE_IMAGE_LOAD, "imageload", "Image load test" },
1194 { ACCESS_TYPE_IMAGE_STORE, "imagestore", "Image store test" },
1195 { ACCESS_TYPE_IMAGE_ATOMICS, "imageatomics", "Image atomics test" },
1196 };
1197
1198 static const struct
1199 {
1200 vk::VkFormat format;
1201 const char* name;
1202 } formats[] =
1203 {
1204 { vk::VK_FORMAT_R8G8B8A8_UNORM, "rgba8" },
1205 { vk::VK_FORMAT_R32_SINT, "r32i" },
1206 { vk::VK_FORMAT_R32_UINT, "r32ui" },
1207 };
1208
1209 static const struct
1210 {
1211 bool pipelineProtectedAccess;
1212 const char* name;
1213 } protectedAccess[] =
1214 {
1215 { false, "default"},
1216 #ifndef CTS_USES_VULKANSC
1217 { true, "protected_access"},
1218 #endif
1219 };
1220 static const struct
1221 {
1222 vk::VkPipelineCreateFlags flags;
1223 const char* name;
1224 } flags[] =
1225 {
1226 { (vk::VkPipelineCreateFlagBits)0u, "none"},
1227 #ifndef CTS_USES_VULKANSC
1228 { vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
1229 { vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, "no_protected_access"},
1230 #endif
1231 };
1232
1233 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderTypeNdx)
1234 {
1235 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1236 de::MovePtr<tcu::TestCaseGroup> shaderGroup(new tcu::TestCaseGroup(testCtx, shaderTypes[shaderTypeNdx].name, shaderTypes[shaderTypeNdx].desc));
1237
1238 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx) {
1239 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name, ""));
1240 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx) {
1241 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name, ""));
1242 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].flags != 0u) continue;
1243 for (int accessNdx = 0; accessNdx < DE_LENGTH_OF_ARRAY(accessTypes); ++accessNdx)
1244 {
1245 const AccessType accessType = accessTypes[accessNdx].type;
1246
1247 if (shaderType == glu::SHADERTYPE_COMPUTE && accessType == ACCESS_TYPE_IMAGE_STORE) // \note already tested in other tests
1248 continue;
1249
1250 de::MovePtr<tcu::TestCaseGroup> accessTypeGroup(new tcu::TestCaseGroup(testCtx, accessTypes[accessNdx].name, accessTypes[accessNdx].desc));
1251
1252 if (accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1253 {
1254 for (deUint32 atomicOpI = 0; atomicOpI < ATOMIC_OPERATION_LAST; ++atomicOpI)
1255 {
1256 const AtomicOperation atomicOp = (AtomicOperation)atomicOpI;
1257 de::MovePtr<tcu::TestCaseGroup> operationGroup(new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(atomicOp).c_str(), ""));
1258
1259 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1260 {
1261 const vk::VkFormat format = formats[formatNdx].format;
1262
1263 if (format != vk::VK_FORMAT_R32_UINT && format != vk::VK_FORMAT_R32_SINT)
1264 continue;
1265
1266 operationGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format, atomicOp, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].flags)));
1267 }
1268
1269 accessTypeGroup->addChild(operationGroup.release());
1270 }
1271 }
1272 else
1273 {
1274 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1275 {
1276 const vk::VkFormat format = formats[formatNdx].format;
1277
1278 accessTypeGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format, ATOMIC_OPERATION_LAST, protectedAccess[protectedAccessNdx].pipelineProtectedAccess, flags[flagsNdx].flags)));
1279 }
1280 }
1281
1282 flagsGroup->addChild(accessTypeGroup.release());
1283 }
1284 protectedAccessGroup->addChild(flagsGroup.release());
1285 }
1286 shaderGroup->addChild(protectedAccessGroup.release());
1287 }
1288
1289 accessGroup->addChild(shaderGroup.release());
1290 }
1291
1292
1293 return accessGroup.release();
1294 }
1295
1296 } // ProtectedMem
1297 } // vkt
1298