1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory image access tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemShaderImageAccessTests.hpp"
26
27 #include "vktProtectedMemContext.hpp"
28 #include "vktProtectedMemUtils.hpp"
29 #include "vktProtectedMemImageValidator.hpp"
30 #include "vktTestCase.hpp"
31 #include "vktTestGroupUtil.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkImageUtil.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkObjUtil.hpp"
39
40 #include "tcuTestLog.hpp"
41 #include "tcuVector.hpp"
42 #include "tcuTextureUtil.hpp"
43 #include "tcuStringTemplate.hpp"
44
45 #include "gluTextureTestUtil.hpp"
46
47 #include "deRandom.hpp"
48
49 namespace vkt
50 {
51 namespace ProtectedMem
52 {
53
54 namespace
55 {
56
// Render-target and test-image dimensions. The sizes are kept equal so that
// fragment/invocation coordinates map 1:1 onto image texels.
enum
{
	RENDER_WIDTH	= 128,
	RENDER_HEIGHT	= 128,
	IMAGE_WIDTH		= 128,
	IMAGE_HEIGHT	= 128,
};
64
// How the shader accesses the protected image under test.
enum AccessType
{
	ACCESS_TYPE_SAMPLING = 0,	// texture() via a combined image sampler
	ACCESS_TYPE_TEXEL_FETCH,	// texelFetch() via a combined image sampler
	ACCESS_TYPE_IMAGE_LOAD,		// imageLoad() from a storage image
	ACCESS_TYPE_IMAGE_STORE,	// imageLoad() from one storage image + imageStore() to another
	ACCESS_TYPE_IMAGE_ATOMICS,	// imageAtomic*() on a single storage image

	ACCESS_TYPE_LAST
};
75
// Atomic operation used when the access type is ACCESS_TYPE_IMAGE_ATOMICS.
// For all other access types Params carries ATOMIC_OPERATION_LAST.
enum AtomicOperation
{
	ATOMIC_OPERATION_ADD = 0,
	ATOMIC_OPERATION_MIN,
	ATOMIC_OPERATION_MAX,
	ATOMIC_OPERATION_AND,
	ATOMIC_OPERATION_OR,
	ATOMIC_OPERATION_XOR,
	ATOMIC_OPERATION_EXCHANGE,

	ATOMIC_OPERATION_LAST
};
88
// Test configuration: which shader stage performs the access, the kind of
// access, the image format, and (atomic cases only) the atomic operation.
struct Params
{
	glu::ShaderType	shaderType;
	AccessType		accessType;
	vk::VkFormat	imageFormat;
	AtomicOperation	atomicOperation;	// ATOMIC_OPERATION_LAST when not an atomics test

	// Default-constructed params are intentionally invalid (all *_LAST /
	// VK_FORMAT_UNDEFINED) so accidental use is easy to spot.
	Params (void)
		: shaderType		(glu::SHADERTYPE_LAST)
		, accessType		(ACCESS_TYPE_LAST)
		, imageFormat		(vk::VK_FORMAT_UNDEFINED)
		, atomicOperation	(ATOMIC_OPERATION_LAST)
	{}

	Params (const glu::ShaderType	shaderType_,
			const AccessType		accessType_,
			const vk::VkFormat		imageFormat_,
			const AtomicOperation	atomicOperation_ = ATOMIC_OPERATION_LAST)
		: shaderType		(shaderType_)
		, accessType		(accessType_)
		, imageFormat		(imageFormat_)
		, atomicOperation	(atomicOperation_)
	{}
};
113
getSeedValue(const Params & params)114 static deUint32 getSeedValue (const Params& params)
115 {
116 return deInt32Hash(params.shaderType) ^ deInt32Hash(params.accessType) ^ deInt32Hash(params.imageFormat) ^ deInt32Hash(params.atomicOperation);
117 }
118
getAtomicOperationCaseName(const AtomicOperation op)119 static std::string getAtomicOperationCaseName (const AtomicOperation op)
120 {
121 switch (op)
122 {
123 case ATOMIC_OPERATION_ADD: return "add";
124 case ATOMIC_OPERATION_MIN: return "min";
125 case ATOMIC_OPERATION_MAX: return "max";
126 case ATOMIC_OPERATION_AND: return "and";
127 case ATOMIC_OPERATION_OR: return "or";
128 case ATOMIC_OPERATION_XOR: return "xor";
129 case ATOMIC_OPERATION_EXCHANGE: return "exchange";
130 default:
131 DE_FATAL("Impossible");
132 return "";
133 }
134 }
135
getAtomicOperationShaderFuncName(const AtomicOperation op)136 static std::string getAtomicOperationShaderFuncName (const AtomicOperation op)
137 {
138 switch (op)
139 {
140 case ATOMIC_OPERATION_ADD: return "imageAtomicAdd";
141 case ATOMIC_OPERATION_MIN: return "imageAtomicMin";
142 case ATOMIC_OPERATION_MAX: return "imageAtomicMax";
143 case ATOMIC_OPERATION_AND: return "imageAtomicAnd";
144 case ATOMIC_OPERATION_OR: return "imageAtomicOr";
145 case ATOMIC_OPERATION_XOR: return "imageAtomicXor";
146 case ATOMIC_OPERATION_EXCHANGE: return "imageAtomicExchange";
147 default:
148 DE_FATAL("Impossible");
149 return "";
150 }
151 }
152
153 //! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
computeBinaryAtomicOperationResult(const AtomicOperation op,const deInt32 a,const deInt32 b)154 static deInt32 computeBinaryAtomicOperationResult (const AtomicOperation op, const deInt32 a, const deInt32 b)
155 {
156 switch (op)
157 {
158 case ATOMIC_OPERATION_ADD: return a + b;
159 case ATOMIC_OPERATION_MIN: return de::min(a, b);
160 case ATOMIC_OPERATION_MAX: return de::max(a, b);
161 case ATOMIC_OPERATION_AND: return a & b;
162 case ATOMIC_OPERATION_OR: return a | b;
163 case ATOMIC_OPERATION_XOR: return a ^ b;
164 case ATOMIC_OPERATION_EXCHANGE: return b;
165 default:
166 DE_FATAL("Impossible");
167 return -1;
168 }
169 }
170
getShaderImageFormatQualifier(const tcu::TextureFormat & format)171 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
172 {
173 const char* orderPart;
174 const char* typePart;
175
176 switch (format.order)
177 {
178 case tcu::TextureFormat::R: orderPart = "r"; break;
179 case tcu::TextureFormat::RG: orderPart = "rg"; break;
180 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
181 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
182
183 default:
184 DE_FATAL("Impossible");
185 orderPart = DE_NULL;
186 }
187
188 switch (format.type)
189 {
190 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
191 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
192
193 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
194 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
195 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
196
197 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
198 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
199 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
200
201 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
202 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
203
204 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
205 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
206
207 default:
208 DE_FATAL("Impossible");
209 typePart = DE_NULL;
210 }
211
212 return std::string() + orderPart + typePart;
213 }
214
getShaderSamplerOrImageType(const tcu::TextureFormat & format,bool isSampler)215 static std::string getShaderSamplerOrImageType (const tcu::TextureFormat& format, bool isSampler)
216 {
217 const std::string formatPart = tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
218 tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
219
220 return formatPart + (isSampler ? "sampler2D" : "image2D");
221 }
222
// Test instance: performs the protected image access (fragment or compute
// variant, per Params) and validates the resulting protected image against a
// CPU-side reference texture via the ImageValidator.
class ImageAccessTestInstance : public ProtectedTestInstance
{
public:
								ImageAccessTestInstance	(Context&				ctx,
														 const ImageValidator&	validator,
														 const Params&			params);
	virtual tcu::TestStatus		iterate					(void);

private:
	// Builds the reference texture (base level only) filled with random color tiles.
	de::MovePtr<tcu::Texture2D>	createTestTexture2D		(void);
	// Updates texture2D in place to the expected contents for the atomics case.
	void						calculateAtomicRef		(tcu::Texture2D&		texture2D);
	// Compares the given device image against the reference texture.
	tcu::TestStatus				validateResult			(vk::VkImage			image,
														 vk::VkImageLayout		imageLayout,
														 const tcu::Texture2D&	texture2D,
														 const tcu::Sampler&	refSampler);

	tcu::TestStatus				executeFragmentTest		(void);
	tcu::TestStatus				executeComputeTest		(void);

	const ImageValidator&		m_validator;	// owned by the test case, outlives this instance
	const Params&				m_params;		// owned by the test case, outlives this instance
};
245
// Test case: owns the parameters and the image validator (both referenced by
// the instances it creates), compiles the shader sources, and checks that a
// protected queue is available before running.
class ImageAccessTestCase : public TestCase
{
public:
							ImageAccessTestCase	(tcu::TestContext&	testCtx,
												 const std::string&	name,
												 const std::string&	description,
												 const Params&		params)
								: TestCase		(testCtx, name, description)
								, m_validator	(params.imageFormat)
								, m_params		(params)
							{
							}

	virtual					~ImageAccessTestCase	(void) {}
	virtual TestInstance*	createInstance			(Context& ctx) const
							{
								return new ImageAccessTestInstance(ctx, m_validator, m_params);
							}
	virtual void			initPrograms			(vk::SourceCollections& programCollection) const;
	virtual void			checkSupport			(Context& context) const
							{
								// Skip the test if the implementation offers no protected queue.
								checkProtectedQueueSupport(context);
							}

private:
	ImageValidator			m_validator;
	Params					m_params;
};
274
// Generates the GLSL sources for this test configuration. For fragment tests
// a pass-through vertex shader plus an access-specific fragment shader are
// emitted; for compute tests a single compute shader is emitted. The image
// format / image type / sampler type / color vector type are substituted into
// the shader templates via tcu::StringTemplate.
void ImageAccessTestCase::initPrograms (vk::SourceCollections& programCollection) const
{
	const tcu::TextureFormat&	texFormat		= mapVkFormat(m_params.imageFormat);
	const std::string			imageFormat		= getShaderImageFormatQualifier(texFormat);
	const std::string			imageType		= getShaderSamplerOrImageType(texFormat, false);
	const std::string			samplerType		= getShaderSamplerOrImageType(texFormat, true);
	const std::string			colorVecType	= isIntFormat(m_params.imageFormat)  ? "ivec4" :
												  isUintFormat(m_params.imageFormat) ? "uvec4" : "vec4";

	// The validator adds its own programs (used later to check the protected image).
	m_validator.initPrograms(programCollection);

	if (m_params.shaderType == glu::SHADERTYPE_FRAGMENT)
	{
		{
			// Vertex shader: pass-through position and texture coordinate.
			const char* vert =	"#version 450\n"
								"layout(location = 0) in mediump vec2 a_position;\n"
								"layout(location = 1) in mediump vec2 a_texCoord;\n"
								"layout(location = 0) out mediump vec2 v_texCoord;\n"
								"\n"
								"void main() {\n"
								"    gl_Position = vec4(a_position, 0.0, 1.0);\n"
								"    v_texCoord = a_texCoord;\n"
								"}\n";

			programCollection.glslSources.add("vert") << glu::VertexSource(vert);
		}

		{
			// Fragment shader: declarations depend on the access type, then the
			// access itself is performed in main() and written to o_color.
			std::ostringstream frag;
			frag << "#version 450\n"
				 "layout(location = 0) in mediump vec2 v_texCoord;\n"
				 "layout(location = 0) out highp ${COLOR_VEC_TYPE} o_color;\n";

			// Access-specific resource declarations.
			switch (m_params.accessType)
			{
				case ACCESS_TYPE_SAMPLING:
				case ACCESS_TYPE_TEXEL_FETCH:
					frag << "layout(set = 0, binding = 0) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
					break;
				case ACCESS_TYPE_IMAGE_LOAD:
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_image;\n";
					break;
				case ACCESS_TYPE_IMAGE_STORE:
					// Two images: read from A, write to B.
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_imageA;\n";
					frag << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) writeonly uniform highp ${IMAGE_TYPE} u_imageB;\n";
					break;
				case ACCESS_TYPE_IMAGE_ATOMICS:
					// Atomics require a coherent (read-write) image.
					frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) coherent uniform highp ${IMAGE_TYPE} u_image;\n";
					break;
				default:
					DE_FATAL("Impossible");
					break;
			}

			frag << "\n"
				 "void main() {\n";

			// Access-specific body.
			switch (m_params.accessType)
			{
				case ACCESS_TYPE_SAMPLING:
					frag << "    o_color = texture(u_sampler, v_texCoord);\n";
					break;
				case ACCESS_TYPE_TEXEL_FETCH:
					frag << "    const highp int lod = 0;\n";
					frag << "    o_color = texelFetch(u_sampler, ivec2(v_texCoord), lod);\n";
					break;
				case ACCESS_TYPE_IMAGE_LOAD:
					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
					break;
				case ACCESS_TYPE_IMAGE_STORE:
					frag << "    o_color = imageLoad(u_imageA, ivec2(v_texCoord));\n";
					frag << "    imageStore(u_imageB, ivec2(v_texCoord), o_color);\n";
					break;
				case ACCESS_TYPE_IMAGE_ATOMICS:
					// Operand is gx*gx + gy*gy, mirrored on the CPU by calculateAtomicRef().
					frag << "    int gx = int(v_texCoord.x);\n";
					frag << "    int gy = int(v_texCoord.y);\n";
					frag << "    "
						 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
						 << "(u_image, ivec2(v_texCoord), "
						 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
						 << "(gx*gx + gy*gy));\n";
					frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
					break;
				default:
					DE_FATAL("Impossible");
					break;
			}

			frag << "}\n";

			std::map<std::string, std::string> fragParams;

			fragParams["IMAGE_FORMAT"]		= imageFormat;
			fragParams["IMAGE_TYPE"]		= imageType;
			fragParams["SAMPLER_TYPE"]		= samplerType;
			fragParams["COLOR_VEC_TYPE"]	= colorVecType;

			programCollection.glslSources.add("frag") << glu::FragmentSource(tcu::StringTemplate(frag.str()).specialize(fragParams));
		}
	}
	else if (m_params.shaderType == glu::SHADERTYPE_COMPUTE)
	{
		// Compute shader: one invocation per texel; binding 0 is always the
		// result image, binding 1 (when present) is the source sampler/image.
		std::ostringstream comp;
		comp << "#version 450\n"
			 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
			 "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) ${RES_MEM_QUALIFIER} uniform highp ${IMAGE_TYPE} u_resultImage;\n";

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
				comp << "layout(set = 0, binding = 1) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				comp << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_srcImage;\n";
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				// Atomics operate in place on u_resultImage; no source needed.
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		comp << "\n"
			 "void main() {\n"
			 "    int gx = int(gl_GlobalInvocationID.x);\n"
			 "    int gy = int(gl_GlobalInvocationID.y);\n";

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
				// Normalized coordinates derived from the invocation ID.
				comp << "    ${COLOR_VEC_TYPE} color = texture(u_sampler, vec2(float(gx)/" << de::toString((int)IMAGE_WIDTH) << ", float(gy)/" << de::toString((int)IMAGE_HEIGHT) << "));\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_TEXEL_FETCH:
				comp << "    const highp int lod = 0;\n";
				comp << "    ${COLOR_VEC_TYPE} color = texelFetch(u_sampler, ivec2(gx, gy), lod);\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				comp << "    ${COLOR_VEC_TYPE} color = imageLoad(u_srcImage, ivec2(gx, gy));\n";
				comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				// Operand is gx*gx + gy*gy, mirrored on the CPU by calculateAtomicRef().
				comp << "    "
					 << getAtomicOperationShaderFuncName(m_params.atomicOperation)
					 << "(u_resultImage, ivec2(gx, gy), "
					 << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
					 << "(gx*gx + gy*gy));\n";
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		comp << "}\n";

		std::map<std::string, std::string> compParams;

		compParams["IMAGE_FORMAT"]		= imageFormat;
		compParams["IMAGE_TYPE"]		= imageType;
		compParams["SAMPLER_TYPE"]		= samplerType;
		compParams["COLOR_VEC_TYPE"]	= colorVecType;
		// Atomics read and write the result image; other cases only write it.
		compParams["RES_MEM_QUALIFIER"]	= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? "coherent" : "writeonly";

		programCollection.glslSources.add("comp") << glu::ComputeSource(tcu::StringTemplate(comp.str()).specialize(compParams));
	}
	else
		DE_FATAL("Impossible");
}
450
// Stores references to the validator and parameters owned by the test case.
ImageAccessTestInstance::ImageAccessTestInstance (Context&				ctx,
												  const ImageValidator&	validator,
												  const Params&			params)
	: ProtectedTestInstance	(ctx)
	, m_validator			(validator)
	, m_params				(params)
{
}
459
// Creates the CPU-side reference texture, filled with random color tiles
// seeded from the test parameters. For atomics a restricted value range is
// used instead of the full format range.
de::MovePtr<tcu::Texture2D> ImageAccessTestInstance::createTestTexture2D (void)
{
	const tcu::TextureFormat		texFmt		= mapVkFormat(m_params.imageFormat);
	const tcu::TextureFormatInfo	fmtInfo		= tcu::getTextureFormatInfo(texFmt);
	de::MovePtr<tcu::Texture2D>		texture2D	(new tcu::Texture2D(texFmt, IMAGE_WIDTH, IMAGE_HEIGHT));

	// \note generate only the base level
	texture2D->allocLevel(0);

	const tcu::PixelBufferAccess&	level		= texture2D->getLevel(0);

	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
	{
		// use a smaller range than the format would allow
		const float	cMin	= isIntFormat(m_params.imageFormat) ? -1000.0f : 0.0f;
		const float	cMax	= +1000.0f;

		// Only the red channel is populated; the shaders' atomic operand also
		// only touches a single component.
		fillWithRandomColorTiles(level, tcu::Vec4(cMin, 0, 0, 0), tcu::Vec4(cMax, 0, 0, 0), getSeedValue(m_params));
	}
	else
		fillWithRandomColorTiles(level, fmtInfo.valueMin, fmtInfo.valueMax, getSeedValue(m_params));

	return texture2D;
}
484
iterate(void)485 tcu::TestStatus ImageAccessTestInstance::iterate (void)
486 {
487 switch (m_params.shaderType)
488 {
489 case glu::SHADERTYPE_FRAGMENT: return executeFragmentTest();
490 case glu::SHADERTYPE_COMPUTE: return executeComputeTest();
491 default:
492 DE_FATAL("Impossible");
493 return tcu::TestStatus::fail("");
494 }
495 }
496
// Compute variant: uploads the reference texture into a protected source
// image, dispatches one compute invocation per texel performing the
// configured access, and validates the resulting protected image.
// Binding layout mirrors the shader generated in initPrograms():
//   binding 0 = result storage image, binding 1 = source sampler/image
// (atomics operate in place on the source image and use binding 0 only).
tcu::TestStatus ImageAccessTestInstance::executeComputeTest (void)
{
	ProtectedContext&				ctx					(m_protectedContext);
	const vk::DeviceInterface&		vk					= ctx.getDeviceInterface();
	const vk::VkDevice				device				= ctx.getDevice();
	const vk::VkQueue				queue				= ctx.getQueue();
	const deUint32					queueFamilyIndex	= ctx.getQueueFamilyIndex();

	vk::Unique<vk::VkCommandPool>	cmdPool				(makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex));

	de::MovePtr<tcu::Texture2D>		texture2D			= createTestTexture2D();
	const tcu::Sampler				refSampler			= tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
																	   tcu::Sampler::NEAREST, tcu::Sampler::NEAREST);

	vk::Unique<vk::VkShaderModule>	computeShader		(vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("comp"), 0));

	de::MovePtr<vk::ImageWithMemory>	imageSrc;
	de::MovePtr<vk::ImageWithMemory>	imageDst;
	vk::Move<vk::VkSampler>				sampler;
	vk::Move<vk::VkImageView>			imageViewSrc;
	vk::Move<vk::VkImageView>			imageViewDst;

	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
	vk::Move<vk::VkDescriptorPool>		descriptorPool;
	vk::Move<vk::VkDescriptorSet>		descriptorSet;

	// Create src and dst images
	{
		vk::VkImageUsageFlags	imageUsageFlags	= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT	|
												  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT	|
												  vk::VK_IMAGE_USAGE_SAMPLED_BIT		|
												  vk::VK_IMAGE_USAGE_STORAGE_BIT;

		imageSrc = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
								 IMAGE_WIDTH, IMAGE_HEIGHT,
								 m_params.imageFormat,
								 imageUsageFlags);

		// Atomics read and write the source image itself; no dst image needed.
		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
		{
			imageDst = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
									 IMAGE_WIDTH, IMAGE_HEIGHT,
									 m_params.imageFormat,
									 imageUsageFlags);
		}
	}

	// Upload source image
	{
		// The reference texture is first uploaded to an unprotected staging
		// image, then copied into the protected image (a protected image
		// cannot be written directly from host-visible memory).
		de::MovePtr<vk::ImageWithMemory>	unprotectedImage	= createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
																				IMAGE_WIDTH, IMAGE_HEIGHT,
																				m_params.imageFormat,
																				vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);

		// Upload data to an unprotected image
		uploadImage(m_protectedContext, **unprotectedImage, *texture2D);

		// Select vkImageLayout based upon accessType
		vk::VkImageLayout imageSrcLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
			{
				// Sampled accesses use the read-only optimal layout.
				imageSrcLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				break;
			}
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
			case ACCESS_TYPE_IMAGE_ATOMICS:
			{
				// Storage image accesses require the GENERAL layout.
				imageSrcLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
				break;
			}
			default:
				DE_FATAL("Impossible");
				break;
		}

		// Copy unprotected image to protected image
		copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageSrcLayout, IMAGE_WIDTH, IMAGE_HEIGHT);
	}

	// Clear dst image
	if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
		clearImage(m_protectedContext, **imageDst);

	// Create descriptors
	{
		vk::DescriptorSetLayoutBuilder	layoutBuilder;
		vk::DescriptorPoolBuilder		poolBuilder;

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
				// binding 0: result storage image, binding 1: source sampler
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_COMPUTE_BIT, DE_NULL);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
				break;
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
				// binding 0: result storage image, binding 1: source storage image
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
				break;
			case ACCESS_TYPE_IMAGE_ATOMICS:
				// binding 0: single read-write storage image
				layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
				poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
				break;
			default:
				DE_FATAL("Impossible");
				break;
		}

		descriptorSetLayout	= layoutBuilder.build(vk, device);
		descriptorPool		= poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
		descriptorSet		= makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
	}

	// Create pipeline layout
	vk::Unique<vk::VkPipelineLayout>	pipelineLayout	(makePipelineLayout(vk, device, *descriptorSetLayout));

	// Create sampler and image views
	{
		if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
		{
			const tcu::TextureFormat		texFormat		= mapVkFormat(m_params.imageFormat);
			const vk::VkSamplerCreateInfo	samplerParams	= vk::mapSampler(refSampler, texFormat);

			sampler = createSampler(vk, device, &samplerParams);
		}

		imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);

		if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
			imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
	}

	// Update descriptor set information
	{
		vk::DescriptorSetUpdateBuilder	updateBuilder;

		switch (m_params.accessType)
		{
			case ACCESS_TYPE_SAMPLING:
			case ACCESS_TYPE_TEXEL_FETCH:
			{
				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
				vk::VkDescriptorImageInfo	descSampledImgSrc	= makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImgSrc);
				break;
			}
			case ACCESS_TYPE_IMAGE_LOAD:
			case ACCESS_TYPE_IMAGE_STORE:
			{
				vk::VkDescriptorImageInfo	descStorageImgDst	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
				vk::VkDescriptorImageInfo	descStorageImgSrc	= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
				break;
			}
			case ACCESS_TYPE_IMAGE_ATOMICS:
			{
				vk::VkDescriptorImageInfo	descStorageImg		= makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

				updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
				break;
			}
			default:
				DE_FATAL("Impossible");
				break;
		}

		updateBuilder.update(vk, device);
	}

	// Create validation compute commands & submit
	{
		const vk::Unique<vk::VkFence>	fence		(vk::createFence(vk, device));
		vk::Unique<vk::VkPipeline>		pipeline	(makeComputePipeline(vk, device, *pipelineLayout, *computeShader, DE_NULL));
		vk::Unique<vk::VkCommandBuffer>	cmdBuffer	(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

		beginCommandBuffer(vk, *cmdBuffer);

		vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
		vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
		// One workgroup (1x1x1 local size) per texel.
		vk.cmdDispatch(*cmdBuffer, (deUint32)IMAGE_WIDTH, (deUint32)IMAGE_HEIGHT, 1u);
		endCommandBuffer(vk, *cmdBuffer);

		// Wait indefinitely for completion before validating.
		VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull));
	}

	// Calculate reference image
	if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
		calculateAtomicRef(*texture2D);

	// Validate result
	{
		// Atomics modified the source image in place; other cases wrote the dst image.
		const vk::VkImage	resultImage	= m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc : **imageDst;

		return validateResult(resultImage, vk::VK_IMAGE_LAYOUT_GENERAL, *texture2D, refSampler);
	}
}
706
executeFragmentTest(void)707 tcu::TestStatus ImageAccessTestInstance::executeFragmentTest (void)
708 {
709 ProtectedContext& ctx (m_protectedContext);
710 const vk::DeviceInterface& vk = ctx.getDeviceInterface();
711 const vk::VkDevice device = ctx.getDevice();
712 const vk::VkQueue queue = ctx.getQueue();
713 const deUint32 queueFamilyIndex = ctx.getQueueFamilyIndex();
714
715 // Create output image
716 de::MovePtr<vk::ImageWithMemory> colorImage (createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
717 RENDER_WIDTH, RENDER_HEIGHT,
718 m_params.imageFormat,
719 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_SAMPLED_BIT));
720 vk::Unique<vk::VkImageView> colorImageView (createImageView(ctx, **colorImage, m_params.imageFormat));
721
722 vk::Unique<vk::VkRenderPass> renderPass (createRenderPass(ctx, m_params.imageFormat));
723 vk::Unique<vk::VkFramebuffer> framebuffer (createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));
724
725 vk::Unique<vk::VkCommandPool> cmdPool (makeCommandPool(vk, device, PROTECTION_ENABLED, queueFamilyIndex));
726 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
727
728 de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
729 const tcu::Sampler refSampler = tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
730 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST);
731
732 vk::Move<vk::VkShaderModule> vertexShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0);
733 vk::Move<vk::VkShaderModule> fragmentShader = createShaderModule(vk, device, ctx.getBinaryCollection().get("frag"), 0);
734
735 de::MovePtr<vk::ImageWithMemory> imageSrc;
736 de::MovePtr<vk::ImageWithMemory> imageDst;
737 vk::Move<vk::VkSampler> sampler;
738 vk::Move<vk::VkImageView> imageViewSrc;
739 vk::Move<vk::VkImageView> imageViewDst;
740
741 vk::Move<vk::VkPipeline> graphicsPipeline;
742 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
743 vk::Move<vk::VkDescriptorPool> descriptorPool;
744 vk::Move<vk::VkDescriptorSet> descriptorSet;
745
746 // Create src and dst images
747 {
748 vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
749 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
750 vk::VK_IMAGE_USAGE_SAMPLED_BIT;
751
752 switch (m_params.accessType)
753 {
754 case ACCESS_TYPE_IMAGE_LOAD:
755 case ACCESS_TYPE_IMAGE_STORE:
756 case ACCESS_TYPE_IMAGE_ATOMICS:
757 imageUsageFlags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
758 break;
759 default:
760 break;
761 }
762
763 imageSrc = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
764 IMAGE_WIDTH, IMAGE_HEIGHT,
765 m_params.imageFormat,
766 imageUsageFlags);
767
768 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
769 {
770 imageDst = createImage2D(ctx, PROTECTION_ENABLED, queueFamilyIndex,
771 IMAGE_WIDTH, IMAGE_HEIGHT,
772 m_params.imageFormat,
773 imageUsageFlags);
774 }
775 }
776
777 // Select vkImageLayout based upon accessType
778 vk::VkImageLayout imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
779
780 switch (m_params.accessType)
781 {
782 case ACCESS_TYPE_SAMPLING:
783 case ACCESS_TYPE_TEXEL_FETCH:
784 {
785 imageLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
786 break;
787 }
788 case ACCESS_TYPE_IMAGE_LOAD:
789 case ACCESS_TYPE_IMAGE_STORE:
790 case ACCESS_TYPE_IMAGE_ATOMICS:
791 {
792 imageLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
793 break;
794 }
795 default:
796 DE_FATAL("Impossible");
797 break;
798 }
799
800 // Upload source image
801 {
802 de::MovePtr<vk::ImageWithMemory> unprotectedImage = createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex,
803 IMAGE_WIDTH, IMAGE_HEIGHT,
804 m_params.imageFormat,
805 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
806
807 // Upload data to an unprotected image
808 uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
809
810 // Copy unprotected image to protected image
811 copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageLayout, IMAGE_WIDTH, IMAGE_HEIGHT);
812 }
813
814 // Clear dst image
815 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
816 clearImage(m_protectedContext, **imageDst);
817
818 // Create descriptors
819 {
820 vk::DescriptorSetLayoutBuilder layoutBuilder;
821 vk::DescriptorPoolBuilder poolBuilder;
822
823 switch (m_params.accessType)
824 {
825 case ACCESS_TYPE_SAMPLING:
826 case ACCESS_TYPE_TEXEL_FETCH:
827 layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
828 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
829 break;
830 case ACCESS_TYPE_IMAGE_LOAD:
831 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
832 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
833 break;
834 case ACCESS_TYPE_IMAGE_STORE:
835 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
836 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
837 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
838 break;
839 case ACCESS_TYPE_IMAGE_ATOMICS:
840 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
841 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
842 break;
843 default:
844 DE_FATAL("Impossible");
845 break;
846 }
847
848 descriptorSetLayout = layoutBuilder.build(vk, device);
849 descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
850 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
851 }
852
853 // Create pipeline layout
854 vk::Unique<vk::VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
855
856 // Create sampler and image views
857 {
858 if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
859 {
860 const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
861 const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);
862
863 sampler = createSampler(vk, device, &samplerParams);
864 }
865
866 imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
867
868 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
869 imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
870 }
871
872 // Update descriptor set information
873 {
874 vk::DescriptorSetUpdateBuilder updateBuilder;
875
876 switch (m_params.accessType)
877 {
878 case ACCESS_TYPE_SAMPLING:
879 case ACCESS_TYPE_TEXEL_FETCH:
880 {
881 vk::VkDescriptorImageInfo descSampledImg = makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
882
883 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImg);
884 break;
885 }
886 case ACCESS_TYPE_IMAGE_LOAD:
887 {
888 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
889
890 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
891 break;
892 }
893 case ACCESS_TYPE_IMAGE_STORE:
894 {
895 vk::VkDescriptorImageInfo descStorageImgSrc = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
896 vk::VkDescriptorImageInfo descStorageImgDst = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
897
898 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
899 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
900 break;
901 }
902 case ACCESS_TYPE_IMAGE_ATOMICS:
903 {
904 vk::VkDescriptorImageInfo descStorageImg = makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
905
906 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
907 break;
908 }
909 default:
910 DE_FATAL("Impossible");
911 break;
912 }
913
914 updateBuilder.update(vk, device);
915 }
916
917 // Create vertex buffer and vertex input descriptors
918 VertexBindings vertexBindings;
919 VertexAttribs vertexAttribs;
920 de::MovePtr<vk::BufferWithMemory> vertexBuffer;
921 {
922 const float positions[] =
923 {
924 -1.0f, -1.0f,
925 -1.0f, +1.0f,
926 +1.0f, -1.0f,
927 +1.0f, +1.0f,
928 };
929
930 std::vector<float> texCoord;
931
932 {
933 const tcu::Vec2 minCoords (0.0f, 0.0f);
934 const tcu::Vec2 maxCoords = m_params.accessType == ACCESS_TYPE_SAMPLING ?
935 tcu::Vec2(1.0f, 1.0f) :
936 tcu::Vec2((float)IMAGE_WIDTH - 0.1f, (float)IMAGE_HEIGHT - 0.1f);
937
938 glu::TextureTestUtil::computeQuadTexCoord2D(texCoord, minCoords, maxCoords);
939 }
940
941 const deUint32 vertexPositionStrideSize = (deUint32)sizeof(tcu::Vec2);
942 const deUint32 vertexTextureStrideSize = (deUint32)sizeof(tcu::Vec2);
943 const deUint32 positionDataSize = 4 * vertexPositionStrideSize;
944 const deUint32 textureCoordDataSize = 4 * vertexTextureStrideSize;
945 const deUint32 vertexBufferSize = positionDataSize + textureCoordDataSize;
946
947 {
948 const vk::VkVertexInputBindingDescription vertexInputBindingDescriptions[2] =
949 {
950 {
951 0u, // deUint32 binding;
952 vertexPositionStrideSize, // deUint32 strideInBytes;
953 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
954 },
955 {
956 1u, // deUint32 binding;
957 vertexTextureStrideSize, // deUint32 strideInBytes;
958 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
959 }
960 };
961 vertexBindings.push_back(vertexInputBindingDescriptions[0]);
962 vertexBindings.push_back(vertexInputBindingDescriptions[1]);
963
964 const vk::VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] =
965 {
966 {
967 0u, // deUint32 location;
968 0u, // deUint32 binding;
969 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
970 0u // deUint32 offsetInBytes;
971 },
972 {
973 1u, // deUint32 location;
974 1u, // deUint32 binding;
975 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
976 positionDataSize // deUint32 offsetInBytes;
977 }
978 };
979 vertexAttribs.push_back(vertexInputAttributeDescriptions[0]);
980 vertexAttribs.push_back(vertexInputAttributeDescriptions[1]);
981 }
982
983 vertexBuffer = makeBuffer(ctx,
984 PROTECTION_DISABLED,
985 queueFamilyIndex,
986 vertexBufferSize,
987 vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
988 vk::MemoryRequirement::HostVisible);
989
990 deMemcpy(vertexBuffer->getAllocation().getHostPtr(), positions, positionDataSize);
991 deMemcpy(reinterpret_cast<deUint8*>(vertexBuffer->getAllocation().getHostPtr()) + positionDataSize, texCoord.data(), textureCoordDataSize);
992 vk::flushAlloc(vk, device, vertexBuffer->getAllocation());
993 }
994
995 // Create pipeline
996 graphicsPipeline = makeGraphicsPipeline(vk,
997 device,
998 *pipelineLayout,
999 *renderPass,
1000 *vertexShader,
1001 *fragmentShader,
1002 vertexBindings,
1003 vertexAttribs,
1004 tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
1005 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
1006
1007 // Begin cmd buffer
1008 beginCommandBuffer(vk, *cmdBuffer);
1009
1010 // Start image barrier
1011 {
1012 const vk::VkImageMemoryBarrier startImgBarrier =
1013 {
1014 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1015 DE_NULL, // pNext
1016 0, // srcAccessMask
1017 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
1018 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1019 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1020 queueFamilyIndex, // srcQueueFamilyIndex
1021 queueFamilyIndex, // dstQueueFamilyIndex
1022 **colorImage, // image
1023 {
1024 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1025 0u, // baseMipLevel
1026 1u, // mipLevels
1027 0u, // baseArraySlice
1028 1u, // subresourceRange
1029 }
1030 };
1031
1032 vk.cmdPipelineBarrier(*cmdBuffer,
1033 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
1034 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
1035 (vk::VkDependencyFlags)0,
1036 0, (const vk::VkMemoryBarrier*)DE_NULL,
1037 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1038 1, &startImgBarrier);
1039 }
1040
1041 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT), tcu::Vec4(0.0f));
1042
1043 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
1044 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1045
1046 {
1047 const vk::VkDeviceSize vertexBufferOffset = 0;
1048
1049 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1050 vk.cmdBindVertexBuffers(*cmdBuffer, 1u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1051 }
1052
1053 vk.cmdDraw(*cmdBuffer, /*vertexCount*/ 4u, 1u, 0u, 1u);
1054
1055 endRenderPass(vk, *cmdBuffer);
1056
1057 {
1058 const vk::VkImageMemoryBarrier endImgBarrier =
1059 {
1060 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1061 DE_NULL, // pNext
1062 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1063 vk::VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
1064 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1065 imageLayout, // newLayout
1066 queueFamilyIndex, // srcQueueFamilyIndex
1067 queueFamilyIndex, // dstQueueFamilyIndex
1068 **colorImage, // image
1069 {
1070 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1071 0u, // baseMipLevel
1072 1u, // mipLevels
1073 0u, // baseArraySlice
1074 1u, // subresourceRange
1075 }
1076 };
1077 vk.cmdPipelineBarrier(*cmdBuffer,
1078 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
1079 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, // dstStageMask
1080 (vk::VkDependencyFlags)0,
1081 0, (const vk::VkMemoryBarrier*)DE_NULL,
1082 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1083 1, &endImgBarrier);
1084 }
1085
1086 endCommandBuffer(vk, *cmdBuffer);
1087
1088 // Submit command buffer
1089 {
1090 const vk::Unique<vk::VkFence> fence (vk::createFence(vk, device));
1091 VK_CHECK(queueSubmit(ctx, PROTECTION_ENABLED, queue, *cmdBuffer, *fence, ~0ull));
1092 }
1093
1094 // Calculate reference image
1095 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1096 calculateAtomicRef(*texture2D);
1097
1098 // Validate result
1099 {
1100 const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc :
1101 m_params.accessType == ACCESS_TYPE_IMAGE_STORE ? **imageDst : **colorImage;
1102
1103 return validateResult(resultImage, imageLayout, *texture2D, refSampler);
1104 }
1105 }
1106
calculateAtomicRef(tcu::Texture2D & texture2D)1107 void ImageAccessTestInstance::calculateAtomicRef (tcu::Texture2D& texture2D)
1108 {
1109 DE_ASSERT(m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS);
1110
1111 const tcu::PixelBufferAccess& reference = texture2D.getLevel(0);
1112
1113 for (int x = 0; x < reference.getWidth(); ++x)
1114 for (int y = 0; y < reference.getHeight(); ++y)
1115 {
1116 const deInt32 oldX = reference.getPixelInt(x, y).x();
1117 const deInt32 atomicArg = x*x + y*y;
1118 const deInt32 newX = computeBinaryAtomicOperationResult(m_params.atomicOperation, oldX, atomicArg);
1119
1120 reference.setPixel(tcu::IVec4(newX, 0, 0, 0), x, y);
1121 }
1122 }
1123
validateResult(vk::VkImage image,vk::VkImageLayout imageLayout,const tcu::Texture2D & texture2D,const tcu::Sampler & refSampler)1124 tcu::TestStatus ImageAccessTestInstance::validateResult (vk::VkImage image, vk::VkImageLayout imageLayout, const tcu::Texture2D& texture2D, const tcu::Sampler& refSampler)
1125 {
1126 de::Random rnd (getSeedValue(m_params));
1127 ValidationData refData;
1128
1129 for (int ndx = 0; ndx < 4; ++ndx)
1130 {
1131 const float lod = 0.0f;
1132 const float cx = rnd.getFloat(0.0f, 1.0f);
1133 const float cy = rnd.getFloat(0.0f, 1.0f);
1134
1135 refData.coords[ndx] = tcu::Vec4(cx, cy, 0.0f, 0.0f);
1136 refData.values[ndx] = texture2D.sample(refSampler, cx, cy, lod);
1137 }
1138
1139 if (!m_validator.validateImage(m_protectedContext, refData, image, m_params.imageFormat, imageLayout))
1140 return tcu::TestStatus::fail("Something went really wrong");
1141 else
1142 return tcu::TestStatus::pass("Everything went OK");
1143 }
1144
1145 } // anonymous
1146
createShaderImageAccessTests(tcu::TestContext & testCtx)1147 tcu::TestCaseGroup* createShaderImageAccessTests (tcu::TestContext& testCtx)
1148 {
1149 de::MovePtr<tcu::TestCaseGroup> accessGroup (new tcu::TestCaseGroup(testCtx, "access", "Shader Image Access Tests"));
1150
1151 static const struct
1152 {
1153 glu::ShaderType type;
1154 const char* name;
1155 const char* desc;
1156 } shaderTypes[] =
1157 {
1158 { glu::SHADERTYPE_FRAGMENT, "fragment", "Image access from fragment shader" },
1159 { glu::SHADERTYPE_COMPUTE, "compute", "Image access from compute shader" },
1160 };
1161
1162 static const struct
1163 {
1164 AccessType type;
1165 const char* name;
1166 const char* desc;
1167 } accessTypes[] =
1168 {
1169 { ACCESS_TYPE_SAMPLING, "sampling", "Sampling test" },
1170 { ACCESS_TYPE_TEXEL_FETCH, "texelfetch", "Texel fetch test" },
1171 { ACCESS_TYPE_IMAGE_LOAD, "imageload", "Image load test" },
1172 { ACCESS_TYPE_IMAGE_STORE, "imagestore", "Image store test" },
1173 { ACCESS_TYPE_IMAGE_ATOMICS, "imageatomics", "Image atomics test" },
1174 };
1175
1176 static const struct
1177 {
1178 vk::VkFormat format;
1179 const char* name;
1180 } formats[] =
1181 {
1182 { vk::VK_FORMAT_R8G8B8A8_UNORM, "rgba8" },
1183 { vk::VK_FORMAT_R32_SINT, "r32i" },
1184 { vk::VK_FORMAT_R32_UINT, "r32ui" },
1185 };
1186
1187 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderTypeNdx)
1188 {
1189 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1190 de::MovePtr<tcu::TestCaseGroup> shaderGroup (new tcu::TestCaseGroup(testCtx, shaderTypes[shaderTypeNdx].name, shaderTypes[shaderTypeNdx].desc));
1191
1192 for (int accessNdx = 0; accessNdx < DE_LENGTH_OF_ARRAY(accessTypes); ++accessNdx)
1193 {
1194 const AccessType accessType = accessTypes[accessNdx].type;
1195
1196 if (shaderType == glu::SHADERTYPE_COMPUTE && accessType == ACCESS_TYPE_IMAGE_STORE) // \note already tested in other tests
1197 continue;
1198
1199 de::MovePtr<tcu::TestCaseGroup> accessTypeGroup (new tcu::TestCaseGroup(testCtx, accessTypes[accessNdx].name, accessTypes[accessNdx].desc));
1200
1201 if (accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1202 {
1203 for (deUint32 atomicOpI = 0; atomicOpI < ATOMIC_OPERATION_LAST; ++atomicOpI)
1204 {
1205 const AtomicOperation atomicOp = (AtomicOperation)atomicOpI;
1206 de::MovePtr<tcu::TestCaseGroup> operationGroup (new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(atomicOp).c_str(), ""));
1207
1208 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1209 {
1210 const vk::VkFormat format = formats[formatNdx].format;
1211
1212 if (format != vk::VK_FORMAT_R32_UINT && format != vk::VK_FORMAT_R32_SINT)
1213 continue;
1214
1215 operationGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format, atomicOp)));
1216 }
1217
1218 accessTypeGroup->addChild(operationGroup.release());
1219 }
1220 }
1221 else
1222 {
1223 for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1224 {
1225 const vk::VkFormat format = formats[formatNdx].format;
1226
1227 accessTypeGroup->addChild(new ImageAccessTestCase(testCtx, formats[formatNdx].name, "", Params(shaderType, accessType, format)));
1228 }
1229 }
1230
1231 shaderGroup->addChild(accessTypeGroup.release());
1232 }
1233
1234 accessGroup->addChild(shaderGroup.release());
1235 }
1236
1237 return accessGroup.release();
1238 }
1239
1240 } // ProtectedMem
1241 } // vkt
1242