1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file
23 * \brief Vulkan ShaderRenderCase
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktShaderRender.hpp"
27
28 #include "tcuImageCompare.hpp"
29 #include "tcuImageIO.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuTextureUtil.hpp"
32 #include "tcuSurface.hpp"
33 #include "tcuVector.hpp"
34
35 #include "deFilePath.hpp"
36 #include "deMath.h"
37 #include "deUniquePtr.hpp"
38
39 #include "vkDeviceUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkPlatform.hpp"
42 #include "vkQueryUtil.hpp"
43 #include "vkRef.hpp"
44 #include "vkRefUtil.hpp"
45 #include "vkStrUtil.hpp"
46 #include "vkTypeUtil.hpp"
47 #include "vkCmdUtil.hpp"
48 #include "vkObjUtil.hpp"
49
50 #include <vector>
51 #include <string>
52
53 namespace vkt
54 {
55 namespace sr
56 {
57
58 using namespace vk;
59
textureTypeToImageViewType(TextureBinding::Type type)60 VkImageViewType textureTypeToImageViewType (TextureBinding::Type type)
61 {
62 switch (type)
63 {
64 case TextureBinding::TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
65 case TextureBinding::TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
66 case TextureBinding::TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
67 case TextureBinding::TYPE_CUBE_MAP: return VK_IMAGE_VIEW_TYPE_CUBE;
68 case TextureBinding::TYPE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
69 case TextureBinding::TYPE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
70 case TextureBinding::TYPE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
71
72 default:
73 DE_FATAL("Impossible");
74 return (VkImageViewType)0;
75 }
76 }
77
viewTypeToImageType(VkImageViewType type)78 VkImageType viewTypeToImageType (VkImageViewType type)
79 {
80 switch (type)
81 {
82 case VK_IMAGE_VIEW_TYPE_1D:
83 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
84 case VK_IMAGE_VIEW_TYPE_2D:
85 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return VK_IMAGE_TYPE_2D;
86 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
87 case VK_IMAGE_VIEW_TYPE_CUBE:
88 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
89
90 default:
91 DE_FATAL("Impossible");
92 return (VkImageType)0;
93 }
94 }
95
textureUsageFlags(void)96 vk::VkImageUsageFlags textureUsageFlags (void)
97 {
98 return (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
99 }
100
textureCreateFlags(vk::VkImageViewType viewType,ShaderRenderCaseInstance::ImageBackingMode backingMode)101 vk::VkImageCreateFlags textureCreateFlags (vk::VkImageViewType viewType, ShaderRenderCaseInstance::ImageBackingMode backingMode)
102 {
103 const bool isCube = (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
104 VkImageCreateFlags imageCreateFlags = (isCube ? static_cast<VkImageCreateFlags>(VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) : 0u);
105
106 if (backingMode == ShaderRenderCaseInstance::IMAGE_BACKING_MODE_SPARSE)
107 imageCreateFlags |= (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT);
108
109 return imageCreateFlags;
110 }
111
112 namespace
113 {
114
115 static const deUint32 MAX_RENDER_WIDTH = 128;
116 static const deUint32 MAX_RENDER_HEIGHT = 128;
117 static const tcu::Vec4 DEFAULT_CLEAR_COLOR = tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f);
118
119 /*! Gets the next multiple of a given divisor */
getNextMultiple(deUint32 divisor,deUint32 value)120 static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
121 {
122 if (value % divisor == 0)
123 {
124 return value;
125 }
126 return value + divisor - (value % divisor);
127 }
128
129 /*! Gets the next value that is multiple of all given divisors */
getNextMultiple(const std::vector<deUint32> & divisors,deUint32 value)130 static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
131 {
132 deUint32 nextMultiple = value;
133 bool nextMultipleFound = false;
134
135 while (true)
136 {
137 nextMultipleFound = true;
138
139 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
140 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
141
142 if (nextMultipleFound)
143 break;
144
145 DE_ASSERT(nextMultiple < ~((deUint32)0u));
146 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
147 }
148
149 return nextMultiple;
150 }
151
152 } // anonymous
153
154 // QuadGrid.
155
156 class QuadGrid
157 {
158 public:
159 QuadGrid (int gridSize,
160 int screenWidth,
161 int screenHeight,
162 const tcu::Vec4& constCoords,
163 const std::vector<tcu::Mat4>& userAttribTransforms,
164 const std::vector<TextureBindingSp>& textures);
165 ~QuadGrid (void);
166
getGridSize(void) const167 int getGridSize (void) const { return m_gridSize; }
getNumVertices(void) const168 int getNumVertices (void) const { return m_numVertices; }
getNumTriangles(void) const169 int getNumTriangles (void) const { return m_numTriangles; }
getConstCoords(void) const170 const tcu::Vec4& getConstCoords (void) const { return m_constCoords; }
getUserAttribTransforms(void) const171 const std::vector<tcu::Mat4> getUserAttribTransforms (void) const { return m_userAttribTransforms; }
getTextures(void) const172 const std::vector<TextureBindingSp>& getTextures (void) const { return m_textures; }
173
getPositions(void) const174 const tcu::Vec4* getPositions (void) const { return &m_positions[0]; }
getAttribOne(void) const175 const float* getAttribOne (void) const { return &m_attribOne[0]; }
getCoords(void) const176 const tcu::Vec4* getCoords (void) const { return &m_coords[0]; }
getUnitCoords(void) const177 const tcu::Vec4* getUnitCoords (void) const { return &m_unitCoords[0]; }
178
getUserAttrib(int attribNdx) const179 const tcu::Vec4* getUserAttrib (int attribNdx) const { return &m_userAttribs[attribNdx][0]; }
getIndices(void) const180 const deUint16* getIndices (void) const { return &m_indices[0]; }
181
182 tcu::Vec4 getCoords (float sx, float sy) const;
183 tcu::Vec4 getUnitCoords (float sx, float sy) const;
184
getNumUserAttribs(void) const185 int getNumUserAttribs (void) const { return (int)m_userAttribTransforms.size(); }
186 tcu::Vec4 getUserAttrib (int attribNdx, float sx, float sy) const;
187
188 private:
189 const int m_gridSize;
190 const int m_numVertices;
191 const int m_numTriangles;
192 const tcu::Vec4 m_constCoords;
193 const std::vector<tcu::Mat4> m_userAttribTransforms;
194
195 const std::vector<TextureBindingSp>& m_textures;
196
197 std::vector<tcu::Vec4> m_screenPos;
198 std::vector<tcu::Vec4> m_positions;
199 std::vector<tcu::Vec4> m_coords; //!< Near-unit coordinates, roughly [-2.0 .. 2.0].
200 std::vector<tcu::Vec4> m_unitCoords; //!< Positive-only coordinates [0.0 .. 1.5].
201 std::vector<float> m_attribOne;
202 std::vector<tcu::Vec4> m_userAttribs[ShaderEvalContext::MAX_TEXTURES];
203 std::vector<deUint16> m_indices;
204 };
205
QuadGrid(int gridSize,int width,int height,const tcu::Vec4 & constCoords,const std::vector<tcu::Mat4> & userAttribTransforms,const std::vector<TextureBindingSp> & textures)206 QuadGrid::QuadGrid (int gridSize,
207 int width,
208 int height,
209 const tcu::Vec4& constCoords,
210 const std::vector<tcu::Mat4>& userAttribTransforms,
211 const std::vector<TextureBindingSp>& textures)
212 : m_gridSize (gridSize)
213 , m_numVertices ((gridSize + 1) * (gridSize + 1))
214 , m_numTriangles (gridSize * gridSize * 2)
215 , m_constCoords (constCoords)
216 , m_userAttribTransforms (userAttribTransforms)
217 , m_textures (textures)
218 {
219 const tcu::Vec4 viewportScale ((float)width, (float)height, 0.0f, 0.0f);
220
221 // Compute vertices.
222 m_screenPos.resize(m_numVertices);
223 m_positions.resize(m_numVertices);
224 m_coords.resize(m_numVertices);
225 m_unitCoords.resize(m_numVertices);
226 m_attribOne.resize(m_numVertices);
227
228 // User attributes.
229 for (int attrNdx = 0; attrNdx < DE_LENGTH_OF_ARRAY(m_userAttribs); attrNdx++)
230 m_userAttribs[attrNdx].resize(m_numVertices);
231
232 for (int y = 0; y < gridSize+1; y++)
233 for (int x = 0; x < gridSize+1; x++)
234 {
235 float sx = (float)x / (float)gridSize;
236 float sy = (float)y / (float)gridSize;
237 float fx = 2.0f * sx - 1.0f;
238 float fy = 2.0f * sy - 1.0f;
239 int vtxNdx = ((y * (gridSize+1)) + x);
240
241 m_positions[vtxNdx] = tcu::Vec4(fx, fy, 0.0f, 1.0f);
242 m_coords[vtxNdx] = getCoords(sx, sy);
243 m_unitCoords[vtxNdx] = getUnitCoords(sx, sy);
244 m_attribOne[vtxNdx] = 1.0f;
245
246 m_screenPos[vtxNdx] = tcu::Vec4(sx, sy, 0.0f, 1.0f) * viewportScale;
247
248 for (int attribNdx = 0; attribNdx < getNumUserAttribs(); attribNdx++)
249 m_userAttribs[attribNdx][vtxNdx] = getUserAttrib(attribNdx, sx, sy);
250 }
251
252 // Compute indices.
253 m_indices.resize(3 * m_numTriangles);
254 for (int y = 0; y < gridSize; y++)
255 for (int x = 0; x < gridSize; x++)
256 {
257 int stride = gridSize + 1;
258 int v00 = (y * stride) + x;
259 int v01 = (y * stride) + x + 1;
260 int v10 = ((y+1) * stride) + x;
261 int v11 = ((y+1) * stride) + x + 1;
262
263 int baseNdx = ((y * gridSize) + x) * 6;
264 m_indices[baseNdx + 0] = (deUint16)v10;
265 m_indices[baseNdx + 1] = (deUint16)v00;
266 m_indices[baseNdx + 2] = (deUint16)v01;
267
268 m_indices[baseNdx + 3] = (deUint16)v10;
269 m_indices[baseNdx + 4] = (deUint16)v01;
270 m_indices[baseNdx + 5] = (deUint16)v11;
271 }
272 }
273
~QuadGrid(void)274 QuadGrid::~QuadGrid (void)
275 {
276 }
277
getCoords(float sx,float sy) const278 inline tcu::Vec4 QuadGrid::getCoords (float sx, float sy) const
279 {
280 const float fx = 2.0f * sx - 1.0f;
281 const float fy = 2.0f * sy - 1.0f;
282 return tcu::Vec4(fx, fy, -fx + 0.33f*fy, -0.275f*fx - fy);
283 }
284
getUnitCoords(float sx,float sy) const285 inline tcu::Vec4 QuadGrid::getUnitCoords (float sx, float sy) const
286 {
287 return tcu::Vec4(sx, sy, 0.33f*sx + 0.5f*sy, 0.5f*sx + 0.25f*sy);
288 }
289
getUserAttrib(int attribNdx,float sx,float sy) const290 inline tcu::Vec4 QuadGrid::getUserAttrib (int attribNdx, float sx, float sy) const
291 {
292 // homogeneous normalized screen-space coordinates
293 return m_userAttribTransforms[attribNdx] * tcu::Vec4(sx, sy, 0.0f, 1.0f);
294 }
295
296 // TextureBinding
297
TextureBinding(const tcu::Archive & archive,const char * filename,const Type type,const tcu::Sampler & sampler)298 TextureBinding::TextureBinding (const tcu::Archive& archive,
299 const char* filename,
300 const Type type,
301 const tcu::Sampler& sampler)
302 : m_type (type)
303 , m_sampler (sampler)
304 {
305 switch(m_type)
306 {
307 case TYPE_2D: m_binding.tex2D = loadTexture2D(archive, filename).release(); break;
308 default:
309 DE_FATAL("Unsupported texture type");
310 }
311 }
312
TextureBinding(const tcu::Texture1D * tex1D,const tcu::Sampler & sampler)313 TextureBinding::TextureBinding (const tcu::Texture1D* tex1D, const tcu::Sampler& sampler)
314 : m_type (TYPE_1D)
315 , m_sampler (sampler)
316 {
317 m_binding.tex1D = tex1D;
318 }
319
TextureBinding(const tcu::Texture2D * tex2D,const tcu::Sampler & sampler)320 TextureBinding::TextureBinding (const tcu::Texture2D* tex2D, const tcu::Sampler& sampler)
321 : m_type (TYPE_2D)
322 , m_sampler (sampler)
323 {
324 m_binding.tex2D = tex2D;
325 }
326
TextureBinding(const tcu::Texture3D * tex3D,const tcu::Sampler & sampler)327 TextureBinding::TextureBinding (const tcu::Texture3D* tex3D, const tcu::Sampler& sampler)
328 : m_type (TYPE_3D)
329 , m_sampler (sampler)
330 {
331 m_binding.tex3D = tex3D;
332 }
333
TextureBinding(const tcu::TextureCube * texCube,const tcu::Sampler & sampler)334 TextureBinding::TextureBinding (const tcu::TextureCube* texCube, const tcu::Sampler& sampler)
335 : m_type (TYPE_CUBE_MAP)
336 , m_sampler (sampler)
337 {
338 m_binding.texCube = texCube;
339 }
340
TextureBinding(const tcu::Texture1DArray * tex1DArray,const tcu::Sampler & sampler)341 TextureBinding::TextureBinding (const tcu::Texture1DArray* tex1DArray, const tcu::Sampler& sampler)
342 : m_type (TYPE_1D_ARRAY)
343 , m_sampler (sampler)
344 {
345 m_binding.tex1DArray = tex1DArray;
346 }
347
TextureBinding(const tcu::Texture2DArray * tex2DArray,const tcu::Sampler & sampler)348 TextureBinding::TextureBinding (const tcu::Texture2DArray* tex2DArray, const tcu::Sampler& sampler)
349 : m_type (TYPE_2D_ARRAY)
350 , m_sampler (sampler)
351 {
352 m_binding.tex2DArray = tex2DArray;
353 }
354
TextureBinding(const tcu::TextureCubeArray * texCubeArray,const tcu::Sampler & sampler)355 TextureBinding::TextureBinding (const tcu::TextureCubeArray* texCubeArray, const tcu::Sampler& sampler)
356 : m_type (TYPE_CUBE_ARRAY)
357 , m_sampler (sampler)
358 {
359 m_binding.texCubeArray = texCubeArray;
360 }
361
~TextureBinding(void)362 TextureBinding::~TextureBinding (void)
363 {
364 switch(m_type)
365 {
366 case TYPE_1D: delete m_binding.tex1D; break;
367 case TYPE_2D: delete m_binding.tex2D; break;
368 case TYPE_3D: delete m_binding.tex3D; break;
369 case TYPE_CUBE_MAP: delete m_binding.texCube; break;
370 case TYPE_1D_ARRAY: delete m_binding.tex1DArray; break;
371 case TYPE_2D_ARRAY: delete m_binding.tex2DArray; break;
372 case TYPE_CUBE_ARRAY: delete m_binding.texCubeArray; break;
373 default: break;
374 }
375 }
376
loadTexture2D(const tcu::Archive & archive,const char * filename)377 de::MovePtr<tcu::Texture2D> TextureBinding::loadTexture2D (const tcu::Archive& archive, const char* filename)
378 {
379 tcu::TextureLevel level;
380 tcu::ImageIO::loadImage(level, archive, filename);
381
382 TCU_CHECK_INTERNAL(level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8) ||
383 level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGB, tcu::TextureFormat::UNORM_INT8));
384
385 // \todo [2015-10-08 elecro] for some reason we get better when using RGBA texture even in RGB case, this needs to be investigated
386 de::MovePtr<tcu::Texture2D> texture(new tcu::Texture2D(tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), level.getWidth(), level.getHeight()));
387
388 // Fill level 0.
389 texture->allocLevel(0);
390 tcu::copy(texture->getLevel(0), level.getAccess());
391
392 return texture;
393 }
394
395 // ShaderEvalContext.
396
ShaderEvalContext(const QuadGrid & quadGrid)397 ShaderEvalContext::ShaderEvalContext (const QuadGrid& quadGrid)
398 : constCoords (quadGrid.getConstCoords())
399 , isDiscarded (false)
400 , m_quadGrid (quadGrid)
401 {
402 const std::vector<TextureBindingSp>& bindings = m_quadGrid.getTextures();
403 DE_ASSERT((int)bindings.size() <= MAX_TEXTURES);
404
405 // Fill in texture array.
406 for (int ndx = 0; ndx < (int)bindings.size(); ndx++)
407 {
408 const TextureBinding& binding = *bindings[ndx];
409
410 if (binding.getType() == TextureBinding::TYPE_NONE)
411 continue;
412
413 textures[ndx].sampler = binding.getSampler();
414
415 switch (binding.getType())
416 {
417 case TextureBinding::TYPE_1D: textures[ndx].tex1D = &binding.get1D(); break;
418 case TextureBinding::TYPE_2D: textures[ndx].tex2D = &binding.get2D(); break;
419 case TextureBinding::TYPE_3D: textures[ndx].tex3D = &binding.get3D(); break;
420 case TextureBinding::TYPE_CUBE_MAP: textures[ndx].texCube = &binding.getCube(); break;
421 case TextureBinding::TYPE_1D_ARRAY: textures[ndx].tex1DArray = &binding.get1DArray(); break;
422 case TextureBinding::TYPE_2D_ARRAY: textures[ndx].tex2DArray = &binding.get2DArray(); break;
423 case TextureBinding::TYPE_CUBE_ARRAY: textures[ndx].texCubeArray = &binding.getCubeArray(); break;
424 default:
425 TCU_THROW(InternalError, "Handling of texture binding type not implemented");
426 }
427 }
428 }
429
~ShaderEvalContext(void)430 ShaderEvalContext::~ShaderEvalContext (void)
431 {
432 }
433
reset(float sx,float sy)434 void ShaderEvalContext::reset (float sx, float sy)
435 {
436 // Clear old values
437 color = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
438 isDiscarded = false;
439
440 // Compute coords
441 coords = m_quadGrid.getCoords(sx, sy);
442 unitCoords = m_quadGrid.getUnitCoords(sx, sy);
443
444 // Compute user attributes.
445 const int numAttribs = m_quadGrid.getNumUserAttribs();
446 DE_ASSERT(numAttribs <= MAX_USER_ATTRIBS);
447 for (int attribNdx = 0; attribNdx < numAttribs; attribNdx++)
448 in[attribNdx] = m_quadGrid.getUserAttrib(attribNdx, sx, sy);
449 }
450
texture2D(int unitNdx,const tcu::Vec2 & texCoords)451 tcu::Vec4 ShaderEvalContext::texture2D (int unitNdx, const tcu::Vec2& texCoords)
452 {
453 if (textures[unitNdx].tex2D)
454 return textures[unitNdx].tex2D->sample(textures[unitNdx].sampler, texCoords.x(), texCoords.y(), 0.0f);
455 else
456 return tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
457 }
458
459 // ShaderEvaluator.
460
ShaderEvaluator(void)461 ShaderEvaluator::ShaderEvaluator (void)
462 : m_evalFunc(DE_NULL)
463 {
464 }
465
ShaderEvaluator(ShaderEvalFunc evalFunc)466 ShaderEvaluator::ShaderEvaluator (ShaderEvalFunc evalFunc)
467 : m_evalFunc(evalFunc)
468 {
469 }
470
~ShaderEvaluator(void)471 ShaderEvaluator::~ShaderEvaluator (void)
472 {
473 }
474
evaluate(ShaderEvalContext & ctx) const475 void ShaderEvaluator::evaluate (ShaderEvalContext& ctx) const
476 {
477 DE_ASSERT(m_evalFunc);
478 m_evalFunc(ctx);
479 }
480
481 // UniformSetup.
482
UniformSetup(void)483 UniformSetup::UniformSetup (void)
484 : m_setupFunc(DE_NULL)
485 {
486 }
487
UniformSetup(UniformSetupFunc setupFunc)488 UniformSetup::UniformSetup (UniformSetupFunc setupFunc)
489 : m_setupFunc(setupFunc)
490 {
491 }
492
~UniformSetup(void)493 UniformSetup::~UniformSetup (void)
494 {
495 }
496
setup(ShaderRenderCaseInstance & instance,const tcu::Vec4 & constCoords) const497 void UniformSetup::setup (ShaderRenderCaseInstance& instance, const tcu::Vec4& constCoords) const
498 {
499 if (m_setupFunc)
500 m_setupFunc(instance, constCoords);
501 }
502
503 // ShaderRenderCase.
504
ShaderRenderCase(tcu::TestContext & testCtx,const std::string & name,const bool isVertexCase,const ShaderEvalFunc evalFunc,const UniformSetup * uniformSetup,const AttributeSetupFunc attribFunc)505 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
506 const std::string& name,
507 const bool isVertexCase,
508 const ShaderEvalFunc evalFunc,
509 const UniformSetup* uniformSetup,
510 const AttributeSetupFunc attribFunc)
511 : vkt::TestCase (testCtx, name)
512 , m_isVertexCase (isVertexCase)
513 , m_evaluator (new ShaderEvaluator(evalFunc))
514 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
515 , m_attribFunc (attribFunc)
516 {}
517
ShaderRenderCase(tcu::TestContext & testCtx,const std::string & name,const bool isVertexCase,const ShaderEvaluator * evaluator,const UniformSetup * uniformSetup,const AttributeSetupFunc attribFunc)518 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
519 const std::string& name,
520 const bool isVertexCase,
521 const ShaderEvaluator* evaluator,
522 const UniformSetup* uniformSetup,
523 const AttributeSetupFunc attribFunc)
524 : vkt::TestCase (testCtx, name)
525 , m_isVertexCase (isVertexCase)
526 , m_evaluator (evaluator)
527 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
528 , m_attribFunc (attribFunc)
529 {}
530
~ShaderRenderCase(void)531 ShaderRenderCase::~ShaderRenderCase (void)
532 {
533 }
534
initPrograms(vk::SourceCollections & programCollection) const535 void ShaderRenderCase::initPrograms (vk::SourceCollections& programCollection) const
536 {
537 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
538 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
539 }
540
createInstance(Context & context) const541 TestInstance* ShaderRenderCase::createInstance (Context& context) const
542 {
543 DE_ASSERT(m_evaluator != DE_NULL);
544 DE_ASSERT(m_uniformSetup != DE_NULL);
545 return new ShaderRenderCaseInstance(context, m_isVertexCase, *m_evaluator, *m_uniformSetup, m_attribFunc);
546 }
547
548 // ShaderRenderCaseInstance.
549
ShaderRenderCaseInstance(Context & context)550 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context)
551 : vkt::TestInstance (context)
552 , m_imageBackingMode (IMAGE_BACKING_MODE_REGULAR)
553 , m_quadGridSize (static_cast<deUint32>(GRID_SIZE_DEFAULT_FRAGMENT))
554 , m_memAlloc (getAllocator())
555 , m_clearColor (DEFAULT_CLEAR_COLOR)
556 , m_isVertexCase (false)
557 , m_vertexShaderName ("vert")
558 , m_fragmentShaderName ("frag")
559 , m_renderSize (MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
560 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
561 , m_evaluator (DE_NULL)
562 , m_uniformSetup (DE_NULL)
563 , m_attribFunc (DE_NULL)
564 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
565 , m_fuzzyCompare (true)
566 {
567 }
568
569
ShaderRenderCaseInstance(Context & context,const bool isVertexCase,const ShaderEvaluator & evaluator,const UniformSetup & uniformSetup,const AttributeSetupFunc attribFunc,const ImageBackingMode imageBackingMode,const deUint32 gridSize,const bool fuzzyCompare)570 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
571 const bool isVertexCase,
572 const ShaderEvaluator& evaluator,
573 const UniformSetup& uniformSetup,
574 const AttributeSetupFunc attribFunc,
575 const ImageBackingMode imageBackingMode,
576 const deUint32 gridSize,
577 const bool fuzzyCompare)
578 : vkt::TestInstance (context)
579 , m_imageBackingMode (imageBackingMode)
580 , m_quadGridSize (gridSize == static_cast<deUint32>(GRID_SIZE_DEFAULTS)
581 ? (isVertexCase
582 ? static_cast<deUint32>(GRID_SIZE_DEFAULT_VERTEX)
583 : static_cast<deUint32>(GRID_SIZE_DEFAULT_FRAGMENT))
584 : gridSize)
585 , m_memAlloc (getAllocator())
586 , m_clearColor (DEFAULT_CLEAR_COLOR)
587 , m_isVertexCase (isVertexCase)
588 , m_vertexShaderName ("vert")
589 , m_fragmentShaderName ("frag")
590 , m_renderSize (MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
591 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
592 , m_evaluator (&evaluator)
593 , m_uniformSetup (&uniformSetup)
594 , m_attribFunc (attribFunc)
595 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
596 , m_fuzzyCompare (fuzzyCompare)
597 {
598 }
599
ShaderRenderCaseInstance(Context & context,const bool isVertexCase,const ShaderEvaluator * evaluator,const UniformSetup * uniformSetup,const AttributeSetupFunc attribFunc,const ImageBackingMode imageBackingMode,const deUint32 gridSize)600 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
601 const bool isVertexCase,
602 const ShaderEvaluator* evaluator,
603 const UniformSetup* uniformSetup,
604 const AttributeSetupFunc attribFunc,
605 const ImageBackingMode imageBackingMode,
606 const deUint32 gridSize)
607 : vkt::TestInstance (context)
608 , m_imageBackingMode (imageBackingMode)
609 , m_quadGridSize (gridSize == static_cast<deUint32>(GRID_SIZE_DEFAULTS)
610 ? (isVertexCase
611 ? static_cast<deUint32>(GRID_SIZE_DEFAULT_VERTEX)
612 : static_cast<deUint32>(GRID_SIZE_DEFAULT_FRAGMENT))
613 : gridSize)
614 , m_memAlloc (getAllocator())
615 , m_clearColor (DEFAULT_CLEAR_COLOR)
616 , m_isVertexCase (isVertexCase)
617 , m_vertexShaderName ("vert")
618 , m_fragmentShaderName ("frag")
619 , m_renderSize (MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
620 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
621 , m_evaluator (evaluator)
622 , m_uniformSetup (uniformSetup)
623 , m_attribFunc (attribFunc)
624 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
625 , m_fuzzyCompare (false)
626 {
627 }
628
getAllocator(void) const629 vk::Allocator& ShaderRenderCaseInstance::getAllocator (void) const
630 {
631 return m_context.getDefaultAllocator();
632 }
633
~ShaderRenderCaseInstance(void)634 ShaderRenderCaseInstance::~ShaderRenderCaseInstance (void)
635 {
636 }
637
getDevice(void) const638 VkDevice ShaderRenderCaseInstance::getDevice (void) const
639 {
640 return m_context.getDevice();
641 }
642
getUniversalQueueFamilyIndex(void) const643 deUint32 ShaderRenderCaseInstance::getUniversalQueueFamilyIndex (void) const
644 {
645 return m_context.getUniversalQueueFamilyIndex();
646 }
647
getSparseQueueFamilyIndex(void) const648 deUint32 ShaderRenderCaseInstance::getSparseQueueFamilyIndex (void) const
649 {
650 return m_context.getSparseQueueFamilyIndex();
651 }
652
getDeviceInterface(void) const653 const DeviceInterface& ShaderRenderCaseInstance::getDeviceInterface (void) const
654 {
655 return m_context.getDeviceInterface();
656 }
657
getUniversalQueue(void) const658 VkQueue ShaderRenderCaseInstance::getUniversalQueue (void) const
659 {
660 return m_context.getUniversalQueue();
661 }
662
getSparseQueue(void) const663 VkQueue ShaderRenderCaseInstance::getSparseQueue (void) const
664 {
665 return m_context.getSparseQueue();
666 }
667
getPhysicalDevice(void) const668 VkPhysicalDevice ShaderRenderCaseInstance::getPhysicalDevice (void) const
669 {
670 return m_context.getPhysicalDevice();
671 }
672
getInstanceInterface(void) const673 const InstanceInterface& ShaderRenderCaseInstance::getInstanceInterface (void) const
674 {
675 return m_context.getInstanceInterface();
676 }
677
iterate(void)678 tcu::TestStatus ShaderRenderCaseInstance::iterate (void)
679 {
680 setup();
681
682 // Create quad grid.
683 const tcu::UVec2 viewportSize = getViewportSize();
684 const int width = viewportSize.x();
685 const int height = viewportSize.y();
686
687 m_quadGrid = de::MovePtr<QuadGrid>(new QuadGrid(m_quadGridSize, width, height, getDefaultConstCoords(), m_userAttribTransforms, m_textures));
688
689 // Render result.
690 tcu::Surface resImage (width, height);
691
692 render(m_quadGrid->getNumVertices(), m_quadGrid->getNumTriangles(), m_quadGrid->getIndices(), m_quadGrid->getConstCoords());
693 tcu::copy(resImage.getAccess(), m_resultImage.getAccess());
694
695 // Compute reference.
696 tcu::Surface refImage (width, height);
697 if (m_isVertexCase)
698 computeVertexReference(refImage, *m_quadGrid);
699 else
700 computeFragmentReference(refImage, *m_quadGrid);
701
702 // Compare.
703 const bool compareOk = compareImages(resImage, refImage, 0.2f);
704
705 if (compareOk)
706 return tcu::TestStatus::pass("Result image matches reference");
707 else
708 return tcu::TestStatus::fail("Image mismatch");
709 }
710
setup(void)711 void ShaderRenderCaseInstance::setup (void)
712 {
713 m_resultImage = tcu::TextureLevel();
714 m_descriptorSetLayoutBuilder = de::MovePtr<DescriptorSetLayoutBuilder> (new DescriptorSetLayoutBuilder());
715 m_descriptorPoolBuilder = de::MovePtr<DescriptorPoolBuilder> (new DescriptorPoolBuilder());
716 m_descriptorSetUpdateBuilder = de::MovePtr<DescriptorSetUpdateBuilder> (new DescriptorSetUpdateBuilder());
717
718 m_uniformInfos.clear();
719 m_vertexBindingDescription.clear();
720 m_vertexAttributeDescription.clear();
721 m_vertexBuffers.clear();
722 m_vertexBufferAllocs.clear();
723 m_pushConstantRanges.clear();
724 }
725
setupUniformData(deUint32 bindingLocation,size_t size,const void * dataPtr)726 void ShaderRenderCaseInstance::setupUniformData (deUint32 bindingLocation, size_t size, const void* dataPtr)
727 {
728 const VkDevice vkDevice = getDevice();
729 const DeviceInterface& vk = getDeviceInterface();
730 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
731
732 const VkBufferCreateInfo uniformBufferParams =
733 {
734 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
735 DE_NULL, // const void* pNext;
736 0u, // VkBufferCreateFlags flags;
737 size, // VkDeviceSize size;
738 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, // VkBufferUsageFlags usage;
739 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
740 1u, // deUint32 queueFamilyCount;
741 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
742 };
743
744 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &uniformBufferParams);
745 de::MovePtr<Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
746 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
747
748 deMemcpy(alloc->getHostPtr(), dataPtr, size);
749 flushAlloc(vk, vkDevice, *alloc);
750
751 de::MovePtr<BufferUniform> uniformInfo(new BufferUniform());
752 uniformInfo->type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
753 uniformInfo->descriptor = makeDescriptorBufferInfo(*buffer, 0u, size);
754 uniformInfo->location = bindingLocation;
755 uniformInfo->buffer = VkBufferSp(new vk::Unique<VkBuffer>(buffer));
756 uniformInfo->alloc = AllocationSp(alloc.release());
757
758 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniformInfo)));
759 }
760
addUniform(deUint32 bindingLocation,vk::VkDescriptorType descriptorType,size_t dataSize,const void * data)761 void ShaderRenderCaseInstance::addUniform (deUint32 bindingLocation, vk::VkDescriptorType descriptorType, size_t dataSize, const void* data)
762 {
763 m_descriptorSetLayoutBuilder->addSingleBinding(descriptorType, vk::VK_SHADER_STAGE_ALL);
764 m_descriptorPoolBuilder->addType(descriptorType);
765
766 setupUniformData(bindingLocation, dataSize, data);
767 }
768
addAttribute(deUint32 bindingLocation,vk::VkFormat format,deUint32 sizePerElement,deUint32 count,const void * dataPtr)769 void ShaderRenderCaseInstance::addAttribute (deUint32 bindingLocation,
770 vk::VkFormat format,
771 deUint32 sizePerElement,
772 deUint32 count,
773 const void* dataPtr)
774 {
775 // Portability requires stride to be multiply of minVertexInputBindingStrideAlignment
776 // this value is usually 4 and current tests meet this requirement but
777 // if this changes in future then this limit should be verified in checkSupport
778 #ifndef CTS_USES_VULKANSC
779 if (m_context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
780 ((sizePerElement % m_context.getPortabilitySubsetProperties().minVertexInputBindingStrideAlignment) != 0))
781 {
782 DE_FATAL("stride is not multiply of minVertexInputBindingStrideAlignment");
783 }
784 #endif // CTS_USES_VULKANSC
785
786 // Add binding specification
787 const deUint32 binding = (deUint32)m_vertexBindingDescription.size();
788 const VkVertexInputBindingDescription bindingDescription =
789 {
790 binding, // deUint32 binding;
791 sizePerElement, // deUint32 stride;
792 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate stepRate;
793 };
794
795 m_vertexBindingDescription.push_back(bindingDescription);
796
797 // Add location and format specification
798 const VkVertexInputAttributeDescription attributeDescription =
799 {
800 bindingLocation, // deUint32 location;
801 binding, // deUint32 binding;
802 format, // VkFormat format;
803 0u, // deUint32 offset;
804 };
805
806 m_vertexAttributeDescription.push_back(attributeDescription);
807
808 // Upload data to buffer
809 const VkDevice vkDevice = getDevice();
810 const DeviceInterface& vk = getDeviceInterface();
811 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
812
813 const VkDeviceSize inputSize = sizePerElement * count;
814 const VkBufferCreateInfo vertexBufferParams =
815 {
816 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
817 DE_NULL, // const void* pNext;
818 0u, // VkBufferCreateFlags flags;
819 inputSize, // VkDeviceSize size;
820 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
821 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
822 1u, // deUint32 queueFamilyCount;
823 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
824 };
825
826 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
827 de::MovePtr<vk::Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
828 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
829
830 deMemcpy(alloc->getHostPtr(), dataPtr, (size_t)inputSize);
831 flushAlloc(vk, vkDevice, *alloc);
832
833 m_vertexBuffers.push_back(VkBufferSp(new vk::Unique<VkBuffer>(buffer)));
834 m_vertexBufferAllocs.push_back(AllocationSp(alloc.release()));
835 }
836
useAttribute(deUint32 bindingLocation,BaseAttributeType type)837 void ShaderRenderCaseInstance::useAttribute (deUint32 bindingLocation, BaseAttributeType type)
838 {
839 const EnabledBaseAttribute attribute =
840 {
841 bindingLocation, // deUint32 location;
842 type // BaseAttributeType type;
843 };
844 m_enabledBaseAttributes.push_back(attribute);
845 }
846
setupUniforms(const tcu::Vec4 & constCoords)847 void ShaderRenderCaseInstance::setupUniforms (const tcu::Vec4& constCoords)
848 {
849 if (m_uniformSetup)
850 m_uniformSetup->setup(*this, constCoords);
851 }
852
useUniform(deUint32 bindingLocation,BaseUniformType type)853 void ShaderRenderCaseInstance::useUniform (deUint32 bindingLocation, BaseUniformType type)
854 {
855 #define UNIFORM_CASE(type, value) case type: addUniform(bindingLocation, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, value); break
856
857 switch(type)
858 {
859 // Bool
860 UNIFORM_CASE(UB_FALSE, 0);
861 UNIFORM_CASE(UB_TRUE, 1);
862
863 // BVec4
864 UNIFORM_CASE(UB4_FALSE, tcu::Vec4(0));
865 UNIFORM_CASE(UB4_TRUE, tcu::Vec4(1));
866
867 // Integer
868 UNIFORM_CASE(UI_ZERO, 0);
869 UNIFORM_CASE(UI_ONE, 1);
870 UNIFORM_CASE(UI_TWO, 2);
871 UNIFORM_CASE(UI_THREE, 3);
872 UNIFORM_CASE(UI_FOUR, 4);
873 UNIFORM_CASE(UI_FIVE, 5);
874 UNIFORM_CASE(UI_SIX, 6);
875 UNIFORM_CASE(UI_SEVEN, 7);
876 UNIFORM_CASE(UI_EIGHT, 8);
877 UNIFORM_CASE(UI_ONEHUNDREDONE, 101);
878
879 // IVec2
880 UNIFORM_CASE(UI2_MINUS_ONE, tcu::IVec2(-1));
881 UNIFORM_CASE(UI2_ZERO, tcu::IVec2(0));
882 UNIFORM_CASE(UI2_ONE, tcu::IVec2(1));
883 UNIFORM_CASE(UI2_TWO, tcu::IVec2(2));
884 UNIFORM_CASE(UI2_THREE, tcu::IVec2(3));
885 UNIFORM_CASE(UI2_FOUR, tcu::IVec2(4));
886 UNIFORM_CASE(UI2_FIVE, tcu::IVec2(5));
887
888 // IVec3
889 UNIFORM_CASE(UI3_MINUS_ONE, tcu::IVec3(-1));
890 UNIFORM_CASE(UI3_ZERO, tcu::IVec3(0));
891 UNIFORM_CASE(UI3_ONE, tcu::IVec3(1));
892 UNIFORM_CASE(UI3_TWO, tcu::IVec3(2));
893 UNIFORM_CASE(UI3_THREE, tcu::IVec3(3));
894 UNIFORM_CASE(UI3_FOUR, tcu::IVec3(4));
895 UNIFORM_CASE(UI3_FIVE, tcu::IVec3(5));
896
897 // IVec4
898 UNIFORM_CASE(UI4_MINUS_ONE, tcu::IVec4(-1));
899 UNIFORM_CASE(UI4_ZERO, tcu::IVec4(0));
900 UNIFORM_CASE(UI4_ONE, tcu::IVec4(1));
901 UNIFORM_CASE(UI4_TWO, tcu::IVec4(2));
902 UNIFORM_CASE(UI4_THREE, tcu::IVec4(3));
903 UNIFORM_CASE(UI4_FOUR, tcu::IVec4(4));
904 UNIFORM_CASE(UI4_FIVE, tcu::IVec4(5));
905
906 // Float
907 UNIFORM_CASE(UF_ZERO, 0.0f);
908 UNIFORM_CASE(UF_ONE, 1.0f);
909 UNIFORM_CASE(UF_TWO, 2.0f);
910 UNIFORM_CASE(UF_THREE, 3.0f);
911 UNIFORM_CASE(UF_FOUR, 4.0f);
912 UNIFORM_CASE(UF_FIVE, 5.0f);
913 UNIFORM_CASE(UF_SIX, 6.0f);
914 UNIFORM_CASE(UF_SEVEN, 7.0f);
915 UNIFORM_CASE(UF_EIGHT, 8.0f);
916
917 UNIFORM_CASE(UF_HALF, 1.0f / 2.0f);
918 UNIFORM_CASE(UF_THIRD, 1.0f / 3.0f);
919 UNIFORM_CASE(UF_FOURTH, 1.0f / 4.0f);
920 UNIFORM_CASE(UF_FIFTH, 1.0f / 5.0f);
921 UNIFORM_CASE(UF_SIXTH, 1.0f / 6.0f);
922 UNIFORM_CASE(UF_SEVENTH, 1.0f / 7.0f);
923 UNIFORM_CASE(UF_EIGHTH, 1.0f / 8.0f);
924
925 // Vec2
926 UNIFORM_CASE(UV2_MINUS_ONE, tcu::Vec2(-1.0f));
927 UNIFORM_CASE(UV2_ZERO, tcu::Vec2(0.0f));
928 UNIFORM_CASE(UV2_ONE, tcu::Vec2(1.0f));
929 UNIFORM_CASE(UV2_TWO, tcu::Vec2(2.0f));
930 UNIFORM_CASE(UV2_THREE, tcu::Vec2(3.0f));
931
932 UNIFORM_CASE(UV2_HALF, tcu::Vec2(1.0f / 2.0f));
933
934 // Vec3
935 UNIFORM_CASE(UV3_MINUS_ONE, tcu::Vec3(-1.0f));
936 UNIFORM_CASE(UV3_ZERO, tcu::Vec3(0.0f));
937 UNIFORM_CASE(UV3_ONE, tcu::Vec3(1.0f));
938 UNIFORM_CASE(UV3_TWO, tcu::Vec3(2.0f));
939 UNIFORM_CASE(UV3_THREE, tcu::Vec3(3.0f));
940
941 UNIFORM_CASE(UV3_HALF, tcu::Vec3(1.0f / 2.0f));
942
943 // Vec4
944 UNIFORM_CASE(UV4_MINUS_ONE, tcu::Vec4(-1.0f));
945 UNIFORM_CASE(UV4_ZERO, tcu::Vec4(0.0f));
946 UNIFORM_CASE(UV4_ONE, tcu::Vec4(1.0f));
947 UNIFORM_CASE(UV4_TWO, tcu::Vec4(2.0f));
948 UNIFORM_CASE(UV4_THREE, tcu::Vec4(3.0f));
949
950 UNIFORM_CASE(UV4_HALF, tcu::Vec4(1.0f / 2.0f));
951
952 UNIFORM_CASE(UV4_BLACK, tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
953 UNIFORM_CASE(UV4_GRAY, tcu::Vec4(0.5f, 0.5f, 0.5f, 1.0f));
954 UNIFORM_CASE(UV4_WHITE, tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
955
956 default:
957 m_context.getTestContext().getLog() << tcu::TestLog::Message << "Unknown Uniform type: " << type << tcu::TestLog::EndMessage;
958 break;
959 }
960
961 #undef UNIFORM_CASE
962 }
963
getViewportSize(void) const964 const tcu::UVec2 ShaderRenderCaseInstance::getViewportSize (void) const
965 {
966 return tcu::UVec2(de::min(m_renderSize.x(), MAX_RENDER_WIDTH),
967 de::min(m_renderSize.y(), MAX_RENDER_HEIGHT));
968 }
969
setSampleCount(VkSampleCountFlagBits sampleCount)970 void ShaderRenderCaseInstance::setSampleCount (VkSampleCountFlagBits sampleCount)
971 {
972 m_sampleCount = sampleCount;
973 }
974
isMultiSampling(void) const975 bool ShaderRenderCaseInstance::isMultiSampling (void) const
976 {
977 return m_sampleCount != VK_SAMPLE_COUNT_1_BIT;
978 }
979
uploadImage(const tcu::TextureFormat & texFormat,const TextureData & textureData,const tcu::Sampler & refSampler,deUint32 mipLevels,deUint32 arrayLayers,VkImage destImage)980 void ShaderRenderCaseInstance::uploadImage (const tcu::TextureFormat& texFormat,
981 const TextureData& textureData,
982 const tcu::Sampler& refSampler,
983 deUint32 mipLevels,
984 deUint32 arrayLayers,
985 VkImage destImage)
986 {
987 const VkDevice vkDevice = getDevice();
988 const DeviceInterface& vk = getDeviceInterface();
989 const VkQueue queue = getUniversalQueue();
990 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
991
992 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
993 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
994 deUint32 bufferSize = 0u;
995 Move<VkBuffer> buffer;
996 de::MovePtr<Allocation> bufferAlloc;
997 std::vector<VkBufferImageCopy> copyRegions;
998 std::vector<deUint32> offsetMultiples;
999
1000 offsetMultiples.push_back(4u);
1001 offsetMultiples.push_back(texFormat.getPixelSize());
1002
1003 // Calculate buffer size
1004 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1005 {
1006 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1007 {
1008 const tcu::ConstPixelBufferAccess& access = *lit;
1009
1010 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1011 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1012 }
1013 }
1014
1015 // Create source buffer
1016 {
1017 const VkBufferCreateInfo bufferParams =
1018 {
1019 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1020 DE_NULL, // const void* pNext;
1021 0u, // VkBufferCreateFlags flags;
1022 bufferSize, // VkDeviceSize size;
1023 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1024 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1025 0u, // deUint32 queueFamilyIndexCount;
1026 DE_NULL, // const deUint32* pQueueFamilyIndices;
1027 };
1028
1029 buffer = createBuffer(vk, vkDevice, &bufferParams);
1030 bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1031 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1032 }
1033
1034 // Get copy regions and write buffer data
1035 {
1036 deUint32 layerDataOffset = 0;
1037 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1038
1039 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1040 {
1041 const TextureLayerData& layerData = textureData[levelNdx];
1042
1043 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1044 {
1045 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1046
1047 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1048 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1049
1050 const VkBufferImageCopy layerRegion =
1051 {
1052 layerDataOffset, // VkDeviceSize bufferOffset;
1053 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1054 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1055 { // VkImageSubresourceLayers imageSubresource;
1056 aspectMask, // VkImageAspectFlags aspectMask;
1057 (deUint32)levelNdx, // uint32_t mipLevel;
1058 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1059 1u // uint32_t layerCount;
1060 },
1061 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1062 { // VkExtent3D imageExtent;
1063 (deUint32)access.getWidth(),
1064 (deUint32)access.getHeight(),
1065 (deUint32)access.getDepth()
1066 }
1067 };
1068
1069 copyRegions.push_back(layerRegion);
1070 tcu::copy(destAccess, access);
1071
1072 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1073 }
1074 }
1075 }
1076
1077 flushAlloc(vk, vkDevice, *bufferAlloc);
1078
1079 if(m_externalCommandPool.get() != DE_NULL)
1080 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions, DE_NULL, aspectMask, mipLevels, arrayLayers, destImage, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, &(m_externalCommandPool.get()->get()));
1081 else
1082 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions, DE_NULL, aspectMask, mipLevels, arrayLayers, destImage);
1083 }
1084
clearImage(const tcu::Sampler & refSampler,deUint32 mipLevels,deUint32 arrayLayers,VkImage destImage)1085 void ShaderRenderCaseInstance::clearImage (const tcu::Sampler& refSampler,
1086 deUint32 mipLevels,
1087 deUint32 arrayLayers,
1088 VkImage destImage)
1089 {
1090 const VkDevice vkDevice = m_context.getDevice();
1091 const DeviceInterface& vk = m_context.getDeviceInterface();
1092 const VkQueue queue = m_context.getUniversalQueue();
1093 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1094
1095 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1096 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1097 Move<VkCommandPool> cmdPool;
1098 Move<VkCommandBuffer> cmdBuffer;
1099
1100 VkClearValue clearValue;
1101 deMemset(&clearValue, 0, sizeof(clearValue));
1102
1103
1104 // Create command pool
1105 VkCommandPool activeCmdPool;
1106 if (m_externalCommandPool.get() == DE_NULL)
1107 {
1108 // Create local command pool
1109 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1110 activeCmdPool = *cmdPool;
1111 }
1112 else
1113 {
1114 // Use external command pool if available
1115 activeCmdPool = m_externalCommandPool.get()->get();
1116 }
1117 // Create command buffer
1118 cmdBuffer = allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1119
1120 const VkImageMemoryBarrier preImageBarrier =
1121 {
1122 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1123 DE_NULL, // const void* pNext;
1124 0u, // VkAccessFlags srcAccessMask;
1125 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1126 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1127 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1128 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1129 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1130 destImage, // VkImage image;
1131 { // VkImageSubresourceRange subresourceRange;
1132 aspectMask, // VkImageAspect aspect;
1133 0u, // deUint32 baseMipLevel;
1134 mipLevels, // deUint32 mipLevels;
1135 0u, // deUint32 baseArraySlice;
1136 arrayLayers // deUint32 arraySize;
1137 }
1138 };
1139
1140 const VkImageMemoryBarrier postImageBarrier =
1141 {
1142 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1143 DE_NULL, // const void* pNext;
1144 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1145 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1146 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1147 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1148 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1149 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1150 destImage, // VkImage image;
1151 { // VkImageSubresourceRange subresourceRange;
1152 aspectMask, // VkImageAspect aspect;
1153 0u, // deUint32 baseMipLevel;
1154 mipLevels, // deUint32 mipLevels;
1155 0u, // deUint32 baseArraySlice;
1156 arrayLayers // deUint32 arraySize;
1157 }
1158 };
1159
1160 const VkImageSubresourceRange clearRange =
1161 {
1162 aspectMask, // VkImageAspectFlags aspectMask;
1163 0u, // deUint32 baseMipLevel;
1164 mipLevels, // deUint32 levelCount;
1165 0u, // deUint32 baseArrayLayer;
1166 arrayLayers // deUint32 layerCount;
1167 };
1168
1169 // Copy buffer to image
1170 beginCommandBuffer(vk, *cmdBuffer);
1171 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
1172 if (aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
1173 {
1174 vk.cmdClearColorImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1, &clearRange);
1175 }
1176 else
1177 {
1178 vk.cmdClearDepthStencilImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.depthStencil, 1, &clearRange);
1179 }
1180 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1181 endCommandBuffer(vk, *cmdBuffer);
1182
1183 submitCommandsAndWait(vk, vkDevice, queue, cmdBuffer.get());
1184 }
1185
mipLevelExtents(const VkExtent3D & baseExtents,const deUint32 mipLevel)1186 VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel)
1187 {
1188 VkExtent3D result;
1189
1190 result.width = std::max(baseExtents.width >> mipLevel, 1u);
1191 result.height = std::max(baseExtents.height >> mipLevel, 1u);
1192 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
1193
1194 return result;
1195 }
1196
alignedDivide(const VkExtent3D & extent,const VkExtent3D & divisor)1197 tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
1198 {
1199 tcu::UVec3 result;
1200
1201 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
1202 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
1203 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
1204
1205 return result;
1206 }
1207
isImageSizeSupported(const VkImageType imageType,const tcu::UVec3 & imageSize,const vk::VkPhysicalDeviceLimits & limits)1208 bool isImageSizeSupported (const VkImageType imageType, const tcu::UVec3& imageSize, const vk::VkPhysicalDeviceLimits& limits)
1209 {
1210 switch (imageType)
1211 {
1212 case VK_IMAGE_TYPE_1D:
1213 return (imageSize.x() <= limits.maxImageDimension1D
1214 && imageSize.y() == 1
1215 && imageSize.z() == 1);
1216 case VK_IMAGE_TYPE_2D:
1217 return (imageSize.x() <= limits.maxImageDimension2D
1218 && imageSize.y() <= limits.maxImageDimension2D
1219 && imageSize.z() == 1);
1220 case VK_IMAGE_TYPE_3D:
1221 return (imageSize.x() <= limits.maxImageDimension3D
1222 && imageSize.y() <= limits.maxImageDimension3D
1223 && imageSize.z() <= limits.maxImageDimension3D);
1224 default:
1225 DE_FATAL("Unknown image type");
1226 return false;
1227 }
1228 }
1229
checkSparseSupport(const VkImageCreateInfo & imageInfo) const1230 void ShaderRenderCaseInstance::checkSparseSupport (const VkImageCreateInfo& imageInfo) const
1231 {
1232 #ifdef CTS_USES_VULKANSC
1233 TCU_THROW(NotSupportedError, "Vulkan SC does not support sparse operations");
1234 #endif // CTS_USES_VULKANSC
1235 const InstanceInterface& instance = getInstanceInterface();
1236 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1237 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
1238 #ifndef CTS_USES_VULKANSC
1239 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec = getPhysicalDeviceSparseImageFormatProperties(
1240 instance, physicalDevice, imageInfo.format, imageInfo.imageType, imageInfo.samples, imageInfo.usage, imageInfo.tiling);
1241 #endif // CTS_USES_VULKANSC
1242
1243 if (!deviceFeatures.shaderResourceResidency)
1244 TCU_THROW(NotSupportedError, "Required feature: shaderResourceResidency.");
1245
1246 if (!deviceFeatures.sparseBinding)
1247 TCU_THROW(NotSupportedError, "Required feature: sparseBinding.");
1248
1249 if (imageInfo.imageType == VK_IMAGE_TYPE_2D && !deviceFeatures.sparseResidencyImage2D)
1250 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage2D.");
1251
1252 if (imageInfo.imageType == VK_IMAGE_TYPE_3D && !deviceFeatures.sparseResidencyImage3D)
1253 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage3D.");
1254 #ifndef CTS_USES_VULKANSC
1255 if (sparseImageFormatPropVec.size() == 0)
1256 TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
1257 #endif // CTS_USES_VULKANSC
1258 }
1259
1260 #ifndef CTS_USES_VULKANSC
uploadSparseImage(const tcu::TextureFormat & texFormat,const TextureData & textureData,const tcu::Sampler & refSampler,const deUint32 mipLevels,const deUint32 arrayLayers,const VkImage sparseImage,const VkImageCreateInfo & imageCreateInfo,const tcu::UVec3 texSize)1261 void ShaderRenderCaseInstance::uploadSparseImage (const tcu::TextureFormat& texFormat,
1262 const TextureData& textureData,
1263 const tcu::Sampler& refSampler,
1264 const deUint32 mipLevels,
1265 const deUint32 arrayLayers,
1266 const VkImage sparseImage,
1267 const VkImageCreateInfo& imageCreateInfo,
1268 const tcu::UVec3 texSize)
1269 {
1270 const VkDevice vkDevice = getDevice();
1271 const DeviceInterface& vk = getDeviceInterface();
1272 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1273 const VkQueue queue = getUniversalQueue();
1274 const VkQueue sparseQueue = getSparseQueue();
1275 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1276 const InstanceInterface& instance = getInstanceInterface();
1277 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
1278 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1279 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1280 const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(vk, vkDevice));
1281 Move<VkBuffer> buffer;
1282 deUint32 bufferSize = 0u;
1283 de::MovePtr<Allocation> bufferAlloc;
1284 std::vector<VkBufferImageCopy> copyRegions;
1285 std::vector<deUint32> offsetMultiples;
1286
1287 offsetMultiples.push_back(4u);
1288 offsetMultiples.push_back(texFormat.getPixelSize());
1289
1290 if (isImageSizeSupported(imageCreateInfo.imageType, texSize, deviceProperties.limits) == false)
1291 TCU_THROW(NotSupportedError, "Image size not supported for device.");
1292
1293 allocateAndBindSparseImage(vk, vkDevice, physicalDevice, instance, imageCreateInfo, *imageMemoryBindSemaphore, sparseQueue, m_memAlloc, m_allocations, texFormat, sparseImage);
1294
1295 // Calculate buffer size
1296 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1297 {
1298 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1299 {
1300 const tcu::ConstPixelBufferAccess& access = *lit;
1301
1302 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1303 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1304 }
1305 }
1306
1307 {
1308 // Create source buffer
1309 const VkBufferCreateInfo bufferParams =
1310 {
1311 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1312 DE_NULL, // const void* pNext;
1313 0u, // VkBufferCreateFlags flags;
1314 bufferSize, // VkDeviceSize size;
1315 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1316 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1317 0u, // deUint32 queueFamilyIndexCount;
1318 DE_NULL, // const deUint32* pQueueFamilyIndices;
1319 };
1320
1321 buffer = createBuffer(vk, vkDevice, &bufferParams);
1322 bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1323
1324 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1325 }
1326
1327 // Get copy regions and write buffer data
1328 {
1329 deUint32 layerDataOffset = 0;
1330 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1331
1332 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1333 {
1334 const TextureLayerData& layerData = textureData[levelNdx];
1335
1336 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1337 {
1338 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1339
1340 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1341 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1342
1343 const VkBufferImageCopy layerRegion =
1344 {
1345 layerDataOffset, // VkDeviceSize bufferOffset;
1346 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1347 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1348 { // VkImageSubresourceLayers imageSubresource;
1349 aspectMask, // VkImageAspectFlags aspectMask;
1350 (deUint32)levelNdx, // uint32_t mipLevel;
1351 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1352 1u // uint32_t layerCount;
1353 },
1354 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1355 { // VkExtent3D imageExtent;
1356 (deUint32)access.getWidth(),
1357 (deUint32)access.getHeight(),
1358 (deUint32)access.getDepth()
1359 }
1360 };
1361
1362 copyRegions.push_back(layerRegion);
1363 tcu::copy(destAccess, access);
1364
1365 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1366 }
1367 }
1368 }
1369 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions, &(*imageMemoryBindSemaphore), aspectMask, mipLevels, arrayLayers, sparseImage);
1370 }
1371 #endif // CTS_USES_VULKANSC
1372
useSampler(deUint32 bindingLocation,deUint32 textureId)1373 void ShaderRenderCaseInstance::useSampler (deUint32 bindingLocation, deUint32 textureId)
1374 {
1375 DE_ASSERT(textureId < m_textures.size());
1376
1377 const TextureBinding& textureBinding = *m_textures[textureId];
1378 const TextureBinding::Type textureType = textureBinding.getType();
1379 const tcu::Sampler& refSampler = textureBinding.getSampler();
1380 const TextureBinding::Parameters& textureParams = textureBinding.getParameters();
1381 const bool isMSTexture = textureParams.samples != vk::VK_SAMPLE_COUNT_1_BIT;
1382 deUint32 mipLevels = 1u;
1383 deUint32 arrayLayers = 1u;
1384 tcu::TextureFormat texFormat;
1385 tcu::UVec3 texSize;
1386 TextureData textureData;
1387
1388 if (textureType == TextureBinding::TYPE_2D)
1389 {
1390 const tcu::Texture2D& texture = textureBinding.get2D();
1391
1392 texFormat = texture.getFormat();
1393 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1394 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1395 arrayLayers = 1u;
1396
1397 textureData.resize(mipLevels);
1398
1399 for (deUint32 level = 0; level < mipLevels; ++level)
1400 {
1401 if (texture.isLevelEmpty(level))
1402 continue;
1403
1404 textureData[level].push_back(texture.getLevel(level));
1405 }
1406 }
1407 else if (textureType == TextureBinding::TYPE_CUBE_MAP)
1408 {
1409 const tcu::TextureCube& texture = textureBinding.getCube();
1410
1411 texFormat = texture.getFormat();
1412 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1u);
1413 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1414 arrayLayers = 6u;
1415
1416 static const tcu::CubeFace cubeFaceMapping[tcu::CUBEFACE_LAST] =
1417 {
1418 tcu::CUBEFACE_POSITIVE_X,
1419 tcu::CUBEFACE_NEGATIVE_X,
1420 tcu::CUBEFACE_POSITIVE_Y,
1421 tcu::CUBEFACE_NEGATIVE_Y,
1422 tcu::CUBEFACE_POSITIVE_Z,
1423 tcu::CUBEFACE_NEGATIVE_Z
1424 };
1425
1426 textureData.resize(mipLevels);
1427
1428 for (deUint32 level = 0; level < mipLevels; ++level)
1429 {
1430 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
1431 {
1432 tcu::CubeFace face = cubeFaceMapping[faceNdx];
1433
1434 if (texture.isLevelEmpty(face, level))
1435 continue;
1436
1437 textureData[level].push_back(texture.getLevelFace(level, face));
1438 }
1439 }
1440 }
1441 else if (textureType == TextureBinding::TYPE_2D_ARRAY)
1442 {
1443 const tcu::Texture2DArray& texture = textureBinding.get2DArray();
1444
1445 texFormat = texture.getFormat();
1446 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1447 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1448 arrayLayers = (deUint32)texture.getNumLayers();
1449
1450 textureData.resize(mipLevels);
1451
1452 for (deUint32 level = 0; level < mipLevels; ++level)
1453 {
1454 if (texture.isLevelEmpty(level))
1455 continue;
1456
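// Each reference level stores all layers contiguously; split it into single-layer accesses by byte offset.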
1457 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1458 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1459
1460 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1461 {
1462 const deUint32 layerOffset = layerSize * layer;
1463 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1464 textureData[level].push_back(layerData);
1465 }
1466 }
1467 }
1468 else if (textureType == TextureBinding::TYPE_3D)
1469 {
1470 const tcu::Texture3D& texture = textureBinding.get3D();
1471
1472 texFormat = texture.getFormat();
1473 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), texture.getDepth());
1474 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1475 arrayLayers = 1u;
1476
1477 textureData.resize(mipLevels);
1478
1479 for (deUint32 level = 0; level < mipLevels; ++level)
1480 {
1481 if (texture.isLevelEmpty(level))
1482 continue;
1483
1484 textureData[level].push_back(texture.getLevel(level));
1485 }
1486 }
1487 else if (textureType == TextureBinding::TYPE_1D)
1488 {
1489 const tcu::Texture1D& texture = textureBinding.get1D();
1490
1491 texFormat = texture.getFormat();
1492 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1493 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1494 arrayLayers = 1u;
1495
1496 textureData.resize(mipLevels);
1497
1498 for (deUint32 level = 0; level < mipLevels; ++level)
1499 {
1500 if (texture.isLevelEmpty(level))
1501 continue;
1502
1503 textureData[level].push_back(texture.getLevel(level));
1504 }
1505 }
1506 else if (textureType == TextureBinding::TYPE_1D_ARRAY)
1507 {
1508 const tcu::Texture1DArray& texture = textureBinding.get1DArray();
1509
1510 texFormat = texture.getFormat();
1511 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1512 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1513 arrayLayers = (deUint32)texture.getNumLayers();
1514
1515 textureData.resize(mipLevels);
1516
1517 for (deUint32 level = 0; level < mipLevels; ++level)
1518 {
1519 if (texture.isLevelEmpty(level))
1520 continue;
1521
1522 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1523 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
1524
1525 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1526 {
1527 const deUint32 layerOffset = layerSize * layer;
1528 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1529 textureData[level].push_back(layerData);
1530 }
1531 }
1532 }
1533 else if (textureType == TextureBinding::TYPE_CUBE_ARRAY)
1534 {
1535 const tcu::TextureCubeArray& texture = textureBinding.getCubeArray();
1536 texFormat = texture.getFormat();
1537 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1);
1538 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1539 arrayLayers = texture.getDepth();
1540
1541 textureData.resize(mipLevels);
1542
1543 for (deUint32 level = 0; level < mipLevels; ++level)
1544 {
1545 if (texture.isLevelEmpty(level))
1546 continue;
1547
1548 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1549 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1550
1551 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1552 {
1553 const deUint32 layerOffset = layerSize * layer;
1554 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1555 textureData[level].push_back(layerData);
1556 }
1557 }
1558 }
1559 else
1560 {
1561 TCU_THROW(InternalError, "Invalid texture type");
1562 }
1563
1564 createSamplerUniform(bindingLocation, textureType, textureBinding.getParameters().initialization, texFormat, texSize, textureData, refSampler, mipLevels, arrayLayers, textureParams);
1565 }
1566
1567 void ShaderRenderCaseInstance::setPushConstantRanges (const deUint32 rangeCount, const vk::VkPushConstantRange* const pcRanges)
1568 {
1569 m_pushConstantRanges.clear();
1570 for (deUint32 i = 0; i < rangeCount; ++i)
1571 {
1572 m_pushConstantRanges.push_back(pcRanges[i]);
1573 }
1574 }
1575
1576 void ShaderRenderCaseInstance::updatePushConstants (vk::VkCommandBuffer, vk::VkPipelineLayout)
1577 {
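// Intentionally left empty: tests that registered ranges via setPushConstantRanges() override
// this hook to record their values, e.g. (illustrative sketch only, names are hypothetical):
//
//   const float scale = 0.5f;
//   vk.cmdPushConstants(cmdBuffer, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0u, (deUint32)sizeof(scale), &scale);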
1578 }
1579
1580 void ShaderRenderCaseInstance::createSamplerUniform (deUint32 bindingLocation,
1581 TextureBinding::Type textureType,
1582 TextureBinding::Init textureInit,
1583 const tcu::TextureFormat& texFormat,
1584 const tcu::UVec3 texSize,
1585 const TextureData& textureData,
1586 const tcu::Sampler& refSampler,
1587 deUint32 mipLevels,
1588 deUint32 arrayLayers,
1589 TextureBinding::Parameters textureParams)
1590 {
1591 const VkDevice vkDevice = getDevice();
1592 const DeviceInterface& vk = getDeviceInterface();
1593 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1594 const deUint32 sparseFamilyIndex = (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE) ? getSparseQueueFamilyIndex() : queueFamilyIndex;
1595
1596 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1597
1598 // When isShadowSampler is true the mapSampler utility will set compareEnabled in
1599 // VkSamplerCreateInfo to true; under VK_KHR_portability_subset this functionality is
1600 // gated by a feature flag. This is only a safety check: the requirement is known at the
1601 // TestCase level, so NotSupportedError should be thrown from checkSupport.
1602 #ifndef CTS_USES_VULKANSC
1603 if (isShadowSampler &&
1604 m_context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1605 !m_context.getPortabilitySubsetFeatures().mutableComparisonSamplers)
1606 {
1607 DE_FATAL("mutableComparisonSamplers support should be checked in checkSupport");
1608 }
1609 #endif // CTS_USES_VULKANSC
1610
1611 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1612 const VkImageViewType imageViewType = textureTypeToImageViewType(textureType);
1613 const VkImageType imageType = viewTypeToImageType(imageViewType);
1614 const VkSharingMode sharingMode = (queueFamilyIndex != sparseFamilyIndex) ? VK_SHARING_MODE_CONCURRENT : VK_SHARING_MODE_EXCLUSIVE;
1615 const VkFormat format = mapTextureFormat(texFormat);
1616 const VkImageUsageFlags imageUsageFlags = textureUsageFlags();
1617 const VkImageCreateFlags imageCreateFlags = textureCreateFlags(imageViewType, m_imageBackingMode);
1618
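// If sparse binding uses a different queue family than the universal queue, the image must be
// shared concurrently between both families; otherwise exclusive sharing is sufficient.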
1619 const deUint32 queueIndexCount = (queueFamilyIndex != sparseFamilyIndex) ? 2 : 1;
1620 const deUint32 queueIndices[] =
1621 {
1622 queueFamilyIndex,
1623 sparseFamilyIndex
1624 };
1625
1626 Move<VkImage> vkTexture;
1627 de::MovePtr<Allocation> allocation;
1628
1629 // Create image
1630 const VkImageCreateInfo imageParams =
1631 {
1632 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1633 DE_NULL, // const void* pNext;
1634 imageCreateFlags, // VkImageCreateFlags flags;
1635 imageType, // VkImageType imageType;
1636 format, // VkFormat format;
1637 { // VkExtent3D extent;
1638 texSize.x(),
1639 texSize.y(),
1640 texSize.z()
1641 },
1642 mipLevels, // deUint32 mipLevels;
1643 arrayLayers, // deUint32 arrayLayers;
1644 textureParams.samples, // VkSampleCountFlagBits samples;
1645 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1646 imageUsageFlags, // VkImageUsageFlags usage;
1647 sharingMode, // VkSharingMode sharingMode;
1648 queueIndexCount, // deUint32 queueFamilyIndexCount;
1649 queueIndices, // const deUint32* pQueueFamilyIndices;
1650 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1651 };
1652
1653 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
1654 {
1655 checkSparseSupport(imageParams);
1656 }
1657
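// Create the image and allocate backing memory. For sparse images the memory is bound later
// through sparse binding, so vkBindImageMemory is skipped below.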
1658 vkTexture = createImage(vk, vkDevice, &imageParams);
1659 allocation = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *vkTexture), MemoryRequirement::Any);
1660
1661 if (m_imageBackingMode != IMAGE_BACKING_MODE_SPARSE)
1662 {
1663 VK_CHECK(vk.bindImageMemory(vkDevice, *vkTexture, allocation->getMemory(), allocation->getOffset()));
1664 }
1665
1666 switch (textureInit)
1667 {
1668 case TextureBinding::INIT_UPLOAD_DATA:
1669 {
1670 // upload*Image functions use cmdCopyBufferToImage, which is invalid for multisample images
1671 DE_ASSERT(textureParams.samples == VK_SAMPLE_COUNT_1_BIT);
1672
1673 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
1674 {
1675 #ifndef CTS_USES_VULKANSC
1676 uploadSparseImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture, imageParams, texSize);
1677 #endif // CTS_USES_VULKANSC
1678 }
1679 else
1680 {
1681 // Upload texture data
1682 uploadImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture);
1683 }
1684 break;
1685 }
1686 case TextureBinding::INIT_CLEAR:
1687 clearImage(refSampler, mipLevels, arrayLayers, *vkTexture);
1688 break;
1689 default:
1690 DE_FATAL("Impossible");
1691 }
1692
1693 // Create sampler
1694 const auto& minMaxLod = textureParams.minMaxLod;
1695 const VkSamplerCreateInfo samplerParams = (minMaxLod
1696 ? mapSampler(refSampler, texFormat, minMaxLod.get().minLod, minMaxLod.get().maxLod)
1697 : mapSampler(refSampler, texFormat));
1698 Move<VkSampler> sampler = createSampler(vk, vkDevice, &samplerParams);
1699 const deUint32 baseMipLevel = textureParams.baseMipLevel;
1700 const vk::VkComponentMapping components = textureParams.componentMapping;
1701 const VkImageViewCreateInfo viewParams =
1702 {
1703 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1704 DE_NULL, // const void* pNext;
1705 0u, // VkImageViewCreateFlags flags;
1706 *vkTexture, // VkImage image;
1707 imageViewType, // VkImageViewType viewType;
1708 format, // VkFormat format;
1709 components, // VkComponentMapping components;
1710 {
1711 aspectMask, // VkImageAspectFlags aspectMask;
1712 baseMipLevel, // deUint32 baseMipLevel;
1713 mipLevels - baseMipLevel, // deUint32 levelCount;
1714 0, // deUint32 baseArrayLayer;
1715 arrayLayers // deUint32 layerCount;
1716 }, // VkImageSubresourceRange subresourceRange;
1717 };
1718
1719 Move<VkImageView> imageView = createImageView(vk, vkDevice, &viewParams);
1720
1721 const vk::VkDescriptorImageInfo descriptor =
1722 {
1723 sampler.get(), // VkSampler sampler;
1724 imageView.get(), // VkImageView imageView;
1725 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout imageLayout;
1726 };
1727
1728 de::MovePtr<SamplerUniform> uniform(new SamplerUniform());
1729 uniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1730 uniform->descriptor = descriptor;
1731 uniform->location = bindingLocation;
1732 uniform->image = VkImageSp(new vk::Unique<VkImage>(vkTexture));
1733 uniform->imageView = VkImageViewSp(new vk::Unique<VkImageView>(imageView));
1734 uniform->sampler = VkSamplerSp(new vk::Unique<VkSampler>(sampler));
1735 uniform->alloc = AllocationSp(allocation.release());
1736
1737 m_descriptorSetLayoutBuilder->addSingleSamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_ALL, DE_NULL);
1738 m_descriptorPoolBuilder->addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
1739
1740 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniform)));
1741 }
1742
1743 void ShaderRenderCaseInstance::setupDefaultInputs (void)
1744 {
1745 /* Configuration of the vertex input attributes:
1746 a_position is at location 0
1747 a_coords is at location 1
1748 a_unitCoords is at location 2
1749 a_one is at location 3
1750
1751 User attributes start at location 4.
1752 */
1753
1754 DE_ASSERT(m_quadGrid);
1755 const QuadGrid& quadGrid = *m_quadGrid;
1756
1757 addAttribute(0u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getPositions());
1758 addAttribute(1u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getCoords());
1759 addAttribute(2u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUnitCoords());
1760 addAttribute(3u, VK_FORMAT_R32_SFLOAT, sizeof(float), quadGrid.getNumVertices(), quadGrid.getAttribOne());
1761
1762 static const struct
1763 {
1764 BaseAttributeType type;
1765 int userNdx;
1766 } userAttributes[] =
1767 {
1768 { A_IN0, 0 },
1769 { A_IN1, 1 },
1770 { A_IN2, 2 },
1771 { A_IN3, 3 }
1772 };
1773
1774 static const struct
1775 {
1776 BaseAttributeType matrixType;
1777 int numCols;
1778 int numRows;
1779 } matrices[] =
1780 {
1781 { MAT2, 2, 2 },
1782 { MAT2x3, 2, 3 },
1783 { MAT2x4, 2, 4 },
1784 { MAT3x2, 3, 2 },
1785 { MAT3, 3, 3 },
1786 { MAT3x4, 3, 4 },
1787 { MAT4x2, 4, 2 },
1788 { MAT4x3, 4, 3 },
1789 { MAT4, 4, 4 }
1790 };
1791
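// Bind each enabled test-specific attribute: vector attributes (a_in0..a_in3) occupy a single
// location, while matrix attributes are fed as one vec4 column per consecutive location.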
1792 for (size_t attrNdx = 0; attrNdx < m_enabledBaseAttributes.size(); attrNdx++)
1793 {
1794 for (int userNdx = 0; userNdx < DE_LENGTH_OF_ARRAY(userAttributes); userNdx++)
1795 {
1796 if (userAttributes[userNdx].type != m_enabledBaseAttributes[attrNdx].type)
1797 continue;
1798
1799 addAttribute(m_enabledBaseAttributes[attrNdx].location, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUserAttrib(userNdx));
1800 }
1801
1802 for (int matNdx = 0; matNdx < DE_LENGTH_OF_ARRAY(matrices); matNdx++)
1803 {
1804
1805 if (matrices[matNdx].matrixType != m_enabledBaseAttributes[attrNdx].type)
1806 continue;
1807
1808 const int numCols = matrices[matNdx].numCols;
1809
1810 for (int colNdx = 0; colNdx < numCols; colNdx++)
1811 {
1812 addAttribute(m_enabledBaseAttributes[attrNdx].location + colNdx, VK_FORMAT_R32G32B32A32_SFLOAT, (deUint32)(4 * sizeof(float)), quadGrid.getNumVertices(), quadGrid.getUserAttrib(colNdx));
1813 }
1814 }
1815 }
1816 }
1817
1818 void ShaderRenderCaseInstance::render (deUint32 numVertices,
1819 deUint32 numTriangles,
1820 const deUint16* indices,
1821 const tcu::Vec4& constCoords)
1822 {
1823 render(numVertices, numTriangles * 3, indices, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, constCoords);
1824 }
1825
1826 void ShaderRenderCaseInstance::render (deUint32 numVertices,
1827 deUint32 numIndices,
1828 const deUint16* indices,
1829 VkPrimitiveTopology topology,
1830 const tcu::Vec4& constCoords)
1831 {
1832 const VkDevice vkDevice = getDevice();
1833 const DeviceInterface& vk = getDeviceInterface();
1834 const VkQueue queue = getUniversalQueue();
1835 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1836
1837 vk::Move<vk::VkImage> colorImage;
1838 de::MovePtr<vk::Allocation> colorImageAlloc;
1839 vk::Move<vk::VkImageView> colorImageView;
1840 vk::Move<vk::VkImage> resolvedImage;
1841 de::MovePtr<vk::Allocation> resolvedImageAlloc;
1842 vk::Move<vk::VkImageView> resolvedImageView;
1843 vk::Move<vk::VkRenderPass> renderPass;
1844 vk::Move<vk::VkFramebuffer> framebuffer;
1845 vk::Move<vk::VkPipelineLayout> pipelineLayout;
1846 vk::Move<vk::VkPipeline> graphicsPipeline;
1847 vk::Move<vk::VkShaderModule> vertexShaderModule;
1848 vk::Move<vk::VkShaderModule> fragmentShaderModule;
1849 vk::Move<vk::VkBuffer> indexBuffer;
1850 de::MovePtr<vk::Allocation> indexBufferAlloc;
1851 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
1852 vk::Move<vk::VkDescriptorPool> descriptorPool;
1853 vk::Move<vk::VkDescriptorSet> descriptorSet;
1854 vk::Move<vk::VkCommandPool> cmdPool;
1855 vk::Move<vk::VkCommandBuffer> cmdBuffer;
1856
1857 // Create color image
1858 {
1859 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1860 VkImageFormatProperties properties;
1861
1862 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
1863 m_colorFormat,
1864 VK_IMAGE_TYPE_2D,
1865 VK_IMAGE_TILING_OPTIMAL,
1866 imageUsage,
1867 0u,
1868 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
1869 {
1870 TCU_THROW(NotSupportedError, "Format not supported");
1871 }
1872
1873 if ((properties.sampleCounts & m_sampleCount) != m_sampleCount)
1874 {
1875 TCU_THROW(NotSupportedError, "Sample count not supported for this format");
1876 }
1877
1878 const VkImageCreateInfo colorImageParams =
1879 {
1880 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1881 DE_NULL, // const void* pNext;
1882 0u, // VkImageCreateFlags flags;
1883 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1884 m_colorFormat, // VkFormat format;
1885 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
1886 1u, // deUint32 mipLevels;
1887 1u, // deUint32 arraySize;
1888 m_sampleCount, // deUint32 samples;
1889 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1890 imageUsage, // VkImageUsageFlags usage;
1891 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1892 1u, // deUint32 queueFamilyCount;
1893 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
1894 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1895 };
1896
1897 colorImage = createImage(vk, vkDevice, &colorImageParams);
1898
1899 // Allocate and bind color image memory
1900 colorImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *colorImage), MemoryRequirement::Any);
1901 VK_CHECK(vk.bindImageMemory(vkDevice, *colorImage, colorImageAlloc->getMemory(), colorImageAlloc->getOffset()));
1902 }
1903
1904 // Create color attachment view
1905 {
1906 const VkImageViewCreateInfo colorImageViewParams =
1907 {
1908 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1909 DE_NULL, // const void* pNext;
1910 0u, // VkImageViewCreateFlags flags;
1911 *colorImage, // VkImage image;
1912 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1913 m_colorFormat, // VkFormat format;
1914 {
1915 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
1916 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
1917 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
1918 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
1919 }, // VkChannelMapping channels;
1920 {
1921 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1922 0, // deUint32 baseMipLevel;
1923 1, // deUint32 mipLevels;
1924 0, // deUint32 baseArraySlice;
1925 1 // deUint32 arraySize;
1926 }, // VkImageSubresourceRange subresourceRange;
1927 };
1928
1929 colorImageView = createImageView(vk, vkDevice, &colorImageViewParams);
1930 }
1931
1932 if (isMultiSampling())
1933 {
1934 // Resolved Image
1935 {
1936 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1937 VkImageFormatProperties properties;
1938
1939 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
1940 m_colorFormat,
1941 VK_IMAGE_TYPE_2D,
1942 VK_IMAGE_TILING_OPTIMAL,
1943 imageUsage,
1944 0,
1945 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
1946 {
1947 TCU_THROW(NotSupportedError, "Format not supported");
1948 }
1949
1950 const VkImageCreateInfo imageCreateInfo =
1951 {
1952 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1953 DE_NULL, // const void* pNext;
1954 0u, // VkImageCreateFlags flags;
1955 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1956 m_colorFormat, // VkFormat format;
1957 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
1958 1u, // deUint32 mipLevels;
1959 1u, // deUint32 arrayLayers;
1960 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1961 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1962 imageUsage, // VkImageUsageFlags usage;
1963 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1964 1u, // deUint32 queueFamilyIndexCount;
1965 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
1966 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1967 };
1968
1969 resolvedImage = vk::createImage(vk, vkDevice, &imageCreateInfo, DE_NULL);
1970 resolvedImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *resolvedImage), MemoryRequirement::Any);
1971 VK_CHECK(vk.bindImageMemory(vkDevice, *resolvedImage, resolvedImageAlloc->getMemory(), resolvedImageAlloc->getOffset()));
1972 }
1973
1974 // Resolved Image View
1975 {
1976 const VkImageViewCreateInfo imageViewCreateInfo =
1977 {
1978 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1979 DE_NULL, // const void* pNext;
1980 0u, // VkImageViewCreateFlags flags;
1981 *resolvedImage, // VkImage image;
1982 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1983 m_colorFormat, // VkFormat format;
1984 {
1985 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
1986 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
1987 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
1988 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
1989 },
1990 {
1991 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1992 0u, // deUint32 baseMipLevel;
1993 1u, // deUint32 mipLevels;
1994 0u, // deUint32 baseArrayLayer;
1995 1u, // deUint32 arraySize;
1996 }, // VkImageSubresourceRange subresourceRange;
1997 };
1998
1999 resolvedImageView = vk::createImageView(vk, vkDevice, &imageViewCreateInfo, DE_NULL);
2000 }
2001 }
2002
2003 // Create render pass
2004 {
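// Attachment 0 is the (possibly multisampled) color attachment; attachment 1 is the
// single-sample resolve target and is only included when multisampling is enabled.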
2005 const VkAttachmentDescription attachmentDescription[] =
2006 {
2007 {
2008 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2009 m_colorFormat, // VkFormat format;
2010 m_sampleCount, // deUint32 samples;
2011 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
2012 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2013 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2014 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2015 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2016 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2017 },
2018 {
2019 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2020 m_colorFormat, // VkFormat format;
2021 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2022 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp loadOp;
2023 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2024 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2025 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2026 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2027 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2028 }
2029 };
2030
2031 const VkAttachmentReference attachmentReference =
2032 {
2033 0u, // deUint32 attachment;
2034 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2035 };
2036
2037 const VkAttachmentReference resolveAttachmentRef =
2038 {
2039 1u, // deUint32 attachment;
2040 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2041 };
2042
2043 const VkSubpassDescription subpassDescription =
2044 {
2045 0u, // VkSubpassDescriptionFlags flags;
2046 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
2047 0u, // deUint32 inputAttachmentCount;
2048 DE_NULL, // const VkAttachmentReference* pInputAttachments;
2049 1u, // deUint32 colorAttachmentCount;
2050 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
2051 isMultiSampling() ? &resolveAttachmentRef : DE_NULL, // const VkAttachmentReference* pResolveAttachments;
2052 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
2053 0u, // deUint32 preserveAttachmentCount;
2054 DE_NULL // const deUint32* pPreserveAttachments;
2055 };
2056
2057 const VkRenderPassCreateInfo renderPassParams =
2058 {
2059 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
2060 DE_NULL, // const void* pNext;
2061 0u, // VkRenderPassCreateFlags flags;
2062 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2063 attachmentDescription, // const VkAttachmentDescription* pAttachments;
2064 1u, // deUint32 subpassCount;
2065 &subpassDescription, // const VkSubpassDescription* pSubpasses;
2066 0u, // deUint32 dependencyCount;
2067 DE_NULL // const VkSubpassDependency* pDependencies;
2068 };
2069
2070 renderPass = createRenderPass(vk, vkDevice, &renderPassParams);
2071 }
2072
2073 // Create framebuffer
2074 {
2075 const VkImageView attachments[] =
2076 {
2077 *colorImageView,
2078 *resolvedImageView
2079 };
2080
2081 const VkFramebufferCreateInfo framebufferParams =
2082 {
2083 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
2084 DE_NULL, // const void* pNext;
2085 (VkFramebufferCreateFlags)0,
2086 *renderPass, // VkRenderPass renderPass;
2087 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2088 attachments, // const VkImageView* pAttachments;
2089 (deUint32)m_renderSize.x(), // deUint32 width;
2090 (deUint32)m_renderSize.y(), // deUint32 height;
2091 1u // deUint32 layers;
2092 };
2093
2094 framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
2095 }
2096
2097 // Create descriptors
2098 {
2099 setupUniforms(constCoords);
2100
2101 descriptorSetLayout = m_descriptorSetLayoutBuilder->build(vk, vkDevice);
2102 if (!m_uniformInfos.empty())
2103 {
2104 descriptorPool = m_descriptorPoolBuilder->build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2105 const VkDescriptorSetAllocateInfo allocInfo =
2106 {
2107 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
2108 DE_NULL,
2109 *descriptorPool,
2110 1u,
2111 &descriptorSetLayout.get(),
2112 };
2113
2114 descriptorSet = allocateDescriptorSet(vk, vkDevice, &allocInfo);
2115 }
2116
2117 for (deUint32 i = 0; i < m_uniformInfos.size(); i++)
2118 {
2119 const UniformInfo* uniformInfo = m_uniformInfos[i].get()->get();
2120 deUint32 location = uniformInfo->location;
2121
2122 if (uniformInfo->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
2123 {
2124 const BufferUniform* bufferInfo = dynamic_cast<const BufferUniform*>(uniformInfo);
2125
2126 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &bufferInfo->descriptor);
2127 }
2128 else if (uniformInfo->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2129 {
2130 const SamplerUniform* samplerInfo = dynamic_cast<const SamplerUniform*>(uniformInfo);
2131
2132 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &samplerInfo->descriptor);
2133 }
2134 else
2135 DE_FATAL("Impossible");
2136 }
2137
2138 m_descriptorSetUpdateBuilder->update(vk, vkDevice);
2139 }
2140
2141 // Create pipeline layout
2142 {
2143 const VkPushConstantRange* const pcRanges = m_pushConstantRanges.empty() ? DE_NULL : &m_pushConstantRanges[0];
2144 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
2145 {
2146 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
2147 DE_NULL, // const void* pNext;
2148 (VkPipelineLayoutCreateFlags)0,
2149 1u, // deUint32 descriptorSetCount;
2150 &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
2151 deUint32(m_pushConstantRanges.size()), // deUint32 pushConstantRangeCount;
2152 pcRanges // const VkPushConstantRange* pPushConstantRanges;
2153 };
2154
2155 pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
2156 }
2157
2158 // Create shaders
2159 {
2160 vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_vertexShaderName), 0);
2161 fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_fragmentShaderName), 0);
2162 }
2163
2164 // Create pipeline
2165 {
2166 // Add test case specific attributes
2167 if (m_attribFunc)
2168 m_attribFunc(*this, numVertices);
2169
2170 // Add base attributes
2171 setupDefaultInputs();
2172
2173 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
2174 {
2175 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2176 DE_NULL, // const void* pNext;
2177 (VkPipelineVertexInputStateCreateFlags)0,
2178 (deUint32)m_vertexBindingDescription.size(), // deUint32 bindingCount;
2179 &m_vertexBindingDescription[0], // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2180 (deUint32)m_vertexAttributeDescription.size(), // deUint32 attributeCount;
2181 &m_vertexAttributeDescription[0], // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2182 };
2183
2184 const std::vector<VkViewport> viewports (1, makeViewport(m_renderSize));
2185 const std::vector<VkRect2D> scissors (1, makeRect2D(m_renderSize));
2186
2187 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
2188 {
2189 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
2190 DE_NULL, // const void* pNext;
2191 0u, // VkPipelineMultisampleStateCreateFlags flags;
2192 m_sampleCount, // VkSampleCountFlagBits rasterizationSamples;
2193 VK_FALSE, // VkBool32 sampleShadingEnable;
2194 0.0f, // float minSampleShading;
2195 DE_NULL, // const VkSampleMask* pSampleMask;
2196 VK_FALSE, // VkBool32 alphaToCoverageEnable;
2197 VK_FALSE // VkBool32 alphaToOneEnable;
2198 };
2199
2200 graphicsPipeline = makeGraphicsPipeline(vk, // const DeviceInterface& vk
2201 vkDevice, // const VkDevice device
2202 *pipelineLayout, // const VkPipelineLayout pipelineLayout
2203 *vertexShaderModule, // const VkShaderModule vertexShaderModule
2204 DE_NULL, // const VkShaderModule tessellationControlShaderModule
2205 DE_NULL, // const VkShaderModule tessellationEvalShaderModule
2206 DE_NULL, // const VkShaderModule geometryShaderModule
2207 *fragmentShaderModule, // const VkShaderModule fragmentShaderModule
2208 *renderPass, // const VkRenderPass renderPass
2209 viewports, // const std::vector<VkViewport>& viewports
2210 scissors, // const std::vector<VkRect2D>& scissors
2211 topology, // const VkPrimitiveTopology topology
2212 0u, // const deUint32 subpass
2213 0u, // const deUint32 patchControlPoints
2214 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
2215 DE_NULL, // const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
2216 &multisampleStateParams); // const VkPipelineMultisampleStateCreateInfo* multisampleStateCreateInfo
2217 }
2218
2219 // Create vertex indices buffer
2220 if (numIndices != 0)
2221 {
2222 const VkDeviceSize indexBufferSize = numIndices * sizeof(deUint16);
2223 const VkBufferCreateInfo indexBufferParams =
2224 {
2225 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2226 DE_NULL, // const void* pNext;
2227 0u, // VkBufferCreateFlags flags;
2228 indexBufferSize, // VkDeviceSize size;
2229 VK_BUFFER_USAGE_INDEX_BUFFER_BIT, // VkBufferUsageFlags usage;
2230 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2231 1u, // deUint32 queueFamilyCount;
2232 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
2233 };
2234
2235 indexBuffer = createBuffer(vk, vkDevice, &indexBufferParams);
2236 indexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *indexBuffer), MemoryRequirement::HostVisible);
2237
2238 VK_CHECK(vk.bindBufferMemory(vkDevice, *indexBuffer, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset()));
2239
2240 // Load vertex indices into the buffer
2241 deMemcpy(indexBufferAlloc->getHostPtr(), indices, (size_t)indexBufferSize);
2242 flushAlloc(vk, vkDevice, *indexBufferAlloc);
2243 }
2244
2245 VkCommandPool activeCmdPool;
2246 if (m_externalCommandPool.get() == DE_NULL)
2247 {
2248 // Create local command pool
2249 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
2250 activeCmdPool = *cmdPool;
2251 }
2252 else
2253 {
2254 // Use external command pool if available
2255 activeCmdPool = m_externalCommandPool.get()->get();
2256 }
2257
2258 // Create command buffer
2259 {
2260 cmdBuffer = allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2261
2262 beginCommandBuffer(vk, *cmdBuffer);
2263
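// Transition the color attachment (and the resolve target when multisampling) from
// UNDEFINED to COLOR_ATTACHMENT_OPTIMAL before the render pass begins.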
2264 {
2265 const VkImageMemoryBarrier imageBarrier =
2266 {
2267 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2268 DE_NULL, // const void* pNext;
2269 0u, // VkAccessFlags srcAccessMask;
2270 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2271 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2272 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2273 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2274 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2275 *colorImage, // VkImage image;
2276 { // VkImageSubresourceRange subresourceRange;
2277 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2278 0u, // deUint32 baseMipLevel;
2279 1u, // deUint32 mipLevels;
2280 0u, // deUint32 baseArrayLayer;
2281 1u, // deUint32 arraySize;
2282 }
2283 };
2284
2285 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &imageBarrier);
2286
2287 if (isMultiSampling()) {
2288 // add multisample barrier
2289 const VkImageMemoryBarrier multiSampleImageBarrier =
2290 {
2291 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2292 DE_NULL, // const void* pNext;
2293 0u, // VkAccessFlags srcAccessMask;
2294 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2295 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2296 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2297 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2298 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2299 *resolvedImage, // VkImage image;
2300 { // VkImageSubresourceRange subresourceRange;
2301 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2302 0u, // deUint32 baseMipLevel;
2303 1u, // deUint32 mipLevels;
2304 0u, // deUint32 baseArrayLayer;
2305 1u, // deUint32 arraySize;
2306 }
2307 };
2308
2309 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &multiSampleImageBarrier);
2310 }
2311 }
2312
2313 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), m_clearColor);
2314
2315 updatePushConstants(*cmdBuffer, *pipelineLayout);
2316 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
2317 if (!m_uniformInfos.empty())
2318 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
2319
2320 const deUint32 numberOfVertexAttributes = (deUint32)m_vertexBuffers.size();
2321 const std::vector<VkDeviceSize> offsets(numberOfVertexAttributes, 0);
2322
2323 std::vector<VkBuffer> buffers(numberOfVertexAttributes);
2324 for (size_t i = 0; i < numberOfVertexAttributes; i++)
2325 {
2326 buffers[i] = m_vertexBuffers[i].get()->get();
2327 }
2328
2329 vk.cmdBindVertexBuffers(*cmdBuffer, 0, numberOfVertexAttributes, &buffers[0], &offsets[0]);
2330 if (numIndices != 0)
2331 {
2332 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT16);
2333 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1, 0, 0, 0);
2334 }
2335 else
2336 vk.cmdDraw(*cmdBuffer, numVertices, 1, 0, 0);
2337
2338 endRenderPass(vk, *cmdBuffer);
2339 endCommandBuffer(vk, *cmdBuffer);
2340 }
2341
2342 // Execute Draw
2343 submitCommandsAndWait(vk, vkDevice, queue, cmdBuffer.get());
2344
2345 // Read back the result
2346 {
2347 const tcu::TextureFormat resultFormat = mapVkFormat(m_colorFormat);
2348 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(resultFormat.getPixelSize() * m_renderSize.x() * m_renderSize.y());
2349 const VkBufferCreateInfo readImageBufferParams =
2350 {
2351 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2352 DE_NULL, // const void* pNext;
2353 0u, // VkBufferCreateFlags flags;
2354 imageSizeBytes, // VkDeviceSize size;
2355 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
2356 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2357 1u, // deUint32 queueFamilyCount;
2358 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2359 };
2360 const Unique<VkBuffer> readImageBuffer (createBuffer(vk, vkDevice, &readImageBufferParams));
2361 const de::UniquePtr<Allocation> readImageBufferMemory (m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *readImageBuffer), MemoryRequirement::HostVisible));
2362
2363 VK_CHECK(vk.bindBufferMemory(vkDevice, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
2364
2365 // Copy image to buffer
2366 const Move<VkCommandBuffer> resultCmdBuffer = allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2367
2368 beginCommandBuffer(vk, *resultCmdBuffer);
2369
2370 copyImageToBuffer(vk, *resultCmdBuffer, isMultiSampling() ? *resolvedImage : *colorImage, *readImageBuffer, tcu::IVec2(m_renderSize.x(), m_renderSize.y()));
2371
2372 endCommandBuffer(vk, *resultCmdBuffer);
2373
2374 submitCommandsAndWait(vk, vkDevice, queue, resultCmdBuffer.get());
2375
2376 invalidateAlloc(vk, vkDevice, *readImageBufferMemory);
2377
2378 const tcu::ConstPixelBufferAccess resultAccess (resultFormat, m_renderSize.x(), m_renderSize.y(), 1, readImageBufferMemory->getHostPtr());
2379
2380 m_resultImage.setStorage(resultFormat, m_renderSize.x(), m_renderSize.y());
2381 tcu::copy(m_resultImage.getAccess(), resultAccess);
2382 }
2383 }
2384
2385 void ShaderRenderCaseInstance::computeVertexReference (tcu::Surface& result, const QuadGrid& quadGrid)
2386 {
2387 DE_ASSERT(m_evaluator);
2388
2389 // Buffer info.
2390 const int width = result.getWidth();
2391 const int height = result.getHeight();
2392 const int gridSize = quadGrid.getGridSize();
2393 const int stride = gridSize + 1;
2394 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
2395 ShaderEvalContext evalCtx (quadGrid);
2396
2397 // Evaluate color for each vertex.
2398 std::vector<tcu::Vec4> colors ((gridSize + 1) * (gridSize + 1));
2399 for (int y = 0; y < gridSize+1; y++)
2400 for (int x = 0; x < gridSize+1; x++)
2401 {
2402 const float sx = (float)x / (float)gridSize;
2403 const float sy = (float)y / (float)gridSize;
2404 const int vtxNdx = ((y * (gridSize+1)) + x);
2405
2406 evalCtx.reset(sx, sy);
2407 m_evaluator->evaluate(evalCtx);
2408 DE_ASSERT(!evalCtx.isDiscarded); // Discard is not available in vertex shader.
2409 tcu::Vec4 color = evalCtx.color;
2410
2411 if (!hasAlpha)
2412 color.w() = 1.0f;
2413
2414 colors[vtxNdx] = color;
2415 }
2416
2417 // Render quads.
2418 for (int y = 0; y < gridSize; y++)
2419 for (int x = 0; x < gridSize; x++)
2420 {
2421 const float x0 = (float)x / (float)gridSize;
2422 const float x1 = (float)(x + 1) / (float)gridSize;
2423 const float y0 = (float)y / (float)gridSize;
2424 const float y1 = (float)(y + 1) / (float)gridSize;
2425
2426 const float sx0 = x0 * (float)width;
2427 const float sx1 = x1 * (float)width;
2428 const float sy0 = y0 * (float)height;
2429 const float sy1 = y1 * (float)height;
2430 const float oosx = 1.0f / (sx1 - sx0);
2431 const float oosy = 1.0f / (sy1 - sy0);
2432
2433 const int ix0 = deCeilFloatToInt32(sx0 - 0.5f);
2434 const int ix1 = deCeilFloatToInt32(sx1 - 0.5f);
2435 const int iy0 = deCeilFloatToInt32(sy0 - 0.5f);
2436 const int iy1 = deCeilFloatToInt32(sy1 - 0.5f);
2437
2438 const int v00 = (y * stride) + x;
2439 const int v01 = (y * stride) + x + 1;
2440 const int v10 = ((y + 1) * stride) + x;
2441 const int v11 = ((y + 1) * stride) + x + 1;
2442 const tcu::Vec4 c00 = colors[v00];
2443 const tcu::Vec4 c01 = colors[v01];
2444 const tcu::Vec4 c10 = colors[v10];
2445 const tcu::Vec4 c11 = colors[v11];
2446
2447 //printf("(%d,%d) -> (%f..%f, %f..%f) (%d..%d, %d..%d)\n", x, y, sx0, sx1, sy0, sy1, ix0, ix1, iy0, iy1);
2448
2449 for (int iy = iy0; iy < iy1; iy++)
2450 for (int ix = ix0; ix < ix1; ix++)
2451 {
2452 DE_ASSERT(deInBounds32(ix, 0, width));
2453 DE_ASSERT(deInBounds32(iy, 0, height));
2454
2455 const float sfx = (float)ix + 0.5f;
2456 const float sfy = (float)iy + 0.5f;
2457 const float fx1 = deFloatClamp((sfx - sx0) * oosx, 0.0f, 1.0f);
2458 const float fy1 = deFloatClamp((sfy - sy0) * oosy, 0.0f, 1.0f);
2459
2460 // Triangle quad interpolation.
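// The quad is split along its diagonal: points with fx1 + fy1 <= 1 lie in the triangle
// anchored at c00 and are interpolated towards c01 (+x) and c10 (+y); the remaining points
// are interpolated from c11 towards c10 and c01 with the complementary weights.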
2461 const bool tri = fx1 + fy1 <= 1.0f;
2462 const float tx = tri ? fx1 : (1.0f-fx1);
2463 const float ty = tri ? fy1 : (1.0f-fy1);
2464 const tcu::Vec4& t0 = tri ? c00 : c11;
2465 const tcu::Vec4& t1 = tri ? c01 : c10;
2466 const tcu::Vec4& t2 = tri ? c10 : c01;
2467 const tcu::Vec4 color = t0 + (t1-t0)*tx + (t2-t0)*ty;
2468
2469 result.setPixel(ix, iy, tcu::RGBA(color));
2470 }
2471 }
2472 }
2473
2474 void ShaderRenderCaseInstance::computeFragmentReference (tcu::Surface& result, const QuadGrid& quadGrid)
2475 {
2476 DE_ASSERT(m_evaluator);
2477
2478 // Buffer info.
2479 const int width = result.getWidth();
2480 const int height = result.getHeight();
2481 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
2482 ShaderEvalContext evalCtx (quadGrid);
2483
2484 // Render.
2485 for (int y = 0; y < height; y++)
2486 for (int x = 0; x < width; x++)
2487 {
2488 const float sx = ((float)x + 0.5f) / (float)width;
2489 const float sy = ((float)y + 0.5f) / (float)height;
2490
2491 evalCtx.reset(sx, sy);
2492 m_evaluator->evaluate(evalCtx);
2493 // Select either clear color or computed color based on discarded bit.
2494 tcu::Vec4 color = evalCtx.isDiscarded ? m_clearColor : evalCtx.color;
2495
2496 if (!hasAlpha)
2497 color.w() = 1.0f;
2498
2499 result.setPixel(x, y, tcu::RGBA(color));
2500 }
2501 }
2502
2503 bool ShaderRenderCaseInstance::compareImages (const tcu::Surface& resImage, const tcu::Surface& refImage, float errorThreshold)
2504 {
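// Either a fuzzy comparison with the given error threshold or an exact per-pixel comparison
// with a tolerance of 1 per color channel, depending on how the case was configured.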
2505 if (m_fuzzyCompare)
2506 return tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ComparisonResult", "Image comparison result", refImage, resImage, errorThreshold, tcu::COMPARE_LOG_EVERYTHING);
2507 else
2508 return tcu::pixelThresholdCompare(m_context.getTestContext().getLog(), "ComparisonResult", "Image comparison result", refImage, resImage, tcu::RGBA(1, 1, 1, 1), tcu::COMPARE_LOG_EVERYTHING);
2509 }
2510
2511 } // sr
2512 } // vkt
2513