1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file
23 * \brief Vulkan ShaderRenderCase
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktShaderRender.hpp"
27
28 #include "tcuImageCompare.hpp"
29 #include "tcuImageIO.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuTextureUtil.hpp"
32 #include "tcuSurface.hpp"
33 #include "tcuVector.hpp"
34
35 #include "deFilePath.hpp"
36 #include "deMath.h"
37 #include "deUniquePtr.hpp"
38
39 #include "vkDeviceUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkPlatform.hpp"
42 #include "vkQueryUtil.hpp"
43 #include "vkRef.hpp"
44 #include "vkRefUtil.hpp"
45 #include "vkStrUtil.hpp"
46 #include "vkTypeUtil.hpp"
47
48 #include <vector>
49 #include <string>
50
51 namespace vkt
52 {
53 namespace sr
54 {
55
56 using namespace vk;
57
58 namespace
59 {
60
61 static const int GRID_SIZE = 64;
62 static const deUint32 MAX_RENDER_WIDTH = 128;
63 static const deUint32 MAX_RENDER_HEIGHT = 128;
64 static const tcu::Vec4 DEFAULT_CLEAR_COLOR = tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f);
65
66 static VkImageViewType textureTypeToImageViewType (TextureBinding::Type type)
67 {
68 switch (type)
69 {
70 case TextureBinding::TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
71 case TextureBinding::TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
72 case TextureBinding::TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
73 case TextureBinding::TYPE_CUBE_MAP: return VK_IMAGE_VIEW_TYPE_CUBE;
74 case TextureBinding::TYPE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
75 case TextureBinding::TYPE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
76 case TextureBinding::TYPE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
77
78 default:
79 DE_FATAL("Impossible");
80 return (VkImageViewType)0;
81 }
82 }
83
84 static VkImageType viewTypeToImageType (VkImageViewType type)
85 {
86 switch (type)
87 {
88 case VK_IMAGE_VIEW_TYPE_1D:
89 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
90 case VK_IMAGE_VIEW_TYPE_2D:
91 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return VK_IMAGE_TYPE_2D;
92 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
93 case VK_IMAGE_VIEW_TYPE_CUBE:
94 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
95
96 default:
97 DE_FATAL("Impossible");
98 return (VkImageType)0;
99 }
100 }
101
102 /*! Gets the smallest multiple of the divisor that is greater than or equal to value */
103 static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
104 {
105 if (value % divisor == 0)
106 {
107 return value;
108 }
109 return value + divisor - (value % divisor);
110 }
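// For example, getNextMultiple(4u, 13u) returns 16u; a value that is already a multiple is returned unchanged.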
111
112 /*! Gets the smallest value, not less than the given value, that is a multiple of all given divisors */
113 static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
114 {
115 deUint32 nextMultiple = value;
116 bool nextMultipleFound = false;
117
118 while (true)
119 {
120 nextMultipleFound = true;
121
122 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
123 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
124
125 if (nextMultipleFound)
126 break;
127
128 DE_ASSERT(nextMultiple < ~((deUint32)0u));
129 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
130 }
131
132 return nextMultiple;
133 }
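// For example, with divisors {4, 12} and value 13 the search advances 13 -> 16 -> 20 -> 24 and returns 24,
// the first candidate divisible by every divisor.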
134
135 } // anonymous
136
137 // QuadGrid.
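// Built by ShaderRenderCaseInstance::iterate(): vertex cases use a dense GRID_SIZE x GRID_SIZE grid,
// fragment cases only a coarse 4x4 grid (see iterate() below).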
138
139 class QuadGrid
140 {
141 public:
142 QuadGrid (int gridSize,
143 int screenWidth,
144 int screenHeight,
145 const tcu::Vec4& constCoords,
146 const std::vector<tcu::Mat4>& userAttribTransforms,
147 const std::vector<TextureBindingSp>& textures);
148 ~QuadGrid (void);
149
150 int getGridSize (void) const { return m_gridSize; }
151 int getNumVertices (void) const { return m_numVertices; }
152 int getNumTriangles (void) const { return m_numTriangles; }
153 const tcu::Vec4& getConstCoords (void) const { return m_constCoords; }
154 const std::vector<tcu::Mat4> getUserAttribTransforms (void) const { return m_userAttribTransforms; }
155 const std::vector<TextureBindingSp>& getTextures (void) const { return m_textures; }
156
157 const tcu::Vec4* getPositions (void) const { return &m_positions[0]; }
158 const float* getAttribOne (void) const { return &m_attribOne[0]; }
159 const tcu::Vec4* getCoords (void) const { return &m_coords[0]; }
160 const tcu::Vec4* getUnitCoords (void) const { return &m_unitCoords[0]; }
161
162 const tcu::Vec4* getUserAttrib (int attribNdx) const { return &m_userAttribs[attribNdx][0]; }
163 const deUint16* getIndices (void) const { return &m_indices[0]; }
164
165 tcu::Vec4 getCoords (float sx, float sy) const;
166 tcu::Vec4 getUnitCoords (float sx, float sy) const;
167
168 int getNumUserAttribs (void) const { return (int)m_userAttribTransforms.size(); }
169 tcu::Vec4 getUserAttrib (int attribNdx, float sx, float sy) const;
170
171 private:
172 const int m_gridSize;
173 const int m_numVertices;
174 const int m_numTriangles;
175 const tcu::Vec4 m_constCoords;
176 const std::vector<tcu::Mat4> m_userAttribTransforms;
177
178 const std::vector<TextureBindingSp>& m_textures;
179
180 std::vector<tcu::Vec4> m_screenPos;
181 std::vector<tcu::Vec4> m_positions;
182 std::vector<tcu::Vec4> m_coords; //!< Near-unit coordinates, roughly [-2.0 .. 2.0].
183 std::vector<tcu::Vec4> m_unitCoords; //!< Positive-only coordinates [0.0 .. 1.5].
184 std::vector<float> m_attribOne;
185 std::vector<tcu::Vec4> m_userAttribs[ShaderEvalContext::MAX_TEXTURES];
186 std::vector<deUint16> m_indices;
187 };
188
189 QuadGrid::QuadGrid (int gridSize,
190 int width,
191 int height,
192 const tcu::Vec4& constCoords,
193 const std::vector<tcu::Mat4>& userAttribTransforms,
194 const std::vector<TextureBindingSp>& textures)
195 : m_gridSize (gridSize)
196 , m_numVertices ((gridSize + 1) * (gridSize + 1))
197 , m_numTriangles (gridSize * gridSize * 2)
198 , m_constCoords (constCoords)
199 , m_userAttribTransforms (userAttribTransforms)
200 , m_textures (textures)
201 {
202 const tcu::Vec4 viewportScale ((float)width, (float)height, 0.0f, 0.0f);
203
204 // Compute vertices.
205 m_screenPos.resize(m_numVertices);
206 m_positions.resize(m_numVertices);
207 m_coords.resize(m_numVertices);
208 m_unitCoords.resize(m_numVertices);
209 m_attribOne.resize(m_numVertices);
210
211 // User attributes.
212 for (int attrNdx = 0; attrNdx < DE_LENGTH_OF_ARRAY(m_userAttribs); attrNdx++)
213 m_userAttribs[attrNdx].resize(m_numVertices);
214
215 for (int y = 0; y < gridSize+1; y++)
216 for (int x = 0; x < gridSize+1; x++)
217 {
218 float sx = (float)x / (float)gridSize;
219 float sy = (float)y / (float)gridSize;
220 float fx = 2.0f * sx - 1.0f;
221 float fy = 2.0f * sy - 1.0f;
222 int vtxNdx = ((y * (gridSize+1)) + x);
223
224 m_positions[vtxNdx] = tcu::Vec4(fx, fy, 0.0f, 1.0f);
225 m_coords[vtxNdx] = getCoords(sx, sy);
226 m_unitCoords[vtxNdx] = getUnitCoords(sx, sy);
227 m_attribOne[vtxNdx] = 1.0f;
228
229 m_screenPos[vtxNdx] = tcu::Vec4(sx, sy, 0.0f, 1.0f) * viewportScale;
230
231 for (int attribNdx = 0; attribNdx < getNumUserAttribs(); attribNdx++)
232 m_userAttribs[attribNdx][vtxNdx] = getUserAttrib(attribNdx, sx, sy);
233 }
234
235 // Compute indices.
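// Each grid cell contributes two triangles, (v10, v00, v01) and (v10, v01, v11), i.e. six indices per cell.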
236 m_indices.resize(3 * m_numTriangles);
237 for (int y = 0; y < gridSize; y++)
238 for (int x = 0; x < gridSize; x++)
239 {
240 int stride = gridSize + 1;
241 int v00 = (y * stride) + x;
242 int v01 = (y * stride) + x + 1;
243 int v10 = ((y+1) * stride) + x;
244 int v11 = ((y+1) * stride) + x + 1;
245
246 int baseNdx = ((y * gridSize) + x) * 6;
247 m_indices[baseNdx + 0] = (deUint16)v10;
248 m_indices[baseNdx + 1] = (deUint16)v00;
249 m_indices[baseNdx + 2] = (deUint16)v01;
250
251 m_indices[baseNdx + 3] = (deUint16)v10;
252 m_indices[baseNdx + 4] = (deUint16)v01;
253 m_indices[baseNdx + 5] = (deUint16)v11;
254 }
255 }
256
257 QuadGrid::~QuadGrid (void)
258 {
259 }
260
261 inline tcu::Vec4 QuadGrid::getCoords (float sx, float sy) const
262 {
263 const float fx = 2.0f * sx - 1.0f;
264 const float fy = 2.0f * sy - 1.0f;
265 return tcu::Vec4(fx, fy, -fx + 0.33f*fy, -0.275f*fx - fy);
266 }
267
268 inline tcu::Vec4 QuadGrid::getUnitCoords (float sx, float sy) const
269 {
270 return tcu::Vec4(sx, sy, 0.33f*sx + 0.5f*sy, 0.5f*sx + 0.25f*sy);
271 }
272
273 inline tcu::Vec4 QuadGrid::getUserAttrib (int attribNdx, float sx, float sy) const
274 {
275 // homogeneous normalized screen-space coordinates
276 return m_userAttribTransforms[attribNdx] * tcu::Vec4(sx, sy, 0.0f, 1.0f);
277 }
278
279 // TextureBinding
280
281 TextureBinding::TextureBinding (const tcu::Archive& archive,
282 const char* filename,
283 const Type type,
284 const tcu::Sampler& sampler)
285 : m_type (type)
286 , m_sampler (sampler)
287 {
288 switch(m_type)
289 {
290 case TYPE_2D: m_binding.tex2D = loadTexture2D(archive, filename).release(); break;
291 default:
292 DE_FATAL("Unsupported texture type");
293 }
294 }
295
296 TextureBinding::TextureBinding (const tcu::Texture1D* tex1D, const tcu::Sampler& sampler)
297 : m_type (TYPE_1D)
298 , m_sampler (sampler)
299 {
300 m_binding.tex1D = tex1D;
301 }
302
303 TextureBinding::TextureBinding (const tcu::Texture2D* tex2D, const tcu::Sampler& sampler)
304 : m_type (TYPE_2D)
305 , m_sampler (sampler)
306 {
307 m_binding.tex2D = tex2D;
308 }
309
310 TextureBinding::TextureBinding (const tcu::Texture3D* tex3D, const tcu::Sampler& sampler)
311 : m_type (TYPE_3D)
312 , m_sampler (sampler)
313 {
314 m_binding.tex3D = tex3D;
315 }
316
317 TextureBinding::TextureBinding (const tcu::TextureCube* texCube, const tcu::Sampler& sampler)
318 : m_type (TYPE_CUBE_MAP)
319 , m_sampler (sampler)
320 {
321 m_binding.texCube = texCube;
322 }
323
324 TextureBinding::TextureBinding (const tcu::Texture1DArray* tex1DArray, const tcu::Sampler& sampler)
325 : m_type (TYPE_1D_ARRAY)
326 , m_sampler (sampler)
327 {
328 m_binding.tex1DArray = tex1DArray;
329 }
330
331 TextureBinding::TextureBinding (const tcu::Texture2DArray* tex2DArray, const tcu::Sampler& sampler)
332 : m_type (TYPE_2D_ARRAY)
333 , m_sampler (sampler)
334 {
335 m_binding.tex2DArray = tex2DArray;
336 }
337
338 TextureBinding::TextureBinding (const tcu::TextureCubeArray* texCubeArray, const tcu::Sampler& sampler)
339 : m_type (TYPE_CUBE_ARRAY)
340 , m_sampler (sampler)
341 {
342 m_binding.texCubeArray = texCubeArray;
343 }
344
345 TextureBinding::~TextureBinding (void)
346 {
347 switch(m_type)
348 {
349 case TYPE_1D: delete m_binding.tex1D; break;
350 case TYPE_2D: delete m_binding.tex2D; break;
351 case TYPE_3D: delete m_binding.tex3D; break;
352 case TYPE_CUBE_MAP: delete m_binding.texCube; break;
353 case TYPE_1D_ARRAY: delete m_binding.tex1DArray; break;
354 case TYPE_2D_ARRAY: delete m_binding.tex2DArray; break;
355 case TYPE_CUBE_ARRAY: delete m_binding.texCubeArray; break;
356 default: break;
357 }
358 }
359
360 de::MovePtr<tcu::Texture2D> TextureBinding::loadTexture2D (const tcu::Archive& archive, const char* filename)
361 {
362 tcu::TextureLevel level;
363 tcu::ImageIO::loadImage(level, archive, filename);
364
365 TCU_CHECK_INTERNAL(level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8) ||
366 level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGB, tcu::TextureFormat::UNORM_INT8));
367
368 // \todo [2015-10-08 elecro] For some reason we get better results when using an RGBA texture even in the RGB case; this needs to be investigated.
369 de::MovePtr<tcu::Texture2D> texture(new tcu::Texture2D(tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), level.getWidth(), level.getHeight()));
370
371 // Fill level 0.
372 texture->allocLevel(0);
373 tcu::copy(texture->getLevel(0), level.getAccess());
374
375 return texture;
376 }
377
378 // ShaderEvalContext.
379
380 ShaderEvalContext::ShaderEvalContext (const QuadGrid& quadGrid)
381 : constCoords (quadGrid.getConstCoords())
382 , isDiscarded (false)
383 , m_quadGrid (quadGrid)
384 {
385 const std::vector<TextureBindingSp>& bindings = m_quadGrid.getTextures();
386 DE_ASSERT((int)bindings.size() <= MAX_TEXTURES);
387
388 // Fill in texture array.
389 for (int ndx = 0; ndx < (int)bindings.size(); ndx++)
390 {
391 const TextureBinding& binding = *bindings[ndx];
392
393 if (binding.getType() == TextureBinding::TYPE_NONE)
394 continue;
395
396 textures[ndx].sampler = binding.getSampler();
397
398 switch (binding.getType())
399 {
400 case TextureBinding::TYPE_1D: textures[ndx].tex1D = &binding.get1D(); break;
401 case TextureBinding::TYPE_2D: textures[ndx].tex2D = &binding.get2D(); break;
402 case TextureBinding::TYPE_3D: textures[ndx].tex3D = &binding.get3D(); break;
403 case TextureBinding::TYPE_CUBE_MAP: textures[ndx].texCube = &binding.getCube(); break;
404 case TextureBinding::TYPE_1D_ARRAY: textures[ndx].tex1DArray = &binding.get1DArray(); break;
405 case TextureBinding::TYPE_2D_ARRAY: textures[ndx].tex2DArray = &binding.get2DArray(); break;
406 case TextureBinding::TYPE_CUBE_ARRAY: textures[ndx].texCubeArray = &binding.getCubeArray(); break;
407 default:
408 TCU_THROW(InternalError, "Handling of texture binding type not implemented");
409 }
410 }
411 }
412
413 ShaderEvalContext::~ShaderEvalContext (void)
414 {
415 }
416
417 void ShaderEvalContext::reset (float sx, float sy)
418 {
419 // Clear old values
420 color = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
421 isDiscarded = false;
422
423 // Compute coords
424 coords = m_quadGrid.getCoords(sx, sy);
425 unitCoords = m_quadGrid.getUnitCoords(sx, sy);
426
427 // Compute user attributes.
428 const int numAttribs = m_quadGrid.getNumUserAttribs();
429 DE_ASSERT(numAttribs <= MAX_USER_ATTRIBS);
430 for (int attribNdx = 0; attribNdx < numAttribs; attribNdx++)
431 in[attribNdx] = m_quadGrid.getUserAttrib(attribNdx, sx, sy);
432 }
433
434 tcu::Vec4 ShaderEvalContext::texture2D (int unitNdx, const tcu::Vec2& texCoords)
435 {
436 if (textures[unitNdx].tex2D)
437 return textures[unitNdx].tex2D->sample(textures[unitNdx].sampler, texCoords.x(), texCoords.y(), 0.0f);
438 else
439 return tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
440 }
441
442 // ShaderEvaluator.
443
444 ShaderEvaluator::ShaderEvaluator (void)
445 : m_evalFunc(DE_NULL)
446 {
447 }
448
449 ShaderEvaluator::ShaderEvaluator (ShaderEvalFunc evalFunc)
450 : m_evalFunc(evalFunc)
451 {
452 }
453
454 ShaderEvaluator::~ShaderEvaluator (void)
455 {
456 }
457
458 void ShaderEvaluator::evaluate (ShaderEvalContext& ctx) const
459 {
460 DE_ASSERT(m_evalFunc);
461 m_evalFunc(ctx);
462 }
463
464 // UniformSetup.
465
466 UniformSetup::UniformSetup (void)
467 : m_setupFunc(DE_NULL)
468 {
469 }
470
471 UniformSetup::UniformSetup (UniformSetupFunc setupFunc)
472 : m_setupFunc(setupFunc)
473 {
474 }
475
476 UniformSetup::~UniformSetup (void)
477 {
478 }
479
480 void UniformSetup::setup (ShaderRenderCaseInstance& instance, const tcu::Vec4& constCoords) const
481 {
482 if (m_setupFunc)
483 m_setupFunc(instance, constCoords);
484 }
485
486 // ShaderRenderCase.
487
488 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
489 const std::string& name,
490 const std::string& description,
491 const bool isVertexCase,
492 const ShaderEvalFunc evalFunc,
493 const UniformSetup* uniformSetup,
494 const AttributeSetupFunc attribFunc)
495 : vkt::TestCase (testCtx, name, description)
496 , m_isVertexCase (isVertexCase)
497 , m_evaluator (new ShaderEvaluator(evalFunc))
498 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
499 , m_attribFunc (attribFunc)
500 {}
501
502 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
503 const std::string& name,
504 const std::string& description,
505 const bool isVertexCase,
506 const ShaderEvaluator* evaluator,
507 const UniformSetup* uniformSetup,
508 const AttributeSetupFunc attribFunc)
509 : vkt::TestCase (testCtx, name, description)
510 , m_isVertexCase (isVertexCase)
511 , m_evaluator (evaluator)
512 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
513 , m_attribFunc (attribFunc)
514 {}
515
516 ShaderRenderCase::~ShaderRenderCase (void)
517 {
518 }
519
520 void ShaderRenderCase::initPrograms (vk::SourceCollections& programCollection) const
521 {
522 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
523 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
524 }
525
526 TestInstance* ShaderRenderCase::createInstance (Context& context) const
527 {
528 DE_ASSERT(m_evaluator != DE_NULL);
529 DE_ASSERT(m_uniformSetup != DE_NULL);
530 return new ShaderRenderCaseInstance(context, m_isVertexCase, *m_evaluator, *m_uniformSetup, m_attribFunc);
531 }
532
533 // ShaderRenderCaseInstance.
534
535 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context)
536 : vkt::TestInstance (context)
537 , m_imageBackingMode (IMAGE_BACKING_MODE_REGULAR)
538 , m_sparseContext (createSparseContext())
539 , m_memAlloc (getAllocator())
540 , m_clearColor (DEFAULT_CLEAR_COLOR)
541 , m_isVertexCase (false)
542 , m_vertexShaderName ("vert")
543 , m_fragmentShaderName ("frag")
544 , m_renderSize (128, 128)
545 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
546 , m_evaluator (DE_NULL)
547 , m_uniformSetup (DE_NULL)
548 , m_attribFunc (DE_NULL)
549 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
550 {
551 }
552
553
554 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
555 const bool isVertexCase,
556 const ShaderEvaluator& evaluator,
557 const UniformSetup& uniformSetup,
558 const AttributeSetupFunc attribFunc,
559 const ImageBackingMode imageBackingMode)
560 : vkt::TestInstance (context)
561 , m_imageBackingMode (imageBackingMode)
562 , m_sparseContext (createSparseContext())
563 , m_memAlloc (getAllocator())
564 , m_clearColor (DEFAULT_CLEAR_COLOR)
565 , m_isVertexCase (isVertexCase)
566 , m_vertexShaderName ("vert")
567 , m_fragmentShaderName ("frag")
568 , m_renderSize (128, 128)
569 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
570 , m_evaluator (&evaluator)
571 , m_uniformSetup (&uniformSetup)
572 , m_attribFunc (attribFunc)
573 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
574 {
575 }
576
577 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
578 const bool isVertexCase,
579 const ShaderEvaluator* evaluator,
580 const UniformSetup* uniformSetup,
581 const AttributeSetupFunc attribFunc,
582 const ImageBackingMode imageBackingMode)
583 : vkt::TestInstance (context)
584 , m_imageBackingMode (imageBackingMode)
585 , m_sparseContext (createSparseContext())
586 , m_memAlloc (getAllocator())
587 , m_clearColor (DEFAULT_CLEAR_COLOR)
588 , m_isVertexCase (isVertexCase)
589 , m_vertexShaderName ("vert")
590 , m_fragmentShaderName ("frag")
591 , m_renderSize (128, 128)
592 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
593 , m_evaluator (evaluator)
594 , m_uniformSetup (uniformSetup)
595 , m_attribFunc (attribFunc)
596 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
597 {
598 }
599
600 static deUint32 findQueueFamilyIndexWithCaps (const InstanceInterface& vkInstance, VkPhysicalDevice physicalDevice, VkQueueFlags requiredCaps)
601 {
602 const std::vector<VkQueueFamilyProperties> queueProps = getPhysicalDeviceQueueFamilyProperties(vkInstance, physicalDevice);
603
604 for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
605 {
606 if ((queueProps[queueNdx].queueFlags & requiredCaps) == requiredCaps)
607 return (deUint32)queueNdx;
608 }
609
610 TCU_THROW(NotSupportedError, "No matching queue found");
611 }
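// Used by SparseContext below to find a queue family that supports both graphics and sparse binding.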
612
613
614 ShaderRenderCaseInstance::SparseContext::SparseContext (vkt::Context& context)
615 : m_context (context)
616 , m_queueFamilyIndex (findQueueFamilyIndexWithCaps(context.getInstanceInterface(), context.getPhysicalDevice(), VK_QUEUE_GRAPHICS_BIT|VK_QUEUE_SPARSE_BINDING_BIT))
617 , m_device (createDevice())
618 , m_deviceInterface (context.getInstanceInterface(), *m_device)
619 , m_allocator (createAllocator())
620 {
621 m_deviceInterface.getDeviceQueue(*m_device, m_queueFamilyIndex, 0, &m_queue);
622 }
623
624 Move<VkDevice> ShaderRenderCaseInstance::SparseContext::createDevice () const
625 {
626 const InstanceInterface& vk = m_context.getInstanceInterface();
627 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
628 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(vk, physicalDevice);
629
630 VkDeviceQueueCreateInfo queueInfo;
631 VkDeviceCreateInfo deviceInfo;
632 const float queuePriority = 1.0f;
633
634 deMemset(&queueInfo, 0, sizeof(queueInfo));
635 deMemset(&deviceInfo, 0, sizeof(deviceInfo));
636
637 queueInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
638 queueInfo.pNext = DE_NULL;
639 queueInfo.flags = (VkDeviceQueueCreateFlags)0u;
640 queueInfo.queueFamilyIndex = m_queueFamilyIndex;
641 queueInfo.queueCount = 1u;
642 queueInfo.pQueuePriorities = &queuePriority;
643
644 deviceInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
645 deviceInfo.pNext = DE_NULL;
646 deviceInfo.queueCreateInfoCount = 1u;
647 deviceInfo.pQueueCreateInfos = &queueInfo;
648 deviceInfo.enabledExtensionCount = 0u;
649 deviceInfo.ppEnabledExtensionNames = DE_NULL;
650 deviceInfo.enabledLayerCount = 0u;
651 deviceInfo.ppEnabledLayerNames = DE_NULL;
652 deviceInfo.pEnabledFeatures = &deviceFeatures;
653
654 return vk::createDevice(vk, physicalDevice, &deviceInfo);
655 }
656
657 vk::Allocator* ShaderRenderCaseInstance::SparseContext::createAllocator () const
658 {
659 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
660 return new SimpleAllocator(m_deviceInterface, *m_device, memoryProperties);
661 }
662
663 ShaderRenderCaseInstance::SparseContext* ShaderRenderCaseInstance::createSparseContext (void) const
664 {
665 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
666 {
667 return new SparseContext(m_context);
668 }
669
670 return DE_NULL;
671 }
672
673 vk::Allocator& ShaderRenderCaseInstance::getAllocator (void) const
674 {
675 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
676 {
677 return *m_sparseContext->m_allocator;
678 }
679
680 return m_context.getDefaultAllocator();
681 }
682
683 ShaderRenderCaseInstance::~ShaderRenderCaseInstance (void)
684 {
685 }
686
687 VkDevice ShaderRenderCaseInstance::getDevice (void) const
688 {
689 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
690 return *m_sparseContext->m_device;
691
692 return m_context.getDevice();
693 }
694
695 deUint32 ShaderRenderCaseInstance::getUniversalQueueFamilyIndex (void) const
696 {
697 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
698 return m_sparseContext->m_queueFamilyIndex;
699
700 return m_context.getUniversalQueueFamilyIndex();
701 }
702
703 const DeviceInterface& ShaderRenderCaseInstance::getDeviceInterface (void) const
704 {
705 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
706 return m_sparseContext->m_deviceInterface;
707
708 return m_context.getDeviceInterface();
709 }
710
711 VkQueue ShaderRenderCaseInstance::getUniversalQueue (void) const
712 {
713 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
714 return m_sparseContext->m_queue;
715
716 return m_context.getUniversalQueue();
717 }
718
719 VkPhysicalDevice ShaderRenderCaseInstance::getPhysicalDevice (void) const
720 {
721 // Same in sparse and regular case
722 return m_context.getPhysicalDevice();
723 }
724
725 const InstanceInterface& ShaderRenderCaseInstance::getInstanceInterface (void) const
726 {
727 // Same in sparse and regular case
728 return m_context.getInstanceInterface();
729 }
730
731 tcu::TestStatus ShaderRenderCaseInstance::iterate (void)
732 {
733 setup();
734
735 // Create quad grid.
736 const tcu::UVec2 viewportSize = getViewportSize();
737 const int width = viewportSize.x();
738 const int height = viewportSize.y();
739
740 m_quadGrid = de::MovePtr<QuadGrid>(new QuadGrid(m_isVertexCase ? GRID_SIZE : 4, width, height, getDefaultConstCoords(), m_userAttribTransforms, m_textures));
741
742 // Render result.
743 tcu::Surface resImage (width, height);
744
745 render(m_quadGrid->getNumVertices(), m_quadGrid->getNumTriangles(), m_quadGrid->getIndices(), m_quadGrid->getConstCoords());
746 tcu::copy(resImage.getAccess(), m_resultImage.getAccess());
747
748 // Compute reference.
749 tcu::Surface refImage (width, height);
750 if (m_isVertexCase)
751 computeVertexReference(refImage, *m_quadGrid);
752 else
753 computeFragmentReference(refImage, *m_quadGrid);
754
755 // Compare.
756 const bool compareOk = compareImages(resImage, refImage, 0.1f);
757
758 if (compareOk)
759 return tcu::TestStatus::pass("Result image matches reference");
760 else
761 return tcu::TestStatus::fail("Image mismatch");
762 }
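// In other words: render the quad grid through the Vulkan pipeline, evaluate the same expressions on the CPU
// (computeVertexReference/computeFragmentReference), and pass only if the two images match within the 0.1 threshold.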
763
764 void ShaderRenderCaseInstance::setup (void)
765 {
766 m_resultImage = tcu::TextureLevel();
767 m_descriptorSetLayoutBuilder = de::MovePtr<DescriptorSetLayoutBuilder> (new DescriptorSetLayoutBuilder());
768 m_descriptorPoolBuilder = de::MovePtr<DescriptorPoolBuilder> (new DescriptorPoolBuilder());
769 m_descriptorSetUpdateBuilder = de::MovePtr<DescriptorSetUpdateBuilder> (new DescriptorSetUpdateBuilder());
770
771 m_uniformInfos.clear();
772 m_vertexBindingDescription.clear();
773 m_vertexAttributeDescription.clear();
774 m_vertexBuffers.clear();
775 m_vertexBufferAllocs.clear();
776 m_pushConstantRanges.clear();
777 }
778
779 void ShaderRenderCaseInstance::setupUniformData (deUint32 bindingLocation, size_t size, const void* dataPtr)
780 {
781 const VkDevice vkDevice = getDevice();
782 const DeviceInterface& vk = getDeviceInterface();
783 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
784
785 const VkBufferCreateInfo uniformBufferParams =
786 {
787 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
788 DE_NULL, // const void* pNext;
789 0u, // VkBufferCreateFlags flags;
790 size, // VkDeviceSize size;
791 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, // VkBufferUsageFlags usage;
792 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
793 1u, // deUint32 queueFamilyCount;
794 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
795 };
796
797 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &uniformBufferParams);
798 de::MovePtr<Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
799 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
800
801 deMemcpy(alloc->getHostPtr(), dataPtr, size);
802 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
803
804 de::MovePtr<BufferUniform> uniformInfo(new BufferUniform());
805 uniformInfo->type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
806 uniformInfo->descriptor = makeDescriptorBufferInfo(*buffer, 0u, size);
807 uniformInfo->location = bindingLocation;
808 uniformInfo->buffer = VkBufferSp(new vk::Unique<VkBuffer>(buffer));
809 uniformInfo->alloc = AllocationSp(alloc.release());
810
811 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniformInfo)));
812 }
813
814 void ShaderRenderCaseInstance::addUniform (deUint32 bindingLocation, vk::VkDescriptorType descriptorType, size_t dataSize, const void* data)
815 {
816 m_descriptorSetLayoutBuilder->addSingleBinding(descriptorType, vk::VK_SHADER_STAGE_ALL);
817 m_descriptorPoolBuilder->addType(descriptorType);
818
819 setupUniformData(bindingLocation, dataSize, data);
820 }
821
822 void ShaderRenderCaseInstance::addAttribute (deUint32 bindingLocation,
823 vk::VkFormat format,
824 deUint32 sizePerElement,
825 deUint32 count,
826 const void* dataPtr)
827 {
828 // Add binding specification
829 const deUint32 binding = (deUint32)m_vertexBindingDescription.size();
830 const VkVertexInputBindingDescription bindingDescription =
831 {
832 binding, // deUint32 binding;
833 sizePerElement, // deUint32 stride;
834 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate stepRate;
835 };
836
837 m_vertexBindingDescription.push_back(bindingDescription);
838
839 // Add location and format specification
840 const VkVertexInputAttributeDescription attributeDescription =
841 {
842 bindingLocation, // deUint32 location;
843 binding, // deUint32 binding;
844 format, // VkFormat format;
845 0u, // deUint32 offset;
846 };
847
848 m_vertexAttributeDescription.push_back(attributeDescription);
849
850 // Upload data to buffer
851 const VkDevice vkDevice = getDevice();
852 const DeviceInterface& vk = getDeviceInterface();
853 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
854
855 const VkDeviceSize inputSize = sizePerElement * count;
856 const VkBufferCreateInfo vertexBufferParams =
857 {
858 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
859 DE_NULL, // const void* pNext;
860 0u, // VkBufferCreateFlags flags;
861 inputSize, // VkDeviceSize size;
862 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
863 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
864 1u, // deUint32 queueFamilyCount;
865 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
866 };
867
868 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
869 de::MovePtr<vk::Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
870 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
871
872 deMemcpy(alloc->getHostPtr(), dataPtr, (size_t)inputSize);
873 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), inputSize);
874
875 m_vertexBuffers.push_back(VkBufferSp(new vk::Unique<VkBuffer>(buffer)));
876 m_vertexBufferAllocs.push_back(AllocationSp(alloc.release()));
877 }
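// Each addAttribute() call thus creates one tightly packed vertex buffer plus a matching binding/attribute
// description pair; the binding index is simply the order in which the attributes were added.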
878
879 void ShaderRenderCaseInstance::useAttribute (deUint32 bindingLocation, BaseAttributeType type)
880 {
881 const EnabledBaseAttribute attribute =
882 {
883 bindingLocation, // deUint32 location;
884 type // BaseAttributeType type;
885 };
886 m_enabledBaseAttributes.push_back(attribute);
887 }
888
889 void ShaderRenderCaseInstance::setupUniforms (const tcu::Vec4& constCoords)
890 {
891 if (m_uniformSetup)
892 m_uniformSetup->setup(*this, constCoords);
893 }
894
895 void ShaderRenderCaseInstance::useUniform (deUint32 bindingLocation, BaseUniformType type)
896 {
897 #define UNIFORM_CASE(type, value) case type: addUniform(bindingLocation, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, value); break
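// Each case below expands to an addUniform() call that passes the literal value directly, so adding a new
// uniform constant only needs one UNIFORM_CASE line here.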
898
899 switch(type)
900 {
901 // Bool
902 UNIFORM_CASE(UB_FALSE, 0);
903 UNIFORM_CASE(UB_TRUE, 1);
904
905 // BVec4
906 UNIFORM_CASE(UB4_FALSE, tcu::Vec4(0));
907 UNIFORM_CASE(UB4_TRUE, tcu::Vec4(1));
908
909 // Integer
910 UNIFORM_CASE(UI_ZERO, 0);
911 UNIFORM_CASE(UI_ONE, 1);
912 UNIFORM_CASE(UI_TWO, 2);
913 UNIFORM_CASE(UI_THREE, 3);
914 UNIFORM_CASE(UI_FOUR, 4);
915 UNIFORM_CASE(UI_FIVE, 5);
916 UNIFORM_CASE(UI_SIX, 6);
917 UNIFORM_CASE(UI_SEVEN, 7);
918 UNIFORM_CASE(UI_EIGHT, 8);
919 UNIFORM_CASE(UI_ONEHUNDREDONE, 101);
920
921 // IVec2
922 UNIFORM_CASE(UI2_MINUS_ONE, tcu::IVec2(-1));
923 UNIFORM_CASE(UI2_ZERO, tcu::IVec2(0));
924 UNIFORM_CASE(UI2_ONE, tcu::IVec2(1));
925 UNIFORM_CASE(UI2_TWO, tcu::IVec2(2));
926 UNIFORM_CASE(UI2_THREE, tcu::IVec2(3));
927 UNIFORM_CASE(UI2_FOUR, tcu::IVec2(4));
928 UNIFORM_CASE(UI2_FIVE, tcu::IVec2(5));
929
930 // IVec3
931 UNIFORM_CASE(UI3_MINUS_ONE, tcu::IVec3(-1));
932 UNIFORM_CASE(UI3_ZERO, tcu::IVec3(0));
933 UNIFORM_CASE(UI3_ONE, tcu::IVec3(1));
934 UNIFORM_CASE(UI3_TWO, tcu::IVec3(2));
935 UNIFORM_CASE(UI3_THREE, tcu::IVec3(3));
936 UNIFORM_CASE(UI3_FOUR, tcu::IVec3(4));
937 UNIFORM_CASE(UI3_FIVE, tcu::IVec3(5));
938
939 // IVec4
940 UNIFORM_CASE(UI4_MINUS_ONE, tcu::IVec4(-1));
941 UNIFORM_CASE(UI4_ZERO, tcu::IVec4(0));
942 UNIFORM_CASE(UI4_ONE, tcu::IVec4(1));
943 UNIFORM_CASE(UI4_TWO, tcu::IVec4(2));
944 UNIFORM_CASE(UI4_THREE, tcu::IVec4(3));
945 UNIFORM_CASE(UI4_FOUR, tcu::IVec4(4));
946 UNIFORM_CASE(UI4_FIVE, tcu::IVec4(5));
947
948 // Float
949 UNIFORM_CASE(UF_ZERO, 0.0f);
950 UNIFORM_CASE(UF_ONE, 1.0f);
951 UNIFORM_CASE(UF_TWO, 2.0f);
952 UNIFORM_CASE(UF_THREE, 3.0f);
953 UNIFORM_CASE(UF_FOUR, 4.0f);
954 UNIFORM_CASE(UF_FIVE, 5.0f);
955 UNIFORM_CASE(UF_SIX, 6.0f);
956 UNIFORM_CASE(UF_SEVEN, 7.0f);
957 UNIFORM_CASE(UF_EIGHT, 8.0f);
958
959 UNIFORM_CASE(UF_HALF, 1.0f / 2.0f);
960 UNIFORM_CASE(UF_THIRD, 1.0f / 3.0f);
961 UNIFORM_CASE(UF_FOURTH, 1.0f / 4.0f);
962 UNIFORM_CASE(UF_FIFTH, 1.0f / 5.0f);
963 UNIFORM_CASE(UF_SIXTH, 1.0f / 6.0f);
964 UNIFORM_CASE(UF_SEVENTH, 1.0f / 7.0f);
965 UNIFORM_CASE(UF_EIGHTH, 1.0f / 8.0f);
966
967 // Vec2
968 UNIFORM_CASE(UV2_MINUS_ONE, tcu::Vec2(-1.0f));
969 UNIFORM_CASE(UV2_ZERO, tcu::Vec2(0.0f));
970 UNIFORM_CASE(UV2_ONE, tcu::Vec2(1.0f));
971 UNIFORM_CASE(UV2_TWO, tcu::Vec2(2.0f));
972 UNIFORM_CASE(UV2_THREE, tcu::Vec2(3.0f));
973
974 UNIFORM_CASE(UV2_HALF, tcu::Vec2(1.0f / 2.0f));
975
976 // Vec3
977 UNIFORM_CASE(UV3_MINUS_ONE, tcu::Vec3(-1.0f));
978 UNIFORM_CASE(UV3_ZERO, tcu::Vec3(0.0f));
979 UNIFORM_CASE(UV3_ONE, tcu::Vec3(1.0f));
980 UNIFORM_CASE(UV3_TWO, tcu::Vec3(2.0f));
981 UNIFORM_CASE(UV3_THREE, tcu::Vec3(3.0f));
982
983 UNIFORM_CASE(UV3_HALF, tcu::Vec3(1.0f / 2.0f));
984
985 // Vec4
986 UNIFORM_CASE(UV4_MINUS_ONE, tcu::Vec4(-1.0f));
987 UNIFORM_CASE(UV4_ZERO, tcu::Vec4(0.0f));
988 UNIFORM_CASE(UV4_ONE, tcu::Vec4(1.0f));
989 UNIFORM_CASE(UV4_TWO, tcu::Vec4(2.0f));
990 UNIFORM_CASE(UV4_THREE, tcu::Vec4(3.0f));
991
992 UNIFORM_CASE(UV4_HALF, tcu::Vec4(1.0f / 2.0f));
993
994 UNIFORM_CASE(UV4_BLACK, tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
995 UNIFORM_CASE(UV4_GRAY, tcu::Vec4(0.5f, 0.5f, 0.5f, 1.0f));
996 UNIFORM_CASE(UV4_WHITE, tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
997
998 default:
999 m_context.getTestContext().getLog() << tcu::TestLog::Message << "Unknown Uniform type: " << type << tcu::TestLog::EndMessage;
1000 break;
1001 }
1002
1003 #undef UNIFORM_CASE
1004 }
1005
1006 const tcu::UVec2 ShaderRenderCaseInstance::getViewportSize (void) const
1007 {
1008 return tcu::UVec2(de::min(m_renderSize.x(), MAX_RENDER_WIDTH),
1009 de::min(m_renderSize.y(), MAX_RENDER_HEIGHT));
1010 }
1011
1012 void ShaderRenderCaseInstance::setSampleCount (VkSampleCountFlagBits sampleCount)
1013 {
1014 m_sampleCount = sampleCount;
1015 }
1016
1017 bool ShaderRenderCaseInstance::isMultiSampling (void) const
1018 {
1019 return m_sampleCount != VK_SAMPLE_COUNT_1_BIT;
1020 }
1021
1022 void ShaderRenderCaseInstance::uploadImage (const tcu::TextureFormat& texFormat,
1023 const TextureData& textureData,
1024 const tcu::Sampler& refSampler,
1025 deUint32 mipLevels,
1026 deUint32 arrayLayers,
1027 VkImage destImage)
1028 {
1029 const VkDevice vkDevice = getDevice();
1030 const DeviceInterface& vk = getDeviceInterface();
1031 const VkQueue queue = getUniversalQueue();
1032 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1033
1034 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1035 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1036 deUint32 bufferSize = 0u;
1037 Move<VkBuffer> buffer;
1038 de::MovePtr<Allocation> bufferAlloc;
1039 Move<VkCommandPool> cmdPool;
1040 Move<VkCommandBuffer> cmdBuffer;
1041 Move<VkFence> fence;
1042 std::vector<VkBufferImageCopy> copyRegions;
1043 std::vector<deUint32> offsetMultiples;
1044
1045 offsetMultiples.push_back(4u);
1046 offsetMultiples.push_back(texFormat.getPixelSize());
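// Layer offsets are rounded up to a multiple of both 4 and the texel size so that each region's bufferOffset
// satisfies the alignment rules of vkCmdCopyBufferToImage.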
1047
1048 // Calculate buffer size
1049 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1050 {
1051 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1052 {
1053 const tcu::ConstPixelBufferAccess& access = *lit;
1054
1055 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1056 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1057 }
1058 }
1059
1060 // Create source buffer
1061 {
1062 const VkBufferCreateInfo bufferParams =
1063 {
1064 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1065 DE_NULL, // const void* pNext;
1066 0u, // VkBufferCreateFlags flags;
1067 bufferSize, // VkDeviceSize size;
1068 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1069 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1070 0u, // deUint32 queueFamilyIndexCount;
1071 DE_NULL, // const deUint32* pQueueFamilyIndices;
1072 };
1073
1074 buffer = createBuffer(vk, vkDevice, &bufferParams);
1075 bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1076 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1077 }
1078
1079 // Create command pool and buffer
1080 {
1081 const VkCommandPoolCreateInfo cmdPoolParams =
1082 {
1083 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
1084 DE_NULL, // const void* pNext;
1085 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // VkCommandPoolCreateFlags flags;
1086 queueFamilyIndex, // deUint32 queueFamilyIndex;
1087 };
1088
1089 cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
1090
1091 const VkCommandBufferAllocateInfo cmdBufferAllocateInfo =
1092 {
1093 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
1094 DE_NULL, // const void* pNext;
1095 *cmdPool, // VkCommandPool commandPool;
1096 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // VkCommandBufferLevel level;
1097 1u, // deUint32 bufferCount;
1098 };
1099
1100 cmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferAllocateInfo);
1101 }
1102
1103 // Create fence
1104 {
1105 const VkFenceCreateInfo fenceParams =
1106 {
1107 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1108 DE_NULL, // const void* pNext;
1109 0u // VkFenceCreateFlags flags;
1110 };
1111
1112 fence = createFence(vk, vkDevice, &fenceParams);
1113 }
1114
1115 // Barriers for copying buffer to image
1116 const VkBufferMemoryBarrier preBufferBarrier =
1117 {
1118 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1119 DE_NULL, // const void* pNext;
1120 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1121 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1122 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1123 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1124 *buffer, // VkBuffer buffer;
1125 0u, // VkDeviceSize offset;
1126 bufferSize // VkDeviceSize size;
1127 };
1128
1129 const VkImageMemoryBarrier preImageBarrier =
1130 {
1131 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1132 DE_NULL, // const void* pNext;
1133 0u, // VkAccessFlags srcAccessMask;
1134 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1135 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1136 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1137 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1138 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1139 destImage, // VkImage image;
1140 { // VkImageSubresourceRange subresourceRange;
1141 aspectMask, // VkImageAspect aspect;
1142 0u, // deUint32 baseMipLevel;
1143 mipLevels, // deUint32 mipLevels;
1144 0u, // deUint32 baseArraySlice;
1145 arrayLayers // deUint32 arraySize;
1146 }
1147 };
1148
1149 const VkImageMemoryBarrier postImageBarrier =
1150 {
1151 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1152 DE_NULL, // const void* pNext;
1153 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1154 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1155 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1156 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1157 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1158 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1159 destImage, // VkImage image;
1160 { // VkImageSubresourceRange subresourceRange;
1161 aspectMask, // VkImageAspect aspect;
1162 0u, // deUint32 baseMipLevel;
1163 mipLevels, // deUint32 mipLevels;
1164 0u, // deUint32 baseArraySlice;
1165 arrayLayers // deUint32 arraySize;
1166 }
1167 };
1168
1169 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1170 {
1171 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1172 DE_NULL, // const void* pNext;
1173 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1174 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1175 };
1176
1177 // Get copy regions and write buffer data
1178 {
1179 deUint32 layerDataOffset = 0;
1180 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1181
1182 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1183 {
1184 const TextureLayerData& layerData = textureData[levelNdx];
1185
1186 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1187 {
1188 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1189
1190 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1191 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1192
1193 const VkBufferImageCopy layerRegion =
1194 {
1195 layerDataOffset, // VkDeviceSize bufferOffset;
1196 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1197 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1198 { // VkImageSubresourceLayers imageSubresource;
1199 aspectMask, // VkImageAspectFlags aspectMask;
1200 (deUint32)levelNdx, // uint32_t mipLevel;
1201 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1202 1u // uint32_t layerCount;
1203 },
1204 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1205 { // VkExtent3D imageExtent;
1206 (deUint32)access.getWidth(),
1207 (deUint32)access.getHeight(),
1208 (deUint32)access.getDepth()
1209 }
1210 };
1211
1212 copyRegions.push_back(layerRegion);
1213 tcu::copy(destAccess, access);
1214
1215 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1216 }
1217 }
1218 }
1219
1220 flushMappedMemoryRange(vk, vkDevice, bufferAlloc->getMemory(), bufferAlloc->getOffset(), bufferSize);
1221
1222 // Copy buffer to image
1223 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1224 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
1225 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
1226 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1227 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1228
1229 const VkSubmitInfo submitInfo =
1230 {
1231 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1232 DE_NULL, // const void* pNext;
1233 0u, // deUint32 waitSemaphoreCount;
1234 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1235 DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
1236 1u, // deUint32 commandBufferCount;
1237 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1238 0u, // deUint32 signalSemaphoreCount;
1239 DE_NULL // const VkSemaphore* pSignalSemaphores;
1240 };
1241
1242 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1243 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
1244 }
1245
1246 void ShaderRenderCaseInstance::clearImage (const tcu::Sampler& refSampler,
1247 deUint32 mipLevels,
1248 deUint32 arrayLayers,
1249 VkImage destImage)
1250 {
1251 const VkDevice vkDevice = m_context.getDevice();
1252 const DeviceInterface& vk = m_context.getDeviceInterface();
1253 const VkQueue queue = m_context.getUniversalQueue();
1254 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1255
1256 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1257 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1258 Move<VkCommandPool> cmdPool;
1259 Move<VkCommandBuffer> cmdBuffer;
1260 Move<VkFence> fence;
1261
1262 VkClearValue clearValue;
1263 deMemset(&clearValue, 0, sizeof(clearValue));
1264
1265
1266 // Create command pool and buffer
1267 {
1268 const VkCommandPoolCreateInfo cmdPoolParams =
1269 {
1270 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
1271 DE_NULL, // const void* pNext;
1272 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // VkCommandPoolCreateFlags flags;
1273 queueFamilyIndex, // deUint32 queueFamilyIndex;
1274 };
1275
1276 cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
1277
1278 const VkCommandBufferAllocateInfo cmdBufferAllocateInfo =
1279 {
1280 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
1281 DE_NULL, // const void* pNext;
1282 *cmdPool, // VkCommandPool commandPool;
1283 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // VkCommandBufferLevel level;
1284 1u, // deUint32 bufferCount;
1285 };
1286
1287 cmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferAllocateInfo);
1288 }
1289
1290 // Create fence
1291 {
1292 const VkFenceCreateInfo fenceParams =
1293 {
1294 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1295 DE_NULL, // const void* pNext;
1296 0u // VkFenceCreateFlags flags;
1297 };
1298
1299 fence = createFence(vk, vkDevice, &fenceParams);
1300 }
1301
1302 const VkImageMemoryBarrier preImageBarrier =
1303 {
1304 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1305 DE_NULL, // const void* pNext;
1306 0u, // VkAccessFlags srcAccessMask;
1307 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1308 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1309 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1310 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1311 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1312 destImage, // VkImage image;
1313 { // VkImageSubresourceRange subresourceRange;
1314 aspectMask, // VkImageAspect aspect;
1315 0u, // deUint32 baseMipLevel;
1316 mipLevels, // deUint32 mipLevels;
1317 0u, // deUint32 baseArraySlice;
1318 arrayLayers // deUint32 arraySize;
1319 }
1320 };
1321
1322 const VkImageMemoryBarrier postImageBarrier =
1323 {
1324 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1325 DE_NULL, // const void* pNext;
1326 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1327 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1328 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1329 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1330 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1331 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1332 destImage, // VkImage image;
1333 { // VkImageSubresourceRange subresourceRange;
1334 aspectMask, // VkImageAspect aspect;
1335 0u, // deUint32 baseMipLevel;
1336 mipLevels, // deUint32 mipLevels;
1337 0u, // deUint32 baseArraySlice;
1338 arrayLayers // deUint32 arraySize;
1339 }
1340 };
1341
1342 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1343 {
1344 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1345 DE_NULL, // const void* pNext;
1346 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1347 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1348 };
1349
1350
1351 const VkImageSubresourceRange clearRange =
1352 {
1353 aspectMask, // VkImageAspectFlags aspectMask;
1354 0u, // deUint32 baseMipLevel;
1355 mipLevels, // deUint32 levelCount;
1356 0u, // deUint32 baseArrayLayer;
1357 arrayLayers // deUint32 layerCount;
1358 };
1359
1360 // Clear the image
1361 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1362 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
1363 if (aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
1364 {
1365 vk.cmdClearColorImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1, &clearRange);
1366 }
1367 else
1368 {
1369 vk.cmdClearDepthStencilImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.depthStencil, 1, &clearRange);
1370 }
1371 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1372 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1373
1374 const VkSubmitInfo submitInfo =
1375 {
1376 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1377 DE_NULL, // const void* pNext;
1378 0u, // deUint32 waitSemaphoreCount;
1379 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1380 DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
1381 1u, // deUint32 commandBufferCount;
1382 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1383 0u, // deUint32 signalSemaphoreCount;
1384 DE_NULL // const VkSemaphore* pSignalSemaphores;
1385 };
1386
1387 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1388 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
1389 }
1390
1391 // Sparse utility function
1392 Move<VkSemaphore> makeSemaphore (const DeviceInterface& vk, const VkDevice device)
1393 {
1394 const VkSemaphoreCreateInfo semaphoreCreateInfo =
1395 {
1396 VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1397 DE_NULL,
1398 0u
1399 };
1400
1401 return createSemaphore(vk, device, &semaphoreCreateInfo);
1402 }
1403
1404 VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel)
1405 {
1406 VkExtent3D result;
1407
1408 result.width = std::max(baseExtents.width >> mipLevel, 1u);
1409 result.height = std::max(baseExtents.height >> mipLevel, 1u);
1410 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
1411
1412 return result;
1413 }
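// For example, a 128x64x1 base extent at mip level 3 yields 16x8x1; dimensions are clamped to at least 1.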
1414
1415 tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
1416 {
1417 tcu::UVec3 result;
1418
1419 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
1420 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
1421 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
1422
1423 return result;
1424 }
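// Rounds up on each axis: e.g. a 65x64x1 extent divided by a 64x64x1 sparse block gives (2, 1, 1).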
1425
1426 bool isImageSizeSupported (const VkImageType imageType, const tcu::UVec3& imageSize, const vk::VkPhysicalDeviceLimits& limits)
1427 {
1428 switch (imageType)
1429 {
1430 case VK_IMAGE_TYPE_1D:
1431 return (imageSize.x() <= limits.maxImageDimension1D
1432 && imageSize.y() == 1
1433 && imageSize.z() == 1);
1434 case VK_IMAGE_TYPE_2D:
1435 return (imageSize.x() <= limits.maxImageDimension2D
1436 && imageSize.y() <= limits.maxImageDimension2D
1437 && imageSize.z() == 1);
1438 case VK_IMAGE_TYPE_3D:
1439 return (imageSize.x() <= limits.maxImageDimension3D
1440 && imageSize.y() <= limits.maxImageDimension3D
1441 && imageSize.z() <= limits.maxImageDimension3D);
1442 default:
1443 DE_FATAL("Unknown image type");
1444 return false;
1445 }
1446 }
1447
1448 void ShaderRenderCaseInstance::checkSparseSupport (const VkImageType imageType) const
1449 {
1450 const InstanceInterface& instance = getInstanceInterface();
1451 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1452 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
1453
1454 if (!deviceFeatures.shaderResourceResidency)
1455 TCU_THROW(NotSupportedError, "Required feature: shaderResourceResidency.");
1456
1457 if (!deviceFeatures.sparseBinding)
1458 TCU_THROW(NotSupportedError, "Required feature: sparseBinding.");
1459
1460 if (imageType == VK_IMAGE_TYPE_2D && !deviceFeatures.sparseResidencyImage2D)
1461 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage2D.");
1462
1463 if (imageType == VK_IMAGE_TYPE_3D && !deviceFeatures.sparseResidencyImage3D)
1464 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage3D.");
1465 }
1466
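// Uploads texture data into a sparse image: device memory is bound for every sparse block and mip tail via
// vkQueueBindSparse, then the pixel data is staged in a host-visible buffer and copied into the image.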
1467 void ShaderRenderCaseInstance::uploadSparseImage (const tcu::TextureFormat& texFormat,
1468 const TextureData& textureData,
1469 const tcu::Sampler& refSampler,
1470 const deUint32 mipLevels,
1471 const deUint32 arrayLayers,
1472 const VkImage sparseImage,
1473 const VkImageCreateInfo& imageCreateInfo,
1474 const tcu::UVec3 texSize)
1475 {
1476 const VkDevice vkDevice = getDevice();
1477 const DeviceInterface& vk = getDeviceInterface();
1478 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1479 const VkQueue queue = getUniversalQueue();
1480 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1481 const InstanceInterface& instance = getInstanceInterface();
1482 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
1483 const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);
1484 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1485 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1486
1487 const Unique<VkSemaphore> imageMemoryBindSemaphore(makeSemaphore(vk, vkDevice));
1488 deUint32 bufferSize = 0u;
1489 std::vector<deUint32> offsetMultiples;
1490 offsetMultiples.push_back(4u);
1491 offsetMultiples.push_back(texFormat.getPixelSize());
1492
1493 if (isImageSizeSupported(imageCreateInfo.imageType, texSize, deviceProperties.limits) == false)
1494 TCU_THROW(NotSupportedError, "Image size not supported for device.");
1495
1496 // Calculate buffer size
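	// Each layer's data starts at an offset aligned to both 4 bytes and the texel size (see offsetMultiples above).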
1497 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1498 {
1499 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1500 {
1501 const tcu::ConstPixelBufferAccess& access = *lit;
1502
1503 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1504 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1505 }
1506 }
1507
1508 {
1509 deUint32 sparseMemoryReqCount = 0;
1510
1511 vk.getImageSparseMemoryRequirements(vkDevice, sparseImage, &sparseMemoryReqCount, DE_NULL);
1512
1513 DE_ASSERT(sparseMemoryReqCount != 0);
1514
1515 std::vector<VkSparseImageMemoryRequirements> sparseImageMemoryRequirements;
1516 sparseImageMemoryRequirements.resize(sparseMemoryReqCount);
1517
1518 vk.getImageSparseMemoryRequirements(vkDevice, sparseImage, &sparseMemoryReqCount, &sparseImageMemoryRequirements[0]);
1519
1520 const deUint32 noMatchFound = ~((deUint32)0);
1521
1522 deUint32 colorAspectIndex = noMatchFound;
1523 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
1524 {
1525 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
1526 {
1527 colorAspectIndex = memoryReqNdx;
1528 break;
1529 }
1530 }
1531
1532 if (colorAspectIndex == noMatchFound)
1533 TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT.");
1534
1535 const VkMemoryRequirements memoryRequirements = getImageMemoryRequirements(vk, vkDevice, sparseImage);
1536
1537 deUint32 memoryType = noMatchFound;
1538 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemoryProperties.memoryTypeCount; ++memoryTypeNdx)
1539 {
1540 if ((memoryRequirements.memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
1541 MemoryRequirement::Any.matchesHeap(deviceMemoryProperties.memoryTypes[memoryTypeNdx].propertyFlags))
1542 {
1543 memoryType = memoryTypeNdx;
1544 break;
1545 }
1546 }
1547
1548 if (memoryType == noMatchFound)
1549 TCU_THROW(NotSupportedError, "No matching memory type found.");
1550
1551 if (memoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
1552 TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits.");
1553
1554 // Check if the image format supports sparse operations
1555 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
1556 getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType, imageCreateInfo.samples, imageCreateInfo.usage, imageCreateInfo.tiling);
1557
1558 if (sparseImageFormatPropVec.size() == 0)
1559 TCU_THROW(NotSupportedError, "The image format does not support sparse operations.");
1560
1561 const VkSparseImageMemoryRequirements aspectRequirements = sparseImageMemoryRequirements[colorAspectIndex];
1562 const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
1563
1564 std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
1565 std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
1566
1567 for (deUint32 layerNdx = 0; layerNdx < arrayLayers; ++ layerNdx)
1568 {
1569 for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
1570 {
1571 const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
1572 const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
1573 const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
1574 mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
1575 mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth );
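				// The final block in each dimension may be partial; lastBlockExtent holds its size
				// (a full granularity block when the mip extent divides evenly).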
1576
1577 for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
1578 for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
1579 for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
1580 {
1581 const VkMemoryRequirements allocRequirements =
1582 {
1583 // Per spec section 28.7.5, 'alignment' reports the sparse block size in bytes
1584 memoryRequirements.alignment, // VkDeviceSize size;
1585 memoryRequirements.alignment, // VkDeviceSize alignment;
1586 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
1587 };
1588
1589 de::SharedPtr<Allocation> allocation(m_memAlloc.allocate(allocRequirements, MemoryRequirement::Any).release());
1590
1591 m_allocations.push_back(allocation);
1592
1593 VkOffset3D offset;
1594 offset.x = x*imageGranularity.width;
1595 offset.y = y*imageGranularity.height;
1596 offset.z = z*imageGranularity.depth;
1597
1598 VkExtent3D extent;
1599 extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
1600 extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
1601 extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
1602
1603 const VkSparseImageMemoryBind imageMemoryBind =
1604 {
1605 {
1606 aspectMask, // VkImageAspectFlags aspectMask;
1607 mipLevelNdx,// uint32_t mipLevel;
1608 layerNdx, // uint32_t arrayLayer;
1609 }, // VkImageSubresource subresource;
1610 offset, // VkOffset3D offset;
1611 extent, // VkExtent3D extent;
1612 allocation->getMemory(), // VkDeviceMemory memory;
1613 allocation->getOffset(), // VkDeviceSize memoryOffset;
1614 0u, // VkSparseMemoryBindFlags flags;
1615 };
1616
1617 imageResidencyMemoryBinds.push_back(imageMemoryBind);
1618 }
1619 }
1620
1621 // Handle MIP tail. There are two cases to consider here:
1622 //
1623 // 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is reported by the driver: a single tail covers all layers, so it is bound only once.
1624 // 2) otherwise: each layer needs its own tail binding.
1625 {
1626 if (imageMipTailMemoryBinds.empty() ||
1627 (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
1628 {
1629 const VkMemoryRequirements allocRequirements =
1630 {
1631 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
1632 memoryRequirements.alignment, // VkDeviceSize alignment;
1633 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
1634 };
1635
1636 const de::SharedPtr<Allocation> allocation(m_memAlloc.allocate(allocRequirements, MemoryRequirement::Any).release());
1637
1638 const VkSparseMemoryBind imageMipTailMemoryBind =
1639 {
1640 aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
1641 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
1642 allocation->getMemory(), // VkDeviceMemory memory;
1643 allocation->getOffset(), // VkDeviceSize memoryOffset;
1644 0u, // VkSparseMemoryBindFlags flags;
1645 };
1646
1647 m_allocations.push_back(allocation);
1648 imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
1649 }
1650 }
1651 }
1652
1653 VkBindSparseInfo bindSparseInfo =
1654 {
1655 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
1656 DE_NULL, //const void* pNext;
1657 0u, //deUint32 waitSemaphoreCount;
1658 DE_NULL, //const VkSemaphore* pWaitSemaphores;
1659 0u, //deUint32 bufferBindCount;
1660 DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
1661 0u, //deUint32 imageOpaqueBindCount;
1662 DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
1663 0u, //deUint32 imageBindCount;
1664 DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
1665 1u, //deUint32 signalSemaphoreCount;
1666 &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
1667 };
1668
1669 VkSparseImageMemoryBindInfo imageResidencyBindInfo;
1670 VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
1671
1672 if (imageResidencyMemoryBinds.size() > 0)
1673 {
1674 imageResidencyBindInfo.image = sparseImage;
1675 imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
1676 imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
1677
1678 bindSparseInfo.imageBindCount = 1u;
1679 bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
1680 }
1681
1682 if (imageMipTailMemoryBinds.size() > 0)
1683 {
1684 imageMipTailBindInfo.image = sparseImage;
1685 imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
1686 imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
1687
1688 bindSparseInfo.imageOpaqueBindCount = 1u;
1689 bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
1690 }
1691
1692 VK_CHECK(vk.queueBindSparse(queue, 1u, &bindSparseInfo, DE_NULL));
1693 }
1694
1695 Move<VkCommandPool> cmdPool;
1696 Move<VkCommandBuffer> cmdBuffer;
1697 // Create command pool
1698 {
1699 const VkCommandPoolCreateInfo cmdPoolParams =
1700 {
1701 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
1702 DE_NULL, // const void* pNext;
1703 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // VkCommandPoolCreateFlags flags;
1704 queueFamilyIndex, // deUint32 queueFamilyIndex;
1705 };
1706
1707 cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
1708 }
1709
1710 {
1711 // Create command buffer
1712 const VkCommandBufferAllocateInfo cmdBufferAllocateInfo =
1713 {
1714 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
1715 DE_NULL, // const void* pNext;
1716 *cmdPool, // VkCommandPool commandPool;
1717 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // VkCommandBufferLevel level;
1718 1u, // deUint32 bufferCount;
1719 };
1720
1721 cmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferAllocateInfo);
1722 }
1723
1724 // Create source buffer
1725 const VkBufferCreateInfo bufferParams =
1726 {
1727 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1728 DE_NULL, // const void* pNext;
1729 0u, // VkBufferCreateFlags flags;
1730 bufferSize, // VkDeviceSize size;
1731 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1732 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1733 0u, // deUint32 queueFamilyIndexCount;
1734 DE_NULL, // const deUint32* pQueueFamilyIndices;
1735 };
1736
1737 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &bufferParams);
1738 de::MovePtr<Allocation> bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1739 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1740
1741 // Barriers for copying buffer to image
1742 const VkBufferMemoryBarrier preBufferBarrier =
1743 {
1744 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1745 DE_NULL, // const void* pNext;
1746 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1747 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1748 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1749 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1750 *buffer, // VkBuffer buffer;
1751 0u, // VkDeviceSize offset;
1752 bufferSize // VkDeviceSize size;
1753 };
1754
1755 const VkImageMemoryBarrier preImageBarrier =
1756 {
1757 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1758 DE_NULL, // const void* pNext;
1759 0u, // VkAccessFlags srcAccessMask;
1760 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1761 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1762 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1763 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1764 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1765 sparseImage, // VkImage image;
1766 { // VkImageSubresourceRange subresourceRange;
1767 aspectMask, // VkImageAspect aspect;
1768 0u, // deUint32 baseMipLevel;
1769 mipLevels, // deUint32 mipLevels;
1770 0u, // deUint32 baseArraySlice;
1771 arrayLayers // deUint32 arraySize;
1772 }
1773 };
1774
1775 const VkImageMemoryBarrier postImageBarrier =
1776 {
1777 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1778 DE_NULL, // const void* pNext;
1779 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1780 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1781 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1782 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1783 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1784 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1785 sparseImage, // VkImage image;
1786 { // VkImageSubresourceRange subresourceRange;
1787 aspectMask, // VkImageAspect aspect;
1788 0u, // deUint32 baseMipLevel;
1789 mipLevels, // deUint32 mipLevels;
1790 0u, // deUint32 baseArraySlice;
1791 arrayLayers // deUint32 arraySize;
1792 }
1793 };
1794
1795 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1796 {
1797 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1798 DE_NULL, // const void* pNext;
1799 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1800 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1801 };
1802
1803 std::vector<VkBufferImageCopy> copyRegions;
1804 // Get copy regions and write buffer data
1805 {
1806 deUint32 layerDataOffset = 0;
1807 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1808
1809 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1810 {
1811 const TextureLayerData& layerData = textureData[levelNdx];
1812
1813 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1814 {
1815 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1816
1817 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1818 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1819
1820 const VkBufferImageCopy layerRegion =
1821 {
1822 layerDataOffset, // VkDeviceSize bufferOffset;
1823 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1824 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1825 { // VkImageSubresourceLayers imageSubresource;
1826 aspectMask, // VkImageAspectFlags aspectMask;
1827 (deUint32)levelNdx, // uint32_t mipLevel;
1828 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1829 1u // uint32_t layerCount;
1830 },
1831 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1832 { // VkExtent3D imageExtent;
1833 (deUint32)access.getWidth(),
1834 (deUint32)access.getHeight(),
1835 (deUint32)access.getDepth()
1836 }
1837 };
1838
1839 copyRegions.push_back(layerRegion);
1840 tcu::copy(destAccess, access);
1841
1842 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1843 }
1844 }
1845 }
1846
1847 // Copy buffer to image
1848 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1849 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
1850 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, sparseImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
1851 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1852 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1853
1854 const VkPipelineStageFlags pipelineStageFlags = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
1855
1856 const VkSubmitInfo submitInfo =
1857 {
1858 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1859 DE_NULL, // const void* pNext;
1860 1u, // deUint32 waitSemaphoreCount;
1861 &imageMemoryBindSemaphore.get(), // const VkSemaphore* pWaitSemaphores;
1862 &pipelineStageFlags, // const VkPipelineStageFlags* pWaitDstStageMask;
1863 1u, // deUint32 commandBufferCount;
1864 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1865 0u, // deUint32 signalSemaphoreCount;
1866 DE_NULL // const VkSemaphore* pSignalSemaphores;
1867 };
1868
1869 const VkFenceCreateInfo fenceParams =
1870 {
1871 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1872 DE_NULL, // const void* pNext;
1873 0u // VkFenceCreateFlags flags;
1874 };
1875
1876 Move<VkFence> fence = createFence(vk, vkDevice, &fenceParams);
1877
1878 try
1879 {
1880 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1881 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
1882 }
1883 catch (...)
1884 {
1885 VK_CHECK(vk.deviceWaitIdle(vkDevice));
1886 throw;
1887 }
1888 }
1889
1890 void ShaderRenderCaseInstance::useSampler (deUint32 bindingLocation, deUint32 textureId)
1891 {
1892 DE_ASSERT(textureId < m_textures.size());
1893
1894 const TextureBinding& textureBinding = *m_textures[textureId];
1895 const TextureBinding::Type textureType = textureBinding.getType();
1896 const tcu::Sampler& refSampler = textureBinding.getSampler();
1897 const TextureBinding::Parameters& textureParams = textureBinding.getParameters();
1898 const bool isMSTexture = textureParams.samples != vk::VK_SAMPLE_COUNT_1_BIT;
1899 deUint32 mipLevels = 1u;
1900 deUint32 arrayLayers = 1u;
1901 tcu::TextureFormat texFormat;
1902 tcu::UVec3 texSize;
1903 TextureData textureData;
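	// textureData is indexed as [mipLevel][arrayLayer]; multisampled textures carry only a single level.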
1904
1905 if (textureType == TextureBinding::TYPE_2D)
1906 {
1907 const tcu::Texture2D& texture = textureBinding.get2D();
1908
1909 texFormat = texture.getFormat();
1910 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1911 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1912 arrayLayers = 1u;
1913
1914 textureData.resize(mipLevels);
1915
1916 for (deUint32 level = 0; level < mipLevels; ++level)
1917 {
1918 if (texture.isLevelEmpty(level))
1919 continue;
1920
1921 textureData[level].push_back(texture.getLevel(level));
1922 }
1923 }
1924 else if (textureType == TextureBinding::TYPE_CUBE_MAP)
1925 {
1926 const tcu::TextureCube& texture = textureBinding.getCube();
1927
1928 texFormat = texture.getFormat();
1929 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1u);
1930 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1931 arrayLayers = 6u;
1932
1933 static const tcu::CubeFace cubeFaceMapping[tcu::CUBEFACE_LAST] =
1934 {
1935 tcu::CUBEFACE_POSITIVE_X,
1936 tcu::CUBEFACE_NEGATIVE_X,
1937 tcu::CUBEFACE_POSITIVE_Y,
1938 tcu::CUBEFACE_NEGATIVE_Y,
1939 tcu::CUBEFACE_POSITIVE_Z,
1940 tcu::CUBEFACE_NEGATIVE_Z
1941 };
1942
1943 textureData.resize(mipLevels);
1944
1945 for (deUint32 level = 0; level < mipLevels; ++level)
1946 {
1947 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
1948 {
1949 tcu::CubeFace face = cubeFaceMapping[faceNdx];
1950
1951 if (texture.isLevelEmpty(face, level))
1952 continue;
1953
1954 textureData[level].push_back(texture.getLevelFace(level, face));
1955 }
1956 }
1957 }
1958 else if (textureType == TextureBinding::TYPE_2D_ARRAY)
1959 {
1960 const tcu::Texture2DArray& texture = textureBinding.get2DArray();
1961
1962 texFormat = texture.getFormat();
1963 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1964 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1965 arrayLayers = (deUint32)texture.getNumLayers();
1966
1967 textureData.resize(mipLevels);
1968
1969 for (deUint32 level = 0; level < mipLevels; ++level)
1970 {
1971 if (texture.isLevelEmpty(level))
1972 continue;
1973
1974 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1975 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1976
1977 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1978 {
1979 const deUint32 layerOffset = layerSize * layer;
1980 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1981 textureData[level].push_back(layerData);
1982 }
1983 }
1984 }
1985 else if (textureType == TextureBinding::TYPE_3D)
1986 {
1987 const tcu::Texture3D& texture = textureBinding.get3D();
1988
1989 texFormat = texture.getFormat();
1990 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), texture.getDepth());
1991 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1992 arrayLayers = 1u;
1993
1994 textureData.resize(mipLevels);
1995
1996 for (deUint32 level = 0; level < mipLevels; ++level)
1997 {
1998 if (texture.isLevelEmpty(level))
1999 continue;
2000
2001 textureData[level].push_back(texture.getLevel(level));
2002 }
2003 }
2004 else if (textureType == TextureBinding::TYPE_1D)
2005 {
2006 const tcu::Texture1D& texture = textureBinding.get1D();
2007
2008 texFormat = texture.getFormat();
2009 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
2010 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
2011 arrayLayers = 1u;
2012
2013 textureData.resize(mipLevels);
2014
2015 for (deUint32 level = 0; level < mipLevels; ++level)
2016 {
2017 if (texture.isLevelEmpty(level))
2018 continue;
2019
2020 textureData[level].push_back(texture.getLevel(level));
2021 }
2022 }
2023 else if (textureType == TextureBinding::TYPE_1D_ARRAY)
2024 {
2025 const tcu::Texture1DArray& texture = textureBinding.get1DArray();
2026
2027 texFormat = texture.getFormat();
2028 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
2029 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
2030 arrayLayers = (deUint32)texture.getNumLayers();
2031
2032 textureData.resize(mipLevels);
2033
2034 for (deUint32 level = 0; level < mipLevels; ++level)
2035 {
2036 if (texture.isLevelEmpty(level))
2037 continue;
2038
2039 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
2040 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
2041
2042 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
2043 {
2044 const deUint32 layerOffset = layerSize * layer;
2045 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
2046 textureData[level].push_back(layerData);
2047 }
2048 }
2049 }
2050 else if (textureType == TextureBinding::TYPE_CUBE_ARRAY)
2051 {
2052 const tcu::TextureCubeArray& texture = textureBinding.getCubeArray();
2053 texFormat = texture.getFormat();
2054 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1);
2055 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
2056 arrayLayers = texture.getDepth();
2057
2058 textureData.resize(mipLevels);
2059
2060 for (deUint32 level = 0; level < mipLevels; ++level)
2061 {
2062 if (texture.isLevelEmpty(level))
2063 continue;
2064
2065 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
2066 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
2067
2068 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
2069 {
2070 const deUint32 layerOffset = layerSize * layer;
2071 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
2072 textureData[level].push_back(layerData);
2073 }
2074 }
2075 }
2076 else
2077 {
2078 TCU_THROW(InternalError, "Invalid texture type");
2079 }
2080
2081 createSamplerUniform(bindingLocation, textureType, textureBinding.getParameters().initialization, texFormat, texSize, textureData, refSampler, mipLevels, arrayLayers, textureParams);
2082 }
2083
2084 void ShaderRenderCaseInstance::setPushConstantRanges (const deUint32 rangeCount, const vk::VkPushConstantRange* const pcRanges)
2085 {
2086 m_pushConstantRanges.clear();
2087 for (deUint32 i = 0; i < rangeCount; ++i)
2088 {
2089 m_pushConstantRanges.push_back(pcRanges[i]);
2090 }
2091 }
2092
2093 void ShaderRenderCaseInstance::updatePushConstants (vk::VkCommandBuffer, vk::VkPipelineLayout)
2094 {
2095 }
2096
2097 void ShaderRenderCaseInstance::createSamplerUniform (deUint32 bindingLocation,
2098 TextureBinding::Type textureType,
2099 TextureBinding::Init textureInit,
2100 const tcu::TextureFormat& texFormat,
2101 const tcu::UVec3 texSize,
2102 const TextureData& textureData,
2103 const tcu::Sampler& refSampler,
2104 deUint32 mipLevels,
2105 deUint32 arrayLayers,
2106 TextureBinding::Parameters textureParams)
2107 {
2108 const VkDevice vkDevice = getDevice();
2109 const DeviceInterface& vk = getDeviceInterface();
2110 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
2111
2112 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
2113 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
2114 const VkImageViewType imageViewType = textureTypeToImageViewType(textureType);
2115 const VkImageType imageType = viewTypeToImageType(imageViewType);
2116 const VkFormat format = mapTextureFormat(texFormat);
2117 const bool isCube = imageViewType == VK_IMAGE_VIEW_TYPE_CUBE || imageViewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
2118 VkImageCreateFlags imageCreateFlags = isCube ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0;
2119 VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2120 Move<VkImage> vkTexture;
2121 de::MovePtr<Allocation> allocation;
2122
2123 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
2124 {
2125 checkSparseSupport(imageType);
2126 imageCreateFlags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
2127 }
2128
2129 // Create image
2130 const VkImageCreateInfo imageParams =
2131 {
2132 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2133 DE_NULL, // const void* pNext;
2134 imageCreateFlags, // VkImageCreateFlags flags;
2135 imageType, // VkImageType imageType;
2136 format, // VkFormat format;
2137 { // VkExtent3D extent;
2138 texSize.x(),
2139 texSize.y(),
2140 texSize.z()
2141 },
2142 mipLevels, // deUint32 mipLevels;
2143 arrayLayers, // deUint32 arrayLayers;
2144 textureParams.samples, // VkSampleCountFlagBits samples;
2145 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2146 imageUsageFlags, // VkImageUsageFlags usage;
2147 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2148 1u, // deUint32 queueFamilyIndexCount;
2149 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2150 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2151 };
2152
2153 vkTexture = createImage(vk, vkDevice, &imageParams);
2154 allocation = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *vkTexture), MemoryRequirement::Any);
2155
2156 if (m_imageBackingMode != IMAGE_BACKING_MODE_SPARSE)
2157 {
2158 VK_CHECK(vk.bindImageMemory(vkDevice, *vkTexture, allocation->getMemory(), allocation->getOffset()));
2159 }
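	// For sparse images the memory is instead bound inside uploadSparseImage() via vkQueueBindSparse.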
2160
2161 switch (textureInit)
2162 {
2163 case TextureBinding::INIT_UPLOAD_DATA:
2164 {
2165 // upload*Image functions use cmdCopyBufferToImage, which is invalid for multisample images
2166 DE_ASSERT(textureParams.samples == VK_SAMPLE_COUNT_1_BIT);
2167
2168 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
2169 {
2170 uploadSparseImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture, imageParams, texSize);
2171 }
2172 else
2173 {
2174 // Upload texture data
2175 uploadImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture);
2176 }
2177 break;
2178 }
2179 case TextureBinding::INIT_CLEAR:
2180 clearImage(refSampler, mipLevels, arrayLayers, *vkTexture);
2181 break;
2182 default:
2183 DE_FATAL("Impossible");
2184 }
2185
2186 // Create sampler
2187 const VkSamplerCreateInfo samplerParams = mapSampler(refSampler, texFormat);
2188 Move<VkSampler> sampler = createSampler(vk, vkDevice, &samplerParams);
2189 const deUint32 baseMipLevel = textureParams.baseMipLevel;
2190 const vk::VkComponentMapping components = textureParams.componentMapping;
2191 const VkImageViewCreateInfo viewParams =
2192 {
2193 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2194 DE_NULL, // const void* pNext;
2195 0u, // VkImageViewCreateFlags flags;
2196 *vkTexture, // VkImage image;
2197 imageViewType, // VkImageViewType viewType;
2198 format, // VkFormat format;
2199 components, // VkComponentMapping components;
2200 {
2201 aspectMask, // VkImageAspectFlags aspectMask;
2202 baseMipLevel, // deUint32 baseMipLevel;
2203 mipLevels - baseMipLevel, // deUint32 mipLevels;
2204 0, // deUint32 baseArraySlice;
2205 arrayLayers // deUint32 arraySize;
2206 }, // VkImageSubresourceRange subresourceRange;
2207 };
2208
2209 Move<VkImageView> imageView = createImageView(vk, vkDevice, &viewParams);
2210
2211 const vk::VkDescriptorImageInfo descriptor =
2212 {
2213 sampler.get(), // VkSampler sampler;
2214 imageView.get(), // VkImageView imageView;
2215 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout imageLayout;
2216 };
2217
2218 de::MovePtr<SamplerUniform> uniform(new SamplerUniform());
2219 uniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2220 uniform->descriptor = descriptor;
2221 uniform->location = bindingLocation;
2222 uniform->image = VkImageSp(new vk::Unique<VkImage>(vkTexture));
2223 uniform->imageView = VkImageViewSp(new vk::Unique<VkImageView>(imageView));
2224 uniform->sampler = VkSamplerSp(new vk::Unique<VkSampler>(sampler));
2225 uniform->alloc = AllocationSp(allocation.release());
2226
2227 m_descriptorSetLayoutBuilder->addSingleSamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_ALL, DE_NULL);
2228 m_descriptorPoolBuilder->addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
2229
2230 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniform)));
2231 }
2232
2233 void ShaderRenderCaseInstance::setupDefaultInputs (void)
2234 {
2235 /* Configuration of the vertex input attributes:
2236 a_position is at location 0
2237 a_coords is at location 1
2238 a_unitCoords is at location 2
2239 a_one is at location 3
2240
2241 User attributes start at location 4.
2242 */
2243
2244 DE_ASSERT(m_quadGrid);
2245 const QuadGrid& quadGrid = *m_quadGrid;
2246
2247 addAttribute(0u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getPositions());
2248 addAttribute(1u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getCoords());
2249 addAttribute(2u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUnitCoords());
2250 addAttribute(3u, VK_FORMAT_R32_SFLOAT, sizeof(float), quadGrid.getNumVertices(), quadGrid.getAttribOne());
2251
2252 static const struct
2253 {
2254 BaseAttributeType type;
2255 int userNdx;
2256 } userAttributes[] =
2257 {
2258 { A_IN0, 0 },
2259 { A_IN1, 1 },
2260 { A_IN2, 2 },
2261 { A_IN3, 3 }
2262 };
2263
2264 static const struct
2265 {
2266 BaseAttributeType matrixType;
2267 int numCols;
2268 int numRows;
2269 } matrices[] =
2270 {
2271 { MAT2, 2, 2 },
2272 { MAT2x3, 2, 3 },
2273 { MAT2x4, 2, 4 },
2274 { MAT3x2, 3, 2 },
2275 { MAT3, 3, 3 },
2276 { MAT3x4, 3, 4 },
2277 { MAT4x2, 4, 2 },
2278 { MAT4x3, 4, 3 },
2279 { MAT4, 4, 4 }
2280 };
2281
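	// Matrix attributes consume one location per column; each column is uploaded below as a separate vec4 attribute.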
2282 for (size_t attrNdx = 0; attrNdx < m_enabledBaseAttributes.size(); attrNdx++)
2283 {
2284 for (int userNdx = 0; userNdx < DE_LENGTH_OF_ARRAY(userAttributes); userNdx++)
2285 {
2286 if (userAttributes[userNdx].type != m_enabledBaseAttributes[attrNdx].type)
2287 continue;
2288
2289 addAttribute(m_enabledBaseAttributes[attrNdx].location, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUserAttrib(userNdx));
2290 }
2291
2292 for (int matNdx = 0; matNdx < DE_LENGTH_OF_ARRAY(matrices); matNdx++)
2293 {
2294
2295 if (matrices[matNdx].matrixType != m_enabledBaseAttributes[attrNdx].type)
2296 continue;
2297
2298 const int numCols = matrices[matNdx].numCols;
2299
2300 for (int colNdx = 0; colNdx < numCols; colNdx++)
2301 {
2302 addAttribute(m_enabledBaseAttributes[attrNdx].location + colNdx, VK_FORMAT_R32G32B32A32_SFLOAT, (deUint32)(4 * sizeof(float)), quadGrid.getNumVertices(), quadGrid.getUserAttrib(colNdx));
2303 }
2304 }
2305 }
2306 }
2307
2308 void ShaderRenderCaseInstance::render (deUint32 numVertices,
2309 deUint32 numTriangles,
2310 const deUint16* indices,
2311 const tcu::Vec4& constCoords)
2312 {
2313 render(numVertices, numTriangles * 3, indices, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, constCoords);
2314 }
2315
2316 void ShaderRenderCaseInstance::render (deUint32 numVertices,
2317 deUint32 numIndices,
2318 const deUint16* indices,
2319 VkPrimitiveTopology topology,
2320 const tcu::Vec4& constCoords)
2321 {
2322 const VkDevice vkDevice = getDevice();
2323 const DeviceInterface& vk = getDeviceInterface();
2324 const VkQueue queue = getUniversalQueue();
2325 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
2326
2327 vk::Move<vk::VkImage> colorImage;
2328 de::MovePtr<vk::Allocation> colorImageAlloc;
2329 vk::Move<vk::VkImageView> colorImageView;
2330 vk::Move<vk::VkImage> resolvedImage;
2331 de::MovePtr<vk::Allocation> resolvedImageAlloc;
2332 vk::Move<vk::VkImageView> resolvedImageView;
2333 vk::Move<vk::VkRenderPass> renderPass;
2334 vk::Move<vk::VkFramebuffer> framebuffer;
2335 vk::Move<vk::VkPipelineLayout> pipelineLayout;
2336 vk::Move<vk::VkPipeline> graphicsPipeline;
2337 vk::Move<vk::VkShaderModule> vertexShaderModule;
2338 vk::Move<vk::VkShaderModule> fragmentShaderModule;
2339 vk::Move<vk::VkBuffer> indexBuffer;
2340 de::MovePtr<vk::Allocation> indexBufferAlloc;
2341 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
2342 vk::Move<vk::VkDescriptorPool> descriptorPool;
2343 vk::Move<vk::VkDescriptorSet> descriptorSet;
2344 vk::Move<vk::VkCommandPool> cmdPool;
2345 vk::Move<vk::VkCommandBuffer> cmdBuffer;
2346 vk::Move<vk::VkFence> fence;
2347
2348 // Create color image
2349 {
2350 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2351 VkImageFormatProperties properties;
2352
2353 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
2354 m_colorFormat,
2355 VK_IMAGE_TYPE_2D,
2356 VK_IMAGE_TILING_OPTIMAL,
2357 imageUsage,
2358 0u,
2359 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
2360 {
2361 TCU_THROW(NotSupportedError, "Format not supported");
2362 }
2363
2364 if ((properties.sampleCounts & m_sampleCount) != m_sampleCount)
2365 {
2366 TCU_THROW(NotSupportedError, "Format not supported");
2367 }
2368
2369 const VkImageCreateInfo colorImageParams =
2370 {
2371 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2372 DE_NULL, // const void* pNext;
2373 0u, // VkImageCreateFlags flags;
2374 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2375 m_colorFormat, // VkFormat format;
2376 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
2377 1u, // deUint32 mipLevels;
2378 1u, // deUint32 arraySize;
2379 m_sampleCount, // deUint32 samples;
2380 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2381 imageUsage, // VkImageUsageFlags usage;
2382 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2383 1u, // deUint32 queueFamilyCount;
2384 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2385 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
2386 };
2387
2388 colorImage = createImage(vk, vkDevice, &colorImageParams);
2389
2390 // Allocate and bind color image memory
2391 colorImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *colorImage), MemoryRequirement::Any);
2392 VK_CHECK(vk.bindImageMemory(vkDevice, *colorImage, colorImageAlloc->getMemory(), colorImageAlloc->getOffset()));
2393 }
2394
2395 // Create color attachment view
2396 {
2397 const VkImageViewCreateInfo colorImageViewParams =
2398 {
2399 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2400 DE_NULL, // const void* pNext;
2401 0u, // VkImageViewCreateFlags flags;
2402 *colorImage, // VkImage image;
2403 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2404 m_colorFormat, // VkFormat format;
2405 {
2406 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
2407 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
2408 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
2409 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
2410 }, // VkChannelMapping channels;
2411 {
2412 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2413 0, // deUint32 baseMipLevel;
2414 1, // deUint32 mipLevels;
2415 0, // deUint32 baseArraySlice;
2416 1 // deUint32 arraySize;
2417 }, // VkImageSubresourceRange subresourceRange;
2418 };
2419
2420 colorImageView = createImageView(vk, vkDevice, &colorImageViewParams);
2421 }
2422
2423 if (isMultiSampling())
2424 {
2425 // Resolved Image
2426 {
2427 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2428 VkImageFormatProperties properties;
2429
2430 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
2431 m_colorFormat,
2432 VK_IMAGE_TYPE_2D,
2433 VK_IMAGE_TILING_OPTIMAL,
2434 imageUsage,
2435 0,
2436 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
2437 {
2438 TCU_THROW(NotSupportedError, "Format not supported");
2439 }
2440
2441 const VkImageCreateInfo imageCreateInfo =
2442 {
2443 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2444 DE_NULL, // const void* pNext;
2445 0u, // VkImageCreateFlags flags;
2446 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2447 m_colorFormat, // VkFormat format;
2448 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
2449 1u, // deUint32 mipLevels;
2450 1u, // deUint32 arrayLayers;
2451 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2452 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2453 imageUsage, // VkImageUsageFlags usage;
2454 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2455 1u, // deUint32 queueFamilyIndexCount;
2456 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2457 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2458 };
2459
2460 resolvedImage = vk::createImage(vk, vkDevice, &imageCreateInfo, DE_NULL);
2461 resolvedImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *resolvedImage), MemoryRequirement::Any);
2462 VK_CHECK(vk.bindImageMemory(vkDevice, *resolvedImage, resolvedImageAlloc->getMemory(), resolvedImageAlloc->getOffset()));
2463 }
2464
2465 // Resolved Image View
2466 {
2467 const VkImageViewCreateInfo imageViewCreateInfo =
2468 {
2469 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2470 DE_NULL, // const void* pNext;
2471 0u, // VkImageViewCreateFlags flags;
2472 *resolvedImage, // VkImage image;
2473 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2474 m_colorFormat, // VkFormat format;
2475 {
2476 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
2477 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
2478 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
2479 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
2480 },
2481 {
2482 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2483 0u, // deUint32 baseMipLevel;
2484 1u, // deUint32 mipLevels;
2485 0u, // deUint32 baseArrayLayer;
2486 1u, // deUint32 arraySize;
2487 }, // VkImageSubresourceRange subresourceRange;
2488 };
2489
2490 resolvedImageView = vk::createImageView(vk, vkDevice, &imageViewCreateInfo, DE_NULL);
2491 }
2492 }
2493
2494 // Create render pass
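	// Attachment 0 is the (possibly multisampled) color target; attachment 1 is the single-sample resolve
	// target and is only included when multisampling is enabled.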
2495 {
2496 const VkAttachmentDescription attachmentDescription[] =
2497 {
2498 {
2499 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2500 m_colorFormat, // VkFormat format;
2501 m_sampleCount, // deUint32 samples;
2502 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
2503 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2504 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2505 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2506 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2507 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2508 },
2509 {
2510 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2511 m_colorFormat, // VkFormat format;
2512 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2513 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp loadOp;
2514 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2515 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2516 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2517 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2518 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2519 }
2520 };
2521
2522 const VkAttachmentReference attachmentReference =
2523 {
2524 0u, // deUint32 attachment;
2525 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2526 };
2527
2528 const VkAttachmentReference resolveAttachmentRef =
2529 {
2530 1u, // deUint32 attachment;
2531 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2532 };
2533
2534 const VkSubpassDescription subpassDescription =
2535 {
2536 0u, // VkSubpassDescriptionFlags flags;
2537 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
2538 0u, // deUint32 inputAttachmentCount;
2539 DE_NULL, // const VkAttachmentReference* pInputAttachments;
2540 1u, // deUint32 colorAttachmentCount;
2541 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
2542 isMultiSampling() ? &resolveAttachmentRef : DE_NULL,// const VkAttachmentReference* pResolveAttachments;
2543 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
2544 0u, // deUint32 preserveAttachmentCount;
2545 DE_NULL // const deUint32* pPreserveAttachments;
2546 };
2547
2548 const VkRenderPassCreateInfo renderPassParams =
2549 {
2550 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
2551 DE_NULL, // const void* pNext;
2552 0u, // VkRenderPassCreateFlags flags;
2553 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2554 attachmentDescription, // const VkAttachmentDescription* pAttachments;
2555 1u, // deUint32 subpassCount;
2556 &subpassDescription, // const VkSubpassDescription* pSubpasses;
2557 0u, // deUint32 dependencyCount;
2558 DE_NULL // const VkSubpassDependency* pDependencies;
2559 };
2560
2561 renderPass = createRenderPass(vk, vkDevice, &renderPassParams);
2562 }
2563
2564 // Create framebuffer
2565 {
2566 const VkImageView attachments[] =
2567 {
2568 *colorImageView,
2569 *resolvedImageView
2570 };
2571
2572 const VkFramebufferCreateInfo framebufferParams =
2573 {
2574 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
2575 DE_NULL, // const void* pNext;
2576 (VkFramebufferCreateFlags)0,
2577 *renderPass, // VkRenderPass renderPass;
2578 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2579 attachments, // const VkImageView* pAttachments;
2580 (deUint32)m_renderSize.x(), // deUint32 width;
2581 (deUint32)m_renderSize.y(), // deUint32 height;
2582 1u // deUint32 layers;
2583 };
2584
2585 framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
2586 }
2587
2588 // Create descriptors
2589 {
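		// setupUniforms() fills m_uniformInfos and the descriptor layout/pool builders; the descriptor set is
		// then allocated and each uniform is written as either a uniform buffer or a combined image sampler.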
2590 setupUniforms(constCoords);
2591
2592 descriptorSetLayout = m_descriptorSetLayoutBuilder->build(vk, vkDevice);
2593 if (!m_uniformInfos.empty())
2594 {
2595 descriptorPool = m_descriptorPoolBuilder->build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2596 const VkDescriptorSetAllocateInfo allocInfo =
2597 {
2598 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
2599 DE_NULL,
2600 *descriptorPool,
2601 1u,
2602 &descriptorSetLayout.get(),
2603 };
2604
2605 descriptorSet = allocateDescriptorSet(vk, vkDevice, &allocInfo);
2606 }
2607
2608 for (deUint32 i = 0; i < m_uniformInfos.size(); i++)
2609 {
2610 const UniformInfo* uniformInfo = m_uniformInfos[i].get()->get();
2611 deUint32 location = uniformInfo->location;
2612
2613 if (uniformInfo->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
2614 {
2615 const BufferUniform* bufferInfo = dynamic_cast<const BufferUniform*>(uniformInfo);
2616
2617 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &bufferInfo->descriptor);
2618 }
2619 else if (uniformInfo->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2620 {
2621 const SamplerUniform* samplerInfo = dynamic_cast<const SamplerUniform*>(uniformInfo);
2622
2623 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &samplerInfo->descriptor);
2624 }
2625 else
2626 DE_FATAL("Impossible");
2627 }
2628
2629 m_descriptorSetUpdateBuilder->update(vk, vkDevice);
2630 }
2631
2632 // Create pipeline layout
2633 {
2634 const VkPushConstantRange* const pcRanges = m_pushConstantRanges.empty() ? DE_NULL : &m_pushConstantRanges[0];
2635 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
2636 {
2637 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
2638 DE_NULL, // const void* pNext;
2639 (VkPipelineLayoutCreateFlags)0,
2640 1u, // deUint32 descriptorSetCount;
2641 &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
2642 deUint32(m_pushConstantRanges.size()), // deUint32 pushConstantRangeCount;
2643 pcRanges // const VkPushConstantRange* pPushConstantRanges;
2644 };
2645
2646 pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
2647 }
2648
2649 // Create shaders
2650 {
2651 vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_vertexShaderName), 0);
2652 fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_fragmentShaderName), 0);
2653 }
2654
2655 // Create pipeline
2656 {
2657 const VkPipelineShaderStageCreateInfo shaderStageParams[2] =
2658 {
2659 {
2660 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2661 DE_NULL, // const void* pNext;
2662 (VkPipelineShaderStageCreateFlags)0,
2663 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStage stage;
2664 *vertexShaderModule, // VkShader shader;
2665 "main",
2666 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2667 },
2668 {
2669 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2670 DE_NULL, // const void* pNext;
2671 (VkPipelineShaderStageCreateFlags)0,
2672 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStage stage;
2673 *fragmentShaderModule, // VkShader shader;
2674 "main",
2675 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2676 }
2677 };
2678
2679 // Add test case specific attributes
2680 if (m_attribFunc)
2681 m_attribFunc(*this, numVertices);
2682
2683 // Add base attributes
2684 setupDefaultInputs();
2685
2686 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
2687 {
2688 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2689 DE_NULL, // const void* pNext;
2690 (VkPipelineVertexInputStateCreateFlags)0,
2691 (deUint32)m_vertexBindingDescription.size(), // deUint32 bindingCount;
2692 &m_vertexBindingDescription[0], // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2693 (deUint32)m_vertexAttributeDescription.size(), // deUint32 attributeCount;
2694 &m_vertexAttributeDescription[0], // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2695 };
2696
2697 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
2698 {
2699 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2700 DE_NULL, // const void* pNext;
2701 (VkPipelineInputAssemblyStateCreateFlags)0,
2702 topology, // VkPrimitiveTopology topology;
2703 false // VkBool32 primitiveRestartEnable;
2704 };
2705
2706 const VkViewport viewport =
2707 {
2708 0.0f, // float originX;
2709 0.0f, // float originY;
2710 (float)m_renderSize.x(), // float width;
2711 (float)m_renderSize.y(), // float height;
2712 0.0f, // float minDepth;
2713 1.0f // float maxDepth;
2714 };
2715
2716 const VkRect2D scissor =
2717 {
2718 {
2719 0u, // deUint32 x;
2720 0u, // deUint32 y;
2721 }, // VkOffset2D offset;
2722 {
2723 m_renderSize.x(), // deUint32 width;
2724 m_renderSize.y(), // deUint32 height;
2725 }, // VkExtent2D extent;
2726 };
2727
2728 const VkPipelineViewportStateCreateInfo viewportStateParams =
2729 {
2730 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
2731 DE_NULL, // const void* pNext;
2732 0u, // VkPipelineViewportStateCreateFlags flags;
2733 1u, // deUint32 viewportCount;
2734 &viewport, // const VkViewport* pViewports;
2735 1u, // deUint32 scissorCount;
2736 &scissor, // const VkRect2D* pScissors;
2737 };
2738
2739 const VkPipelineRasterizationStateCreateInfo rasterStateParams =
2740 {
2741 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2742 DE_NULL, // const void* pNext;
2743 (VkPipelineRasterizationStateCreateFlags)0,
2744 false, // VkBool32 depthClipEnable;
2745 false, // VkBool32 rasterizerDiscardEnable;
2746 VK_POLYGON_MODE_FILL, // VkFillMode fillMode;
2747 VK_CULL_MODE_NONE, // VkCullMode cullMode;
2748 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
2749 false, // VkBool32 depthBiasEnable;
2750 0.0f, // float depthBias;
2751 0.0f, // float depthBiasClamp;
2752 0.0f, // float slopeScaledDepthBias;
2753 1.0f, // float lineWidth;
2754 };
2755
2756 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
2757 {
2758 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
2759 DE_NULL, // const void* pNext;
2760 0u, // VkPipelineMultisampleStateCreateFlags flags;
2761 m_sampleCount, // VkSampleCountFlagBits rasterizationSamples;
2762 VK_FALSE, // VkBool32 sampleShadingEnable;
2763 0.0f, // float minSampleShading;
2764 DE_NULL, // const VkSampleMask* pSampleMask;
2765 VK_FALSE, // VkBool32 alphaToCoverageEnable;
2766 VK_FALSE // VkBool32 alphaToOneEnable;
2767 };
2768
2769 const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
2770 {
2771 false, // VkBool32 blendEnable;
2772 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendColor;
2773 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendColor;
2774 VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
2775 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendAlpha;
2776 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendAlpha;
2777 VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
2778 (VK_COLOR_COMPONENT_R_BIT |
2779 VK_COLOR_COMPONENT_G_BIT |
2780 VK_COLOR_COMPONENT_B_BIT |
2781 VK_COLOR_COMPONENT_A_BIT), // VkChannelFlags channelWriteMask;
2782 };
2783
2784 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
2785 {
2786 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
2787 DE_NULL, // const void* pNext;
2788 (VkPipelineColorBlendStateCreateFlags)0,
2789 false, // VkBool32 logicOpEnable;
2790 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
2791 1u, // deUint32 attachmentCount;
2792 &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
2793 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConst[4];
2794 };
2795
2796 const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
2797 {
2798 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2799 DE_NULL, // const void* pNext;
2800 0u, // VkPipelineCreateFlags flags;
2801 2u, // deUint32 stageCount;
2802 shaderStageParams, // const VkPipelineShaderStageCreateInfo* pStages;
2803 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2804 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2805 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2806 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
2807 &rasterStateParams, // const VkPipelineRasterStateCreateInfo* pRasterState;
2808 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2809 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2810 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2811 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2812 *pipelineLayout, // VkPipelineLayout layout;
2813 *renderPass, // VkRenderPass renderPass;
2814 0u, // deUint32 subpass;
2815 0u, // VkPipeline basePipelineHandle;
2816 0u // deInt32 basePipelineIndex;
2817 };
2818
2819 graphicsPipeline = createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
2820 }
2821
2822 // Create vertex indices buffer
2823 if (numIndices != 0)
2824 {
2825 const VkDeviceSize indexBufferSize = numIndices * sizeof(deUint16);
2826 const VkBufferCreateInfo indexBufferParams =
2827 {
2828 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2829 DE_NULL, // const void* pNext;
2830 0u, // VkBufferCreateFlags flags;
2831 indexBufferSize, // VkDeviceSize size;
2832 VK_BUFFER_USAGE_INDEX_BUFFER_BIT, // VkBufferUsageFlags usage;
2833 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2834 1u, // deUint32 queueFamilyCount;
2835 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
2836 };
2837
2838 indexBuffer = createBuffer(vk, vkDevice, &indexBufferParams);
2839 indexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *indexBuffer), MemoryRequirement::HostVisible);
2840
2841 VK_CHECK(vk.bindBufferMemory(vkDevice, *indexBuffer, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset()));
2842
2843 		// Load vertex indices into buffer
2844 deMemcpy(indexBufferAlloc->getHostPtr(), indices, (size_t)indexBufferSize);
2845 flushMappedMemoryRange(vk, vkDevice, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset(), indexBufferSize);
2846 }
2847
2848 // Create command pool
2849 {
2850 const VkCommandPoolCreateInfo cmdPoolParams =
2851 {
2852 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
2853 DE_NULL, // const void* pNext;
2854 			VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,		// VkCommandPoolCreateFlags		flags;
2855 queueFamilyIndex, // deUint32 queueFamilyIndex;
2856 };
2857
2858 cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
2859 }
2860
2861 // Create command buffer
2862 {
2863 const VkCommandBufferAllocateInfo cmdBufferParams =
2864 {
2865 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
2866 DE_NULL, // const void* pNext;
2867 			*cmdPool,									// VkCommandPool				commandPool;
2868 			VK_COMMAND_BUFFER_LEVEL_PRIMARY,			// VkCommandBufferLevel			level;
2869 			1u											// deUint32						commandBufferCount;
2870 };
2871
2872 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
2873 {
2874 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
2875 DE_NULL, // const void* pNext;
2876 			0u,											// VkCommandBufferUsageFlags				flags;
2877 			(const VkCommandBufferInheritanceInfo*)DE_NULL,	// const VkCommandBufferInheritanceInfo*	pInheritanceInfo;
2878 };
2879
2880 const VkClearValue clearValues = makeClearValueColorF32(m_clearColor.x(),
2881 m_clearColor.y(),
2882 m_clearColor.z(),
2883 m_clearColor.w());
2884
2885 const VkRenderPassBeginInfo renderPassBeginInfo =
2886 {
2887 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
2888 DE_NULL, // const void* pNext;
2889 *renderPass, // VkRenderPass renderPass;
2890 *framebuffer, // VkFramebuffer framebuffer;
2891 { { 0, 0 }, {m_renderSize.x(), m_renderSize.y() } }, // VkRect2D renderArea;
2892 1, // deUint32 clearValueCount;
2893 &clearValues, // const VkClearValue* pClearValues;
2894 };
2895
2896 cmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferParams);
2897
2898 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
2899
2900 {
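			// Transition the color attachment (and the resolve target when multisampling) from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL before recording the render pass.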
2901 const VkImageMemoryBarrier imageBarrier =
2902 {
2903 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2904 DE_NULL, // const void* pNext;
2905 0u, // VkAccessFlags srcAccessMask;
2906 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2907 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2908 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2909 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2910 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2911 *colorImage, // VkImage image;
2912 { // VkImageSubresourceRange subresourceRange;
2913 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2914 0u, // deUint32 baseMipLevel;
2915 					1u,									// deUint32				levelCount;
2916 					0u,									// deUint32				baseArrayLayer;
2917 					1u,									// deUint32				layerCount;
2918 }
2919 };
2920
2921 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &imageBarrier);
2922
2923 if (isMultiSampling()) {
2924 // add multisample barrier
2925 const VkImageMemoryBarrier multiSampleImageBarrier =
2926 {
2927 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2928 DE_NULL, // const void* pNext;
2929 0u, // VkAccessFlags srcAccessMask;
2930 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2931 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2932 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2933 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2934 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2935 *resolvedImage, // VkImage image;
2936 { // VkImageSubresourceRange subresourceRange;
2937 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2938 0u, // deUint32 baseMipLevel;
2939 						1u,								// deUint32				levelCount;
2940 						0u,								// deUint32				baseArrayLayer;
2941 						1u,								// deUint32				layerCount;
2942 }
2943 };
2944
2945 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &multiSampleImageBarrier);
2946 }
2947 }
2948
2949 vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
2950 updatePushConstants(*cmdBuffer, *pipelineLayout);
2951 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
2952 if (!m_uniformInfos.empty())
2953 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
2954
2955 const deUint32 numberOfVertexAttributes = (deUint32)m_vertexBuffers.size();
2956 const std::vector<VkDeviceSize> offsets(numberOfVertexAttributes, 0);
2957
2958 std::vector<VkBuffer> buffers(numberOfVertexAttributes);
2959 for (size_t i = 0; i < numberOfVertexAttributes; i++)
2960 {
2961 buffers[i] = m_vertexBuffers[i].get()->get();
2962 }
2963
2964 vk.cmdBindVertexBuffers(*cmdBuffer, 0, numberOfVertexAttributes, &buffers[0], &offsets[0]);
2965 if (numIndices != 0)
2966 {
2967 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT16);
2968 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1, 0, 0, 0);
2969 }
2970 else
2971 vk.cmdDraw(*cmdBuffer, numVertices, 1, 0, 1);
2972
2973 vk.cmdEndRenderPass(*cmdBuffer);
2974 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
2975 }
2976
2977 // Create fence
2978 {
2979 const VkFenceCreateInfo fenceParams =
2980 {
2981 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
2982 DE_NULL, // const void* pNext;
2983 0u // VkFenceCreateFlags flags;
2984 };
2985 fence = createFence(vk, vkDevice, &fenceParams);
2986 }
2987
2988 // Execute Draw
2989 {
2990 const VkSubmitInfo submitInfo =
2991 {
2992 VK_STRUCTURE_TYPE_SUBMIT_INFO,
2993 DE_NULL,
2994 0u,
2995 (const VkSemaphore*)DE_NULL,
2996 (const VkPipelineStageFlags*)DE_NULL,
2997 1u,
2998 &cmdBuffer.get(),
2999 0u,
3000 (const VkSemaphore*)DE_NULL,
3001 };
3002
3003 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
3004 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity*/));
3005 }
3006
3007 // Read back the result
3008 {
3009 const tcu::TextureFormat resultFormat = mapVkFormat(m_colorFormat);
3010 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(resultFormat.getPixelSize() * m_renderSize.x() * m_renderSize.y());
3011 const VkBufferCreateInfo readImageBufferParams =
3012 {
3013 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
3014 DE_NULL, // const void* pNext;
3015 0u, // VkBufferCreateFlags flags;
3016 imageSizeBytes, // VkDeviceSize size;
3017 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
3018 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
3019 			1u,											// deUint32				queueFamilyIndexCount;
3020 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
3021 };
3022 const Unique<VkBuffer> readImageBuffer (createBuffer(vk, vkDevice, &readImageBufferParams));
3023 const de::UniquePtr<Allocation> readImageBufferMemory (m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *readImageBuffer), MemoryRequirement::HostVisible));
3024
3025 VK_CHECK(vk.bindBufferMemory(vkDevice, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
3026
3027 // Copy image to buffer
3028 const VkCommandBufferAllocateInfo cmdBufferParams =
3029 {
3030 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
3031 DE_NULL, // const void* pNext;
3032 			*cmdPool,									// VkCommandPool				commandPool;
3033 			VK_COMMAND_BUFFER_LEVEL_PRIMARY,			// VkCommandBufferLevel			level;
3034 			1u											// deUint32						commandBufferCount;
3035 };
3036
3037 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
3038 {
3039 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
3040 DE_NULL, // const void* pNext;
3041 			0u,											// VkCommandBufferUsageFlags				flags;
3042 			(const VkCommandBufferInheritanceInfo*)DE_NULL,	// const VkCommandBufferInheritanceInfo*	pInheritanceInfo;
3043 };
3044
3045 const Move<VkCommandBuffer> resultCmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferParams);
3046
3047 const VkBufferImageCopy copyParams =
3048 {
3049 0u, // VkDeviceSize bufferOffset;
3050 (deUint32)m_renderSize.x(), // deUint32 bufferRowLength;
3051 (deUint32)m_renderSize.y(), // deUint32 bufferImageHeight;
3052 {
3053 				VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
3054 				0u,										// deUint32				mipLevel;
3055 				0u,										// deUint32				baseArrayLayer;
3056 				1u,										// deUint32				layerCount;
3057 			},											// VkImageSubresourceLayers	imageSubresource;
3058 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
3059 { m_renderSize.x(), m_renderSize.y(), 1u } // VkExtent3D imageExtent;
3060 };
3061 const VkSubmitInfo submitInfo =
3062 {
3063 VK_STRUCTURE_TYPE_SUBMIT_INFO,
3064 DE_NULL,
3065 0u,
3066 (const VkSemaphore*)DE_NULL,
3067 (const VkPipelineStageFlags*)DE_NULL,
3068 1u,
3069 &resultCmdBuffer.get(),
3070 0u,
3071 (const VkSemaphore*)DE_NULL,
3072 };
3073
3074 VK_CHECK(vk.beginCommandBuffer(*resultCmdBuffer, &cmdBufferBeginInfo));
3075
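		// Barriers around the readback copy: transition the rendered image to TRANSFER_SRC_OPTIMAL before the copy, then make the transfer write visible to host reads.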
3076 const VkImageMemoryBarrier imageBarrier =
3077 {
3078 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
3079 DE_NULL, // const void* pNext;
3080 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
3081 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
3082 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
3083 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
3084 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
3085 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
3086 isMultiSampling() ? *resolvedImage : *colorImage, // VkImage image;
3087 { // VkImageSubresourceRange subresourceRange;
3088 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
3089 0u, // deUint32 baseMipLevel;
3090 				1u,										// deUint32				levelCount;
3091 				0u,										// deUint32				baseArrayLayer;
3092 				1u										// deUint32				layerCount;
3093 }
3094 };
3095
3096 const VkBufferMemoryBarrier bufferBarrier =
3097 {
3098 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
3099 DE_NULL, // const void* pNext;
3100 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
3101 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
3102 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
3103 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
3104 *readImageBuffer, // VkBuffer buffer;
3105 0u, // VkDeviceSize offset;
3106 imageSizeBytes // VkDeviceSize size;
3107 };
3108
3109 vk.cmdPipelineBarrier(*resultCmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3110 		vk.cmdCopyImageToBuffer(*resultCmdBuffer, isMultiSampling() ? *resolvedImage : *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams);
3111 vk.cmdPipelineBarrier(*resultCmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
3112
3113 VK_CHECK(vk.endCommandBuffer(*resultCmdBuffer));
3114
3115 VK_CHECK(vk.resetFences(vkDevice, 1, &fence.get()));
3116 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
3117 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
3118
3119 invalidateMappedMemoryRange(vk, vkDevice, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset(), imageSizeBytes);
3120
3121 const tcu::ConstPixelBufferAccess resultAccess (resultFormat, m_renderSize.x(), m_renderSize.y(), 1, readImageBufferMemory->getHostPtr());
3122
3123 m_resultImage.setStorage(resultFormat, m_renderSize.x(), m_renderSize.y());
3124 tcu::copy(m_resultImage.getAccess(), resultAccess);
3125 }
3126 }
3127
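// Reference renderer for vertex shader cases: the evaluator is run once per grid vertex and the
// per-vertex colors are interpolated across the two triangles of each quad, mirroring rasterizer interpolation.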
3128 void ShaderRenderCaseInstance::computeVertexReference (tcu::Surface& result, const QuadGrid& quadGrid)
3129 {
3130 DE_ASSERT(m_evaluator);
3131
3132 // Buffer info.
3133 const int width = result.getWidth();
3134 const int height = result.getHeight();
3135 const int gridSize = quadGrid.getGridSize();
3136 const int stride = gridSize + 1;
3137 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
3138 ShaderEvalContext evalCtx (quadGrid);
3139
3140 // Evaluate color for each vertex.
3141 std::vector<tcu::Vec4> colors ((gridSize + 1) * (gridSize + 1));
3142 for (int y = 0; y < gridSize+1; y++)
3143 for (int x = 0; x < gridSize+1; x++)
3144 {
3145 const float sx = (float)x / (float)gridSize;
3146 const float sy = (float)y / (float)gridSize;
3147 const int vtxNdx = ((y * (gridSize+1)) + x);
3148
3149 evalCtx.reset(sx, sy);
3150 m_evaluator->evaluate(evalCtx);
3151 DE_ASSERT(!evalCtx.isDiscarded); // Discard is not available in vertex shader.
3152 tcu::Vec4 color = evalCtx.color;
3153
3154 if (!hasAlpha)
3155 color.w() = 1.0f;
3156
3157 colors[vtxNdx] = color;
3158 }
3159
3160 // Render quads.
3161 for (int y = 0; y < gridSize; y++)
3162 for (int x = 0; x < gridSize; x++)
3163 {
3164 const float x0 = (float)x / (float)gridSize;
3165 const float x1 = (float)(x + 1) / (float)gridSize;
3166 const float y0 = (float)y / (float)gridSize;
3167 const float y1 = (float)(y + 1) / (float)gridSize;
3168
3169 const float sx0 = x0 * (float)width;
3170 const float sx1 = x1 * (float)width;
3171 const float sy0 = y0 * (float)height;
3172 const float sy1 = y1 * (float)height;
3173 const float oosx = 1.0f / (sx1 - sx0);
3174 const float oosy = 1.0f / (sy1 - sy0);
3175
3176 const int ix0 = deCeilFloatToInt32(sx0 - 0.5f);
3177 const int ix1 = deCeilFloatToInt32(sx1 - 0.5f);
3178 const int iy0 = deCeilFloatToInt32(sy0 - 0.5f);
3179 const int iy1 = deCeilFloatToInt32(sy1 - 0.5f);
3180
3181 const int v00 = (y * stride) + x;
3182 const int v01 = (y * stride) + x + 1;
3183 const int v10 = ((y + 1) * stride) + x;
3184 const int v11 = ((y + 1) * stride) + x + 1;
3185 const tcu::Vec4 c00 = colors[v00];
3186 const tcu::Vec4 c01 = colors[v01];
3187 const tcu::Vec4 c10 = colors[v10];
3188 const tcu::Vec4 c11 = colors[v11];
3189
3190 //printf("(%d,%d) -> (%f..%f, %f..%f) (%d..%d, %d..%d)\n", x, y, sx0, sx1, sy0, sy1, ix0, ix1, iy0, iy1);
3191
3192 for (int iy = iy0; iy < iy1; iy++)
3193 for (int ix = ix0; ix < ix1; ix++)
3194 {
3195 DE_ASSERT(deInBounds32(ix, 0, width));
3196 DE_ASSERT(deInBounds32(iy, 0, height));
3197
3198 const float sfx = (float)ix + 0.5f;
3199 const float sfy = (float)iy + 0.5f;
3200 const float fx1 = deFloatClamp((sfx - sx0) * oosx, 0.0f, 1.0f);
3201 const float fy1 = deFloatClamp((sfy - sy0) * oosy, 0.0f, 1.0f);
3202
3203 // Triangle quad interpolation.
3204 const bool tri = fx1 + fy1 <= 1.0f;
3205 const float tx = tri ? fx1 : (1.0f-fx1);
3206 const float ty = tri ? fy1 : (1.0f-fy1);
3207 const tcu::Vec4& t0 = tri ? c00 : c11;
3208 const tcu::Vec4& t1 = tri ? c01 : c10;
3209 const tcu::Vec4& t2 = tri ? c10 : c01;
3210 const tcu::Vec4 color = t0 + (t1-t0)*tx + (t2-t0)*ty;
3211
3212 result.setPixel(ix, iy, tcu::RGBA(color));
3213 }
3214 }
3215 }
3216
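// Reference renderer for fragment shader cases: the evaluator is run once per pixel; discarded fragments take the clear color.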
3217 void ShaderRenderCaseInstance::computeFragmentReference (tcu::Surface& result, const QuadGrid& quadGrid)
3218 {
3219 DE_ASSERT(m_evaluator);
3220
3221 // Buffer info.
3222 const int width = result.getWidth();
3223 const int height = result.getHeight();
3224 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
3225 ShaderEvalContext evalCtx (quadGrid);
3226
3227 // Render.
3228 for (int y = 0; y < height; y++)
3229 for (int x = 0; x < width; x++)
3230 {
3231 const float sx = ((float)x + 0.5f) / (float)width;
3232 const float sy = ((float)y + 0.5f) / (float)height;
3233
3234 evalCtx.reset(sx, sy);
3235 m_evaluator->evaluate(evalCtx);
3236 // Select either clear color or computed color based on discarded bit.
3237 tcu::Vec4 color = evalCtx.isDiscarded ? m_clearColor : evalCtx.color;
3238
3239 if (!hasAlpha)
3240 color.w() = 1.0f;
3241
3242 result.setPixel(x, y, tcu::RGBA(color));
3243 }
3244 }
3245
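// Fuzzy comparison of rendered and reference images; both are written to the log and the check passes while the fuzzy error metric stays below errorThreshold.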
3246 bool ShaderRenderCaseInstance::compareImages (const tcu::Surface& resImage, const tcu::Surface& refImage, float errorThreshold)
3247 {
3248 return tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ComparisonResult", "Image comparison result", refImage, resImage, errorThreshold, tcu::COMPARE_LOG_RESULT);
3249 }
3250
3251 } // sr
3252 } // vkt
3253