1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2023 LunarG, Inc.
7 * Copyright (c) 2023 Nintendo
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file vktPipelineRenderToImageTests.cpp
23 * \brief Render to image tests
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktPipelineRenderToImageTests.hpp"
27 #include "vktPipelineMakeUtil.hpp"
28 #include "vktTestCase.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktPipelineVertexUtil.hpp"
31 #include "vktTestGroupUtil.hpp"
32 #include "vkObjUtil.hpp"
33
34 #include "vkMemUtil.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkTypeUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkPrograms.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkCmdUtil.hpp"
42
43 #include "tcuTextureUtil.hpp"
44 #include "tcuImageCompare.hpp"
45 #include "tcuTestLog.hpp"
46 #include "tcuPlatform.hpp"
47 #include "vkPlatform.hpp"
48
49 #include "deUniquePtr.hpp"
50 #include "deSharedPtr.hpp"
51
52 #include <string>
53 #include <vector>
54 #include <set>
55 #include <algorithm>
56
57 namespace vkt
58 {
59 namespace pipeline
60 {
61 namespace
62 {
63 using namespace vk;
64 using de::UniquePtr;
65 using de::MovePtr;
66 using de::SharedPtr;
67 using tcu::IVec3;
68 using tcu::Vec4;
69 using tcu::UVec4;
70 using tcu::IVec2;
71 using tcu::IVec4;
72 using tcu::BVec4;
73 using std::vector;
74
75 typedef SharedPtr<Unique<VkImageView> > SharedPtrVkImageView;
76
77 enum Constants
78 {
79 NUM_CUBE_FACES = 6,
80 REFERENCE_COLOR_VALUE = 125,
81 REFERENCE_STENCIL_VALUE = 42,
82 MAX_SIZE = -1, //!< Should be queried at runtime and replaced with max possible value
83 MAX_VERIFICATION_REGION_SIZE = 32, //!< Limit the checked area to a small size, especially for huge images
84 MAX_VERIFICATION_REGION_DEPTH = 8,
85
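// Bitmasks of which size components are requested at maximum size; reading the names and bit
// values here: 1 = width, 2 = height, 4 = depth, 8 = layers (presumably consumed by the
// test-case generation code later in this file).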
86 MASK_W = (1 | 0 | 0 | 0),
87 MASK_W_LAYERS = (1 | 0 | 0 | 8),
88 MASK_WH = (1 | 2 | 0 | 0),
89 MASK_WH_LAYERS = (1 | 2 | 0 | 8),
90 MASK_WHD = (1 | 2 | 4 | 0),
91 };
92
93 enum AllocationKind
94 {
95 ALLOCATION_KIND_SUBALLOCATED = 0,
96 ALLOCATION_KIND_DEDICATED,
97 };
98
99 static const float REFERENCE_DEPTH_VALUE = 1.0f;
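//! Per-layer/slice colors used by the draw calls; indexed modulo the table size.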
100 static const Vec4 COLOR_TABLE[] =
101 {
102 Vec4(0.9f, 0.0f, 0.0f, 1.0f),
103 Vec4(0.6f, 1.0f, 0.0f, 1.0f),
104 Vec4(0.3f, 0.0f, 1.0f, 1.0f),
105 Vec4(0.1f, 1.0f, 1.0f, 1.0f),
106 Vec4(0.8f, 1.0f, 0.0f, 1.0f),
107 Vec4(0.5f, 0.0f, 1.0f, 1.0f),
108 Vec4(0.2f, 0.0f, 0.0f, 1.0f),
109 Vec4(1.0f, 1.0f, 0.0f, 1.0f),
110 };
111
112 struct CaseDef
113 {
114 PipelineConstructionType pipelineConstructionType;
115 VkImageViewType viewType;
116 IVec4 imageSizeHint; //!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE
117 VkFormat colorFormat;
118 VkFormat depthStencilFormat; //!< A depth/stencil format, or UNDEFINED if not used
119 AllocationKind allocationKind;
120 };
121
122 template<typename T>
123 inline SharedPtr<Unique<T> > makeSharedPtr (Move<T> move)
124 {
125 return SharedPtr<Unique<T> >(new Unique<T>(move));
126 }
127
128 template<typename T>
129 inline VkDeviceSize sizeInBytes (const vector<T>& vec)
130 {
131 return vec.size() * sizeof(vec[0]);
132 }
133
134 inline bool isCube (const VkImageViewType viewType)
135 {
136 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
137 }
138
139 inline VkDeviceSize product (const IVec4& v)
140 {
141 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w();
142 }
143
144 template<typename T>
145 inline T sum (const vector<T>& v)
146 {
147 T total = static_cast<T>(0);
148 for (typename vector<T>::const_iterator it = v.begin(); it != v.end(); ++it)
149 total += *it;
150 return total;
151 }
152
153 template <typename T, int Size>
154 int findIndexOfMaxComponent (const tcu::Vector<T, Size>& vec)
155 {
156 int index = 0;
157 T value = vec[0];
158
159 for (int i = 1; i < Size; ++i)
160 {
161 if (vec[i] > value)
162 {
163 index = i;
164 value = vec[i];
165 }
166 }
167
168 return index;
169 }
170
171 inline int maxLayersOrDepth (const IVec4& size)
172 {
173 // This is safe because 3D images must have layers (w) = 1
174 return deMax32(size.z(), size.w());
175 }
176
177 de::MovePtr<Allocation> bindBuffer (const InstanceInterface& vki,
178 const DeviceInterface& vkd,
179 const VkPhysicalDevice& physDevice,
180 const VkDevice device,
181 const VkBuffer& buffer,
182 const MemoryRequirement requirement,
183 Allocator& allocator,
184 AllocationKind allocationKind)
185 {
186 switch (allocationKind)
187 {
188 case ALLOCATION_KIND_SUBALLOCATED:
189 {
190 return vk::bindBuffer(vkd, device, allocator, buffer, requirement);
191 }
192
193 case ALLOCATION_KIND_DEDICATED:
194 {
195 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, requirement);
196 }
197
198 default:
199 {
200 TCU_THROW(InternalError, "Invalid allocation kind");
201 }
202 }
203 }
204
205 de::MovePtr<Allocation> bindImage (const InstanceInterface& vki,
206 const DeviceInterface& vkd,
207 const VkPhysicalDevice& physDevice,
208 const VkDevice device,
209 const VkImage& image,
210 const MemoryRequirement requirement,
211 Allocator& allocator,
212 AllocationKind allocationKind)
213 {
214 switch (allocationKind)
215 {
216 case ALLOCATION_KIND_SUBALLOCATED:
217 {
218 return vk::bindImage(vkd, device, allocator, image, requirement);
219 }
220
221 case ALLOCATION_KIND_DEDICATED:
222 {
223 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement);
224 }
225
226 default:
227 {
228 TCU_THROW(InternalError, "Invalid allocation kind");
229 }
230 }
231 }
232
233 // This is very test specific, so be careful if you want to reuse this code.
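// One pipeline is built per subpass. The depth test uses VK_COMPARE_OP_LESS with depth writes
// disabled, and the stencil test requires equality with REFERENCE_STENCIL_VALUE, so fragments are
// only drawn where the attachments still hold the cleared reference values.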
234 void preparePipelineWrapper(GraphicsPipelineWrapper& gpw,
235 const VkPipeline basePipeline, // for derivatives
236 const PipelineLayoutWrapper& pipelineLayout,
237 const VkRenderPass renderPass,
238 const ShaderWrapper vertexModule,
239 const ShaderWrapper fragmentModule,
240 const IVec2& renderSize,
241 const VkPrimitiveTopology topology,
242 const deUint32 subpass,
243 const bool useDepth,
244 const bool useStencil)
245 {
246 const VkVertexInputBindingDescription vertexInputBindingDescription =
247 {
248 0u, // uint32_t binding;
249 sizeof(Vertex4RGBA), // uint32_t stride;
250 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
251 };
252
253 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
254 {
255 {
256 0u, // uint32_t location;
257 0u, // uint32_t binding;
258 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
259 0u, // uint32_t offset;
260 },
261 {
262 1u, // uint32_t location;
263 0u, // uint32_t binding;
264 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
265 sizeof(Vec4), // uint32_t offset;
266 }
267 };
268
269 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
270 {
271 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
272 DE_NULL, // const void* pNext;
273 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
274 1u, // uint32_t vertexBindingDescriptionCount;
275 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
276 DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions), // uint32_t vertexAttributeDescriptionCount;
277 vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
278 };
279
280 const std::vector<VkViewport> viewport { makeViewport(renderSize) };
281 const std::vector<VkRect2D> scissor { makeRect2D(renderSize) };
282
283 const VkStencilOpState stencilOpState = makeStencilOpState(
284 VK_STENCIL_OP_KEEP, // stencil fail
285 VK_STENCIL_OP_KEEP, // depth & stencil pass
286 VK_STENCIL_OP_KEEP, // depth only fail
287 VK_COMPARE_OP_EQUAL, // compare op
288 ~0u, // compare mask
289 ~0u, // write mask
290 static_cast<deUint32>(REFERENCE_STENCIL_VALUE)); // reference
291
292 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
293 {
294 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
295 DE_NULL, // const void* pNext;
296 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
297 useDepth, // VkBool32 depthTestEnable;
298 VK_FALSE, // VkBool32 depthWriteEnable;
299 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
300 VK_FALSE, // VkBool32 depthBoundsTestEnable;
301 useStencil, // VkBool32 stencilTestEnable;
302 stencilOpState, // VkStencilOpState front;
303 stencilOpState, // VkStencilOpState back;
304 0.0f, // float minDepthBounds;
305 1.0f, // float maxDepthBounds;
306 };
307
308 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
309 // Number of blend attachments must equal the number of color attachments during any subpass.
310 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
311 {
312 VK_FALSE, // VkBool32 blendEnable;
313 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
314 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
315 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
316 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
317 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
318 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
319 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
320 };
321
322 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
323 {
324 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
325 DE_NULL, // const void* pNext;
326 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
327 VK_FALSE, // VkBool32 logicOpEnable;
328 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
329 1u, // deUint32 attachmentCount;
330 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
331 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
332 };
333
334 gpw.setDefaultTopology(topology)
335 .setDefaultRasterizationState()
336 .setDefaultMultisampleState()
337 .setupVertexInputState(&vertexInputStateInfo)
338 .setupPreRasterizationShaderState(viewport,
339 scissor,
340 pipelineLayout,
341 renderPass,
342 subpass,
343 vertexModule)
344 .setupFragmentShaderState(pipelineLayout, renderPass, subpass, fragmentModule, &pipelineDepthStencilStateInfo)
345 .setupFragmentOutputState(renderPass, subpass, &pipelineColorBlendStateInfo)
346 .setMonolithicPipelineLayout(pipelineLayout)
347 .buildPipeline(DE_NULL, basePipeline, -1);
348 }
349
350 //! Make a render pass with one subpass per color attachment and depth/stencil attachment (if used).
351 RenderPassWrapper makeRenderPass (const DeviceInterface& vk,
352 const VkDevice device,
353 const PipelineConstructionType pipelineConstructionType,
354 const VkFormat colorFormat,
355 const VkFormat depthStencilFormat,
356 const deUint32 numLayers,
357 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
358 const VkImageLayout initialDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED)
359 {
360 const VkAttachmentDescription colorAttachmentDescription =
361 {
362 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
363 colorFormat, // VkFormat format;
364 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
365 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
366 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
367 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
368 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
369 initialColorImageLayout, // VkImageLayout initialLayout;
370 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
371 };
372 vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription);
373
374 const VkAttachmentDescription depthStencilAttachmentDescription =
375 {
376 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
377 depthStencilFormat, // VkFormat format;
378 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
379 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
380 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
381 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp stencilLoadOp;
382 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
383 initialDepthStencilImageLayout, // VkImageLayout initialLayout;
384 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
385 };
386
387 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
388 attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription);
389
390 // Create a subpass for each attachment (each attachment is a layer of an arrayed image).
391 vector<VkAttachmentReference> colorAttachmentReferences (numLayers);
392 vector<VkAttachmentReference> depthStencilAttachmentReferences(numLayers);
393 vector<VkSubpassDescription> subpasses;
394
395 // Ordering here must match the framebuffer attachments
396 for (deUint32 i = 0; i < numLayers; ++i)
397 {
398 const VkAttachmentReference attachmentRef =
399 {
400 i, // deUint32 attachment;
401 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
402 };
403 const VkAttachmentReference depthStencilAttachmentRef =
404 {
405 i + numLayers, // deUint32 attachment;
406 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout;
407 };
408
409 colorAttachmentReferences[i] = attachmentRef;
410 depthStencilAttachmentReferences[i] = depthStencilAttachmentRef;
411
412 const VkAttachmentReference* pDepthStencilAttachment = (depthStencilFormat != VK_FORMAT_UNDEFINED ? &depthStencilAttachmentReferences[i] : DE_NULL);
413 const VkSubpassDescription subpassDescription =
414 {
415 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
416 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
417 0u, // deUint32 inputAttachmentCount;
418 DE_NULL, // const VkAttachmentReference* pInputAttachments;
419 1u, // deUint32 colorAttachmentCount;
420 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments;
421 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
422 pDepthStencilAttachment, // const VkAttachmentReference* pDepthStencilAttachment;
423 0u, // deUint32 preserveAttachmentCount;
424 DE_NULL // const deUint32* pPreserveAttachments;
425 };
426 subpasses.push_back(subpassDescription);
427 }
428
429 const VkRenderPassCreateInfo renderPassInfo =
430 {
431 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
432 DE_NULL, // const void* pNext;
433 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
434 static_cast<deUint32>(attachmentDescriptions.size()), // deUint32 attachmentCount;
435 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments;
436 static_cast<deUint32>(subpasses.size()), // deUint32 subpassCount;
437 &subpasses[0], // const VkSubpassDescription* pSubpasses;
438 0u, // deUint32 dependencyCount;
439 DE_NULL // const VkSubpassDependency* pDependencies;
440 };
441
442 return RenderPassWrapper(pipelineConstructionType, vk, device, &renderPassInfo);
443 }
444
445 Move<VkImage> makeImage (const DeviceInterface& vk,
446 const VkDevice device,
447 VkImageCreateFlags flags,
448 VkImageType imageType,
449 const VkFormat format,
450 const IVec3& size,
451 const deUint32 numMipLevels,
452 const deUint32 numLayers,
453 const VkImageUsageFlags usage)
454 {
455 const VkImageCreateInfo imageParams =
456 {
457 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
458 DE_NULL, // const void* pNext;
459 flags, // VkImageCreateFlags flags;
460 imageType, // VkImageType imageType;
461 format, // VkFormat format;
462 makeExtent3D(size), // VkExtent3D extent;
463 numMipLevels, // deUint32 mipLevels;
464 numLayers, // deUint32 arrayLayers;
465 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
466 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
467 usage, // VkImageUsageFlags usage;
468 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
469 0u, // deUint32 queueFamilyIndexCount;
470 DE_NULL, // const deUint32* pQueueFamilyIndices;
471 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
472 };
473 return createImage(vk, device, &imageParams);
474 }
475
476 inline VkImageSubresourceRange makeColorSubresourceRange (const int baseArrayLayer, const int layerCount)
477 {
478 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<deUint32>(baseArrayLayer), static_cast<deUint32>(layerCount));
479 }
480
481 //! Get a reference clear value based on color format.
482 VkClearValue getClearValue (const VkFormat format)
483 {
484 if (isUintFormat(format) || isIntFormat(format))
485 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE);
486 else
487 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f);
488 }
489
490 std::string getColorFormatStr (const int numComponents, const bool isUint, const bool isSint)
491 {
492 std::ostringstream str;
493 if (numComponents == 1)
494 str << (isUint ? "uint" : isSint ? "int" : "float");
495 else
496 str << (isUint ? "u" : isSint ? "i" : "") << "vec" << numComponents;
497
498 return str.str();
499 }
500
501 //! One half-viewport quad (right half) per subpass, colored from COLOR_TABLE. Use with TRIANGLE_STRIP topology.
502 vector<Vertex4RGBA> genFullQuadVertices (const int subpassCount)
503 {
504 vector<Vertex4RGBA> vectorData;
505 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
506 {
507 Vertex4RGBA data =
508 {
509 Vec4(0.0f, -1.0f, 0.0f, 1.0f),
510 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)],
511 };
512 vectorData.push_back(data);
513 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
514 vectorData.push_back(data);
515 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f);
516 vectorData.push_back(data);
517 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f);
518 vectorData.push_back(data);
519 }
520 return vectorData;
521 }
522
523 VkImageType getImageType (const VkImageViewType viewType)
524 {
525 switch (viewType)
526 {
527 case VK_IMAGE_VIEW_TYPE_1D:
528 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
529 return VK_IMAGE_TYPE_1D;
530
531 case VK_IMAGE_VIEW_TYPE_2D:
532 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
533 case VK_IMAGE_VIEW_TYPE_CUBE:
534 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
535 return VK_IMAGE_TYPE_2D;
536
537 case VK_IMAGE_VIEW_TYPE_3D:
538 return VK_IMAGE_TYPE_3D;
539
540 default:
541 DE_ASSERT(0);
542 return VK_IMAGE_TYPE_LAST;
543 }
544 }
545
546 //! ImageViewType for accessing a single layer/slice of an image
547 VkImageViewType getImageViewSliceType (const VkImageViewType viewType)
548 {
549 switch (viewType)
550 {
551 case VK_IMAGE_VIEW_TYPE_1D:
552 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
553 return VK_IMAGE_VIEW_TYPE_1D;
554
555 case VK_IMAGE_VIEW_TYPE_2D:
556 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
557 case VK_IMAGE_VIEW_TYPE_CUBE:
558 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
559 case VK_IMAGE_VIEW_TYPE_3D:
560 return VK_IMAGE_VIEW_TYPE_2D;
561
562 default:
563 DE_ASSERT(0);
564 return VK_IMAGE_VIEW_TYPE_LAST;
565 }
566 }
567
568 VkImageCreateFlags getImageCreateFlags (const VkImageViewType viewType)
569 {
570 VkImageCreateFlags flags = (VkImageCreateFlags)0;
571
572 if (viewType == VK_IMAGE_VIEW_TYPE_3D) flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
573 if (isCube(viewType)) flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
574
575 return flags;
576 }
577
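//! Build the reference image: the clear color everywhere, with the right half of every slice
//! overwritten by that slice's color from COLOR_TABLE (matching the half-viewport quads).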
578 void generateExpectedImage (const tcu::PixelBufferAccess& outputImage, const IVec2& renderSize, const int colorDepthOffset)
579 {
580 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type);
581 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
582 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat()));
583
584 if (isInt)
585 tcu::clear(outputImage, IVec4(clearValue.color.int32));
586 else
587 tcu::clear(outputImage, Vec4(clearValue.color.float32));
588
589 for (int z = 0; z < outputImage.getDepth(); ++z)
590 {
591 const Vec4& setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)];
592 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<deInt32>();
593
594 for (int y = 0; y < renderSize.y(); ++y)
595 for (int x = renderSize.x()/2; x < renderSize.x(); ++x)
596 {
597 if (isInt)
598 outputImage.setPixel(setColorInt, x, y, z);
599 else
600 outputImage.setPixel(setColor, x, y, z);
601 }
602 }
603 }
604
605 IVec4 getMaxImageSize (const VkImageViewType viewType, const IVec4& sizeHint)
606 {
607 // Default to the minimum limits guaranteed by the Vulkan specification for components requested as MAX_SIZE
608 IVec4 size = IVec4(
609 sizeHint.x() != MAX_SIZE ? sizeHint.x() : 4096,
610 sizeHint.y() != MAX_SIZE ? sizeHint.y() : 4096,
611 sizeHint.z() != MAX_SIZE ? sizeHint.z() : 256,
612 sizeHint.w() != MAX_SIZE ? sizeHint.w() : 256);
613
614 switch (viewType)
615 {
616 case VK_IMAGE_VIEW_TYPE_1D:
617 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
618 size.x() = deMin32(4096, size.x());
619 break;
620
621 case VK_IMAGE_VIEW_TYPE_2D:
622 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
623 size.x() = deMin32(4096, size.x());
624 size.y() = deMin32(4096, size.y());
625 break;
626
627 case VK_IMAGE_VIEW_TYPE_3D:
628 size.x() = deMin32(256, size.x());
629 size.y() = deMin32(256, size.y());
630 break;
631
632 case VK_IMAGE_VIEW_TYPE_CUBE:
633 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
634 size.x() = deMin32(4096, size.x());
635 size.y() = deMin32(4096, size.y());
636 size.w() = deMin32(252, size.w());
637 size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES); // round down to a whole number of cubes (multiple of 6 faces)
638 break;
639
640 default:
641 DE_ASSERT(0);
642 return IVec4();
643 }
644
645 return size;
646 }
647
648 //! Get a smaller image size. Returns a vector of zeroes if the size cannot be reduced further.
649 IVec4 getReducedImageSize (const CaseDef& caseDef, IVec4 size)
650 {
651 const int maxIndex = findIndexOfMaxComponent(size);
652 const int reducedSize = size[maxIndex] >> 1;
653
654 switch (caseDef.viewType)
655 {
656 case VK_IMAGE_VIEW_TYPE_CUBE:
657 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
658 if (maxIndex < 2)
659 size.x() = size.y() = reducedSize;
660 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES)
661 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6
662 else
663 size = IVec4(0);
664 break;
665
666 default:
667 size[maxIndex] = reducedSize;
668 break;
669 }
670
671 if (reducedSize == 0)
672 size = IVec4(0);
673
674 return size;
675 }
676
677 //! Query the image memory requirements for the image size under test. Image creation is allowed to
678 //! fail when the required size exceeds the device's limits (e.g. maxResourceSize); in that case this
679 //! returns false.
680 bool getSupportedImageMemoryRequirements(Context& context, const CaseDef& caseDef, const VkFormat format, const IVec4 size, const VkImageUsageFlags usage, VkMemoryRequirements& imageMemoryRequirements)
681 {
682 const DeviceInterface& vk = context.getDeviceInterface();
683 const VkDevice device = context.getDevice();
684 bool imageCreationPossible = true;
685
686 try
687 {
688 Move<VkImage> image = makeImage(
689 vk,
690 device,
691 getImageCreateFlags(caseDef.viewType),
692 getImageType(caseDef.viewType),
693 format,
694 size.swizzle(0, 1, 2),
695 1u,
696 size.w(),
697 usage
698 );
699
700 vk.getImageMemoryRequirements(device, *image, &imageMemoryRequirements);
701 }
702 // vkCreateImage is allowed to return VK_ERROR_OUT_OF_HOST_MEMORY if the image's
703 // memory requirements will exceed maxResourceSize.
704 catch (const vk::OutOfMemoryError& e)
705 {
706 imageCreationPossible = false;
707 }
708
709 return imageCreationPossible;
710 }
711
712 bool isDepthStencilFormatSupported (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat format)
713 {
714 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format);
715 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
716 }
717
718 VkImageAspectFlags getFormatAspectFlags (const VkFormat format)
719 {
720 if (format == VK_FORMAT_UNDEFINED)
721 return 0;
722
723 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order;
724
725 switch (order)
726 {
727 case tcu::TextureFormat::DS: return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
728 case tcu::TextureFormat::D: return VK_IMAGE_ASPECT_DEPTH_BIT;
729 case tcu::TextureFormat::S: return VK_IMAGE_ASPECT_STENCIL_BIT;
730 default: return VK_IMAGE_ASPECT_COLOR_BIT;
731 }
732 }
733
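//! GLSL sources: a pass-through vertex shader and a fragment shader that writes the interpolated
//! vertex color, scaled by REFERENCE_COLOR_VALUE for integer color formats.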
734 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
735 {
736 const int numComponents = getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order);
737 const bool isUint = isUintFormat(caseDef.colorFormat);
738 const bool isSint = isIntFormat(caseDef.colorFormat);
739
740 // Vertex shader
741 {
742 std::ostringstream src;
743 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
744 << "\n"
745 << "layout(location = 0) in vec4 in_position;\n"
746 << "layout(location = 1) in vec4 in_color;\n"
747 << "layout(location = 0) out vec4 out_color;\n"
748 << "\n"
749 << "out gl_PerVertex {\n"
750 << " vec4 gl_Position;\n"
751 << "};\n"
752 << "\n"
753 << "void main(void)\n"
754 << "{\n"
755 << " gl_Position = in_position;\n"
756 << " out_color = in_color;\n"
757 << "}\n";
758
759 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
760 }
761
762 // Fragment shader
763 {
764 std::ostringstream colorValue;
765 colorValue << REFERENCE_COLOR_VALUE;
766 const std::string colorFormat = getColorFormatStr(numComponents, isUint, isSint);
767 const std::string colorInteger = (isUint || isSint ? " * "+colorFormat+"("+colorValue.str()+")" :"");
768
769 std::ostringstream src;
770 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
771 << "\n"
772 << "layout(location = 0) in vec4 in_color;\n"
773 << "layout(location = 0) out " << colorFormat << " o_color;\n"
774 << "\n"
775 << "void main(void)\n"
776 << "{\n"
777 << " o_color = " << colorFormat << "("
778 << (numComponents == 1 ? "in_color.r" :
779 numComponents == 2 ? "in_color.rg" :
780 numComponents == 3 ? "in_color.rgb" : "in_color")
781 << colorInteger
782 << ");\n"
783 << "}\n";
784
785 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
786 }
787 }
788
789 //! See testAttachmentSize() description
790 tcu::TestStatus testWithSizeReduction (Context& context, const CaseDef& caseDef)
791 {
792 const DeviceInterface& vk = context.getDeviceInterface();
793 const InstanceInterface& vki = context.getInstanceInterface();
794 const VkDevice device = context.getDevice();
795 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
796 const VkQueue queue = context.getUniversalQueue();
797 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
798 Allocator& allocator = context.getDefaultAllocator();
799
800 IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
801
802 const VkImageUsageFlags colorImageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
803 const VkImageUsageFlags depthStencilImageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
804 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
805
806 {
807 VkImageFormatProperties colorImageFormatProperties;
808 const auto result = vki.getPhysicalDeviceImageFormatProperties(
809 physDevice,
810 caseDef.colorFormat,
811 getImageType(caseDef.viewType),
812 VK_IMAGE_TILING_OPTIMAL,
813 colorImageUsage,
814 getImageCreateFlags(caseDef.viewType),
815 &colorImageFormatProperties
816 );
817
818 VK_CHECK(result);
819
820 imageSize.x() = std::min(static_cast<deUint32>(imageSize.x()), colorImageFormatProperties.maxExtent.width);
821 imageSize.y() = std::min(static_cast<deUint32>(imageSize.y()), colorImageFormatProperties.maxExtent.height);
822 imageSize.z() = std::min(static_cast<deUint32>(imageSize.z()), colorImageFormatProperties.maxExtent.depth);
823 imageSize.w() = std::min(static_cast<deUint32>(imageSize.w()), colorImageFormatProperties.maxArrayLayers);
824 }
825
826 if (useDepthStencil)
827 {
828 VkImageFormatProperties depthStencilImageFormatProperties;
829 const auto result = vki.getPhysicalDeviceImageFormatProperties(
830 physDevice,
831 caseDef.depthStencilFormat,
832 getImageType(caseDef.viewType),
833 VK_IMAGE_TILING_OPTIMAL,
834 depthStencilImageUsage,
835 getImageCreateFlags(caseDef.viewType),
836 &depthStencilImageFormatProperties
837 );
838
839 VK_CHECK(result);
840
841 imageSize.x() = std::min(static_cast<deUint32>(imageSize.x()), depthStencilImageFormatProperties.maxExtent.width);
842 imageSize.y() = std::min(static_cast<deUint32>(imageSize.y()), depthStencilImageFormatProperties.maxExtent.height);
843 imageSize.z() = std::min(static_cast<deUint32>(imageSize.z()), depthStencilImageFormatProperties.maxExtent.depth);
844 imageSize.w() = std::min(static_cast<deUint32>(imageSize.w()), depthStencilImageFormatProperties.maxArrayLayers);
845 }
846
847 bool allocationPossible = false;
848 while (!allocationPossible)
849 {
850 // Get the image memory requirements
851 VkMemoryRequirements colorImageMemReqs;
852 VkDeviceSize neededMemory = 0;
853 deUint32 memoryTypeNdx = 0;
854
855 if (!getSupportedImageMemoryRequirements(context, caseDef, caseDef.colorFormat, imageSize, colorImageUsage, colorImageMemReqs))
856 {
857 // Try again with reduced image size
858 imageSize = getReducedImageSize(caseDef, imageSize);
859 if (imageSize == IVec4())
860 return tcu::TestStatus::fail("Couldn't create an image with required size");
861 else
862 continue;
863 }
864
865 neededMemory = colorImageMemReqs.size;
866
867 if (useDepthStencil)
868 {
869 VkMemoryRequirements depthStencilImageMemReqs;
870
871 if (!getSupportedImageMemoryRequirements(context, caseDef, caseDef.depthStencilFormat, imageSize, depthStencilImageUsage, depthStencilImageMemReqs))
872 {
873 // Try again with reduced image size
874 imageSize = getReducedImageSize(caseDef, imageSize);
875 if (imageSize == IVec4())
876 return tcu::TestStatus::fail("Couldn't create an image with required size");
877 else
878 continue;
879 }
880
881 neededMemory += depthStencilImageMemReqs.size;
882 }
883
884 // Reserve an additional 15% of device memory, plus 500 KiB for checking the results
885 {
886 const VkDeviceSize reserveForChecking = 500ull * 1024ull;
887 const float additionalMemory = 1.15f;
888 neededMemory = static_cast<VkDeviceSize>(static_cast<float>(neededMemory) * additionalMemory) + reserveForChecking;
889 }
890
891 // Query the available memory in the corresponding memory heap
892 {
893 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
894 // Use the color image memory requirements, assume depth stencil uses the same memory type
895 memoryTypeNdx = selectMatchingMemoryType(memoryProperties, colorImageMemReqs.memoryTypeBits, MemoryRequirement::Any);
896 tcu::PlatformMemoryLimits memoryLimits;
897 context.getTestContext().getPlatform().getMemoryLimits(memoryLimits);
898 VkDeviceSize maxMemory = std::min(
899 memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size,
900 VkDeviceSize(memoryLimits.totalSystemMemory)
901 );
902
903 if (neededMemory > maxMemory)
904 {
905 // Try again with reduced image size
906 imageSize = getReducedImageSize(caseDef, imageSize);
907 if (imageSize == IVec4())
908 return tcu::TestStatus::fail("Couldn't create an image with required size");
909 else
910 continue;
911 }
912 }
913
914 // Attempt a memory allocation
915 {
916 VkDeviceMemory object = 0;
917 const VkMemoryAllocateInfo allocateInfo =
918 {
919 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, //VkStructureType sType;
920 DE_NULL, //const void* pNext;
921 neededMemory, //VkDeviceSize allocationSize;
922 memoryTypeNdx //deUint32 memoryTypeIndex;
923 };
924
925 const VkResult result = vk.allocateMemory(device, &allocateInfo, DE_NULL, &object);
926
927 if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
928 {
929 // Try again with reduced image size
930 imageSize = getReducedImageSize(caseDef, imageSize);
931 if (imageSize == IVec4())
932 return tcu::TestStatus::fail("Couldn't create an image with required size");
933 }
934 else if (VK_SUCCESS != result)
935 {
936 return tcu::TestStatus::fail("Couldn't allocate memory");
937 }
938 else
939 {
940 // Transfer ownership to a Move<VkDeviceMemory> wrapper so the allocation is freed automatically
941 Move<VkDeviceMemory> memoryAllocated (check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device, DE_NULL));
942 allocationPossible = true;
943 }
944 }
945 }
946
947 context.getTestContext().getLog()
948 << tcu::TestLog::Message << "Using an image with size (width, height, depth, layers) = " << imageSize << tcu::TestLog::EndMessage;
949
950 // "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
951 const deInt32 numSlices = maxLayersOrDepth(imageSize);
952
953 // Determine the verification bounds. The checked region will be in the center of the rendered image
954 const IVec4 checkSize = tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE,
955 MAX_VERIFICATION_REGION_SIZE,
956 MAX_VERIFICATION_REGION_DEPTH,
957 MAX_VERIFICATION_REGION_DEPTH));
958 const IVec4 checkOffset = (imageSize - checkSize) / 2;
959
960 // Only make enough space for the check region
961 const VkDeviceSize colorBufferSize = product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
962 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
963 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
964
965 {
966 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
967 flushAlloc(vk, device, *colorBufferAlloc);
968 }
969
970 const ShaderWrapper vertexModule (ShaderWrapper (vk, device, context.getBinaryCollection().get("vert"), 0u));
971 const ShaderWrapper fragmentModule (ShaderWrapper (vk, device, context.getBinaryCollection().get("frag"), 0u));
972 RenderPassWrapper renderPass (makeRenderPass (vk, device, caseDef.pipelineConstructionType, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
973 (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
974 : VK_IMAGE_LAYOUT_UNDEFINED));
975 const PipelineLayoutWrapper pipelineLayout (caseDef.pipelineConstructionType, vk, device);
976 vector<GraphicsPipelineWrapper> pipelines;
977
978 Move<VkImage> colorImage;
979 MovePtr<Allocation> colorImageAlloc;
980 vector<SharedPtrVkImageView> colorAttachments;
981 Move<VkImage> depthStencilImage;
982 MovePtr<Allocation> depthStencilImageAlloc;
983 vector<SharedPtrVkImageView> depthStencilAttachments;
984 vector<VkImage> images;
985 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
986 Move<VkBuffer> vertexBuffer;
987 MovePtr<Allocation> vertexBufferAlloc;
988
989 // Create a color image
990 {
991 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
992 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), colorImageUsage);
993 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
994 }
995
996 // Create a depth/stencil image (always a 2D image, optionally layered)
997 if (useDepthStencil)
998 {
999 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1000 IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, depthStencilImageUsage);
1001 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1002 }
1003
1004 // Create a vertex buffer
1005 {
1006 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1007 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1008
1009 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1010 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1011
1012 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1013 flushAlloc(vk, device, *vertexBufferAlloc);
1014 }
1015
1016 // Prepare color image upfront for rendering to individual slices. 3D slices aren't separate subresources, so they shouldn't be transitioned
1017 // during each subpass like array layers.
1018 if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
1019 {
1020 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1021 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1022
1023 beginCommandBuffer(vk, *cmdBuffer);
1024
1025 const VkImageMemoryBarrier imageBarrier =
1026 {
1027 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1028 DE_NULL, // const void* pNext;
1029 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1030 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1031 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1032 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1033 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1034 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1035 *colorImage, // VkImage image;
1036 { // VkImageSubresourceRange subresourceRange;
1037 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1038 0u, // uint32_t baseMipLevel;
1039 1u, // uint32_t levelCount;
1040 0u, // uint32_t baseArrayLayer;
1041 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1042 }
1043 };
1044
1045 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u,
1046 0u, DE_NULL, 0u, DE_NULL, 1u, &imageBarrier);
1047
1048 endCommandBuffer(vk, *cmdBuffer);
1049 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1050 }
1051
1052 // For each image layer or slice (3D), create an attachment and a pipeline
1053 {
1054 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1055 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1056 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1057 VkPipeline basePipeline = DE_NULL;
1058
1059 // Color attachments are first in the framebuffer
1060 pipelines.reserve(numSlices);
1061 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1062 {
1063 colorAttachments.push_back(makeSharedPtr(
1064 makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
1065 images.push_back(*colorImage);
1066 attachmentHandles.push_back(**colorAttachments.back());
1067
1068 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
1069 // We also have to create pipelines for each subpass
1070 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(), caseDef.pipelineConstructionType, (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
1071 : VK_PIPELINE_CREATE_DERIVATIVE_BIT));
1072 #else
1073 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(), caseDef.pipelineConstructionType, 0u);
1074 #endif // CTS_USES_VULKANSC
1075 preparePipelineWrapper(pipelines.back(), basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule,
1076 imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, static_cast<deUint32>(subpassNdx), useDepth, useStencil);
1077
1078 if (pipelines.front().wasBuild())
1079 basePipeline = pipelines.front().getPipeline();
1080 }
1081
1082 // Then D/S attachments, if any
1083 if (useDepthStencil)
1084 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1085 {
1086 depthStencilAttachments.push_back(makeSharedPtr(
1087 makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat, makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
1088 images.push_back(*depthStencilImage);
1089 attachmentHandles.push_back(**depthStencilAttachments.back());
1090 }
1091 }
1092
1093 renderPass.createFramebuffer(vk, device, static_cast<deUint32>(attachmentHandles.size()), &images[0], &attachmentHandles[0], static_cast<deUint32>(imageSize.x()), static_cast<deUint32>(imageSize.y()));
1094
1095 {
1096 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1097 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1098
1099 beginCommandBuffer(vk, *cmdBuffer);
1100 {
1101 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1102
1103 if (useDepthStencil)
1104 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1105
1106 const VkDeviceSize vertexBufferOffset = 0ull;
1107
1108 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, imageSize.x(), imageSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1109 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1110 }
1111
1112 // Draw
1113 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1114 {
1115 if (subpassNdx != 0)
1116 renderPass.nextSubpass(vk, *cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1117
1118 pipelines[subpassNdx].bind(*cmdBuffer);
1119 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1120 }
1121
1122 renderPass.end(vk, *cmdBuffer);
1123
1124 // Copy colorImage -> host visible colorBuffer
1125 {
1126 const VkImageMemoryBarrier imageBarriers[] =
1127 {
1128 {
1129 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1130 DE_NULL, // const void* pNext;
1131 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1132 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1133 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1134 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1135 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1136 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1137 *colorImage, // VkImage image;
1138 makeColorSubresourceRange(0, imageSize.w()) // VkImageSubresourceRange subresourceRange;
1139 }
1140 };
1141
1142 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1143 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1144
1145 // Copy the checked region rather than the whole image
1146 const VkImageSubresourceLayers subresource =
1147 {
1148 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1149 0u, // uint32_t mipLevel;
1150 static_cast<deUint32>(checkOffset.w()), // uint32_t baseArrayLayer;
1151 static_cast<deUint32>(checkSize.w()), // uint32_t layerCount;
1152 };
1153
1154 const VkBufferImageCopy region =
1155 {
1156 0ull, // VkDeviceSize bufferOffset;
1157 0u, // uint32_t bufferRowLength;
1158 0u, // uint32_t bufferImageHeight;
1159 subresource, // VkImageSubresourceLayers imageSubresource;
1160 makeOffset3D(checkOffset.x(), checkOffset.y(), checkOffset.z()), // VkOffset3D imageOffset;
1161 makeExtent3D(checkSize.swizzle(0, 1, 2)), // VkExtent3D imageExtent;
1162 };
1163
1164 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, &region);
1165
1166 const VkBufferMemoryBarrier bufferBarriers[] =
1167 {
1168 {
1169 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1170 DE_NULL, // const void* pNext;
1171 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1172 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1173 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1174 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1175 *colorBuffer, // VkBuffer buffer;
1176 0ull, // VkDeviceSize offset;
1177 VK_WHOLE_SIZE, // VkDeviceSize size;
1178 },
1179 };
1180
1181 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1182 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1183 }
1184
1185 endCommandBuffer(vk, *cmdBuffer);
1186 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1187 }
1188
1189 // Verify results
1190 {
1191 invalidateAlloc(vk, device, *colorBufferAlloc);
1192
1193 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1194 const int checkDepth = maxLayersOrDepth(checkSize);
1195 const int depthOffset = maxLayersOrDepth(checkOffset);
1196 const tcu::ConstPixelBufferAccess resultImage (format, checkSize.x(), checkSize.y(), checkDepth, colorBufferAlloc->getHostPtr());
1197 tcu::TextureLevel textureLevel (format, checkSize.x(), checkSize.y(), checkDepth);
1198 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1199 bool ok = false;
1200
1201 generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);
1202
1203 if (isFloatFormat(caseDef.colorFormat))
1204 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1205 else
1206 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1207
1208 return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1209 }
1210 }
1211
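//! Check the device features/extensions implied by the image view type (portability subset and
//! maintenance1 for rendering to 3D slices, imageCubeArray for cube array views).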
1212 void checkImageViewTypeRequirements (Context& context, const VkImageViewType viewType)
1213 {
1214 #ifndef CTS_USES_VULKANSC
1215 if (viewType == VK_IMAGE_VIEW_TYPE_3D)
1216 {
1217 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1218 !context.getPortabilitySubsetFeatures().imageView2DOn3DImage)
1219 {
1220 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Implementation does not support 2D or 2D array image view to be created on a 3D VkImage");
1221 }
1222
1223 context.requireDeviceFunctionality("VK_KHR_maintenance1");
1224 }
1225 #endif // CTS_USES_VULKANSC
1226
1227 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1228 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1229 }
1230
1231 void checkSupportAttachmentSize (Context& context, const CaseDef caseDef)
1232 {
1233 const InstanceInterface& vki = context.getInstanceInterface();
1234 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1235
1236 checkImageViewTypeRequirements(context, caseDef.viewType);
1237
1238 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1239 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1240
1241 {
1242 VkImageFormatProperties colorImageFormatProperties;
1243 const auto result = vki.getPhysicalDeviceImageFormatProperties(
1244 physDevice,
1245 caseDef.colorFormat,
1246 getImageType(caseDef.viewType),
1247 VK_IMAGE_TILING_OPTIMAL,
1248 (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT),
1249 getImageCreateFlags(caseDef.viewType),
1250 &colorImageFormatProperties
1251 );
1252
1253 if (result != VK_SUCCESS)
1254 {
1255 TCU_THROW(NotSupportedError, "Unsupported color attachment format");
1256 }
1257 }
1258
1259 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED)
1260 {
1261
1262 VkImageFormatProperties depthStencilImageFormatProperties;
1263 const auto result = vki.getPhysicalDeviceImageFormatProperties(
1264 physDevice,
1265 caseDef.depthStencilFormat,
1266 getImageType(caseDef.viewType),
1267 VK_IMAGE_TILING_OPTIMAL,
1268 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
1269 getImageCreateFlags(caseDef.viewType),
1270 &depthStencilImageFormatProperties
1271 );
1272
1273 if (result != VK_SUCCESS)
1274 {
1275 TCU_THROW(NotSupportedError, "Unsupported depth/stencil attachment format");
1276 }
1277 }
1278
1279 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.pipelineConstructionType);
1280 }
1281
1282 //! A test that can exercise very big color and depth/stencil attachment sizes.
1283 //! If the total memory consumed by the images is too large, or if the implementation returns an OUT_OF_MEMORY error somewhere,
1284 //! the image size is reduced and the test is retried with smaller attachments.
1285 tcu::TestStatus testAttachmentSize (Context& context, const CaseDef caseDef)
1286 {
1287 return testWithSizeReduction(context, caseDef);
1288 // Never reached
1289 }
1290
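//! Compute the sizes of the full mip chain, halving width/height/depth down to 1 while keeping the layer count.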
1291 vector<IVec4> getMipLevelSizes (IVec4 baseSize)
1292 {
1293 vector<IVec4> levels;
1294 levels.push_back(baseSize);
1295
1296 while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
1297 {
1298 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
1299 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
1300 baseSize.z() = deMax32(baseSize.z() >> 1, 1);
1301 levels.push_back(baseSize);
1302 }
1303
1304 return levels;
1305 }
1306
1307 //! Compute memory consumed by each mip level, including all layers. Sizes include a padding for alignment.
1308 vector<VkDeviceSize> getPerMipLevelStorageSize (const vector<IVec4>& mipLevelSizes, const VkDeviceSize pixelSize)
1309 {
1310 const deInt64 levelAlignment = 16;
1311 vector<VkDeviceSize> storageSizes;
1312
1313 for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
1314 storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));
1315
1316 return storageSizes;
1317 }
1318
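//! Render the half-viewport quads into every layer/slice of a single mip level, one subpass per slice.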
1319 void drawToMipLevel (const Context& context,
1320 const CaseDef& caseDef,
1321 const int mipLevel,
1322 const IVec4& mipSize,
1323 const int numSlices,
1324 const VkImage colorImage,
1325 const VkImage depthStencilImage,
1326 const VkBuffer vertexBuffer,
1327 const PipelineLayoutWrapper& pipelineLayout,
1328 const ShaderWrapper vertexModule,
1329 const ShaderWrapper fragmentModule)
1330 {
1331 const InstanceInterface& vki = context.getInstanceInterface();
1332 const DeviceInterface& vk = context.getDeviceInterface();
1333 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1334 const VkDevice device = context.getDevice();
1335 const VkQueue queue = context.getUniversalQueue();
1336 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1337 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1338 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1339 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1340 RenderPassWrapper renderPass (makeRenderPass(vk, device, caseDef.pipelineConstructionType, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
1341 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1342 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
1343 vector<GraphicsPipelineWrapper> pipelines;
1344 vector<SharedPtrVkImageView> colorAttachments;
1345 vector<SharedPtrVkImageView> depthStencilAttachments;
1346 vector<VkImage> images;
1347 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
1348
1349 // For each image layer or slice (3D), create an attachment and a pipeline
1350 {
1351 VkPipeline basePipeline = DE_NULL;
1352
1353 // Color attachments are first in the framebuffer
1354 pipelines.reserve(numSlices);
1355 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1356 {
1357 colorAttachments.push_back(makeSharedPtr(makeImageView(
1358 vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
1359 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
1360 images.push_back(colorImage);
1361 attachmentHandles.push_back(**colorAttachments.back());
1362
1363 // We also have to create pipelines for each subpass
1364 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
1365 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(), caseDef.pipelineConstructionType, (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
1366 : VK_PIPELINE_CREATE_DERIVATIVE_BIT));
1367 #else
1368 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(), caseDef.pipelineConstructionType, 0u);
1369 #endif // CTS_USES_VULKANSC
1370 preparePipelineWrapper(pipelines.back(), basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule,
1371 mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, static_cast<deUint32>(subpassNdx), useDepth, useStencil);
1372
1373 if (pipelines.front().wasBuild())
1374 basePipeline = pipelines.front().getPipeline();
1375 }
1376
1377 // Then D/S attachments, if any
1378 if (useDepth || useStencil)
1379 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1380 {
1381 depthStencilAttachments.push_back(makeSharedPtr(makeImageView(
1382 vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
1383 makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
1384 images.push_back(depthStencilImage);
1385 attachmentHandles.push_back(**depthStencilAttachments.back());
1386 }
1387 }
1388
1389 renderPass.createFramebuffer(vk, device, static_cast<deUint32>(attachmentHandles.size()), &images[0], &attachmentHandles[0],
1390 static_cast<deUint32>(mipSize.x()), static_cast<deUint32>(mipSize.y()));
1391
1392 {
1393 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1394 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1395
1396 beginCommandBuffer(vk, *cmdBuffer);
1397 {
1398 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1399
1400 if (useDepth || useStencil)
1401 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1402
1403 const VkDeviceSize vertexBufferOffset = 0ull;
1404
1405 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, mipSize.x(), mipSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1406 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1407 }
1408
1409 // Draw
1410 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1411 {
1412 if (subpassNdx != 0)
1413 renderPass.nextSubpass(vk, *cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1414
1415 pipelines[subpassNdx].bind(*cmdBuffer);
1416 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1417 }
1418
1419 renderPass.end(vk, *cmdBuffer);
1420
1421 endCommandBuffer(vk, *cmdBuffer);
1422 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1423 }
1424 }
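// For a mip level rendered with N slices, the framebuffer built above contains attachments 0..N-1 as views of
// colorImage at that mip level (one array layer or 3D slice each), followed by N views of depthStencilImage when
// a depth/stencil format is used. The render pass created by makeRenderPass (defined earlier in this file) runs
// one subpass per slice, matching the per-subpass pipelines and the nextSubpass loop above.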

void checkSupportRenderToMipMaps (Context& context, const CaseDef caseDef)
{
	checkImageViewTypeRequirements(context, caseDef.viewType);

	if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
		context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");

	if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED && !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.depthStencilFormat))
		TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");

	checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.pipelineConstructionType);
}

//! Use image mip levels as attachments
tcu::TestStatus testRenderToMipMaps (Context& context, const CaseDef caseDef)
{
	const DeviceInterface&		vk					= context.getDeviceInterface();
	const InstanceInterface&	vki					= context.getInstanceInterface();
	const VkDevice				device				= context.getDevice();
	const VkPhysicalDevice		physDevice			= context.getPhysicalDevice();
	const VkQueue				queue				= context.getUniversalQueue();
	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	Allocator&					allocator			= context.getDefaultAllocator();

	const IVec4					imageSize				= caseDef.imageSizeHint;	// MAX_SIZE is not used in this test
	const deInt32				numSlices				= maxLayersOrDepth(imageSize);
	const vector<IVec4>			mipLevelSizes			= getMipLevelSizes(imageSize);
	const vector<VkDeviceSize>	mipLevelStorageSizes	= getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat)));
	const int					numMipLevels			= static_cast<int>(mipLevelSizes.size());
	const bool					useDepthStencil			= (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);

	// Create a color buffer big enough to hold all layers and mip levels
	const VkDeviceSize			colorBufferSize		= sum(mipLevelStorageSizes);
	const Unique<VkBuffer>		colorBuffer			(makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
	const UniquePtr<Allocation>	colorBufferAlloc	(bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));

	{
		deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
		flushAlloc(vk, device, *colorBufferAlloc);
	}

	const ShaderWrapper			vertexModule	(ShaderWrapper(vk, device, context.getBinaryCollection().get("vert"), 0u));
	const ShaderWrapper			fragmentModule	(ShaderWrapper(vk, device, context.getBinaryCollection().get("frag"), 0u));
	const PipelineLayoutWrapper	pipelineLayout	(caseDef.pipelineConstructionType, vk, device);

	Move<VkImage>			colorImage;
	MovePtr<Allocation>		colorImageAlloc;
	Move<VkImage>			depthStencilImage;
	MovePtr<Allocation>		depthStencilImageAlloc;
	Move<VkBuffer>			vertexBuffer;
	MovePtr<Allocation>		vertexBufferAlloc;

	// Create a color image
	{
		const VkImageUsageFlags	imageUsage	= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

		colorImage		= makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
									imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage);
		colorImageAlloc	= bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
	}

	// Create a depth/stencil image (always a 2D image, optionally layered)
	if (useDepthStencil)
	{
		const VkImageUsageFlags	imageUsage	= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

		depthStencilImage		= makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
											IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage);
		depthStencilImageAlloc	= bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
	}

	// Create a vertex buffer
	{
		const vector<Vertex4RGBA>	vertices			= genFullQuadVertices(numSlices);
		const VkDeviceSize			vertexBufferSize	= sizeInBytes(vertices);

		vertexBuffer		= makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
		vertexBufferAlloc	= bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);

		deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
		flushAlloc(vk, device, *vertexBufferAlloc);
	}

	// Prepare images
	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);

		const VkImageMemoryBarrier imageBarriers[] =
		{
			{
				VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType            sType;
				DE_NULL,											// const void*                pNext;
				(VkAccessFlags)0,									// VkAccessFlags              srcAccessMask;
				VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,				// VkAccessFlags              dstAccessMask;
				VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout              oldLayout;
				VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,			// VkImageLayout              newLayout;
				VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   srcQueueFamilyIndex;
				VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   dstQueueFamilyIndex;
				*colorImage,										// VkImage                    image;
				{													// VkImageSubresourceRange    subresourceRange;
					VK_IMAGE_ASPECT_COLOR_BIT,							// VkImageAspectFlags    aspectMask;
					0u,													// uint32_t              baseMipLevel;
					static_cast<deUint32>(numMipLevels),				// uint32_t              levelCount;
					0u,													// uint32_t              baseArrayLayer;
					static_cast<deUint32>(imageSize.w()),				// uint32_t              layerCount;
				},
			},
			{
				VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType            sType;
				DE_NULL,											// const void*                pNext;
				(VkAccessFlags)0,									// VkAccessFlags              srcAccessMask;
				VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,		// VkAccessFlags              dstAccessMask;
				VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout              oldLayout;
				VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,	// VkImageLayout              newLayout;
				VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   srcQueueFamilyIndex;
				VK_QUEUE_FAMILY_IGNORED,							// uint32_t                   dstQueueFamilyIndex;
				*depthStencilImage,									// VkImage                    image;
				{													// VkImageSubresourceRange    subresourceRange;
					getFormatAspectFlags(caseDef.depthStencilFormat),	// VkImageAspectFlags    aspectMask;
					0u,													// uint32_t              baseMipLevel;
					static_cast<deUint32>(numMipLevels),				// uint32_t              levelCount;
					0u,													// uint32_t              baseArrayLayer;
					static_cast<deUint32>(numSlices),					// uint32_t              layerCount;
				},
			}
		};

		const deUint32 numImageBarriers = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1));

		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 0u,
							  0u, DE_NULL, 0u, DE_NULL, numImageBarriers, imageBarriers);

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}

	// Draw
	for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
	{
		const IVec4&	mipSize		= mipLevelSizes[mipLevel];
		const int		levelSlices	= maxLayersOrDepth(mipSize);

		drawToMipLevel(context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer, pipelineLayout,
					   vertexModule, fragmentModule);
	}

	// Copy results: colorImage -> host visible colorBuffer
	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);

		{
			const VkImageMemoryBarrier imageBarriers[] =
			{
				{
					VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType            sType;
					DE_NULL,									// const void*                pNext;
					VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// VkAccessFlags              srcAccessMask;
					VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags              dstAccessMask;
					VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout              oldLayout;
					VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// VkImageLayout              newLayout;
					VK_QUEUE_FAMILY_IGNORED,					// uint32_t                   srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,					// uint32_t                   dstQueueFamilyIndex;
					*colorImage,								// VkImage                    image;
					{											// VkImageSubresourceRange    subresourceRange;
						VK_IMAGE_ASPECT_COLOR_BIT,					// VkImageAspectFlags    aspectMask;
						0u,											// uint32_t              baseMipLevel;
						static_cast<deUint32>(numMipLevels),		// uint32_t              levelCount;
						0u,											// uint32_t              baseArrayLayer;
						static_cast<deUint32>(imageSize.w()),		// uint32_t              layerCount;
					},
				}
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
								  0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
		}
		{
			vector<VkBufferImageCopy>	regions;
			VkDeviceSize				levelOffset	= 0ull;
			VkBufferImageCopy			workRegion	=
			{
				0ull,																			// VkDeviceSize                bufferOffset;
				0u,																				// uint32_t                    bufferRowLength;
				0u,																				// uint32_t                    bufferImageHeight;
				makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, imageSize.w()),	// VkImageSubresourceLayers    imageSubresource;
				makeOffset3D(0, 0, 0),															// VkOffset3D                  imageOffset;
				makeExtent3D(0, 0, 0),															// VkExtent3D                  imageExtent;
			};

			for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
			{
				workRegion.bufferOffset					= levelOffset;
				workRegion.imageSubresource.mipLevel	= static_cast<deUint32>(mipLevel);
				workRegion.imageExtent					= makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 1, 2));

				regions.push_back(workRegion);

				levelOffset += mipLevelStorageSizes[mipLevel];
			}

			vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, static_cast<deUint32>(regions.size()), &regions[0]);
		}
		{
			const VkBufferMemoryBarrier bufferBarriers[] =
			{
				{
					VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
					DE_NULL,									// const void*        pNext;
					VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
					VK_ACCESS_HOST_READ_BIT,					// VkAccessFlags      dstAccessMask;
					VK_QUEUE_FAMILY_IGNORED,					// uint32_t           srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,					// uint32_t           dstQueueFamilyIndex;
					*colorBuffer,								// VkBuffer           buffer;
					0ull,										// VkDeviceSize       offset;
					VK_WHOLE_SIZE,								// VkDeviceSize       size;
				},
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
								  0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
		}

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}

	// Verify results (per mip level)
	{
		invalidateAlloc(vk, device, *colorBufferAlloc);

		const tcu::TextureFormat	format		= mapVkFormat(caseDef.colorFormat);

		VkDeviceSize	levelOffset	= 0ull;
		bool			allOk		= true;

		for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
		{
			const IVec4&						mipSize			= mipLevelSizes[mipLevel];
			const void* const					pLevelData		= static_cast<const deUint8*>(colorBufferAlloc->getHostPtr()) + levelOffset;
			const int							levelDepth		= maxLayersOrDepth(mipSize);
			const tcu::ConstPixelBufferAccess	resultImage		(format, mipSize.x(), mipSize.y(), levelDepth, pLevelData);
			tcu::TextureLevel					textureLevel	(format, mipSize.x(), mipSize.y(), levelDepth);
			const tcu::PixelBufferAccess		expectedImage	= textureLevel.getAccess();
			const std::string					comparisonName	= "Mip level " + de::toString(mipLevel);
			bool								ok				= false;

			generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0);

			if (isFloatFormat(caseDef.colorFormat))
				ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
			else
				ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);

			allOk		 = allOk && ok;	// keep testing all levels, even if we know it's a fail overall
			levelOffset	+= mipLevelStorageSizes[mipLevel];
		}

		return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
	}
}

std::string getSizeDescription (const IVec4& size)
{
	std::ostringstream str;

	const char* const description[4] =
	{
		"width", "height", "depth", "layers"
	};

	int numMaxComponents = 0;

	for (int i = 0; i < 4; ++i)
	{
		if (size[i] == MAX_SIZE)
		{
			if (numMaxComponents > 0)
				str << "_";

			str << description[i];
			++numMaxComponents;
		}
	}

	if (numMaxComponents == 0)
		str << "small";

	return str.str();
}
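// For example, getSizeDescription(IVec4(MAX_SIZE, MAX_SIZE, 1, 4)) returns "width_height", while a size with no
// MAX_SIZE components returns "small". The per-size sub-groups under the "huge" group below are named this way.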

inline std::string getFormatString (const VkFormat format)
{
	std::string name(getFormatName(format));
	return de::toLower(name.substr(10));
}
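// substr(10) drops the "VK_FORMAT_" prefix, e.g. VK_FORMAT_R8G8B8A8_UNORM -> "r8g8b8a8_unorm".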

std::string getFormatString (const VkFormat colorFormat, const VkFormat depthStencilFormat)
{
	std::ostringstream str;
	str << getFormatString(colorFormat);
	if (depthStencilFormat != VK_FORMAT_UNDEFINED)
		str << "_" << getFormatString(depthStencilFormat);
	return str.str();
}

std::string getShortImageViewTypeName (const VkImageViewType imageViewType)
{
	std::string s(getImageViewTypeName(imageViewType));
	return de::toLower(s.substr(19));
}
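// Similarly, substr(19) drops the "VK_IMAGE_VIEW_TYPE_" prefix, e.g. VK_IMAGE_VIEW_TYPE_CUBE_ARRAY -> "cube_array".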

inline BVec4 bvecFromMask (deUint32 mask)
{
	return BVec4((mask >> 0) & 1,
				 (mask >> 1) & 1,
				 (mask >> 2) & 1,
				 (mask >> 3) & 1);
}
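// The mask bits map to (width, height, depth, layers), so e.g. bvecFromMask(MASK_WH_LAYERS) == BVec4(true, true, false, true).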

vector<IVec4> genSizeCombinations (const IVec4& baselineSize, const deUint32 sizeMask, const VkImageViewType imageViewType)
{
	vector<IVec4>		sizes;
	std::set<deUint32>	masks;

	for (deUint32 i = 0; i < (1u << 4); ++i)
	{
		// Cube images have square faces
		if (isCube(imageViewType) && ((i & MASK_WH) != 0))
			i |= MASK_WH;

		masks.insert(i & sizeMask);
	}

	for (std::set<deUint32>::const_iterator it = masks.begin(); it != masks.end(); ++it)
		sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it)));

	return sizes;
}
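// For a non-cube 2D view with sizeMask == MASK_WH this yields, in order: the baseline size first (mask 0), then
// (MAX_SIZE, h, ...), (w, MAX_SIZE, ...) and (MAX_SIZE, MAX_SIZE, ...). Components whose mask bit is set are
// replaced with the symbolic MAX_SIZE, which is resolved to the device's maximum at test run time.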

void addTestCasesWithFunctions (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType, AllocationKind allocationKind)
{
	const struct
	{
		VkImageViewType	viewType;
		IVec4			baselineSize;	//!< image size: (dimX, dimY, dimZ, arraySize)
		deUint32		sizeMask;		//!< if a dimension is masked, generate a huge size case for it
	} testCase[] =
	{
		{ VK_IMAGE_VIEW_TYPE_1D,			IVec4(54,  1, 1,   1),	MASK_W			},
		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		IVec4(54,  1, 1,   4),	MASK_W_LAYERS	},
		{ VK_IMAGE_VIEW_TYPE_2D,			IVec4(44, 23, 1,   1),	MASK_WH			},
		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		IVec4(44, 23, 1,   4),	MASK_WH_LAYERS	},
		{ VK_IMAGE_VIEW_TYPE_3D,			IVec4(22, 31, 7,   1),	MASK_WHD		},
		{ VK_IMAGE_VIEW_TYPE_CUBE,			IVec4(35, 35, 1,   6),	MASK_WH			},
		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	IVec4(35, 35, 1, 2*6),	MASK_WH_LAYERS	},
	};

	const VkFormat format[] =
	{
		VK_FORMAT_R8G8B8A8_UNORM,
		VK_FORMAT_R32_UINT,
		VK_FORMAT_R16G16_SINT,
		VK_FORMAT_R32G32B32A32_SFLOAT,
		VK_FORMAT_A1R5G5B5_UNORM_PACK16,
		VK_FORMAT_R5G6B5_UNORM_PACK16,
		VK_FORMAT_A2B10G10R10_UINT_PACK32,
		VK_FORMAT_A2B10G10R10_UNORM_PACK32
	};

	const VkFormat depthStencilFormat[] =
	{
		VK_FORMAT_UNDEFINED,			// don't use a depth/stencil attachment
		VK_FORMAT_D16_UNORM,
		VK_FORMAT_S8_UINT,
		VK_FORMAT_D24_UNORM_S8_UINT,	// one of the following mixed formats must be supported
		VK_FORMAT_D32_SFLOAT_S8_UINT,
	};

	for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx)
	{
		MovePtr<tcu::TestCaseGroup> imageGroup(new tcu::TestCaseGroup(group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str()));

		// Generate attachment size cases
		{
			vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask, testCase[caseNdx].viewType);

#ifdef CTS_USES_VULKANSC
			// filter out sizes in which both width and height are equal to the maximum value
			sizes.erase(std::remove_if(begin(sizes), end(sizes), [&](const IVec4& v) { return v.x() == MAX_SIZE && v.y() == MAX_SIZE; }), end(sizes));
#endif // CTS_USES_VULKANSC

			MovePtr<tcu::TestCaseGroup> smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small"));
			MovePtr<tcu::TestCaseGroup> hugeGroup (new tcu::TestCaseGroup(group->getTestContext(), "huge"));

			imageGroup->addChild(smallGroup.get());
			imageGroup->addChild(hugeGroup.get());

			for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter)
			{
				// The first size is the baseline size, put it in a dedicated group
				if (sizeIter == sizes.begin())
				{
					for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
					for (int formatNdx   = 0; formatNdx   < DE_LENGTH_OF_ARRAY(format);             ++formatNdx)
					{
						const CaseDef caseDef
						{
							pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
							testCase[caseNdx].viewType,			// VkImageViewType			viewType;
							*sizeIter,							// IVec4					imageSizeHint;
							format[formatNdx],					// VkFormat					colorFormat;
							depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
							allocationKind						// AllocationKind			allocationKind;
						};
						addFunctionCaseWithPrograms(smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
					}
				}
				else // All huge cases go into a separate group
				{
					if (allocationKind != ALLOCATION_KIND_DEDICATED)
					{
						MovePtr<tcu::TestCaseGroup>	sizeGroup	(new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str()));
						const VkFormat				colorFormat	= VK_FORMAT_R8G8B8A8_UNORM;

						// Use the same color format for all cases, to reduce the number of permutations
						for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
						{
							const CaseDef caseDef
							{
								pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
								testCase[caseNdx].viewType,			// VkImageViewType			viewType;
								*sizeIter,							// IVec4					imageSizeHint;
								colorFormat,						// VkFormat					colorFormat;
								depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
								allocationKind						// AllocationKind			allocationKind;
							};
							addFunctionCaseWithPrograms(sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]), checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
						}
						hugeGroup->addChild(sizeGroup.release());
					}
				}
			}
			smallGroup.release();
			hugeGroup.release();
		}

		// Generate mip map cases
		{
			MovePtr<tcu::TestCaseGroup> mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap"));

			for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
			for (int formatNdx   = 0; formatNdx   < DE_LENGTH_OF_ARRAY(format);             ++formatNdx)
			{
				const CaseDef caseDef
				{
					pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
					testCase[caseNdx].viewType,			// VkImageViewType			viewType;
					testCase[caseNdx].baselineSize,		// IVec4					imageSizeHint;
					format[formatNdx],					// VkFormat					colorFormat;
					depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
					allocationKind						// AllocationKind			allocationKind;
				};
				addFunctionCaseWithPrograms(mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), checkSupportRenderToMipMaps, initPrograms, testRenderToMipMaps, caseDef);
			}
			imageGroup->addChild(mipmapGroup.release());
		}

		group->addChild(imageGroup.release());
	}
}

void addCoreRenderToImageTests (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType)
{
	addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_SUBALLOCATED);
}

void addDedicatedAllocationRenderToImageTests (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType)
{
	addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_DEDICATED);
}

} // anonymous ns

tcu::TestCaseGroup* createRenderToImageTests (tcu::TestContext& testCtx, PipelineConstructionType pipelineConstructionType)
{
	de::MovePtr<tcu::TestCaseGroup> renderToImageTests (new tcu::TestCaseGroup(testCtx, "render_to_image"));

	// Core render to image tests
	renderToImageTests->addChild(createTestGroup(testCtx, "core",					addCoreRenderToImageTests,					pipelineConstructionType));
	// Render to image tests for dedicated memory allocation
	renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation",	addDedicatedAllocationRenderToImageTests,	pipelineConstructionType));

	return renderToImageTests.release();
}

} // pipeline
} // vkt