1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2023 LunarG, Inc.
7 * Copyright (c) 2023 Nintendo
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file vktPipelineRenderToImageTests.cpp
23 * \brief Render to image tests
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktPipelineRenderToImageTests.hpp"
27 #include "vktPipelineMakeUtil.hpp"
28 #include "vktTestCase.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktPipelineVertexUtil.hpp"
31 #include "vktTestGroupUtil.hpp"
32 #include "vkObjUtil.hpp"
33
34 #include "vkMemUtil.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkTypeUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkPrograms.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkCmdUtil.hpp"
42
43 #include "tcuTextureUtil.hpp"
44 #include "tcuImageCompare.hpp"
45 #include "tcuTestLog.hpp"
46 #include "tcuPlatform.hpp"
47 #include "vkPlatform.hpp"
48
49 #include "deUniquePtr.hpp"
50 #include "deSharedPtr.hpp"
51
52 #include <string>
53 #include <vector>
54 #include <set>
55 #include <algorithm>
56
57 namespace vkt
58 {
59 namespace pipeline
60 {
61 namespace
62 {
63 using namespace vk;
64 using de::MovePtr;
65 using de::SharedPtr;
66 using de::UniquePtr;
67 using std::vector;
68 using tcu::BVec4;
69 using tcu::IVec2;
70 using tcu::IVec3;
71 using tcu::IVec4;
72 using tcu::UVec4;
73 using tcu::Vec4;
74
75 typedef SharedPtr<Unique<VkImageView>> SharedPtrVkImageView;
76
77 enum Constants
78 {
79 NUM_CUBE_FACES = 6,
80 REFERENCE_COLOR_VALUE = 125,
81 REFERENCE_STENCIL_VALUE = 42,
82 MAX_SIZE = -1, //!< Should be queried at runtime and replaced with max possible value
83 MAX_VERIFICATION_REGION_SIZE = 32, //!< Limit the checked area to a small size, especially for huge images
84 MAX_VERIFICATION_REGION_DEPTH = 8,
85
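// Bit flags for the (width, height, depth, layers) size components: width = 1, height = 2, depth = 4, layers = 8.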
86 MASK_W = (1 | 0 | 0 | 0),
87 MASK_W_LAYERS = (1 | 0 | 0 | 8),
88 MASK_WH = (1 | 2 | 0 | 0),
89 MASK_WH_LAYERS = (1 | 2 | 0 | 8),
90 MASK_WHD = (1 | 2 | 4 | 0),
91 };
92
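//! Whether resource memory is suballocated from a larger allocation or uses a dedicated allocation.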
93 enum AllocationKind
94 {
95 ALLOCATION_KIND_SUBALLOCATED = 0,
96 ALLOCATION_KIND_DEDICATED,
97 };
98
99 static const float REFERENCE_DEPTH_VALUE = 1.0f;
100 static const Vec4 COLOR_TABLE[] = {
101 Vec4(0.9f, 0.0f, 0.0f, 1.0f), Vec4(0.6f, 1.0f, 0.0f, 1.0f), Vec4(0.3f, 0.0f, 1.0f, 1.0f),
102 Vec4(0.1f, 1.0f, 1.0f, 1.0f), Vec4(0.8f, 1.0f, 0.0f, 1.0f), Vec4(0.5f, 0.0f, 1.0f, 1.0f),
103 Vec4(0.2f, 0.0f, 0.0f, 1.0f), Vec4(1.0f, 1.0f, 0.0f, 1.0f),
104 };
105
106 struct CaseDef
107 {
108 PipelineConstructionType pipelineConstructionType;
109 VkImageViewType viewType;
110 IVec4 imageSizeHint; //!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE
111 VkFormat colorFormat;
112 VkFormat depthStencilFormat; //!< A depth/stencil format, or UNDEFINED if not used
113 AllocationKind allocationKind;
114 };
115
116 template <typename T>
117 inline SharedPtr<Unique<T>> makeSharedPtr(Move<T> move)
118 {
119 return SharedPtr<Unique<T>>(new Unique<T>(move));
120 }
121
122 template <typename T>
123 inline VkDeviceSize sizeInBytes(const vector<T> &vec)
124 {
125 return vec.size() * sizeof(vec[0]);
126 }
127
128 inline bool isCube(const VkImageViewType viewType)
129 {
130 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
131 }
132
133 inline VkDeviceSize product(const IVec4 &v)
134 {
135 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w();
136 }
137
138 template <typename T>
139 inline T sum(const vector<T> &v)
140 {
141 T total = static_cast<T>(0);
142 for (typename vector<T>::const_iterator it = v.begin(); it != v.end(); ++it)
143 total += *it;
144 return total;
145 }
146
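//! Return the index of the largest component of the vector.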
147 template <typename T, int Size>
148 int findIndexOfMaxComponent(const tcu::Vector<T, Size> &vec)
149 {
150 int index = 0;
151 T value = vec[0];
152
153 for (int i = 1; i < Size; ++i)
154 {
155 if (vec[i] > value)
156 {
157 index = i;
158 value = vec[i];
159 }
160 }
161
162 return index;
163 }
164
165 inline int maxLayersOrDepth(const IVec4 &size)
166 {
167 // This is safe because 3D images must have layers (w) = 1
168 return deMax32(size.z(), size.w());
169 }
170
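//! Bind memory to the buffer, either suballocated from the given allocator or as a dedicated allocation.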
171 de::MovePtr<Allocation> bindBuffer(const InstanceInterface &vki, const DeviceInterface &vkd,
172 const VkPhysicalDevice &physDevice, const VkDevice device, const VkBuffer &buffer,
173 const MemoryRequirement requirement, Allocator &allocator,
174 AllocationKind allocationKind)
175 {
176 switch (allocationKind)
177 {
178 case ALLOCATION_KIND_SUBALLOCATED:
179 {
180 return vk::bindBuffer(vkd, device, allocator, buffer, requirement);
181 }
182
183 case ALLOCATION_KIND_DEDICATED:
184 {
185 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, requirement);
186 }
187
188 default:
189 {
190 TCU_THROW(InternalError, "Invalid allocation kind");
191 }
192 }
193 }
194
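//! Bind memory to the image, either suballocated from the given allocator or as a dedicated allocation.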
195 de::MovePtr<Allocation> bindImage(const InstanceInterface &vki, const DeviceInterface &vkd,
196 const VkPhysicalDevice &physDevice, const VkDevice device, const VkImage &image,
197 const MemoryRequirement requirement, Allocator &allocator,
198 AllocationKind allocationKind)
199 {
200 switch (allocationKind)
201 {
202 case ALLOCATION_KIND_SUBALLOCATED:
203 {
204 return vk::bindImage(vkd, device, allocator, image, requirement);
205 }
206
207 case ALLOCATION_KIND_DEDICATED:
208 {
209 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement);
210 }
211
212 default:
213 {
214 TCU_THROW(InternalError, "Invalid allocation kind");
215 }
216 }
217 }
218
219 // This is very test specific, so be careful if you want to reuse this code.
220 void preparePipelineWrapper(GraphicsPipelineWrapper &gpw,
221 const VkPipeline basePipeline, // for derivatives
222 const PipelineLayoutWrapper &pipelineLayout, const VkRenderPass renderPass,
223 const ShaderWrapper vertexModule, const ShaderWrapper fragmentModule,
224 const IVec2 &renderSize, const VkPrimitiveTopology topology, const uint32_t subpass,
225 const bool useDepth, const bool useStencil)
226 {
227 const VkVertexInputBindingDescription vertexInputBindingDescription = {
228 0u, // uint32_t binding;
229 sizeof(Vertex4RGBA), // uint32_t stride;
230 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
231 };
232
233 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] = {
234 {
235 0u, // uint32_t location;
236 0u, // uint32_t binding;
237 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
238 0u, // uint32_t offset;
239 },
240 {
241 1u, // uint32_t location;
242 0u, // uint32_t binding;
243 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
244 sizeof(Vec4), // uint32_t offset;
245 }};
246
247 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo = {
248 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
249 nullptr, // const void* pNext;
250 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
251 1u, // uint32_t vertexBindingDescriptionCount;
252 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
253 DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions), // uint32_t vertexAttributeDescriptionCount;
254 vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
255 };
256
257 const std::vector<VkViewport> viewport{makeViewport(renderSize)};
258 const std::vector<VkRect2D> scissor{makeRect2D(renderSize)};
259
260 const VkStencilOpState stencilOpState =
261 makeStencilOpState(VK_STENCIL_OP_KEEP, // stencil fail
262 VK_STENCIL_OP_KEEP, // depth & stencil pass
263 VK_STENCIL_OP_KEEP, // depth only fail
264 VK_COMPARE_OP_EQUAL, // compare op
265 ~0u, // compare mask
266 ~0u, // write mask
267 static_cast<uint32_t>(REFERENCE_STENCIL_VALUE)); // reference
268
269 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo = {
270 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
271 nullptr, // const void* pNext;
272 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
273 useDepth, // VkBool32 depthTestEnable;
274 VK_FALSE, // VkBool32 depthWriteEnable;
275 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
276 VK_FALSE, // VkBool32 depthBoundsTestEnable;
277 useStencil, // VkBool32 stencilTestEnable;
278 stencilOpState, // VkStencilOpState front;
279 stencilOpState, // VkStencilOpState back;
280 0.0f, // float minDepthBounds;
281 1.0f, // float maxDepthBounds;
282 };
283
284 const VkColorComponentFlags colorComponentsAll =
285 VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
286 // Number of blend attachments must equal the number of color attachments during any subpass.
287 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState = {
288 VK_FALSE, // VkBool32 blendEnable;
289 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
290 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
291 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
292 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
293 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
294 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
295 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
296 };
297
298 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo = {
299 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
300 nullptr, // const void* pNext;
301 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
302 VK_FALSE, // VkBool32 logicOpEnable;
303 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
304 1u, // uint32_t attachmentCount;
305 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
306 {0.0f, 0.0f, 0.0f, 0.0f}, // float blendConstants[4];
307 };
308
309 gpw.setDefaultTopology(topology)
310 .setDefaultRasterizationState()
311 .setDefaultMultisampleState()
312 .setupVertexInputState(&vertexInputStateInfo)
313 .setupPreRasterizationShaderState(viewport, scissor, pipelineLayout, renderPass, subpass, vertexModule)
314 .setupFragmentShaderState(pipelineLayout, renderPass, subpass, fragmentModule, &pipelineDepthStencilStateInfo)
315 .setupFragmentOutputState(renderPass, subpass, &pipelineColorBlendStateInfo)
316 .setMonolithicPipelineLayout(pipelineLayout)
317 .buildPipeline(VK_NULL_HANDLE, basePipeline, -1);
318 }
319
320 //! Make a render pass with one subpass per color attachment and depth/stencil attachment (if used).
321 RenderPassWrapper makeRenderPass(const DeviceInterface &vk, const VkDevice device,
322 const PipelineConstructionType pipelineConstructionType, const VkFormat colorFormat,
323 const VkFormat depthStencilFormat, const uint32_t numLayers,
324 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
325 const VkImageLayout initialDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED)
326 {
327 const VkAttachmentDescription colorAttachmentDescription = {
328 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
329 colorFormat, // VkFormat format;
330 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
331 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
332 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
333 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
334 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
335 initialColorImageLayout, // VkImageLayout initialLayout;
336 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
337 };
338 vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription);
339
340 const VkAttachmentDescription depthStencilAttachmentDescription = {
341 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
342 depthStencilFormat, // VkFormat format;
343 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
344 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
345 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
346 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp stencilLoadOp;
347 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
348 initialDepthStencilImageLayout, // VkImageLayout initialLayout;
349 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
350 };
351
352 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
353 attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription);
354
355 // Create a subpass for each attachment (each attachment is a layer of an arrayed image).
356 vector<VkAttachmentReference> colorAttachmentReferences(numLayers);
357 vector<VkAttachmentReference> depthStencilAttachmentReferences(numLayers);
358 vector<VkSubpassDescription> subpasses;
359
360 // Ordering here must match the framebuffer attachments
361 for (uint32_t i = 0; i < numLayers; ++i)
362 {
363 const VkAttachmentReference attachmentRef = {
364 i, // uint32_t attachment;
365 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
366 };
367 const VkAttachmentReference depthStencilAttachmentRef = {
368 i + numLayers, // uint32_t attachment;
369 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout;
370 };
371
372 colorAttachmentReferences[i] = attachmentRef;
373 depthStencilAttachmentReferences[i] = depthStencilAttachmentRef;
374
375 const VkAttachmentReference *pDepthStencilAttachment =
376 (depthStencilFormat != VK_FORMAT_UNDEFINED ? &depthStencilAttachmentReferences[i] : nullptr);
377 const VkSubpassDescription subpassDescription = {
378 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
379 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
380 0u, // uint32_t inputAttachmentCount;
381 nullptr, // const VkAttachmentReference* pInputAttachments;
382 1u, // uint32_t colorAttachmentCount;
383 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments;
384 nullptr, // const VkAttachmentReference* pResolveAttachments;
385 pDepthStencilAttachment, // const VkAttachmentReference* pDepthStencilAttachment;
386 0u, // uint32_t preserveAttachmentCount;
387 nullptr // const uint32_t* pPreserveAttachments;
388 };
389 subpasses.push_back(subpassDescription);
390 }
391
392 const VkRenderPassCreateInfo renderPassInfo = {
393 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
394 nullptr, // const void* pNext;
395 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
396 static_cast<uint32_t>(attachmentDescriptions.size()), // uint32_t attachmentCount;
397 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments;
398 static_cast<uint32_t>(subpasses.size()), // uint32_t subpassCount;
399 &subpasses[0], // const VkSubpassDescription* pSubpasses;
400 0u, // uint32_t dependencyCount;
401 nullptr // const VkSubpassDependency* pDependencies;
402 };
403
404 return RenderPassWrapper(pipelineConstructionType, vk, device, &renderPassInfo);
405 }
406
407 Move<VkImage> makeImage(const DeviceInterface &vk, const VkDevice device, VkImageCreateFlags flags,
408 VkImageType imageType, const VkFormat format, const IVec3 &size, const uint32_t numMipLevels,
409 const uint32_t numLayers, const VkImageUsageFlags usage)
410 {
411 const VkImageCreateInfo imageParams = {
412 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
413 nullptr, // const void* pNext;
414 flags, // VkImageCreateFlags flags;
415 imageType, // VkImageType imageType;
416 format, // VkFormat format;
417 makeExtent3D(size), // VkExtent3D extent;
418 numMipLevels, // uint32_t mipLevels;
419 numLayers, // uint32_t arrayLayers;
420 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
421 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
422 usage, // VkImageUsageFlags usage;
423 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
424 0u, // uint32_t queueFamilyIndexCount;
425 nullptr, // const uint32_t* pQueueFamilyIndices;
426 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
427 };
428 return createImage(vk, device, &imageParams);
429 }
430
431 inline VkImageSubresourceRange makeColorSubresourceRange(const int baseArrayLayer, const int layerCount)
432 {
433 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<uint32_t>(baseArrayLayer),
434 static_cast<uint32_t>(layerCount));
435 }
436
437 //! Get a reference clear value based on color format.
438 VkClearValue getClearValue(const VkFormat format)
439 {
440 if (isUintFormat(format) || isIntFormat(format))
441 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE,
442 REFERENCE_COLOR_VALUE);
443 else
444 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f);
445 }
446
447 std::string getColorFormatStr(const int numComponents, const bool isUint, const bool isSint)
448 {
449 std::ostringstream str;
450 if (numComponents == 1)
451 str << (isUint ? "uint" : isSint ? "int" : "float");
452 else
453 str << (isUint ? "u" : isSint ? "i" : "") << "vec" << numComponents;
454
455 return str.str();
456 }
457
458 //! A half-viewport quad. Use with TRIANGLE_STRIP topology.
459 vector<Vertex4RGBA> genFullQuadVertices(const int subpassCount)
460 {
461 vector<Vertex4RGBA> vectorData;
462 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
463 {
464 Vertex4RGBA data = {
465 Vec4(0.0f, -1.0f, 0.0f, 1.0f),
466 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)],
467 };
468 vectorData.push_back(data);
469 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
470 vectorData.push_back(data);
471 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f);
472 vectorData.push_back(data);
473 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f);
474 vectorData.push_back(data);
475 }
476 return vectorData;
477 }
478
479 VkImageType getImageType(const VkImageViewType viewType)
480 {
481 switch (viewType)
482 {
483 case VK_IMAGE_VIEW_TYPE_1D:
484 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
485 return VK_IMAGE_TYPE_1D;
486
487 case VK_IMAGE_VIEW_TYPE_2D:
488 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
489 case VK_IMAGE_VIEW_TYPE_CUBE:
490 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
491 return VK_IMAGE_TYPE_2D;
492
493 case VK_IMAGE_VIEW_TYPE_3D:
494 return VK_IMAGE_TYPE_3D;
495
496 default:
497 DE_ASSERT(0);
498 return VK_IMAGE_TYPE_LAST;
499 }
500 }
501
502 //! ImageViewType for accessing a single layer/slice of an image
503 VkImageViewType getImageViewSliceType(const VkImageViewType viewType)
504 {
505 switch (viewType)
506 {
507 case VK_IMAGE_VIEW_TYPE_1D:
508 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
509 return VK_IMAGE_VIEW_TYPE_1D;
510
511 case VK_IMAGE_VIEW_TYPE_2D:
512 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
513 case VK_IMAGE_VIEW_TYPE_CUBE:
514 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
515 case VK_IMAGE_VIEW_TYPE_3D:
516 return VK_IMAGE_VIEW_TYPE_2D;
517
518 default:
519 DE_ASSERT(0);
520 return VK_IMAGE_VIEW_TYPE_LAST;
521 }
522 }
523
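//! Image create flags needed so that single slices/faces can be used as 2D attachments (2D-array-compatible for 3D images, cube-compatible for cube views).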
524 VkImageCreateFlags getImageCreateFlags(const VkImageViewType viewType)
525 {
526 VkImageCreateFlags flags = (VkImageCreateFlags)0;
527
528 if (viewType == VK_IMAGE_VIEW_TYPE_3D)
529 flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
530 if (isCube(viewType))
531 flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
532
533 return flags;
534 }
535
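//! Fill the expected reference image: clear color everywhere, with the right half of each slice overwritten by its per-slice draw color.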
536 void generateExpectedImage(const tcu::PixelBufferAccess &outputImage, const IVec2 &renderSize,
537 const int colorDepthOffset)
538 {
539 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type);
540 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ||
541 channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
542 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat()));
543
544 if (isInt)
545 tcu::clear(outputImage, IVec4(clearValue.color.int32));
546 else
547 tcu::clear(outputImage, Vec4(clearValue.color.float32));
548
549 for (int z = 0; z < outputImage.getDepth(); ++z)
550 {
551 const Vec4 &setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)];
552 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<int32_t>();
553
554 for (int y = 0; y < renderSize.y(); ++y)
555 for (int x = renderSize.x() / 2; x < renderSize.x(); ++x)
556 {
557 if (isInt)
558 outputImage.setPixel(setColorInt, x, y, z);
559 else
560 outputImage.setPixel(setColor, x, y, z);
561 }
562 }
563 }
564
565 IVec4 getMaxImageSize(const VkImageViewType viewType, const IVec4 &sizeHint)
566 {
567 // Limits have been taken from the Vulkan specification
568 IVec4 size = IVec4(sizeHint.x() != MAX_SIZE ? sizeHint.x() : 4096, sizeHint.y() != MAX_SIZE ? sizeHint.y() : 4096,
569 sizeHint.z() != MAX_SIZE ? sizeHint.z() : 256, sizeHint.w() != MAX_SIZE ? sizeHint.w() : 256);
570
571 switch (viewType)
572 {
573 case VK_IMAGE_VIEW_TYPE_1D:
574 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
575 size.x() = deMin32(4096, size.x());
576 break;
577
578 case VK_IMAGE_VIEW_TYPE_2D:
579 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
580 size.x() = deMin32(4096, size.x());
581 size.y() = deMin32(4096, size.y());
582 break;
583
584 case VK_IMAGE_VIEW_TYPE_3D:
585 size.x() = deMin32(256, size.x());
586 size.y() = deMin32(256, size.y());
587 break;
588
589 case VK_IMAGE_VIEW_TYPE_CUBE:
590 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
591 size.x() = deMin32(4096, size.x());
592 size.y() = deMin32(4096, size.y());
593 size.w() = deMin32(252, size.w());
594 size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES); // round down to a multiple of 6 faces
595 break;
596
597 default:
598 DE_ASSERT(0);
599 return IVec4();
600 }
601
602 return size;
603 }
604
605 //! Get a smaller image size. Returns a vector of zeroes if the size can't be reduced further.
606 IVec4 getReducedImageSize(const CaseDef &caseDef, IVec4 size)
607 {
608 const int maxIndex = findIndexOfMaxComponent(size);
609 const int reducedSize = size[maxIndex] >> 1;
610
611 switch (caseDef.viewType)
612 {
613 case VK_IMAGE_VIEW_TYPE_CUBE:
614 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
615 if (maxIndex < 2)
616 size.x() = size.y() = reducedSize;
617 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES)
618 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6
619 else
620 size = IVec4(0);
621 break;
622
623 default:
624 size[maxIndex] = reducedSize;
625 break;
626 }
627
628 if (reducedSize == 0)
629 size = IVec4(0);
630
631 return size;
632 }
633
634 //! Get the image memory requirements for the image size under test. Image creation may fail
635 //! if the required size exceeds the device's maxResourceSize; in that case the function
636 //! returns false.
637 bool getSupportedImageMemoryRequirements(Context &context, const CaseDef &caseDef, const VkFormat format,
638 const IVec4 size, const VkImageUsageFlags usage,
639 VkMemoryRequirements &imageMemoryRequiements)
640 {
641 const DeviceInterface &vk = context.getDeviceInterface();
642 const VkDevice device = context.getDevice();
643 bool imageCreationPossible = true;
644
645 try
646 {
647 Move<VkImage> image =
648 makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), format,
649 size.swizzle(0, 1, 2), 1u, size.w(), usage);
650
651 vk.getImageMemoryRequirements(device, *image, &imageMemoryRequiements);
652 }
653 // vkCreateImage is allowed to return VK_ERROR_OUT_OF_HOST_MEMORY if the image's
654 // memory requirements will exceed maxResourceSize.
655 catch (const vk::OutOfMemoryError &)
656 {
657 imageCreationPossible = false;
658 }
659 if (imageMemoryRequiements.size == 0)
660 {
661 imageCreationPossible = false;
662 }
663
664 return imageCreationPossible;
665 }
666
667 bool isDepthStencilFormatSupported(const InstanceInterface &vki, const VkPhysicalDevice physDevice,
668 const VkFormat format)
669 {
670 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format);
671 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
672 }
673
674 VkImageAspectFlags getFormatAspectFlags(const VkFormat format)
675 {
676 if (format == VK_FORMAT_UNDEFINED)
677 return 0;
678
679 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order;
680
681 switch (order)
682 {
683 case tcu::TextureFormat::DS:
684 return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
685 case tcu::TextureFormat::D:
686 return VK_IMAGE_ASPECT_DEPTH_BIT;
687 case tcu::TextureFormat::S:
688 return VK_IMAGE_ASPECT_STENCIL_BIT;
689 default:
690 return VK_IMAGE_ASPECT_COLOR_BIT;
691 }
692 }
693
694 void initPrograms(SourceCollections &programCollection, const CaseDef caseDef)
695 {
696 const int numComponents = getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order);
697 const bool isUint = isUintFormat(caseDef.colorFormat);
698 const bool isSint = isIntFormat(caseDef.colorFormat);
699
700 // Vertex shader
701 {
702 std::ostringstream src;
703 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
704 << "\n"
705 << "layout(location = 0) in vec4 in_position;\n"
706 << "layout(location = 1) in vec4 in_color;\n"
707 << "layout(location = 0) out vec4 out_color;\n"
708 << "\n"
709 << "out gl_PerVertex {\n"
710 << " vec4 gl_Position;\n"
711 << "};\n"
712 << "\n"
713 << "void main(void)\n"
714 << "{\n"
715 << " gl_Position = in_position;\n"
716 << " out_color = in_color;\n"
717 << "}\n";
718
719 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
720 }
721
722 // Fragment shader
723 {
724 std::ostringstream colorValue;
725 colorValue << REFERENCE_COLOR_VALUE;
726 const std::string colorFormat = getColorFormatStr(numComponents, isUint, isSint);
727 const std::string colorInteger = (isUint || isSint ? " * " + colorFormat + "(" + colorValue.str() + ")" : "");
728
729 std::ostringstream src;
730 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
731 << "\n"
732 << "layout(location = 0) in vec4 in_color;\n"
733 << "layout(location = 0) out " << colorFormat << " o_color;\n"
734 << "\n"
735 << "void main(void)\n"
736 << "{\n"
737 << " o_color = " << colorFormat << "("
738 << (numComponents == 1 ? "in_color.r" :
739 numComponents == 2 ? "in_color.rg" :
740 numComponents == 3 ? "in_color.rgb" :
741 "in_color")
742 << colorInteger << ");\n"
743 << "}\n";
744
745 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
746 }
747 }
748
749 //! See testAttachmentSize() description
750 tcu::TestStatus testWithSizeReduction(Context &context, const CaseDef &caseDef)
751 {
752 const DeviceInterface &vk = context.getDeviceInterface();
753 const InstanceInterface &vki = context.getInstanceInterface();
754 const VkDevice device = context.getDevice();
755 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
756 const VkQueue queue = context.getUniversalQueue();
757 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
758 Allocator &allocator = context.getDefaultAllocator();
759
760 IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
761
762 const VkImageUsageFlags colorImageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
763 const VkImageUsageFlags depthStencilImageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
764 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
765
766 {
767 VkImageFormatProperties colorImageFormatProperties;
768 const auto result = vki.getPhysicalDeviceImageFormatProperties(
769 physDevice, caseDef.colorFormat, getImageType(caseDef.viewType), VK_IMAGE_TILING_OPTIMAL, colorImageUsage,
770 getImageCreateFlags(caseDef.viewType), &colorImageFormatProperties);
771
772 VK_CHECK(result);
773
774 imageSize.x() = std::min(static_cast<uint32_t>(imageSize.x()), colorImageFormatProperties.maxExtent.width);
775 imageSize.y() = std::min(static_cast<uint32_t>(imageSize.y()), colorImageFormatProperties.maxExtent.height);
776 imageSize.z() = std::min(static_cast<uint32_t>(imageSize.z()), colorImageFormatProperties.maxExtent.depth);
777 imageSize.w() = std::min(static_cast<uint32_t>(imageSize.w()), colorImageFormatProperties.maxArrayLayers);
778 }
779
780 if (useDepthStencil)
781 {
782 VkImageFormatProperties depthStencilImageFormatProperties;
783 const auto result = vki.getPhysicalDeviceImageFormatProperties(
784 physDevice, caseDef.depthStencilFormat, getImageType(caseDef.viewType), VK_IMAGE_TILING_OPTIMAL,
785 depthStencilImageUsage, getImageCreateFlags(caseDef.viewType), &depthStencilImageFormatProperties);
786
787 VK_CHECK(result);
788
789 imageSize.x() =
790 std::min(static_cast<uint32_t>(imageSize.x()), depthStencilImageFormatProperties.maxExtent.width);
791 imageSize.y() =
792 std::min(static_cast<uint32_t>(imageSize.y()), depthStencilImageFormatProperties.maxExtent.height);
793 imageSize.z() =
794 std::min(static_cast<uint32_t>(imageSize.z()), depthStencilImageFormatProperties.maxExtent.depth);
795 imageSize.w() =
796 std::min(static_cast<uint32_t>(imageSize.w()), depthStencilImageFormatProperties.maxArrayLayers);
797 }
798
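// Keep shrinking the image until the images and the verification buffer fit within the available memory.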
799 bool allocationPossible = false;
800 while (!allocationPossible)
801 {
802 // Get the image memory requirements
803 VkMemoryRequirements colorImageMemReqs;
804 VkDeviceSize neededMemory = 0;
805 uint32_t memoryTypeNdx = 0;
806
807 if (!getSupportedImageMemoryRequirements(context, caseDef, caseDef.colorFormat, imageSize, colorImageUsage,
808 colorImageMemReqs))
809 {
810 // Try again with reduced image size
811 imageSize = getReducedImageSize(caseDef, imageSize);
812 if (imageSize == IVec4())
813 return tcu::TestStatus::fail("Couldn't create an image with required size");
814 else
815 continue;
816 }
817
818 neededMemory = colorImageMemReqs.size;
819
820 if (useDepthStencil)
821 {
822 VkMemoryRequirements depthStencilImageMemReqs;
823
824 if (!getSupportedImageMemoryRequirements(context, caseDef, caseDef.depthStencilFormat, imageSize,
825 depthStencilImageUsage, depthStencilImageMemReqs))
826 {
827 // Try again with reduced image size
828 imageSize = getReducedImageSize(caseDef, imageSize);
829 if (imageSize == IVec4())
830 return tcu::TestStatus::fail("Couldn't create an image with required size");
831 else
832 continue;
833 }
834
835 neededMemory += depthStencilImageMemReqs.size;
836 }
837
838 // Reserve an additional 15% device memory, plus the 512KB for checking results
839 {
840 const VkDeviceSize reserveForChecking = 500ull * 1024ull;
841 const float additionalMemory = 1.15f;
842 neededMemory =
843 static_cast<VkDeviceSize>(static_cast<float>(neededMemory) * additionalMemory) + reserveForChecking;
844 }
845
846 // Query the available memory in the corresponding memory heap
847 {
848 const VkPhysicalDeviceMemoryProperties memoryProperties =
849 getPhysicalDeviceMemoryProperties(vki, physDevice);
850 // Use the color image memory requirements, assume depth stencil uses the same memory type
851 memoryTypeNdx =
852 selectMatchingMemoryType(memoryProperties, colorImageMemReqs.memoryTypeBits, MemoryRequirement::Any);
853 tcu::PlatformMemoryLimits memoryLimits;
854 context.getTestContext().getPlatform().getMemoryLimits(memoryLimits);
855 VkDeviceSize maxMemory =
856 std::min(memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size,
857 VkDeviceSize(memoryLimits.totalSystemMemory));
858
859 if (neededMemory > maxMemory)
860 {
861 // Try again with reduced image size
862 imageSize = getReducedImageSize(caseDef, imageSize);
863 if (imageSize == IVec4())
864 return tcu::TestStatus::fail("Couldn't create an image with required size");
865 else
866 continue;
867 }
868 }
869
870 // Attempt a memory allocation
871 {
872 VkDeviceMemory object = VK_NULL_HANDLE;
873 const VkMemoryAllocateInfo allocateInfo = {
874 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, //VkStructureType sType;
875 nullptr, //const void* pNext;
876 neededMemory, //VkDeviceSize allocationSize;
877 memoryTypeNdx //uint32_t memoryTypeIndex;
878 };
879
880 const VkResult result = vk.allocateMemory(device, &allocateInfo, nullptr, &object);
881
882 if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
883 {
884 // Try again with reduced image size
885 imageSize = getReducedImageSize(caseDef, imageSize);
886 if (imageSize == IVec4())
887 return tcu::TestStatus::fail("Couldn't create an image with required size");
888 }
889 else if (VK_SUCCESS != result)
890 {
891 return tcu::TestStatus::fail("Couldn't allocate memory");
892 }
893 else
894 {
895 // Free the memory automatically through the Move pointer
896 Move<VkDeviceMemory> memoryAllocated(check<VkDeviceMemory>(object),
897 Deleter<VkDeviceMemory>(vk, device, nullptr));
898 allocationPossible = true;
899 }
900 }
901 }
902
903 context.getTestContext().getLog() << tcu::TestLog::Message
904 << "Using an image with size (width, height, depth, layers) = " << imageSize
905 << tcu::TestLog::EndMessage;
906
907 // "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
908 const int32_t numSlices = maxLayersOrDepth(imageSize);
909
910 // Determine the verification bounds. The checked region will be in the center of the rendered image
911 const IVec4 checkSize = tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE, MAX_VERIFICATION_REGION_SIZE,
912 MAX_VERIFICATION_REGION_DEPTH, MAX_VERIFICATION_REGION_DEPTH));
913 const IVec4 checkOffset = (imageSize - checkSize) / 2;
914
915 // Only make enough space for the check region
916 const VkDeviceSize colorBufferSize = product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
917 const Unique<VkBuffer> colorBuffer(makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
918 const UniquePtr<Allocation> colorBufferAlloc(bindBuffer(
919 vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
920
921 {
922 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
923 flushAlloc(vk, device, *colorBufferAlloc);
924 }
925
926 const ShaderWrapper vertexModule(ShaderWrapper(vk, device, context.getBinaryCollection().get("vert"), 0u));
927 const ShaderWrapper fragmentModule(ShaderWrapper(vk, device, context.getBinaryCollection().get("frag"), 0u));
928 RenderPassWrapper renderPass(makeRenderPass(vk, device, caseDef.pipelineConstructionType, caseDef.colorFormat,
929 caseDef.depthStencilFormat, static_cast<uint32_t>(numSlices),
930 (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ?
931 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL :
932 VK_IMAGE_LAYOUT_UNDEFINED));
933 const PipelineLayoutWrapper pipelineLayout(caseDef.pipelineConstructionType, vk, device);
934 vector<GraphicsPipelineWrapper> pipelines;
935
936 Move<VkImage> colorImage;
937 MovePtr<Allocation> colorImageAlloc;
938 vector<SharedPtrVkImageView> colorAttachments;
939 Move<VkImage> depthStencilImage;
940 MovePtr<Allocation> depthStencilImageAlloc;
941 vector<SharedPtrVkImageView> depthStencilAttachments;
942 vector<VkImage> images;
943 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
944 Move<VkBuffer> vertexBuffer;
945 MovePtr<Allocation> vertexBufferAlloc;
946
947 // Create a color image
948 {
949 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType),
950 caseDef.colorFormat, imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), colorImageUsage);
951 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator,
952 caseDef.allocationKind);
953 }
954
955 // Create a depth/stencil image (always a 2D image, optionally layered)
956 if (useDepthStencil)
957 {
958 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
959 IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, depthStencilImageUsage);
960 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any,
961 allocator, caseDef.allocationKind);
962 }
963
964 // Create a vertex buffer
965 {
966 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
967 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
968
969 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
970 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible,
971 allocator, caseDef.allocationKind);
972
973 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
974 flushAlloc(vk, device, *vertexBufferAlloc);
975 }
976
977 // Prepare color image upfront for rendering to individual slices. 3D slices aren't separate subresources, so they shouldn't be transitioned
978 // during each subpass like array layers.
979 if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
980 {
981 const Unique<VkCommandPool> cmdPool(
982 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
983 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
984
985 beginCommandBuffer(vk, *cmdBuffer);
986
987 const VkImageMemoryBarrier imageBarrier = {
988 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
989 nullptr, // const void* pNext;
990 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
991 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
992 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
993 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
994 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
995 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
996 *colorImage, // VkImage image;
997 {
998 // VkImageSubresourceRange subresourceRange;
999 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1000 0u, // uint32_t baseMipLevel;
1001 1u, // uint32_t levelCount;
1002 0u, // uint32_t baseArrayLayer;
1003 static_cast<uint32_t>(imageSize.w()), // uint32_t layerCount;
1004 }};
1005
1006 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1007 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u, 0u, nullptr, 0u, nullptr, 1u,
1008 &imageBarrier);
1009
1010 endCommandBuffer(vk, *cmdBuffer);
1011 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1012 }
1013
1014 // For each image layer or slice (3D), create an attachment and a pipeline
1015 {
1016 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1017 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1018 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1019 VkPipeline basePipeline = VK_NULL_HANDLE;
1020
1021 // Color attachments are first in the framebuffer
1022 pipelines.reserve(numSlices);
1023 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1024 {
1025 colorAttachments.push_back(
1026 makeSharedPtr(makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType),
1027 caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
1028 images.push_back(*colorImage);
1029 attachmentHandles.push_back(**colorAttachments.back());
1030
1031 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
1032 // We also have to create pipelines for each subpass
1033 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(),
1034 caseDef.pipelineConstructionType,
1035 (basePipeline == VK_NULL_HANDLE ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT :
1036 VK_PIPELINE_CREATE_DERIVATIVE_BIT));
1037 #else
1038 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(),
1039 caseDef.pipelineConstructionType, 0u);
1040 #endif // CTS_USES_VULKANSC
1041 preparePipelineWrapper(pipelines.back(), basePipeline, pipelineLayout, *renderPass, vertexModule,
1042 fragmentModule, imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1043 static_cast<uint32_t>(subpassNdx), useDepth, useStencil);
1044
1045 if (pipelines.front().wasBuild())
1046 basePipeline = pipelines.front().getPipeline();
1047 }
1048
1049 // Then D/S attachments, if any
1050 if (useDepthStencil)
1051 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1052 {
1053 depthStencilAttachments.push_back(makeSharedPtr(
1054 makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
1055 makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
1056 images.push_back(*depthStencilImage);
1057 attachmentHandles.push_back(**depthStencilAttachments.back());
1058 }
1059 }
1060
1061 renderPass.createFramebuffer(vk, device, static_cast<uint32_t>(attachmentHandles.size()), &images[0],
1062 &attachmentHandles[0], static_cast<uint32_t>(imageSize.x()),
1063 static_cast<uint32_t>(imageSize.y()));
1064
1065 {
1066 const Unique<VkCommandPool> cmdPool(
1067 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1068 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
1069
1070 beginCommandBuffer(vk, *cmdBuffer);
1071 {
1072 vector<VkClearValue> clearValues(numSlices, getClearValue(caseDef.colorFormat));
1073
1074 if (useDepthStencil)
1075 clearValues.insert(clearValues.end(), numSlices,
1076 makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1077
1078 const VkDeviceSize vertexBufferOffset = 0ull;
1079
1080 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, imageSize.x(), imageSize.y()),
1081 (uint32_t)clearValues.size(), &clearValues[0]);
1082 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1083 }
1084
1085 // Draw
1086 for (uint32_t subpassNdx = 0; subpassNdx < static_cast<uint32_t>(numSlices); ++subpassNdx)
1087 {
1088 if (subpassNdx != 0)
1089 renderPass.nextSubpass(vk, *cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1090
1091 pipelines[subpassNdx].bind(*cmdBuffer);
1092 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx * 4u, 0u);
1093 }
1094
1095 renderPass.end(vk, *cmdBuffer);
1096
1097 // Copy colorImage -> host visible colorBuffer
1098 {
1099 const VkImageMemoryBarrier imageBarriers[] = {{
1100 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1101 nullptr, // const void* pNext;
1102 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1103 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1104 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1105 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1106 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1107 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1108 *colorImage, // VkImage image;
1109 makeColorSubresourceRange(0, imageSize.w()) // VkImageSubresourceRange subresourceRange;
1110 }};
1111
1112 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1113 VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, nullptr, 0u, nullptr,
1114 DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1115
1116 // Copy the checked region rather than the whole image
1117 const VkImageSubresourceLayers subresource = {
1118 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1119 0u, // uint32_t mipLevel;
1120 static_cast<uint32_t>(checkOffset.w()), // uint32_t baseArrayLayer;
1121 static_cast<uint32_t>(checkSize.w()), // uint32_t layerCount;
1122 };
1123
1124 const VkBufferImageCopy region = {
1125 0ull, // VkDeviceSize bufferOffset;
1126 0u, // uint32_t bufferRowLength;
1127 0u, // uint32_t bufferImageHeight;
1128 subresource, // VkImageSubresourceLayers imageSubresource;
1129 makeOffset3D(checkOffset.x(), checkOffset.y(),
1130 checkOffset.z()), // VkOffset3D imageOffset;
1131 makeExtent3D(checkSize.swizzle(0, 1, 2)), // VkExtent3D imageExtent;
1132 };
1133
1134 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u,
1135 &region);
1136
1137 const VkBufferMemoryBarrier bufferBarriers[] = {
1138 {
1139 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1140 nullptr, // const void* pNext;
1141 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1142 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1143 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1144 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1145 *colorBuffer, // VkBuffer buffer;
1146 0ull, // VkDeviceSize offset;
1147 VK_WHOLE_SIZE, // VkDeviceSize size;
1148 },
1149 };
1150
1151 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u,
1152 nullptr, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, nullptr);
1153 }
1154
1155 endCommandBuffer(vk, *cmdBuffer);
1156 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1157 }
1158
1159 // Verify results
1160 {
1161 invalidateAlloc(vk, device, *colorBufferAlloc);
1162
1163 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1164 const int checkDepth = maxLayersOrDepth(checkSize);
1165 const int depthOffset = maxLayersOrDepth(checkOffset);
1166 const tcu::ConstPixelBufferAccess resultImage(format, checkSize.x(), checkSize.y(), checkDepth,
1167 colorBufferAlloc->getHostPtr());
1168 tcu::TextureLevel textureLevel(format, checkSize.x(), checkSize.y(), checkDepth);
1169 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1170 bool ok = false;
1171
1172 generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);
1173
1174 if (isFloatFormat(caseDef.colorFormat))
1175 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage,
1176 resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1177 else
1178 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage,
1179 resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1180
1181 return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1182 }
1183 }
1184
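//! Check the device features/extensions required to use the given image view type in this test.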
1185 void checkImageViewTypeRequirements(Context &context, const VkImageViewType viewType)
1186 {
1187 #ifndef CTS_USES_VULKANSC
1188 if (viewType == VK_IMAGE_VIEW_TYPE_3D)
1189 {
1190 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1191 !context.getPortabilitySubsetFeatures().imageView2DOn3DImage)
1192 {
1193 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Implementation does not support 2D or 2D array "
1194 "image view to be created on a 3D VkImage");
1195 }
1196
1197 context.requireDeviceFunctionality("VK_KHR_maintenance1");
1198 }
1199 #endif // CTS_USES_VULKANSC
1200
1201 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1202 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1203 }
1204
1205 void checkSupportAttachmentSize(Context &context, const CaseDef caseDef)
1206 {
1207 const InstanceInterface &vki = context.getInstanceInterface();
1208 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1209
1210 checkImageViewTypeRequirements(context, caseDef.viewType);
1211
1212 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1213 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1214
1215 {
1216 VkImageFormatProperties colorImageFormatProperties;
1217 const auto result = vki.getPhysicalDeviceImageFormatProperties(
1218 physDevice, caseDef.colorFormat, getImageType(caseDef.viewType), VK_IMAGE_TILING_OPTIMAL,
1219 (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT),
1220 getImageCreateFlags(caseDef.viewType), &colorImageFormatProperties);
1221
1222 if (result != VK_SUCCESS)
1223 {
1224 TCU_THROW(NotSupportedError, "Unsupported color attachment format");
1225 }
1226 }
1227
1228 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED)
1229 {
1230
1231 VkImageFormatProperties depthStencilImageFormatProperties;
1232 const auto result = vki.getPhysicalDeviceImageFormatProperties(
1233 physDevice, caseDef.depthStencilFormat, getImageType(caseDef.viewType), VK_IMAGE_TILING_OPTIMAL,
1234 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, getImageCreateFlags(caseDef.viewType),
1235 &depthStencilImageFormatProperties);
1236
1237 if (result != VK_SUCCESS)
1238 {
1239 TCU_THROW(NotSupportedError, "Unsupported depth/stencil attachment format");
1240 }
1241 }
1242
1243 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
1244 caseDef.pipelineConstructionType);
1245 }
1246
1247 //! A test that can exercise very big color and depth/stencil attachment sizes.
1248 //! If the total memory consumed by the images is too large, or if the implementation returns an OUT_OF_MEMORY error somewhere,
1249 //! the test is retried with progressively smaller attachment sizes.
1250 tcu::TestStatus testAttachmentSize(Context &context, const CaseDef caseDef)
1251 {
1252 return testWithSizeReduction(context, caseDef);
1253 // Never reached
1254 }
1255
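//! Compute the size of every mip level down to 1x1x1; the layer count (w) is left unchanged.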
1256 vector<IVec4> getMipLevelSizes(IVec4 baseSize)
1257 {
1258 vector<IVec4> levels;
1259 levels.push_back(baseSize);
1260
1261 while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
1262 {
1263 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
1264 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
1265 baseSize.z() = deMax32(baseSize.z() >> 1, 1);
1266 levels.push_back(baseSize);
1267 }
1268
1269 return levels;
1270 }
1271
1272 //! Compute memory consumed by each mip level, including all layers. Sizes include a padding for alignment.
1273 vector<VkDeviceSize> getPerMipLevelStorageSize(const vector<IVec4> &mipLevelSizes, const VkDeviceSize pixelSize)
1274 {
1275 const int64_t levelAlignment = 16;
1276 vector<VkDeviceSize> storageSizes;
1277
1278 for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
1279 storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));
1280
1281 return storageSizes;
1282 }
1283
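//! Render the quads into a single mip level of the color image (and the matching depth/stencil level, if used), with one subpass per layer/slice.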
1284 void drawToMipLevel(const Context &context, const CaseDef &caseDef, const int mipLevel, const IVec4 &mipSize,
1285 const int numSlices, const VkImage colorImage, const VkImage depthStencilImage,
1286 const VkBuffer vertexBuffer, const PipelineLayoutWrapper &pipelineLayout,
1287 const ShaderWrapper vertexModule, const ShaderWrapper fragmentModule)
1288 {
1289 const InstanceInterface &vki = context.getInstanceInterface();
1290 const DeviceInterface &vk = context.getDeviceInterface();
1291 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1292 const VkDevice device = context.getDevice();
1293 const VkQueue queue = context.getUniversalQueue();
1294 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1295 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1296 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1297 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1298 RenderPassWrapper renderPass(makeRenderPass(vk, device, caseDef.pipelineConstructionType, caseDef.colorFormat,
1299 caseDef.depthStencilFormat, static_cast<uint32_t>(numSlices),
1300 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1301 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
1302 vector<GraphicsPipelineWrapper> pipelines;
1303 vector<SharedPtrVkImageView> colorAttachments;
1304 vector<SharedPtrVkImageView> depthStencilAttachments;
1305 vector<VkImage> images;
1306 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
1307
1308 // For each image layer or slice (3D), create an attachment and a pipeline
1309 {
1310 VkPipeline basePipeline = VK_NULL_HANDLE;
1311
1312 // Color attachments are first in the framebuffer
1313 pipelines.reserve(numSlices);
1314 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1315 {
1316 colorAttachments.push_back(makeSharedPtr(
1317 makeImageView(vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
1318 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
1319 images.push_back(colorImage);
1320 attachmentHandles.push_back(**colorAttachments.back());
1321
1322 // We also have to create pipelines for each subpass
1323 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
1324 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(),
1325 caseDef.pipelineConstructionType,
1326 (basePipeline == VK_NULL_HANDLE ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT :
1327 VK_PIPELINE_CREATE_DERIVATIVE_BIT));
1328 #else
1329 pipelines.emplace_back(vki, vk, physDevice, device, context.getDeviceExtensions(),
1330 caseDef.pipelineConstructionType, 0u);
1331 #endif // CTS_USES_VULKANSC
1332 preparePipelineWrapper(pipelines.back(), basePipeline, pipelineLayout, *renderPass, vertexModule,
1333 fragmentModule, mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1334 static_cast<uint32_t>(subpassNdx), useDepth, useStencil);
1335
1336 if (pipelines.front().wasBuild())
1337 basePipeline = pipelines.front().getPipeline();
1338 }
1339
1340 // Then D/S attachments, if any
1341 if (useDepth || useStencil)
1342 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1343 {
1344 depthStencilAttachments.push_back(makeSharedPtr(
1345 makeImageView(vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
1346 makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
1347 images.push_back(depthStencilImage);
1348 attachmentHandles.push_back(**depthStencilAttachments.back());
1349 }
1350 }
1351
1352 renderPass.createFramebuffer(vk, device, static_cast<uint32_t>(attachmentHandles.size()), &images[0],
1353 &attachmentHandles[0], static_cast<uint32_t>(mipSize.x()),
1354 static_cast<uint32_t>(mipSize.y()));
1355
1356 {
1357 const Unique<VkCommandPool> cmdPool(
1358 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1359 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
1360
1361 beginCommandBuffer(vk, *cmdBuffer);
1362 {
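// Clear values follow the attachment order in the framebuffer: one entry per color attachment first,
// then one per depth/stencil attachment (if present).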
1363 vector<VkClearValue> clearValues(numSlices, getClearValue(caseDef.colorFormat));
1364
1365 if (useDepth || useStencil)
1366 clearValues.insert(clearValues.end(), numSlices,
1367 makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1368
1369 const VkDeviceSize vertexBufferOffset = 0ull;
1370
1371 renderPass.begin(vk, *cmdBuffer, makeRect2D(0, 0, mipSize.x(), mipSize.y()), (uint32_t)clearValues.size(),
1372 &clearValues[0]);
1373 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1374 }
1375
1376 // Draw
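// One subpass per slice; each draw emits a 4-vertex triangle strip quad taken from the shared
// vertex buffer at firstVertex = subpassNdx * 4.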
1377 for (uint32_t subpassNdx = 0; subpassNdx < static_cast<uint32_t>(numSlices); ++subpassNdx)
1378 {
1379 if (subpassNdx != 0)
1380 renderPass.nextSubpass(vk, *cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1381
1382 pipelines[subpassNdx].bind(*cmdBuffer);
1383 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx * 4u, 0u);
1384 }
1385
1386 renderPass.end(vk, *cmdBuffer);
1387
1388 endCommandBuffer(vk, *cmdBuffer);
1389 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1390 }
1391 }
1392
1393 void checkSupportRenderToMipMaps(Context &context, const CaseDef caseDef)
1394 {
1395 checkImageViewTypeRequirements(context, caseDef.viewType);
1396
1397 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1398 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1399
1400 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED &&
1401 !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(),
1402 caseDef.depthStencilFormat))
1403 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1404
1405 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
1406 caseDef.pipelineConstructionType);
1407 }
1408
1409 //! Use image mip levels as attachments
1410 tcu::TestStatus testRenderToMipMaps(Context &context, const CaseDef caseDef)
1411 {
1412 const DeviceInterface &vk = context.getDeviceInterface();
1413 const InstanceInterface &vki = context.getInstanceInterface();
1414 const VkDevice device = context.getDevice();
1415 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1416 const VkQueue queue = context.getUniversalQueue();
1417 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1418 Allocator &allocator = context.getDefaultAllocator();
1419
1420 const IVec4 imageSize = caseDef.imageSizeHint; // MAX_SIZE is not used in this test
1421 const int32_t numSlices = maxLayersOrDepth(imageSize);
1422 const vector<IVec4> mipLevelSizes = getMipLevelSizes(imageSize);
1423 const vector<VkDeviceSize> mipLevelStorageSizes =
1424 getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat)));
1425 const int numMipLevels = static_cast<int>(mipLevelSizes.size());
1426 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
1427
1428 // Create a color buffer big enough to hold all layers and mip levels
1429 const VkDeviceSize colorBufferSize = sum(mipLevelStorageSizes);
1430 const Unique<VkBuffer> colorBuffer(makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1431 const UniquePtr<Allocation> colorBufferAlloc(bindBuffer(
1432 vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
1433
1434 {
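// Zero and flush the readback buffer up front, so regions the GPU never writes show up as
// mismatches instead of stale data.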
1435 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
1436 flushAlloc(vk, device, *colorBufferAlloc);
1437 }
1438
1439 const ShaderWrapper vertexModule(ShaderWrapper(vk, device, context.getBinaryCollection().get("vert"), 0u));
1440 const ShaderWrapper fragmentModule(ShaderWrapper(vk, device, context.getBinaryCollection().get("frag"), 0u));
1441 const PipelineLayoutWrapper pipelineLayout(caseDef.pipelineConstructionType, vk, device);
1442
1443 Move<VkImage> colorImage;
1444 MovePtr<Allocation> colorImageAlloc;
1445 Move<VkImage> depthStencilImage;
1446 MovePtr<Allocation> depthStencilImageAlloc;
1447 Move<VkBuffer> vertexBuffer;
1448 MovePtr<Allocation> vertexBufferAlloc;
1449
1450 // Create a color image
1451 {
1452 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
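// The image is created with the full mip chain and all array layers; TRANSFER_SRC allows the
// final copy into the host-visible readback buffer.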
1453
1454 colorImage =
1455 makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType),
1456 caseDef.colorFormat, imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage);
1457 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator,
1458 caseDef.allocationKind);
1459 }
1460
1461 // Create a depth/stencil image (always a 2D image, optionally layered)
1462 if (useDepthStencil)
1463 {
1464 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1465
1466 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1467 IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage);
1468 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any,
1469 allocator, caseDef.allocationKind);
1470 }
1471
1472 // Create a vertex buffer
1473 {
1474 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1475 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
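// genFullQuadVertices presumably packs one full-screen quad (4 vertices) per slice; drawToMipLevel
// indexes into this buffer with firstVertex = slice * 4.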
1476
1477 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1478 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible,
1479 allocator, caseDef.allocationKind);
1480
1481 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1482 flushAlloc(vk, device, *vertexBufferAlloc);
1483 }
1484
1485 // Prepare images
1486 {
1487 const Unique<VkCommandPool> cmdPool(
1488 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1489 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
1490
1491 beginCommandBuffer(vk, *cmdBuffer);
1492
1493 const VkImageMemoryBarrier imageBarriers[] = {
1494 {
1495 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1496 nullptr, // const void* pNext;
1497 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1498 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1499 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1500 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1501 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1502 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1503 *colorImage, // VkImage image;
1504 {
1505 // VkImageSubresourceRange subresourceRange;
1506 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1507 0u, // uint32_t baseMipLevel;
1508 static_cast<uint32_t>(numMipLevels), // uint32_t levelCount;
1509 0u, // uint32_t baseArrayLayer;
1510 static_cast<uint32_t>(imageSize.w()), // uint32_t layerCount;
1511 },
1512 },
1513 {
1514 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1515 nullptr, // const void* pNext;
1516 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1517 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1518 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1519 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1520 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1521 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1522 *depthStencilImage, // VkImage image;
1523 {
1524 // VkImageSubresourceRange subresourceRange;
1525 getFormatAspectFlags(caseDef.depthStencilFormat), // VkImageAspectFlags aspectMask;
1526 0u, // uint32_t baseMipLevel;
1527 static_cast<uint32_t>(numMipLevels), // uint32_t levelCount;
1528 0u, // uint32_t baseArrayLayer;
1529 static_cast<uint32_t>(numSlices), // uint32_t layerCount;
1530 },
1531 }};
1532
1533 const uint32_t numImageBarriers =
1534 static_cast<uint32_t>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1));
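// Skip the depth/stencil barrier when the test runs without a depth/stencil attachment.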
1535
1536 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1537 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
1538 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
1539 0u, 0u, nullptr, 0u, nullptr, numImageBarriers, imageBarriers);
1540
1541 endCommandBuffer(vk, *cmdBuffer);
1542 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1543 }
1544
1545 // Draw
1546 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1547 {
1548 const IVec4 &mipSize = mipLevelSizes[mipLevel];
1549 const int levelSlices = maxLayersOrDepth(mipSize);
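// For 3D images the depth shrinks with each mip level, while arrayed images keep the same
// layer count at every level.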
1550
1551 drawToMipLevel(context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer,
1552 pipelineLayout, vertexModule, fragmentModule);
1553 }
1554
1555 // Copy results: colorImage -> host visible colorBuffer
1556 {
1557 const Unique<VkCommandPool> cmdPool(
1558 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1559 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
1560
1561 beginCommandBuffer(vk, *cmdBuffer);
1562
1563 {
1564 const VkImageMemoryBarrier imageBarriers[] = {{
1565 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1566 nullptr, // const void* pNext;
1567 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1568 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1569 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1570 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1571 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1572 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1573 *colorImage, // VkImage image;
1574 {
1575 // VkImageSubresourceRange subresourceRange;
1576 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1577 0u, // uint32_t baseMipLevel;
1578 static_cast<uint32_t>(numMipLevels), // uint32_t levelCount;
1579 0u, // uint32_t baseArrayLayer;
1580 static_cast<uint32_t>(imageSize.w()), // uint32_t layerCount;
1581 },
1582 }};
1583
1584 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1585 VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, nullptr, 0u, nullptr,
1586 DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1587 }
1588 {
1589 vector<VkBufferImageCopy> regions;
1590 VkDeviceSize levelOffset = 0ull;
1591 VkBufferImageCopy workRegion = {
1592 0ull, // VkDeviceSize bufferOffset;
1593 0u, // uint32_t bufferRowLength;
1594 0u, // uint32_t bufferImageHeight;
1595 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u,
1596 imageSize.w()), // VkImageSubresourceLayers imageSubresource;
1597 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1598 makeExtent3D(0, 0, 0), // VkExtent3D imageExtent;
1599 };
1600
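// One copy region per mip level; bufferOffset advances by the aligned per-level storage size,
// so the verification step below can locate each level in the buffer.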
1601 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1602 {
1603 workRegion.bufferOffset = levelOffset;
1604 workRegion.imageSubresource.mipLevel = static_cast<uint32_t>(mipLevel);
1605 workRegion.imageExtent = makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 1, 2));
1606
1607 regions.push_back(workRegion);
1608
1609 levelOffset += mipLevelStorageSizes[mipLevel];
1610 }
1611
1612 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer,
1613 static_cast<uint32_t>(regions.size()), &regions[0]);
1614 }
1615 {
1616 const VkBufferMemoryBarrier bufferBarriers[] = {
1617 {
1618 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1619 nullptr, // const void* pNext;
1620 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1621 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1622 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1623 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1624 *colorBuffer, // VkBuffer buffer;
1625 0ull, // VkDeviceSize offset;
1626 VK_WHOLE_SIZE, // VkDeviceSize size;
1627 },
1628 };
1629
1630 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u,
1631 nullptr, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, nullptr);
1632 }
1633
1634 endCommandBuffer(vk, *cmdBuffer);
1635 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1636 }
1637
1638 // Verify results (per mip level)
1639 {
1640 invalidateAlloc(vk, device, *colorBufferAlloc);
1641
1642 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1643
1644 VkDeviceSize levelOffset = 0ull;
1645 bool allOk = true;
1646
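// Each level's data is tightly packed in the readback buffer (bufferRowLength = 0); interpret it
// as a w x h x depth image and compare against a CPU-generated reference.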
1647 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1648 {
1649 const IVec4 &mipSize = mipLevelSizes[mipLevel];
1650 const void *const pLevelData = static_cast<const uint8_t *>(colorBufferAlloc->getHostPtr()) + levelOffset;
1651 const int levelDepth = maxLayersOrDepth(mipSize);
1652 const tcu::ConstPixelBufferAccess resultImage(format, mipSize.x(), mipSize.y(), levelDepth, pLevelData);
1653 tcu::TextureLevel textureLevel(format, mipSize.x(), mipSize.y(), levelDepth);
1654 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1655 const std::string comparisonName = "Mip level " + de::toString(mipLevel);
1656 bool ok = false;
1657
1658 generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0);
1659
1660 if (isFloatFormat(caseDef.colorFormat))
1661 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison",
1662 comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f),
1663 tcu::COMPARE_LOG_RESULT);
1664 else
1665 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison",
1666 comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2),
1667 tcu::COMPARE_LOG_RESULT);
1668
1669 allOk = allOk && ok; // keep testing all levels, even if we know it's a fail overall
1670 levelOffset += mipLevelStorageSizes[mipLevel];
1671 }
1672
1673 return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1674 }
1675 }
1676
1677 std::string getSizeDescription(const IVec4 &size)
1678 {
1679 std::ostringstream str;
1680
1681 const char *const description[4] = {"width", "height", "depth", "layers"};
1682
1683 int numMaxComponents = 0;
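// E.g. (MAX_SIZE, 23, 1, MAX_SIZE) yields "width_layers"; a size with no MAX_SIZE components yields "small".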
1684
1685 for (int i = 0; i < 4; ++i)
1686 {
1687 if (size[i] == MAX_SIZE)
1688 {
1689 if (numMaxComponents > 0)
1690 str << "_";
1691
1692 str << description[i];
1693 ++numMaxComponents;
1694 }
1695 }
1696
1697 if (numMaxComponents == 0)
1698 str << "small";
1699
1700 return str.str();
1701 }
1702
1703 inline std::string getFormatString(const VkFormat format)
1704 {
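// Strip the "VK_FORMAT_" prefix (10 characters) and lowercase the rest,
// e.g. VK_FORMAT_R8G8B8A8_UNORM -> "r8g8b8a8_unorm".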
1705 std::string name(getFormatName(format));
1706 return de::toLower(name.substr(10));
1707 }
1708
1709 std::string getFormatString(const VkFormat colorFormat, const VkFormat depthStencilFormat)
1710 {
1711 std::ostringstream str;
1712 str << getFormatString(colorFormat);
1713 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
1714 str << "_" << getFormatString(depthStencilFormat);
1715 return str.str();
1716 }
1717
1718 std::string getShortImageViewTypeName(const VkImageViewType imageViewType)
1719 {
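// Strip the "VK_IMAGE_VIEW_TYPE_" prefix (19 characters),
// e.g. VK_IMAGE_VIEW_TYPE_CUBE_ARRAY -> "cube_array".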
1720 std::string s(getImageViewTypeName(imageViewType));
1721 return de::toLower(s.substr(19));
1722 }
1723
1724 inline BVec4 bvecFromMask(uint32_t mask)
1725 {
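// Bit i of the mask selects component i: 1 = width, 2 = height, 4 = depth, 8 = layers
// (matching the MASK_* constants).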
1726 return BVec4((mask >> 0) & 1, (mask >> 1) & 1, (mask >> 2) & 1, (mask >> 3) & 1);
1727 }
1728
1729 vector<IVec4> genSizeCombinations(const IVec4 &baselineSize, const uint32_t sizeMask,
1730 const VkImageViewType imageViewType)
1731 {
1732 vector<IVec4> sizes;
1733 std::set<uint32_t> masks;
1734
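// Enumerate every subset of (width, height, depth, layers) and mask it with sizeMask, so only the
// dimensions this view type allows to grow become MAX_SIZE. Cube views tie width and height
// together to keep the faces square.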
1735 for (uint32_t i = 0; i < (1u << 4); ++i)
1736 {
1737 // Cube images have square faces
1738 if (isCube(imageViewType) && ((i & MASK_WH) != 0))
1739 i |= MASK_WH;
1740
1741 masks.insert(i & sizeMask);
1742 }
1743
1744 for (std::set<uint32_t>::const_iterator it = masks.begin(); it != masks.end(); ++it)
1745 sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it)));
1746
1747 return sizes;
1748 }
1749
1750 void addTestCasesWithFunctions(tcu::TestCaseGroup *group, PipelineConstructionType pipelineConstructionType,
1751 AllocationKind allocationKind)
1752 {
1753 const struct
1754 {
1755 VkImageViewType viewType;
1756 IVec4 baselineSize; //!< image size: (dimX, dimY, dimZ, arraySize)
1757 uint32_t sizeMask; //!< if a dimension is masked, generate a huge size case for it
1758 } testCase[] = {
1759 {VK_IMAGE_VIEW_TYPE_1D, IVec4(54, 1, 1, 1), MASK_W},
1760 {VK_IMAGE_VIEW_TYPE_1D_ARRAY, IVec4(54, 1, 1, 4), MASK_W_LAYERS},
1761 {VK_IMAGE_VIEW_TYPE_2D, IVec4(44, 23, 1, 1), MASK_WH},
1762 {VK_IMAGE_VIEW_TYPE_2D_ARRAY, IVec4(44, 23, 1, 4), MASK_WH_LAYERS},
1763 {VK_IMAGE_VIEW_TYPE_3D, IVec4(22, 31, 7, 1), MASK_WHD},
1764 {VK_IMAGE_VIEW_TYPE_CUBE, IVec4(35, 35, 1, 6), MASK_WH},
1765 {VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, IVec4(35, 35, 1, 2 * 6), MASK_WH_LAYERS},
1766 };
1767
1768 const VkFormat format[] = {VK_FORMAT_R8G8B8A8_UNORM,
1769 VK_FORMAT_R32_UINT,
1770 VK_FORMAT_R16G16_SINT,
1771 VK_FORMAT_R32G32B32A32_SFLOAT,
1772 VK_FORMAT_A1R5G5B5_UNORM_PACK16,
1773 VK_FORMAT_R5G6B5_UNORM_PACK16,
1774 VK_FORMAT_A2B10G10R10_UINT_PACK32,
1775 VK_FORMAT_A2B10G10R10_UNORM_PACK32};
1776
1777 const VkFormat depthStencilFormat[] = {
1778 VK_FORMAT_UNDEFINED, // don't use a depth/stencil attachment
1779 VK_FORMAT_D16_UNORM, VK_FORMAT_S8_UINT,
1780 VK_FORMAT_D24_UNORM_S8_UINT, // at least one of these two combined depth/stencil formats must be supported
1781 VK_FORMAT_D32_SFLOAT_S8_UINT,
1782 };
1783
1784 for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx)
1785 {
1786 MovePtr<tcu::TestCaseGroup> imageGroup(new tcu::TestCaseGroup(
1787 group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str()));
1788
1789 // Generate attachment size cases
1790 {
1791 vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask,
1792 testCase[caseNdx].viewType);
1793
1794 #ifdef CTS_USES_VULKANSC
1795 // filter out sizes in which both width and height are equal to the maximum value
1796 sizes.erase(std::remove_if(begin(sizes), end(sizes),
1797 [&](const IVec4 &v) { return v.x() == MAX_SIZE && v.y() == MAX_SIZE; }),
1798 end(sizes));
1799 #endif // CTS_USES_VULKANSC
1800
1801 MovePtr<tcu::TestCaseGroup> smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small"));
1802 MovePtr<tcu::TestCaseGroup> hugeGroup(new tcu::TestCaseGroup(group->getTestContext(), "huge"));
1803
1804 imageGroup->addChild(smallGroup.get());
1805 imageGroup->addChild(hugeGroup.get());
1806
1807 for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter)
1808 {
1809 // The first size is the baseline size, put it in a dedicated group
1810 if (sizeIter == sizes.begin())
1811 {
1812 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1813 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1814 {
1815 const CaseDef caseDef{
1816 pipelineConstructionType, // PipelineConstructionType pipelineConstructionType;
1817 testCase[caseNdx].viewType, // VkImageViewType viewType;
1818 *sizeIter, // IVec4 imageSizeHint;
1819 format[formatNdx], // VkFormat colorFormat;
1820 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1821 allocationKind // AllocationKind allocationKind;
1822 };
1823 addFunctionCaseWithPrograms(
1824 smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]),
1825 checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
1826 }
1827 }
1828 else // All huge cases go into a separate group
1829 {
1830 if (allocationKind != ALLOCATION_KIND_DEDICATED)
1831 {
1832 MovePtr<tcu::TestCaseGroup> sizeGroup(
1833 new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str()));
1834 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
1835
1836 // Use the same color format for all cases, to reduce the number of permutations
1837 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1838 {
1839 const CaseDef caseDef{
1840 pipelineConstructionType, // PipelineConstructionType pipelineConstructionType;
1841 testCase[caseNdx].viewType, // VkImageViewType viewType;
1842 *sizeIter, // IVec4 imageSizeHint;
1843 colorFormat, // VkFormat colorFormat;
1844 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1845 allocationKind // AllocationKind allocationKind;
1846 };
1847 addFunctionCaseWithPrograms(
1848 sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]),
1849 checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
1850 }
1851 hugeGroup->addChild(sizeGroup.release());
1852 }
1853 }
1854 }
1855 smallGroup.release();
1856 hugeGroup.release();
1857 }
1858
1859 // Generate mip map cases
1860 {
1861 MovePtr<tcu::TestCaseGroup> mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap"));
1862
1863 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1864 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1865 {
1866 const CaseDef caseDef{
1867 pipelineConstructionType, // PipelineConstructionType pipelineConstructionType;
1868 testCase[caseNdx].viewType, // VkImageViewType viewType;
1869 testCase[caseNdx].baselineSize, // IVec4 imageSizeHint;
1870 format[formatNdx], // VkFormat colorFormat;
1871 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1872 allocationKind // AllocationKind allocationKind;
1873 };
1874 addFunctionCaseWithPrograms(
1875 mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]),
1876 checkSupportRenderToMipMaps, initPrograms, testRenderToMipMaps, caseDef);
1877 }
1878 imageGroup->addChild(mipmapGroup.release());
1879 }
1880
1881 group->addChild(imageGroup.release());
1882 }
1883 }
1884
1885 void addCoreRenderToImageTests(tcu::TestCaseGroup *group, PipelineConstructionType pipelineConstructionType)
1886 {
1887 addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_SUBALLOCATED);
1888 }
1889
1890 void addDedicatedAllocationRenderToImageTests(tcu::TestCaseGroup *group,
1891 PipelineConstructionType pipelineConstructionType)
1892 {
1893 addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_DEDICATED);
1894 }
1895
1896 } // namespace
1897
1898 tcu::TestCaseGroup *createRenderToImageTests(tcu::TestContext &testCtx,
1899 PipelineConstructionType pipelineConstructionType)
1900 {
1901 de::MovePtr<tcu::TestCaseGroup> renderToImageTests(new tcu::TestCaseGroup(testCtx, "render_to_image"));
1902
1903 // Core render to image tests
1904 renderToImageTests->addChild(createTestGroup(testCtx, "core", addCoreRenderToImageTests, pipelineConstructionType));
1905 // Render to image tests for dedicated memory allocation
1906 renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation",
1907 addDedicatedAllocationRenderToImageTests, pipelineConstructionType));
1908
1909 return renderToImageTests.release();
1910 }
1911
1912 } // namespace pipeline
1913 } // namespace vkt
1914