1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file vktPipelineRenderToImageTests.cpp
21 * \brief Render to image tests
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktPipelineRenderToImageTests.hpp"
25 #include "vktPipelineMakeUtil.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vktPipelineVertexUtil.hpp"
29 #include "vktTestGroupUtil.hpp"
30 #include "vkObjUtil.hpp"
31
32 #include "vkMemUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkRefUtil.hpp"
36 #include "vkBuilderUtil.hpp"
37 #include "vkPrograms.hpp"
38 #include "vkImageUtil.hpp"
39 #include "vkCmdUtil.hpp"
40
41 #include "tcuTextureUtil.hpp"
42 #include "tcuImageCompare.hpp"
43 #include "tcuTestLog.hpp"
44 #include "tcuPlatform.hpp"
45 #include "vkPlatform.hpp"
46
47 #include "deUniquePtr.hpp"
48 #include "deSharedPtr.hpp"
49
50 #include <string>
51 #include <vector>
52 #include <set>
53 #include <algorithm>
54
55 namespace vkt
56 {
57 namespace pipeline
58 {
59 namespace
60 {
61 using namespace vk;
62 using de::UniquePtr;
63 using de::MovePtr;
64 using de::SharedPtr;
65 using tcu::IVec3;
66 using tcu::Vec4;
67 using tcu::UVec4;
68 using tcu::IVec2;
69 using tcu::IVec4;
70 using tcu::BVec4;
71 using std::vector;
72
//! Shared, reference-counted ownership of a Vulkan image view handle.
typedef SharedPtr<Unique<VkImageView> > SharedPtrVkImageView;

enum Constants
{
	NUM_CUBE_FACES					= 6,	//!< Cube images always have 6 layers per cube; layer counts are rounded to multiples of this
	REFERENCE_COLOR_VALUE			= 125,	//!< Clear value and fragment-shader color scale for integer color formats
	REFERENCE_STENCIL_VALUE			= 42,	//!< Stencil reference value used by the stencil compare op
	MAX_SIZE						= -1,	//!< Should be queried at runtime and replaced with max possible value
	MAX_VERIFICATION_REGION_SIZE	= 32,	//!< Limit the checked area to a small size, especially for huge images
	MAX_VERIFICATION_REGION_DEPTH	= 8,

	// Bit masks of size components (1 = width, 2 = height, 4 = depth, 8 = layers).
	// NOTE(review): semantics inferred from names/values only -- confirm against
	// the test-case tables that consume them.
	MASK_W			= (1 | 0 | 0 | 0),
	MASK_W_LAYERS	= (1 | 0 | 0 | 8),
	MASK_WH			= (1 | 2 | 0 | 0),
	MASK_WH_LAYERS	= (1 | 2 | 0 | 8),
	MASK_WHD		= (1 | 2 | 4 | 0),
};

//! How device memory is bound to images/buffers.
enum AllocationKind
{
	ALLOCATION_KIND_SUBALLOCATED = 0,	//!< Sub-allocate from a larger allocation via the default allocator
	ALLOCATION_KIND_DEDICATED,			//!< One dedicated allocation per resource
};

//! Depth clear/reference value (far plane).
static const float REFERENCE_DEPTH_VALUE = 1.0f;

//! Per-subpass/per-slice colors; always indexed modulo DE_LENGTH_OF_ARRAY(COLOR_TABLE).
static const Vec4 COLOR_TABLE[] =
{
	Vec4(0.9f, 0.0f, 0.0f, 1.0f),
	Vec4(0.6f, 1.0f, 0.0f, 1.0f),
	Vec4(0.3f, 0.0f, 1.0f, 1.0f),
	Vec4(0.1f, 1.0f, 1.0f, 1.0f),
	Vec4(0.8f, 1.0f, 0.0f, 1.0f),
	Vec4(0.5f, 0.0f, 1.0f, 1.0f),
	Vec4(0.2f, 0.0f, 0.0f, 1.0f),
	Vec4(1.0f, 1.0f, 0.0f, 1.0f),
};

//! Parameters describing a single test case.
struct CaseDef
{
	PipelineConstructionType	pipelineConstructionType;	//!< Monolithic pipeline vs. pipeline-library construction
	VkImageViewType				viewType;					//!< Determines image type and create flags
	IVec4						imageSizeHint;				//!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE
	VkFormat					colorFormat;
	VkFormat					depthStencilFormat;			//! A depth/stencil format, or UNDEFINED if not used
	AllocationKind				allocationKind;
};
119
120 template<typename T>
makeSharedPtr(Move<T> move)121 inline SharedPtr<Unique<T> > makeSharedPtr (Move<T> move)
122 {
123 return SharedPtr<Unique<T> >(new Unique<T>(move));
124 }
125
126 template<typename T>
sizeInBytes(const vector<T> & vec)127 inline VkDeviceSize sizeInBytes (const vector<T>& vec)
128 {
129 return vec.size() * sizeof(vec[0]);
130 }
131
isCube(const VkImageViewType viewType)132 inline bool isCube (const VkImageViewType viewType)
133 {
134 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
135 }
136
product(const IVec4 & v)137 inline VkDeviceSize product (const IVec4& v)
138 {
139 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w();
140 }
141
//! Sum of all elements; zero for an empty vector.
template<typename T>
inline T sum (const vector<T>& v)
{
	T total = static_cast<T>(0);
	for (const T& item : v)
		total += item;
	return total;
}
150
151 template <typename T, int Size>
findIndexOfMaxComponent(const tcu::Vector<T,Size> & vec)152 int findIndexOfMaxComponent (const tcu::Vector<T, Size>& vec)
153 {
154 int index = 0;
155 T value = vec[0];
156
157 for (int i = 1; i < Size; ++i)
158 {
159 if (vec[i] > value)
160 {
161 index = i;
162 value = vec[i];
163 }
164 }
165
166 return index;
167 }
168
maxLayersOrDepth(const IVec4 & size)169 inline int maxLayersOrDepth (const IVec4& size)
170 {
171 // This is safe because 3D images must have layers (w) = 1
172 return deMax32(size.z(), size.w());
173 }
174
bindBuffer(const InstanceInterface & vki,const DeviceInterface & vkd,const VkPhysicalDevice & physDevice,const VkDevice device,const VkBuffer & buffer,const MemoryRequirement requirement,Allocator & allocator,AllocationKind allocationKind)175 de::MovePtr<Allocation> bindBuffer (const InstanceInterface& vki,
176 const DeviceInterface& vkd,
177 const VkPhysicalDevice& physDevice,
178 const VkDevice device,
179 const VkBuffer& buffer,
180 const MemoryRequirement requirement,
181 Allocator& allocator,
182 AllocationKind allocationKind)
183 {
184 switch (allocationKind)
185 {
186 case ALLOCATION_KIND_SUBALLOCATED:
187 {
188 return vk::bindBuffer(vkd, device, allocator, buffer, requirement);
189 }
190
191 case ALLOCATION_KIND_DEDICATED:
192 {
193 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, requirement);
194 }
195
196 default:
197 {
198 TCU_THROW(InternalError, "Invalid allocation kind");
199 }
200 }
201 }
202
bindImage(const InstanceInterface & vki,const DeviceInterface & vkd,const VkPhysicalDevice & physDevice,const VkDevice device,const VkImage & image,const MemoryRequirement requirement,Allocator & allocator,AllocationKind allocationKind)203 de::MovePtr<Allocation> bindImage (const InstanceInterface& vki,
204 const DeviceInterface& vkd,
205 const VkPhysicalDevice& physDevice,
206 const VkDevice device,
207 const VkImage& image,
208 const MemoryRequirement requirement,
209 Allocator& allocator,
210 AllocationKind allocationKind)
211 {
212 switch (allocationKind)
213 {
214 case ALLOCATION_KIND_SUBALLOCATED:
215 {
216 return vk::bindImage(vkd, device, allocator, image, requirement);
217 }
218
219 case ALLOCATION_KIND_DEDICATED:
220 {
221 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement);
222 }
223
224 default:
225 {
226 TCU_THROW(InternalError, "Invalid allocation kind");
227 }
228 }
229 }
230
// This is very test specific, so be careful if you want to reuse this code.
//
// Configures and builds one graphics pipeline for the given subpass:
// - two interleaved vec4 vertex attributes (position at offset 0, color after it),
// - fixed viewport/scissor covering 'renderSize',
// - optional depth test (depth writes always off) and stencil test (EQUAL against
//   REFERENCE_STENCIL_VALUE, all ops KEEP, so stencil is read-only),
// - a single color attachment with blending disabled.
void preparePipelineWrapper(GraphicsPipelineWrapper&	gpw,
							const VkPipeline			basePipeline,		// for derivatives
							const VkPipelineLayout		pipelineLayout,
							const VkRenderPass			renderPass,
							const VkShaderModule		vertexModule,
							const VkShaderModule		fragmentModule,
							const IVec2&				renderSize,
							const VkPrimitiveTopology	topology,
							const deUint32				subpass,
							const bool					useDepth,
							const bool					useStencil)
{
	// One binding holding Vertex4RGBA (position + color), consumed per-vertex.
	const VkVertexInputBindingDescription vertexInputBindingDescription =
	{
		0u,								// uint32_t				binding;
		sizeof(Vertex4RGBA),			// uint32_t				stride;
		VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate;
	};

	const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
	{
		{
			0u,								// uint32_t	location;
			0u,								// uint32_t	binding;
			VK_FORMAT_R32G32B32A32_SFLOAT,	// VkFormat	format;
			0u,								// uint32_t	offset;
		},
		{
			1u,								// uint32_t	location;
			0u,								// uint32_t	binding;
			VK_FORMAT_R32G32B32A32_SFLOAT,	// VkFormat	format;
			sizeof(Vec4),					// uint32_t	offset;	// color follows position
		}
	};

	const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
		DE_NULL,													// const void*								pNext;
		(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
		1u,															// uint32_t									vertexBindingDescriptionCount;
		&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
		DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions),		// uint32_t									vertexAttributeDescriptionCount;
		vertexInputAttributeDescriptions,							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
	};

	// Static viewport/scissor covering the whole render area.
	const std::vector<VkViewport>	viewport	{ makeViewport(renderSize) };
	const std::vector<VkRect2D>		scissor		{ makeRect2D(renderSize) };

	// Stencil never writes (all ops KEEP); it only gates fragments whose stored
	// stencil equals REFERENCE_STENCIL_VALUE.
	const VkStencilOpState stencilOpState = makeStencilOpState(
		VK_STENCIL_OP_KEEP,									// stencil fail
		VK_STENCIL_OP_KEEP,									// depth & stencil pass
		VK_STENCIL_OP_KEEP,									// depth only fail
		VK_COMPARE_OP_EQUAL,								// compare op
		~0u,												// compare mask
		~0u,												// write mask
		static_cast<deUint32>(REFERENCE_STENCIL_VALUE));	// reference

	// Depth test is optional; depth writes are always disabled.
	VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,	// VkStructureType							sType;
		DE_NULL,													// const void*								pNext;
		(VkPipelineDepthStencilStateCreateFlags)0,					// VkPipelineDepthStencilStateCreateFlags	flags;
		useDepth,													// VkBool32									depthTestEnable;
		VK_FALSE,													// VkBool32									depthWriteEnable;
		VK_COMPARE_OP_LESS,											// VkCompareOp								depthCompareOp;
		VK_FALSE,													// VkBool32									depthBoundsTestEnable;
		useStencil,													// VkBool32									stencilTestEnable;
		stencilOpState,												// VkStencilOpState							front;
		stencilOpState,												// VkStencilOpState							back;
		0.0f,														// float									minDepthBounds;
		1.0f,														// float									maxDepthBounds;
	};

	const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
	// Number of blend attachments must equal the number of color attachments during any subpass.
	const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
	{
		VK_FALSE,				// VkBool32					blendEnable;
		VK_BLEND_FACTOR_ONE,	// VkBlendFactor			srcColorBlendFactor;
		VK_BLEND_FACTOR_ZERO,	// VkBlendFactor			dstColorBlendFactor;
		VK_BLEND_OP_ADD,		// VkBlendOp				colorBlendOp;
		VK_BLEND_FACTOR_ONE,	// VkBlendFactor			srcAlphaBlendFactor;
		VK_BLEND_FACTOR_ZERO,	// VkBlendFactor			dstAlphaBlendFactor;
		VK_BLEND_OP_ADD,		// VkBlendOp				alphaBlendOp;
		colorComponentsAll,		// VkColorComponentFlags	colorWriteMask;
	};

	const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
	{
		VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,	// VkStructureType								sType;
		DE_NULL,													// const void*									pNext;
		(VkPipelineColorBlendStateCreateFlags)0,					// VkPipelineColorBlendStateCreateFlags			flags;
		VK_FALSE,													// VkBool32										logicOpEnable;
		VK_LOGIC_OP_COPY,											// VkLogicOp									logicOp;
		1u,															// deUint32										attachmentCount;
		&pipelineColorBlendAttachmentState,							// const VkPipelineColorBlendAttachmentState*	pAttachments;
		{ 0.0f, 0.0f, 0.0f, 0.0f },									// float										blendConstants[4];
	};

	// Assemble the pipeline; basePipeline (may be NULL) enables pipeline
	// derivatives, -1 means no base pipeline index is used.
	gpw.setDefaultTopology(topology)
	   .setDefaultRasterizationState()
	   .setDefaultMultisampleState()
	   .setupVertexInputState(&vertexInputStateInfo)
	   .setupPreRasterizationShaderState(viewport,
										 scissor,
										 pipelineLayout,
										 renderPass,
										 subpass,
										 vertexModule)
	   .setupFragmentShaderState(pipelineLayout, renderPass, subpass, fragmentModule, &pipelineDepthStencilStateInfo)
	   .setupFragmentOutputState(renderPass, subpass, &pipelineColorBlendStateInfo)
	   .setMonolithicPipelineLayout(pipelineLayout)
	   .buildPipeline(DE_NULL, basePipeline, -1);
}
347
//! Make a render pass with one subpass per color attachment and depth/stencil attachment (if used).
//!
//! Attachment layout: color attachments occupy indices [0, numLayers), followed
//! by depth/stencil attachments at [numLayers, 2*numLayers) when
//! depthStencilFormat != UNDEFINED. Subpass i renders to color attachment i and
//! depth/stencil attachment i + numLayers. No explicit subpass dependencies are
//! declared. This ordering must match the framebuffer attachment order.
Move<VkRenderPass> makeRenderPass (const DeviceInterface&	vk,
								   const VkDevice			device,
								   const VkFormat			colorFormat,
								   const VkFormat			depthStencilFormat,
								   const deUint32			numLayers,
								   const VkImageLayout		initialColorImageLayout			= VK_IMAGE_LAYOUT_UNDEFINED,
								   const VkImageLayout		initialDepthStencilImageLayout	= VK_IMAGE_LAYOUT_UNDEFINED)
{
	// Color attachments: cleared on load, stored for later verification.
	const VkAttachmentDescription colorAttachmentDescription =
	{
		(VkAttachmentDescriptionFlags)0,			// VkAttachmentDescriptionFlags	flags;
		colorFormat,								// VkFormat						format;
		VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits		samples;
		VK_ATTACHMENT_LOAD_OP_CLEAR,				// VkAttachmentLoadOp			loadOp;
		VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp			storeOp;
		VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp			stencilLoadOp;
		VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp			stencilStoreOp;
		initialColorImageLayout,					// VkImageLayout				initialLayout;
		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout				finalLayout;
	};
	vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription);

	// Depth/stencil attachments: cleared on load, contents not needed afterwards.
	const VkAttachmentDescription depthStencilAttachmentDescription =
	{
		(VkAttachmentDescriptionFlags)0,					// VkAttachmentDescriptionFlags	flags;
		depthStencilFormat,									// VkFormat						format;
		VK_SAMPLE_COUNT_1_BIT,								// VkSampleCountFlagBits		samples;
		VK_ATTACHMENT_LOAD_OP_CLEAR,						// VkAttachmentLoadOp			loadOp;
		VK_ATTACHMENT_STORE_OP_DONT_CARE,					// VkAttachmentStoreOp			storeOp;
		VK_ATTACHMENT_LOAD_OP_CLEAR,						// VkAttachmentLoadOp			stencilLoadOp;
		VK_ATTACHMENT_STORE_OP_DONT_CARE,					// VkAttachmentStoreOp			stencilStoreOp;
		initialDepthStencilImageLayout,						// VkImageLayout				initialLayout;
		VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,	// VkImageLayout				finalLayout;
	};

	if (depthStencilFormat != VK_FORMAT_UNDEFINED)
		attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription);

	// Create a subpass for each attachment (each attachement is a layer of an arrayed image).
	// References live in vectors so the subpass descriptions can point into them.
	vector<VkAttachmentReference>	colorAttachmentReferences		(numLayers);
	vector<VkAttachmentReference>	depthStencilAttachmentReferences(numLayers);
	vector<VkSubpassDescription>	subpasses;

	// Ordering here must match the framebuffer attachments
	for (deUint32 i = 0; i < numLayers; ++i)
	{
		const VkAttachmentReference attachmentRef =
		{
			i,											// deUint32			attachment;
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout	layout;
		};
		// Depth/stencil attachments are appended after all color attachments,
		// hence the i + numLayers offset. Computed even when unused (only wired
		// into the subpass when depthStencilFormat is defined).
		const VkAttachmentReference depthStencilAttachmentRef =
		{
			i + numLayers,										// deUint32			attachment;
			VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL	// VkImageLayout	layout;
		};

		colorAttachmentReferences[i]		= attachmentRef;
		depthStencilAttachmentReferences[i]	= depthStencilAttachmentRef;

		const VkAttachmentReference* pDepthStencilAttachment = (depthStencilFormat != VK_FORMAT_UNDEFINED ? &depthStencilAttachmentReferences[i] : DE_NULL);
		const VkSubpassDescription subpassDescription =
		{
			(VkSubpassDescriptionFlags)0,		// VkSubpassDescriptionFlags		flags;
			VK_PIPELINE_BIND_POINT_GRAPHICS,	// VkPipelineBindPoint				pipelineBindPoint;
			0u,									// deUint32							inputAttachmentCount;
			DE_NULL,							// const VkAttachmentReference*		pInputAttachments;
			1u,									// deUint32							colorAttachmentCount;
			&colorAttachmentReferences[i],		// const VkAttachmentReference*		pColorAttachments;
			DE_NULL,							// const VkAttachmentReference*		pResolveAttachments;
			pDepthStencilAttachment,			// const VkAttachmentReference*		pDepthStencilAttachment;
			0u,									// deUint32							preserveAttachmentCount;
			DE_NULL								// const deUint32*					pPreserveAttachments;
		};
		subpasses.push_back(subpassDescription);
	}

	const VkRenderPassCreateInfo renderPassInfo =
	{
		VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureType					sType;
		DE_NULL,												// const void*						pNext;
		(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags;
		static_cast<deUint32>(attachmentDescriptions.size()),	// deUint32							attachmentCount;
		&attachmentDescriptions[0],								// const VkAttachmentDescription*	pAttachments;
		static_cast<deUint32>(subpasses.size()),				// deUint32							subpassCount;
		&subpasses[0],											// const VkSubpassDescription*		pSubpasses;
		0u,														// deUint32							dependencyCount;
		DE_NULL													// const VkSubpassDependency*		pDependencies;
	};

	return createRenderPass(vk, device, &renderPassInfo);
}
441
//! Create an image with the given parameters (no memory is bound here).
//! Always single-sampled, optimal tiling, exclusive sharing, and an
//! UNDEFINED initial layout.
Move<VkImage> makeImage (const DeviceInterface&	vk,
						 const VkDevice			device,
						 VkImageCreateFlags		flags,
						 VkImageType			imageType,
						 const VkFormat			format,
						 const IVec3&			size,
						 const deUint32			numMipLevels,
						 const deUint32			numLayers,
						 const VkImageUsageFlags usage)
{
	const VkImageCreateInfo imageParams =
	{
		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
		DE_NULL,								// const void*				pNext;
		flags,									// VkImageCreateFlags		flags;
		imageType,								// VkImageType				imageType;
		format,									// VkFormat					format;
		makeExtent3D(size),						// VkExtent3D				extent;
		numMipLevels,							// deUint32					mipLevels;
		numLayers,								// deUint32					arrayLayers;
		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
		usage,									// VkImageUsageFlags		usage;
		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
		0u,										// deUint32					queueFamilyIndexCount;
		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
		VK_IMAGE_LAYOUT_UNDEFINED,				// VkImageLayout			initialLayout;
	};
	return createImage(vk, device, &imageParams);
}
472
makeColorSubresourceRange(const int baseArrayLayer,const int layerCount)473 inline VkImageSubresourceRange makeColorSubresourceRange (const int baseArrayLayer, const int layerCount)
474 {
475 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<deUint32>(baseArrayLayer), static_cast<deUint32>(layerCount));
476 }
477
478 //! Get a reference clear value based on color format.
getClearValue(const VkFormat format)479 VkClearValue getClearValue (const VkFormat format)
480 {
481 if (isUintFormat(format) || isIntFormat(format))
482 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE);
483 else
484 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f);
485 }
486
//! GLSL type name for a color output: "float"/"int"/"uint" for one component,
//! otherwise "vecN"/"ivecN"/"uvecN".
std::string getColorFormatStr (const int numComponents, const bool isUint, const bool isSint)
{
	if (numComponents == 1)
		return isUint ? "uint" : (isSint ? "int" : "float");

	const std::string prefix = isUint ? "u" : (isSint ? "i" : "");
	return prefix + "vec" + std::to_string(numComponents);
}
497
498 //! A half-viewport quad. Use with TRIANGLE_STRIP topology.
genFullQuadVertices(const int subpassCount)499 vector<Vertex4RGBA> genFullQuadVertices (const int subpassCount)
500 {
501 vector<Vertex4RGBA> vectorData;
502 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
503 {
504 Vertex4RGBA data =
505 {
506 Vec4(0.0f, -1.0f, 0.0f, 1.0f),
507 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)],
508 };
509 vectorData.push_back(data);
510 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
511 vectorData.push_back(data);
512 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f);
513 vectorData.push_back(data);
514 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f);
515 vectorData.push_back(data);
516 }
517 return vectorData;
518 }
519
getImageType(const VkImageViewType viewType)520 VkImageType getImageType (const VkImageViewType viewType)
521 {
522 switch (viewType)
523 {
524 case VK_IMAGE_VIEW_TYPE_1D:
525 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
526 return VK_IMAGE_TYPE_1D;
527
528 case VK_IMAGE_VIEW_TYPE_2D:
529 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
530 case VK_IMAGE_VIEW_TYPE_CUBE:
531 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
532 return VK_IMAGE_TYPE_2D;
533
534 case VK_IMAGE_VIEW_TYPE_3D:
535 return VK_IMAGE_TYPE_3D;
536
537 default:
538 DE_ASSERT(0);
539 return VK_IMAGE_TYPE_LAST;
540 }
541 }
542
543 //! ImageViewType for accessing a single layer/slice of an image
getImageViewSliceType(const VkImageViewType viewType)544 VkImageViewType getImageViewSliceType (const VkImageViewType viewType)
545 {
546 switch (viewType)
547 {
548 case VK_IMAGE_VIEW_TYPE_1D:
549 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
550 return VK_IMAGE_VIEW_TYPE_1D;
551
552 case VK_IMAGE_VIEW_TYPE_2D:
553 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
554 case VK_IMAGE_VIEW_TYPE_CUBE:
555 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
556 case VK_IMAGE_VIEW_TYPE_3D:
557 return VK_IMAGE_VIEW_TYPE_2D;
558
559 default:
560 DE_ASSERT(0);
561 return VK_IMAGE_VIEW_TYPE_LAST;
562 }
563 }
564
getImageCreateFlags(const VkImageViewType viewType)565 VkImageCreateFlags getImageCreateFlags (const VkImageViewType viewType)
566 {
567 VkImageCreateFlags flags = (VkImageCreateFlags)0;
568
569 if (viewType == VK_IMAGE_VIEW_TYPE_3D) flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
570 if (isCube(viewType)) flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
571
572 return flags;
573 }
574
generateExpectedImage(const tcu::PixelBufferAccess & outputImage,const IVec2 & renderSize,const int colorDepthOffset)575 void generateExpectedImage (const tcu::PixelBufferAccess& outputImage, const IVec2& renderSize, const int colorDepthOffset)
576 {
577 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type);
578 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
579 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat()));
580
581 if (isInt)
582 tcu::clear(outputImage, IVec4(clearValue.color.int32));
583 else
584 tcu::clear(outputImage, Vec4(clearValue.color.float32));
585
586 for (int z = 0; z < outputImage.getDepth(); ++z)
587 {
588 const Vec4& setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)];
589 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<deInt32>();
590
591 for (int y = 0; y < renderSize.y(); ++y)
592 for (int x = renderSize.x()/2; x < renderSize.x(); ++x)
593 {
594 if (isInt)
595 outputImage.setPixel(setColorInt, x, y, z);
596 else
597 outputImage.setPixel(setColor, x, y, z);
598 }
599 }
600 }
601
//! Resolve the symbolic size hint into a concrete image size, clamped to the
//! minimum limits guaranteed by the Vulkan specification.
IVec4 getMaxImageSize (const VkImageViewType viewType, const IVec4& sizeHint)
{
	// Replace MAX_SIZE components with the spec-guaranteed limits
	// (4096 for width/height, 256 for depth/layers).
	IVec4 size = sizeHint;
	if (size.x() == MAX_SIZE) size.x() = 4096;
	if (size.y() == MAX_SIZE) size.y() = 4096;
	if (size.z() == MAX_SIZE) size.z() = 256;
	if (size.w() == MAX_SIZE) size.w() = 256;

	switch (viewType)
	{
		case VK_IMAGE_VIEW_TYPE_1D:
		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
			size.x() = deMin32(4096, size.x());
			break;

		case VK_IMAGE_VIEW_TYPE_2D:
		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
			size.x() = deMin32(4096, size.x());
			size.y() = deMin32(4096, size.y());
			break;

		case VK_IMAGE_VIEW_TYPE_3D:
			size.x() = deMin32(256, size.x());
			size.y() = deMin32(256, size.y());
			break;

		case VK_IMAGE_VIEW_TYPE_CUBE:
		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
			size.x() = deMin32(4096, size.x());
			size.y() = deMin32(4096, size.y());
			size.w() = deMin32(252, size.w());
			size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES);	// round down to 6 faces
			break;

		default:
			DE_ASSERT(0);
			return IVec4();
	}

	return size;
}
644
getMemoryTypeNdx(Context & context,const CaseDef & caseDef)645 deUint32 getMemoryTypeNdx (Context& context, const CaseDef& caseDef)
646 {
647 const DeviceInterface& vk = context.getDeviceInterface();
648 const InstanceInterface& vki = context.getInstanceInterface();
649 const VkDevice device = context.getDevice();
650 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
651
652 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
653 Move<VkImage> colorImage;
654 VkMemoryRequirements memReqs;
655
656 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
657 const IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
658
659 //create image, don't bind any memory to it
660 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
661 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
662
663 vk.getImageMemoryRequirements(device, *colorImage, &memReqs);
664 return selectMatchingMemoryType(memoryProperties, memReqs.memoryTypeBits, MemoryRequirement::Any);
665 }
666
getMaxDeviceHeapSize(Context & context,const CaseDef & caseDef)667 VkDeviceSize getMaxDeviceHeapSize (Context& context, const CaseDef& caseDef)
668 {
669 const InstanceInterface& vki = context.getInstanceInterface();
670 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
671 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
672 const deUint32 memoryTypeNdx = getMemoryTypeNdx (context, caseDef);
673
674 return memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size;
675 }
676
677 //! Get a smaller image size. Returns a vector of zeroes, if it can't reduce more.
getReducedImageSize(const CaseDef & caseDef,IVec4 size)678 IVec4 getReducedImageSize (const CaseDef& caseDef, IVec4 size)
679 {
680 const int maxIndex = findIndexOfMaxComponent(size);
681 const int reducedSize = size[maxIndex] >> 1;
682
683 switch (caseDef.viewType)
684 {
685 case VK_IMAGE_VIEW_TYPE_CUBE:
686 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
687 if (maxIndex < 2)
688 size.x() = size.y() = reducedSize;
689 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES)
690 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6
691 else
692 size = IVec4(0);
693 break;
694
695 default:
696 size[maxIndex] = reducedSize;
697 break;
698 }
699
700 if (reducedSize == 0)
701 size = IVec4(0);
702
703 return size;
704 }
705
isDepthStencilFormatSupported(const InstanceInterface & vki,const VkPhysicalDevice physDevice,const VkFormat format)706 bool isDepthStencilFormatSupported (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat format)
707 {
708 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format);
709 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
710 }
711
getFormatAspectFlags(const VkFormat format)712 VkImageAspectFlags getFormatAspectFlags (const VkFormat format)
713 {
714 if (format == VK_FORMAT_UNDEFINED)
715 return 0;
716
717 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order;
718
719 switch (order)
720 {
721 case tcu::TextureFormat::DS: return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
722 case tcu::TextureFormat::D: return VK_IMAGE_ASPECT_DEPTH_BIT;
723 case tcu::TextureFormat::S: return VK_IMAGE_ASPECT_STENCIL_BIT;
724 default: return VK_IMAGE_ASPECT_COLOR_BIT;
725 }
726 }
727
//! Register the vertex and fragment shaders shared by all cases.
//! The vertex shader passes position and color through; the fragment shader
//! writes the interpolated color narrowed to the color format's component
//! count, scaled by REFERENCE_COLOR_VALUE for integer formats.
void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
{
	const int	numComponents	= getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order);
	const bool	isUint			= isUintFormat(caseDef.colorFormat);
	const bool	isSint			= isIntFormat(caseDef.colorFormat);

	// Vertex shader
	{
		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
			<< "\n"
			<< "layout(location = 0) in vec4 in_position;\n"
			<< "layout(location = 1) in vec4 in_color;\n"
			<< "layout(location = 0) out vec4 out_color;\n"
			<< "\n"
			<< "out gl_PerVertex {\n"
			<< " vec4 gl_Position;\n"
			<< "};\n"
			<< "\n"
			<< "void main(void)\n"
			<< "{\n"
			<< " gl_Position = in_position;\n"
			<< " out_color = in_color;\n"
			<< "}\n";

		programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
	}

	// Fragment shader
	{
		std::ostringstream colorValue;
		colorValue << REFERENCE_COLOR_VALUE;
		// e.g. "uvec4" / "ivec2" / "float" depending on the color format.
		const std::string colorFormat	= getColorFormatStr(numComponents, isUint, isSint);
		// Integer outputs are scaled so they match the integer reference value.
		const std::string colorInteger	= (isUint || isSint ? " * "+colorFormat+"("+colorValue.str()+")" :"");

		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
			<< "\n"
			<< "layout(location = 0) in vec4 in_color;\n"
			<< "layout(location = 0) out " << colorFormat << " o_color;\n"
			<< "\n"
			<< "void main(void)\n"
			<< "{\n"
			<< " o_color = " << colorFormat << "("
			<< (numComponents == 1 ? "in_color.r"   :
				numComponents == 2 ? "in_color.rg"  :
				numComponents == 3 ? "in_color.rgb" : "in_color")
			<< colorInteger
			<< ");\n"
			<< "}\n";

		programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
	}
}
782
783 //! See testAttachmentSize() description
testWithSizeReduction(Context & context,const CaseDef & caseDef)784 tcu::TestStatus testWithSizeReduction (Context& context, const CaseDef& caseDef)
785 {
786 const DeviceInterface& vk = context.getDeviceInterface();
787 const InstanceInterface& vki = context.getInstanceInterface();
788 const VkDevice device = context.getDevice();
789 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
790 const VkQueue queue = context.getUniversalQueue();
791 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
792 Allocator& allocator = context.getDefaultAllocator();
793
794 // The memory might be too small to allocate a largest possible attachment, so try to account for that.
795 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
796
797 IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
798 VkDeviceSize colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
799 VkDeviceSize depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
800
801 const VkDeviceSize reserveForChecking = 500ull * 1024ull; //left 512KB
802 const float additionalMemory = 1.15f; //left some free memory on device (15%)
803 VkDeviceSize neededMemory = static_cast<VkDeviceSize>(static_cast<float>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
804 VkDeviceSize maxMemory = getMaxDeviceHeapSize(context, caseDef) >> 2;
805
806 tcu::PlatformMemoryLimits memoryLimits;
807 context.getTestContext().getPlatform().getMemoryLimits(memoryLimits);
808 maxMemory = std::min(maxMemory, VkDeviceSize(memoryLimits.totalSystemMemory));
809
810 const VkDeviceSize deviceMemoryBudget = std::min(neededMemory, maxMemory);
811 bool allocationPossible = false;
812
813 // Keep reducing the size, if image size is too big
814 while (neededMemory > deviceMemoryBudget)
815 {
816 imageSize = getReducedImageSize(caseDef, imageSize);
817
818 if (imageSize == IVec4())
819 return tcu::TestStatus::fail("Couldn't create an image with required size");
820
821 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
822 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
823 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory);
824 }
825
826 // Keep reducing the size, if allocation return out of any memory
827 while (!allocationPossible)
828 {
829 VkDeviceMemory object = 0;
830 const VkMemoryAllocateInfo allocateInfo =
831 {
832 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, //VkStructureType sType;
833 DE_NULL, //const void* pNext;
834 neededMemory, //VkDeviceSize allocationSize;
835 getMemoryTypeNdx(context, caseDef) //deUint32 memoryTypeIndex;
836 };
837
838 const VkResult result = vk.allocateMemory(device, &allocateInfo, DE_NULL, &object);
839
840 if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
841 {
842 imageSize = getReducedImageSize(caseDef, imageSize);
843
844 if (imageSize == IVec4())
845 return tcu::TestStatus::fail("Couldn't create an image with required size");
846
847 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
848 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
849 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
850 }
851 else if (VK_SUCCESS != result)
852 {
853 return tcu::TestStatus::fail("Couldn't allocate memory");
854 }
855 else
856 {
857 //free memory using Move pointer
858 Move<VkDeviceMemory> memoryAllocated (check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device, DE_NULL));
859 allocationPossible = true;
860 }
861 }
862
863 context.getTestContext().getLog()
864 << tcu::TestLog::Message << "Using an image with size (width, height, depth, layers) = " << imageSize << tcu::TestLog::EndMessage;
865
866 // "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
867 const deInt32 numSlices = maxLayersOrDepth(imageSize);
868
869 // Determine the verification bounds. The checked region will be in the center of the rendered image
870 const IVec4 checkSize = tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE,
871 MAX_VERIFICATION_REGION_SIZE,
872 MAX_VERIFICATION_REGION_DEPTH,
873 MAX_VERIFICATION_REGION_DEPTH));
874 const IVec4 checkOffset = (imageSize - checkSize) / 2;
875
876 // Only make enough space for the check region
877 const VkDeviceSize colorBufferSize = product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
878 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
879 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
880
881 {
882 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
883 flushAlloc(vk, device, *colorBufferAlloc);
884 }
885
886 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
887 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
888 const Unique<VkRenderPass> renderPass (makeRenderPass (vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
889 (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
890 : VK_IMAGE_LAYOUT_UNDEFINED));
891 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
892 vector<GraphicsPipelineWrapper> pipelines;
893
894 Move<VkImage> colorImage;
895 MovePtr<Allocation> colorImageAlloc;
896 vector<SharedPtrVkImageView> colorAttachments;
897 Move<VkImage> depthStencilImage;
898 MovePtr<Allocation> depthStencilImageAlloc;
899 vector<SharedPtrVkImageView> depthStencilAttachments;
900 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
901 Move<VkBuffer> vertexBuffer;
902 MovePtr<Allocation> vertexBufferAlloc;
903 Move<VkFramebuffer> framebuffer;
904
905 // Create a color image
906 {
907 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
908
909 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
910 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
911 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
912 }
913
914 // Create a depth/stencil image (always a 2D image, optionally layered)
915 if (useDepthStencil)
916 {
917 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
918
919 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
920 IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, imageUsage);
921 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
922 }
923
924 // Create a vertex buffer
925 {
926 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
927 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
928
929 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
930 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
931
932 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
933 flushAlloc(vk, device, *vertexBufferAlloc);
934 }
935
936 // Prepare color image upfront for rendering to individual slices. 3D slices aren't separate subresources, so they shouldn't be transitioned
937 // during each subpass like array layers.
938 if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
939 {
940 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
941 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
942
943 beginCommandBuffer(vk, *cmdBuffer);
944
945 const VkImageMemoryBarrier imageBarrier =
946 {
947 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
948 DE_NULL, // const void* pNext;
949 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
950 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
951 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
952 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
953 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
954 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
955 *colorImage, // VkImage image;
956 { // VkImageSubresourceRange subresourceRange;
957 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
958 0u, // uint32_t baseMipLevel;
959 1u, // uint32_t levelCount;
960 0u, // uint32_t baseArrayLayer;
961 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
962 }
963 };
964
965 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u,
966 0u, DE_NULL, 0u, DE_NULL, 1u, &imageBarrier);
967
968 endCommandBuffer(vk, *cmdBuffer);
969 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
970 }
971
972 // For each image layer or slice (3D), create an attachment and a pipeline
973 {
974 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
975 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
976 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
977 VkPipeline basePipeline = DE_NULL;
978
979 // Color attachments are first in the framebuffer
980 pipelines.reserve(numSlices);
981 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
982 {
983 colorAttachments.push_back(makeSharedPtr(
984 makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
985 attachmentHandles.push_back(**colorAttachments.back());
986
987 #ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
988 // We also have to create pipelines for each subpass
989 pipelines.emplace_back(vk, device, caseDef.pipelineConstructionType, (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
990 : VK_PIPELINE_CREATE_DERIVATIVE_BIT));
991 #else
992 pipelines.emplace_back(vk, device, caseDef.pipelineConstructionType, 0u);
993 #endif // CTS_USES_VULKANSC
994 preparePipelineWrapper(pipelines.back(), basePipeline, *pipelineLayout, *renderPass, *vertexModule, *fragmentModule,
995 imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, static_cast<deUint32>(subpassNdx), useDepth, useStencil);
996
997 basePipeline = pipelines.front().getPipeline();
998 }
999
1000 // Then D/S attachments, if any
1001 if (useDepthStencil)
1002 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1003 {
1004 depthStencilAttachments.push_back(makeSharedPtr(
1005 makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat, makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
1006 attachmentHandles.push_back(**depthStencilAttachments.back());
1007 }
1008 }
1009
1010 framebuffer = makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0], static_cast<deUint32>(imageSize.x()), static_cast<deUint32>(imageSize.y()));
1011
1012 {
1013 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1014 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1015
1016 beginCommandBuffer(vk, *cmdBuffer);
1017 {
1018 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1019
1020 if (useDepthStencil)
1021 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1022
1023 const VkDeviceSize vertexBufferOffset = 0ull;
1024
1025 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, imageSize.x(), imageSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1026 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1027 }
1028
1029 // Draw
1030 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1031 {
1032 if (subpassNdx != 0)
1033 vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1034
1035 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines[subpassNdx].getPipeline());
1036 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1037 }
1038
1039 endRenderPass(vk, *cmdBuffer);
1040
1041 // Copy colorImage -> host visible colorBuffer
1042 {
1043 const VkImageMemoryBarrier imageBarriers[] =
1044 {
1045 {
1046 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1047 DE_NULL, // const void* pNext;
1048 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags outputMask;
1049 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags inputMask;
1050 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1051 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1052 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1053 VK_QUEUE_FAMILY_IGNORED, // deUint32 destQueueFamilyIndex;
1054 *colorImage, // VkImage image;
1055 makeColorSubresourceRange(0, imageSize.w()) // VkImageSubresourceRange subresourceRange;
1056 }
1057 };
1058
1059 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1060 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1061
1062 // Copy the checked region rather than the whole image
1063 const VkImageSubresourceLayers subresource =
1064 {
1065 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1066 0u, // uint32_t mipLevel;
1067 static_cast<deUint32>(checkOffset.w()), // uint32_t baseArrayLayer;
1068 static_cast<deUint32>(checkSize.w()), // uint32_t layerCount;
1069 };
1070
1071 const VkBufferImageCopy region =
1072 {
1073 0ull, // VkDeviceSize bufferOffset;
1074 0u, // uint32_t bufferRowLength;
1075 0u, // uint32_t bufferImageHeight;
1076 subresource, // VkImageSubresourceLayers imageSubresource;
1077 makeOffset3D(checkOffset.x(), checkOffset.y(), checkOffset.z()), // VkOffset3D imageOffset;
1078 makeExtent3D(checkSize.swizzle(0, 1, 2)), // VkExtent3D imageExtent;
1079 };
1080
1081 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ®ion);
1082
1083 const VkBufferMemoryBarrier bufferBarriers[] =
1084 {
1085 {
1086 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1087 DE_NULL, // const void* pNext;
1088 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1089 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1090 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1091 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1092 *colorBuffer, // VkBuffer buffer;
1093 0ull, // VkDeviceSize offset;
1094 VK_WHOLE_SIZE, // VkDeviceSize size;
1095 },
1096 };
1097
1098 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1099 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1100 }
1101
1102 endCommandBuffer(vk, *cmdBuffer);
1103 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1104 }
1105
1106 // Verify results
1107 {
1108 invalidateAlloc(vk, device, *colorBufferAlloc);
1109
1110 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1111 const int checkDepth = maxLayersOrDepth(checkSize);
1112 const int depthOffset = maxLayersOrDepth(checkOffset);
1113 const tcu::ConstPixelBufferAccess resultImage (format, checkSize.x(), checkSize.y(), checkDepth, colorBufferAlloc->getHostPtr());
1114 tcu::TextureLevel textureLevel (format, checkSize.x(), checkSize.y(), checkDepth);
1115 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1116 bool ok = false;
1117
1118 generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);
1119
1120 if (isFloatFormat(caseDef.colorFormat))
1121 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1122 else
1123 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1124
1125 return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1126 }
1127 }
1128
checkImageViewTypeRequirements(Context & context,const VkImageViewType viewType)1129 void checkImageViewTypeRequirements (Context& context, const VkImageViewType viewType)
1130 {
1131 #ifndef CTS_USES_VULKANSC
1132 if (viewType == VK_IMAGE_VIEW_TYPE_3D)
1133 {
1134 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1135 !context.getPortabilitySubsetFeatures().imageView2DOn3DImage)
1136 {
1137 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Implementation does not support 2D or 2D array image view to be created on a 3D VkImage");
1138 }
1139
1140 context.requireDeviceFunctionality("VK_KHR_maintenance1");
1141 }
1142 #endif // CTS_USES_VULKANSC
1143
1144 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1145 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1146 }
1147
checkSupportAttachmentSize(Context & context,const CaseDef caseDef)1148 void checkSupportAttachmentSize (Context& context, const CaseDef caseDef)
1149 {
1150 checkImageViewTypeRequirements(context, caseDef.viewType);
1151
1152 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1153 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1154
1155 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED && !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.depthStencilFormat))
1156 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1157
1158 checkPipelineLibraryRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.pipelineConstructionType);
1159 }
1160
1161 //! A test that can exercise very big color and depth/stencil attachment sizes.
1162 //! If the total memory consumed by images is too large, or if the implementation returns OUT_OF_MEMORY error somewhere,
1163 //! the test can be retried with a next increment of size reduction index, making the attachments smaller.
testAttachmentSize(Context & context,const CaseDef caseDef)1164 tcu::TestStatus testAttachmentSize (Context& context, const CaseDef caseDef)
1165 {
1166 return testWithSizeReduction(context, caseDef);
1167 // Never reached
1168 }
1169
getMipLevelSizes(IVec4 baseSize)1170 vector<IVec4> getMipLevelSizes (IVec4 baseSize)
1171 {
1172 vector<IVec4> levels;
1173 levels.push_back(baseSize);
1174
1175 while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
1176 {
1177 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
1178 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
1179 baseSize.z() = deMax32(baseSize.z() >> 1, 1);
1180 levels.push_back(baseSize);
1181 }
1182
1183 return levels;
1184 }
1185
1186 //! Compute memory consumed by each mip level, including all layers. Sizes include a padding for alignment.
getPerMipLevelStorageSize(const vector<IVec4> & mipLevelSizes,const VkDeviceSize pixelSize)1187 vector<VkDeviceSize> getPerMipLevelStorageSize (const vector<IVec4>& mipLevelSizes, const VkDeviceSize pixelSize)
1188 {
1189 const deInt64 levelAlignment = 16;
1190 vector<VkDeviceSize> storageSizes;
1191
1192 for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
1193 storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));
1194
1195 return storageSizes;
1196 }
1197
//! Render a full-screen quad into every layer/slice of one mip level of the color image (and the
//! matching level of the depth/stencil image, when a d/s format is used). One subpass is used per
//! slice; each attachment is a single-layer view of the requested mipLevel. Assumes the images are
//! already in COLOR_ATTACHMENT_OPTIMAL / DEPTH_STENCIL_ATTACHMENT_OPTIMAL layout (the caller does
//! the initial transitions).
void drawToMipLevel (const Context&			context,
					 const CaseDef&			caseDef,
					 const int				mipLevel,
					 const IVec4&			mipSize,
					 const int				numSlices,
					 const VkImage			colorImage,
					 const VkImage			depthStencilImage,
					 const VkBuffer			vertexBuffer,
					 const VkPipelineLayout	pipelineLayout,
					 const VkShaderModule	vertexModule,
					 const VkShaderModule	fragmentModule)
{
	const DeviceInterface&			vk					= context.getDeviceInterface();
	const VkDevice					device				= context.getDevice();
	const VkQueue					queue				= context.getUniversalQueue();
	const deUint32					queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	const VkImageAspectFlags		depthStencilAspect	= getFormatAspectFlags(caseDef.depthStencilFormat);
	const bool						useDepth			= (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT)   != 0;
	const bool						useStencil			= (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
	// Attachments are expected to already be in the *_ATTACHMENT_OPTIMAL layouts (see function doc).
	const Unique<VkRenderPass>		renderPass			(makeRenderPass(vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
																		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
																		VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
	vector<GraphicsPipelineWrapper>	pipelines;
	vector<SharedPtrVkImageView>	colorAttachments;
	vector<SharedPtrVkImageView>	depthStencilAttachments;
	vector<VkImageView>				attachmentHandles;			// all attachments (color and d/s)

	// For each image layer or slice (3D), create an attachment and a pipeline
	{
		VkPipeline	basePipeline	= DE_NULL;

		// Color attachments are first in the framebuffer
		pipelines.reserve(numSlices);
		for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
		{
			colorAttachments.push_back(makeSharedPtr(makeImageView(
				vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
			attachmentHandles.push_back(**colorAttachments.back());

			// We also have to create pipelines for each subpass
#ifndef CTS_USES_VULKANSC // Pipeline derivatives are forbidden in Vulkan SC
			// First pipeline allows derivatives; later ones derive from it (basePipeline, set below).
			pipelines.emplace_back(vk, device, caseDef.pipelineConstructionType, (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
																										  : VK_PIPELINE_CREATE_DERIVATIVE_BIT));
#else
			pipelines.emplace_back(vk, device, caseDef.pipelineConstructionType, 0u);
#endif // CTS_USES_VULKANSC
			preparePipelineWrapper(pipelines.back(), basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule,
								   mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, static_cast<deUint32>(subpassNdx), useDepth, useStencil);

			basePipeline = pipelines.front().getPipeline();
		}

		// Then D/S attachments, if any
		if (useDepth || useStencil)
		for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
		{
			depthStencilAttachments.push_back(makeSharedPtr(makeImageView(
				vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
				makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
			attachmentHandles.push_back(**depthStencilAttachments.back());
		}
	}

	const Unique<VkFramebuffer>	framebuffer	(makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0],
															 static_cast<deUint32>(mipSize.x()), static_cast<deUint32>(mipSize.y())));

	{
		const Unique<VkCommandPool>		cmdPool		(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
		const Unique<VkCommandBuffer>	cmdBuffer	(makeCommandBuffer(vk, device, *cmdPool));

		beginCommandBuffer(vk, *cmdBuffer);
		{
			// One clear value per attachment: color attachments first, then d/s attachments.
			vector<VkClearValue>	clearValues	(numSlices, getClearValue(caseDef.colorFormat));

			if (useDepth || useStencil)
				clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));

			const VkDeviceSize	vertexBufferOffset	= 0ull;

			beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, mipSize.x(), mipSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
		}

		// Draw
		for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
		{
			if (subpassNdx != 0)
				vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);

			// Each subpass draws one quad (4 vertices per slice in the shared vertex buffer).
			vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines[subpassNdx].getPipeline());
			vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
		}

		endRenderPass(vk, *cmdBuffer);

		endCommandBuffer(vk, *cmdBuffer);
		submitCommandsAndWait(vk, device, queue, *cmdBuffer);
	}
}
1298
checkSupportRenderToMipMaps(Context & context,const CaseDef caseDef)1299 void checkSupportRenderToMipMaps (Context& context, const CaseDef caseDef)
1300 {
1301 checkImageViewTypeRequirements(context, caseDef.viewType);
1302
1303 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1304 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1305
1306 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED && !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.depthStencilFormat))
1307 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1308
1309 checkPipelineLibraryRequirements(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.pipelineConstructionType);
1310 }
1311
1312 //! Use image mip levels as attachments
testRenderToMipMaps(Context & context,const CaseDef caseDef)1313 tcu::TestStatus testRenderToMipMaps (Context& context, const CaseDef caseDef)
1314 {
1315 const DeviceInterface& vk = context.getDeviceInterface();
1316 const InstanceInterface& vki = context.getInstanceInterface();
1317 const VkDevice device = context.getDevice();
1318 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1319 const VkQueue queue = context.getUniversalQueue();
1320 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1321 Allocator& allocator = context.getDefaultAllocator();
1322
1323 const IVec4 imageSize = caseDef.imageSizeHint; // MAX_SIZE is not used in this test
1324 const deInt32 numSlices = maxLayersOrDepth(imageSize);
1325 const vector<IVec4> mipLevelSizes = getMipLevelSizes(imageSize);
1326 const vector<VkDeviceSize> mipLevelStorageSizes = getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat)));
1327 const int numMipLevels = static_cast<int>(mipLevelSizes.size());
1328 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
1329
1330 // Create a color buffer big enough to hold all layers and mip levels
1331 const VkDeviceSize colorBufferSize = sum(mipLevelStorageSizes);
1332 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1333 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
1334
1335 {
1336 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
1337 flushAlloc(vk, device, *colorBufferAlloc);
1338 }
1339
1340 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
1341 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
1342 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
1343
1344 Move<VkImage> colorImage;
1345 MovePtr<Allocation> colorImageAlloc;
1346 Move<VkImage> depthStencilImage;
1347 MovePtr<Allocation> depthStencilImageAlloc;
1348 Move<VkBuffer> vertexBuffer;
1349 MovePtr<Allocation> vertexBufferAlloc;
1350
1351 // Create a color image
1352 {
1353 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1354
1355 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
1356 imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage);
1357 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1358 }
1359
1360 // Create a depth/stencil image (always a 2D image, optionally layered)
1361 if (useDepthStencil)
1362 {
1363 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1364
1365 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1366 IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage);
1367 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1368 }
1369
1370 // Create a vertex buffer
1371 {
1372 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1373 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1374
1375 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1376 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1377
1378 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1379 flushAlloc(vk, device, *vertexBufferAlloc);
1380 }
1381
1382 // Prepare images
1383 {
1384 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1385 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1386
1387 beginCommandBuffer(vk, *cmdBuffer);
1388
1389 const VkImageMemoryBarrier imageBarriers[] =
1390 {
1391 {
1392 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1393 DE_NULL, // const void* pNext;
1394 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1395 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1396 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1397 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1398 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1399 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1400 *colorImage, // VkImage image;
1401 { // VkImageSubresourceRange subresourceRange;
1402 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1403 0u, // uint32_t baseMipLevel;
1404 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1405 0u, // uint32_t baseArrayLayer;
1406 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1407 },
1408 },
1409 {
1410 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1411 DE_NULL, // const void* pNext;
1412 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1413 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1414 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1415 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1416 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1417 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1418 *depthStencilImage, // VkImage image;
1419 { // VkImageSubresourceRange subresourceRange;
1420 getFormatAspectFlags(caseDef.depthStencilFormat), // VkImageAspectFlags aspectMask;
1421 0u, // uint32_t baseMipLevel;
1422 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1423 0u, // uint32_t baseArrayLayer;
1424 static_cast<deUint32>(numSlices), // uint32_t layerCount;
1425 },
1426 }
1427 };
1428
1429 const deUint32 numImageBarriers = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1));
1430
1431 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 0u,
1432 0u, DE_NULL, 0u, DE_NULL, numImageBarriers, imageBarriers);
1433
1434 endCommandBuffer(vk, *cmdBuffer);
1435 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1436 }
1437
1438 // Draw
1439 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1440 {
1441 const IVec4& mipSize = mipLevelSizes[mipLevel];
1442 const int levelSlices = maxLayersOrDepth(mipSize);
1443
1444 drawToMipLevel (context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer, *pipelineLayout,
1445 *vertexModule, *fragmentModule);
1446 }
1447
1448 // Copy results: colorImage -> host visible colorBuffer
1449 {
1450 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1451 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1452
1453 beginCommandBuffer(vk, *cmdBuffer);
1454
1455 {
1456 const VkImageMemoryBarrier imageBarriers[] =
1457 {
1458 {
1459 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1460 DE_NULL, // const void* pNext;
1461 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1462 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1463 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1464 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1465 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1466 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1467 *colorImage, // VkImage image;
1468 { // VkImageSubresourceRange subresourceRange;
1469 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1470 0u, // uint32_t baseMipLevel;
1471 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1472 0u, // uint32_t baseArrayLayer;
1473 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1474 },
1475 }
1476 };
1477
1478 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1479 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1480 }
1481 {
1482 vector<VkBufferImageCopy> regions;
1483 VkDeviceSize levelOffset = 0ull;
1484 VkBufferImageCopy workRegion =
1485 {
1486 0ull, // VkDeviceSize bufferOffset;
1487 0u, // uint32_t bufferRowLength;
1488 0u, // uint32_t bufferImageHeight;
1489 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, imageSize.w()), // VkImageSubresourceLayers imageSubresource;
1490 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1491 makeExtent3D(0, 0, 0), // VkExtent3D imageExtent;
1492 };
1493
1494 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1495 {
1496 workRegion.bufferOffset = levelOffset;
1497 workRegion.imageSubresource.mipLevel = static_cast<deUint32>(mipLevel);
1498 workRegion.imageExtent = makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 1, 2));
1499
1500 regions.push_back(workRegion);
1501
1502 levelOffset += mipLevelStorageSizes[mipLevel];
1503 }
1504
1505 		vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, static_cast<deUint32>(regions.size()), &regions[0]);
1506 }
1507 {
1508 const VkBufferMemoryBarrier bufferBarriers[] =
1509 {
1510 {
1511 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1512 DE_NULL, // const void* pNext;
1513 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1514 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1515 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1516 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1517 *colorBuffer, // VkBuffer buffer;
1518 0ull, // VkDeviceSize offset;
1519 VK_WHOLE_SIZE, // VkDeviceSize size;
1520 },
1521 };
1522
1523 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1524 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1525 }
1526
1527 endCommandBuffer(vk, *cmdBuffer);
1528 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1529 }
1530
1531 // Verify results (per mip level)
1532 {
1533 invalidateAlloc(vk, device, *colorBufferAlloc);
1534
1535 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1536
1537 VkDeviceSize levelOffset = 0ull;
1538 bool allOk = true;
1539
1540 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1541 {
1542 const IVec4& mipSize = mipLevelSizes[mipLevel];
1543 const void* const pLevelData = static_cast<const deUint8*>(colorBufferAlloc->getHostPtr()) + levelOffset;
1544 const int levelDepth = maxLayersOrDepth(mipSize);
1545 const tcu::ConstPixelBufferAccess resultImage (format, mipSize.x(), mipSize.y(), levelDepth, pLevelData);
1546 tcu::TextureLevel textureLevel (format, mipSize.x(), mipSize.y(), levelDepth);
1547 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1548 const std::string comparisonName = "Mip level " + de::toString(mipLevel);
1549 bool ok = false;
1550
1551 generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0);
1552
1553 if (isFloatFormat(caseDef.colorFormat))
1554 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1555 else
1556 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1557
1558 allOk = allOk && ok; // keep testing all levels, even if we know it's a fail overall
1559 levelOffset += mipLevelStorageSizes[mipLevel];
1560 }
1561
1562 return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1563 }
1564 }
1565
getSizeDescription(const IVec4 & size)1566 std::string getSizeDescription (const IVec4& size)
1567 {
1568 std::ostringstream str;
1569
1570 const char* const description[4] =
1571 {
1572 "width", "height", "depth", "layers"
1573 };
1574
1575 int numMaxComponents = 0;
1576
1577 for (int i = 0; i < 4; ++i)
1578 {
1579 if (size[i] == MAX_SIZE)
1580 {
1581 if (numMaxComponents > 0)
1582 str << "_";
1583
1584 str << description[i];
1585 ++numMaxComponents;
1586 }
1587 }
1588
1589 if (numMaxComponents == 0)
1590 str << "small";
1591
1592 return str.str();
1593 }
1594
getFormatString(const VkFormat format)1595 inline std::string getFormatString (const VkFormat format)
1596 {
1597 std::string name(getFormatName(format));
1598 return de::toLower(name.substr(10));
1599 }
1600
getFormatString(const VkFormat colorFormat,const VkFormat depthStencilFormat)1601 std::string getFormatString (const VkFormat colorFormat, const VkFormat depthStencilFormat)
1602 {
1603 std::ostringstream str;
1604 str << getFormatString(colorFormat);
1605 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
1606 str << "_" << getFormatString(depthStencilFormat);
1607 return str.str();
1608 }
1609
getShortImageViewTypeName(const VkImageViewType imageViewType)1610 std::string getShortImageViewTypeName (const VkImageViewType imageViewType)
1611 {
1612 std::string s(getImageViewTypeName(imageViewType));
1613 return de::toLower(s.substr(19));
1614 }
1615
bvecFromMask(deUint32 mask)1616 inline BVec4 bvecFromMask (deUint32 mask)
1617 {
1618 return BVec4((mask >> 0) & 1,
1619 (mask >> 1) & 1,
1620 (mask >> 2) & 1,
1621 (mask >> 3) & 1);
1622 }
1623
genSizeCombinations(const IVec4 & baselineSize,const deUint32 sizeMask,const VkImageViewType imageViewType)1624 vector<IVec4> genSizeCombinations (const IVec4& baselineSize, const deUint32 sizeMask, const VkImageViewType imageViewType)
1625 {
1626 vector<IVec4> sizes;
1627 std::set<deUint32> masks;
1628
1629 for (deUint32 i = 0; i < (1u << 4); ++i)
1630 {
1631 // Cube images have square faces
1632 if (isCube(imageViewType) && ((i & MASK_WH) != 0))
1633 i |= MASK_WH;
1634
1635 masks.insert(i & sizeMask);
1636 }
1637
1638 for (std::set<deUint32>::const_iterator it = masks.begin(); it != masks.end(); ++it)
1639 sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it)));
1640
1641 return sizes;
1642 }
1643
//! Build the render-to-image test hierarchy for one allocation strategy.
//! For each image view type this registers three sub-groups:
//!  - "small":  baseline-size cases over all color x depth/stencil format combinations
//!  - "huge":   maximum-dimension size cases (skipped for dedicated allocation),
//!              using a single color format to limit the number of permutations
//!  - "mipmap": render-to-mip-level cases over all format combinations
void addTestCasesWithFunctions (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType, AllocationKind allocationKind)
{
	const struct
	{
		VkImageViewType	viewType;
		IVec4			baselineSize;	//!< image size: (dimX, dimY, dimZ, arraySize)
		deUint32		sizeMask;		//!< if a dimension is masked, generate a huge size case for it
	} testCase[] =
	{
		{ VK_IMAGE_VIEW_TYPE_1D,			IVec4(54,  1, 1, 1),	MASK_W			},
		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		IVec4(54,  1, 1, 4),	MASK_W_LAYERS	},
		{ VK_IMAGE_VIEW_TYPE_2D,			IVec4(44, 23, 1, 1),	MASK_WH			},
		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		IVec4(44, 23, 1, 4),	MASK_WH_LAYERS	},
		{ VK_IMAGE_VIEW_TYPE_3D,			IVec4(22, 31, 7, 1),	MASK_WHD		},
		{ VK_IMAGE_VIEW_TYPE_CUBE,			IVec4(35, 35, 1, 6),	MASK_WH			},
		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	IVec4(35, 35, 1, 2*6),	MASK_WH_LAYERS	},
	};

	// Color formats exercised by the baseline-size and mipmap cases.
	const VkFormat format[] =
	{
		VK_FORMAT_R8G8B8A8_UNORM,
		VK_FORMAT_R32_UINT,
		VK_FORMAT_R16G16_SINT,
		VK_FORMAT_R32G32B32A32_SFLOAT,
		VK_FORMAT_A1R5G5B5_UNORM_PACK16,
		VK_FORMAT_R5G6B5_UNORM_PACK16,
		VK_FORMAT_A2B10G10R10_UINT_PACK32,
		VK_FORMAT_A2B10G10R10_UNORM_PACK32
	};

	const VkFormat depthStencilFormat[] =
	{
		VK_FORMAT_UNDEFINED,			// don't use a depth/stencil attachment
		VK_FORMAT_D16_UNORM,
		VK_FORMAT_S8_UINT,
		VK_FORMAT_D24_UNORM_S8_UINT,	// one of the following mixed formats must be supported
		VK_FORMAT_D32_SFLOAT_S8_UINT,
	};

	for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx)
	{
		MovePtr<tcu::TestCaseGroup>	imageGroup(new tcu::TestCaseGroup(group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str(), ""));

		// Generate attachment size cases
		{
			vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask, testCase[caseNdx].viewType);

#ifdef CTS_USES_VULKANSC
			// filter out sizes in which width and height is equal to maximimum values
			sizes.erase(std::remove_if(begin(sizes), end(sizes), [&](const IVec4& v) { return v.x() == MAX_SIZE && v.y() == MAX_SIZE; }), end(sizes));
#endif // CTS_USES_VULKANSC

			MovePtr<tcu::TestCaseGroup>	smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small", ""));
			MovePtr<tcu::TestCaseGroup>	hugeGroup (new tcu::TestCaseGroup(group->getTestContext(), "huge",  ""));

			// NOTE(review): addChild() appears to take ownership of the raw pointer;
			// the matching release() calls below drop the MovePtrs' ownership so the
			// groups are not double-deleted — confirm against tcu::TestNode::addChild.
			imageGroup->addChild(smallGroup.get());
			imageGroup->addChild(hugeGroup.get());

			for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter)
			{
				// The first size is the baseline size, put it in a dedicated group
				if (sizeIter == sizes.begin())
				{
					for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
					for (int formatNdx   = 0; formatNdx   < DE_LENGTH_OF_ARRAY(format);             ++formatNdx)
					{
						const CaseDef caseDef
						{
							pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
							testCase[caseNdx].viewType,			// VkImageViewType			imageType;
							*sizeIter,							// IVec4					imageSizeHint;
							format[formatNdx],					// VkFormat					colorFormat;
							depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
							allocationKind						// AllocationKind			allocationKind;
						};
						addFunctionCaseWithPrograms(smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
					}
				}
				else // All huge cases go into a separate group
				{
					// Huge allocations are not generated for the dedicated-allocation variant.
					if (allocationKind != ALLOCATION_KIND_DEDICATED)
					{
						MovePtr<tcu::TestCaseGroup>	sizeGroup	(new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str(), ""));
						const VkFormat		colorFormat	= VK_FORMAT_R8G8B8A8_UNORM;

						// Use the same color format for all cases, to reduce the number of permutations
						for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
						{
							const CaseDef caseDef
							{
								pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
								testCase[caseNdx].viewType,			// VkImageViewType			viewType;
								*sizeIter,							// IVec4					imageSizeHint;
								colorFormat,						// VkFormat					colorFormat;
								depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
								allocationKind						// AllocationKind			allocationKind;
							};
							addFunctionCaseWithPrograms(sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]), "", checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
						}
						hugeGroup->addChild(sizeGroup.release());
					}
				}
			}
			// Ownership already transferred via addChild() above; see note there.
			smallGroup.release();
			hugeGroup.release();
		}

		// Generate mip map cases
		{
			MovePtr<tcu::TestCaseGroup>	mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap", ""));

			for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
			for (int formatNdx   = 0; formatNdx   < DE_LENGTH_OF_ARRAY(format);             ++formatNdx)
			{
				const CaseDef caseDef
				{
					pipelineConstructionType,			// PipelineConstructionType	pipelineConstructionType;
					testCase[caseNdx].viewType,			// VkImageViewType			imageType;
					testCase[caseNdx].baselineSize,		// IVec4					imageSizeHint;
					format[formatNdx],					// VkFormat					colorFormat;
					depthStencilFormat[dsFormatNdx],	// VkFormat					depthStencilFormat;
					allocationKind						// AllocationKind			allocationKind;
				};
				addFunctionCaseWithPrograms(mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", checkSupportRenderToMipMaps, initPrograms, testRenderToMipMaps, caseDef);
			}
			imageGroup->addChild(mipmapGroup.release());
		}

		group->addChild(imageGroup.release());
	}
}
1775
//! Register render-to-image cases using the sub-allocated (non-dedicated) memory path.
void addCoreRenderToImageTests (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType)
{
	addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_SUBALLOCATED);
}
1780
//! Register render-to-image cases using VK_KHR_dedicated_allocation-style memory.
void addDedicatedAllocationRenderToImageTests (tcu::TestCaseGroup* group, PipelineConstructionType pipelineConstructionType)
{
	addTestCasesWithFunctions(group, pipelineConstructionType, ALLOCATION_KIND_DEDICATED);
}
1785
1786 } // anonymous ns
1787
createRenderToImageTests(tcu::TestContext & testCtx,PipelineConstructionType pipelineConstructionType)1788 tcu::TestCaseGroup* createRenderToImageTests (tcu::TestContext& testCtx, PipelineConstructionType pipelineConstructionType)
1789 {
1790 de::MovePtr<tcu::TestCaseGroup> renderToImageTests (new tcu::TestCaseGroup(testCtx, "render_to_image", "Render to image tests"));
1791
1792 renderToImageTests->addChild(createTestGroup(testCtx, "core", "Core render to image tests", addCoreRenderToImageTests, pipelineConstructionType));
1793 renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation", "Render to image tests for dedicated memory allocation", addDedicatedAllocationRenderToImageTests, pipelineConstructionType));
1794
1795 return renderToImageTests.release();
1796 }
1797
1798 } // pipeline
1799 } // vkt
1800