1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file vktPipelineRenderToImageTests.cpp
21 * \brief Render to image tests
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktPipelineRenderToImageTests.hpp"
25 #include "vktPipelineMakeUtil.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vktPipelineVertexUtil.hpp"
29 #include "vktTestGroupUtil.hpp"
30 #include "vkObjUtil.hpp"
31
32 #include "vkMemUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkRefUtil.hpp"
36 #include "vkBuilderUtil.hpp"
37 #include "vkPrograms.hpp"
38 #include "vkImageUtil.hpp"
39 #include "vkCmdUtil.hpp"
40
41 #include "tcuTextureUtil.hpp"
42 #include "tcuImageCompare.hpp"
43 #include "tcuTestLog.hpp"
44 #include "tcuPlatform.hpp"
45 #include "vkPlatform.hpp"
46
47 #include "deUniquePtr.hpp"
48 #include "deSharedPtr.hpp"
49
50 #include <string>
51 #include <vector>
52 #include <set>
53
54 namespace vkt
55 {
56 namespace pipeline
57 {
58 namespace
59 {
60 using namespace vk;
61 using de::UniquePtr;
62 using de::MovePtr;
63 using de::SharedPtr;
64 using tcu::IVec3;
65 using tcu::Vec4;
66 using tcu::UVec4;
67 using tcu::IVec2;
68 using tcu::IVec4;
69 using tcu::BVec4;
70 using std::vector;
71
72 typedef SharedPtr<Unique<VkImageView> > SharedPtrVkImageView;
73 typedef SharedPtr<Unique<VkPipeline> > SharedPtrVkPipeline;
74
75 enum Constants
76 {
77 NUM_CUBE_FACES = 6,
78 REFERENCE_COLOR_VALUE = 125,
79 REFERENCE_STENCIL_VALUE = 42,
80 MAX_SIZE = -1, //!< Should be queried at runtime and replaced with max possible value
81 MAX_VERIFICATION_REGION_SIZE = 32, //!< Limit the checked area to a small size, especially for huge images
82 MAX_VERIFICATION_REGION_DEPTH = 8,
83
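// The MASK_* values below appear to be bit flags selecting which size components are maximized
// (bit 0 = width, bit 1 = height, bit 2 = depth, bit 3 = layers) when a case asks for the
// largest possible image in those dimensions.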
84 MASK_W = (1 | 0 | 0 | 0),
85 MASK_W_LAYERS = (1 | 0 | 0 | 8),
86 MASK_WH = (1 | 2 | 0 | 0),
87 MASK_WH_LAYERS = (1 | 2 | 0 | 8),
88 MASK_WHD = (1 | 2 | 4 | 0),
89 };
90
91 enum AllocationKind
92 {
93 ALLOCATION_KIND_SUBALLOCATED = 0,
94 ALLOCATION_KIND_DEDICATED,
95 };
96
97 static const float REFERENCE_DEPTH_VALUE = 1.0f;
98 static const Vec4 COLOR_TABLE[] =
99 {
100 Vec4(0.9f, 0.0f, 0.0f, 1.0f),
101 Vec4(0.6f, 1.0f, 0.0f, 1.0f),
102 Vec4(0.3f, 0.0f, 1.0f, 1.0f),
103 Vec4(0.1f, 1.0f, 1.0f, 1.0f),
104 Vec4(0.8f, 1.0f, 0.0f, 1.0f),
105 Vec4(0.5f, 0.0f, 1.0f, 1.0f),
106 Vec4(0.2f, 0.0f, 0.0f, 1.0f),
107 Vec4(1.0f, 1.0f, 0.0f, 1.0f),
108 };
109
110 struct CaseDef
111 {
112 VkImageViewType viewType;
113 IVec4 imageSizeHint; //!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE
114 VkFormat colorFormat;
115 VkFormat depthStencilFormat; //!< A depth/stencil format, or VK_FORMAT_UNDEFINED if not used
116 AllocationKind allocationKind;
117 };
118
119 template<typename T>
120 inline SharedPtr<Unique<T> > makeSharedPtr (Move<T> move)
121 {
122 return SharedPtr<Unique<T> >(new Unique<T>(move));
123 }
124
125 template<typename T>
126 inline VkDeviceSize sizeInBytes (const vector<T>& vec)
127 {
128 return vec.size() * sizeof(vec[0]);
129 }
130
131 inline bool isCube (const VkImageViewType viewType)
132 {
133 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
134 }
135
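// Total texel count of an image described as (width, height, depth, layers); in this test either
// depth or layers is 1, so the product is meaningful for both 3D and arrayed images.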
136 inline VkDeviceSize product (const IVec4& v)
137 {
138 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w();
139 }
140
141 template<typename T>
142 inline T sum (const vector<T>& v)
143 {
144 T total = static_cast<T>(0);
145 for (typename vector<T>::const_iterator it = v.begin(); it != v.end(); ++it)
146 total += *it;
147 return total;
148 }
149
150 template <typename T, int Size>
151 int findIndexOfMaxComponent (const tcu::Vector<T, Size>& vec)
152 {
153 int index = 0;
154 T value = vec[0];
155
156 for (int i = 1; i < Size; ++i)
157 {
158 if (vec[i] > value)
159 {
160 index = i;
161 value = vec[i];
162 }
163 }
164
165 return index;
166 }
167
168 inline int maxLayersOrDepth (const IVec4& size)
169 {
170 // This is safe because a 3D image must have layers (.w()) = 1, while an arrayed image has depth (.z()) = 1
171 return deMax32(size.z(), size.w());
172 }
173
174 de::MovePtr<Allocation> bindBuffer (const InstanceInterface& vki,
175 const DeviceInterface& vkd,
176 const VkPhysicalDevice& physDevice,
177 const VkDevice device,
178 const VkBuffer& buffer,
179 const MemoryRequirement requirement,
180 Allocator& allocator,
181 AllocationKind allocationKind)
182 {
183 switch (allocationKind)
184 {
185 case ALLOCATION_KIND_SUBALLOCATED:
186 {
187 return vk::bindBuffer(vkd, device, allocator, buffer, requirement);
188 }
189
190 case ALLOCATION_KIND_DEDICATED:
191 {
192 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, requirement);
193 }
194
195 default:
196 {
197 TCU_THROW(InternalError, "Invalid allocation kind");
198 }
199 }
200 }
201
202 de::MovePtr<Allocation> bindImage (const InstanceInterface& vki,
203 const DeviceInterface& vkd,
204 const VkPhysicalDevice& physDevice,
205 const VkDevice device,
206 const VkImage& image,
207 const MemoryRequirement requirement,
208 Allocator& allocator,
209 AllocationKind allocationKind)
210 {
211 switch (allocationKind)
212 {
213 case ALLOCATION_KIND_SUBALLOCATED:
214 {
215 return vk::bindImage(vkd, device, allocator, image, requirement);
216 }
217
218 case ALLOCATION_KIND_DEDICATED:
219 {
220 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement);
221 }
222
223 default:
224 {
225 TCU_THROW(InternalError, "Invalid allocation kind");
226 }
227 }
228 }
229
230 // This is very test specific, so be careful if you want to reuse this code.
231 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
232 const VkDevice device,
233 const VkPipeline basePipeline, // for derivatives
234 const VkPipelineLayout pipelineLayout,
235 const VkRenderPass renderPass,
236 const VkShaderModule vertexModule,
237 const VkShaderModule fragmentModule,
238 const IVec2& renderSize,
239 const VkPrimitiveTopology topology,
240 const deUint32 subpass,
241 const bool useDepth,
242 const bool useStencil)
243 {
244 const VkVertexInputBindingDescription vertexInputBindingDescription =
245 {
246 0u, // uint32_t binding;
247 sizeof(Vertex4RGBA), // uint32_t stride;
248 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
249 };
250
251 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
252 {
253 {
254 0u, // uint32_t location;
255 0u, // uint32_t binding;
256 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
257 0u, // uint32_t offset;
258 },
259 {
260 1u, // uint32_t location;
261 0u, // uint32_t binding;
262 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
263 sizeof(Vec4), // uint32_t offset;
264 }
265 };
266
267 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
268 {
269 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
270 DE_NULL, // const void* pNext;
271 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
272 1u, // uint32_t vertexBindingDescriptionCount;
273 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
274 DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions), // uint32_t vertexAttributeDescriptionCount;
275 vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
276 };
277
278 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
279 {
280 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
281 DE_NULL, // const void* pNext;
282 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
283 topology, // VkPrimitiveTopology topology;
284 VK_FALSE, // VkBool32 primitiveRestartEnable;
285 };
286
287 const VkViewport viewport = makeViewport(renderSize);
288 const VkRect2D scissor = makeRect2D(renderSize);
289
290 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
291 {
292 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
293 DE_NULL, // const void* pNext;
294 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
295 1u, // uint32_t viewportCount;
296 &viewport, // const VkViewport* pViewports;
297 1u, // uint32_t scissorCount;
298 &scissor, // const VkRect2D* pScissors;
299 };
300
301 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
302 {
303 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
304 DE_NULL, // const void* pNext;
305 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
306 VK_FALSE, // VkBool32 depthClampEnable;
307 VK_FALSE, // VkBool32 rasterizerDiscardEnable;
308 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
309 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
310 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
311 VK_FALSE, // VkBool32 depthBiasEnable;
312 0.0f, // float depthBiasConstantFactor;
313 0.0f, // float depthBiasClamp;
314 0.0f, // float depthBiasSlopeFactor;
315 1.0f, // float lineWidth;
316 };
317
318 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
319 {
320 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
321 DE_NULL, // const void* pNext;
322 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
323 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
324 VK_FALSE, // VkBool32 sampleShadingEnable;
325 0.0f, // float minSampleShading;
326 DE_NULL, // const VkSampleMask* pSampleMask;
327 VK_FALSE, // VkBool32 alphaToCoverageEnable;
328 VK_FALSE // VkBool32 alphaToOneEnable;
329 };
330
331 const VkStencilOpState stencilOpState = makeStencilOpState(
332 VK_STENCIL_OP_KEEP, // stencil fail
333 VK_STENCIL_OP_KEEP, // depth & stencil pass
334 VK_STENCIL_OP_KEEP, // depth only fail
335 VK_COMPARE_OP_EQUAL, // compare op
336 ~0u, // compare mask
337 ~0u, // write mask
338 static_cast<deUint32>(REFERENCE_STENCIL_VALUE)); // reference
339
340 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
341 {
342 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
343 DE_NULL, // const void* pNext;
344 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
345 useDepth, // VkBool32 depthTestEnable;
346 VK_FALSE, // VkBool32 depthWriteEnable;
347 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
348 VK_FALSE, // VkBool32 depthBoundsTestEnable;
349 useStencil, // VkBool32 stencilTestEnable;
350 stencilOpState, // VkStencilOpState front;
351 stencilOpState, // VkStencilOpState back;
352 0.0f, // float minDepthBounds;
353 1.0f, // float maxDepthBounds;
354 };
355
356 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
357 // Number of blend attachments must equal the number of color attachments during any subpass.
358 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
359 {
360 VK_FALSE, // VkBool32 blendEnable;
361 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
362 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
363 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
364 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
365 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
366 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
367 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
368 };
369
370 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
371 {
372 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
373 DE_NULL, // const void* pNext;
374 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
375 VK_FALSE, // VkBool32 logicOpEnable;
376 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
377 1u, // deUint32 attachmentCount;
378 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
379 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
380 };
381
382 const VkPipelineShaderStageCreateInfo pShaderStages[] =
383 {
384 {
385 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
386 DE_NULL, // const void* pNext;
387 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
388 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
389 vertexModule, // VkShaderModule module;
390 "main", // const char* pName;
391 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
392 },
393 {
394 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
395 DE_NULL, // const void* pNext;
396 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
397 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
398 fragmentModule, // VkShaderModule module;
399 "main", // const char* pName;
400 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
401 }
402 };
403
404 const VkPipelineCreateFlags flags = (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
405 : VK_PIPELINE_CREATE_DERIVATIVE_BIT);
406
407 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
408 {
409 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
410 DE_NULL, // const void* pNext;
411 flags, // VkPipelineCreateFlags flags;
412 DE_LENGTH_OF_ARRAY(pShaderStages), // deUint32 stageCount;
413 pShaderStages, // const VkPipelineShaderStageCreateInfo* pStages;
414 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
415 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
416 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
417 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
418 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
419 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
420 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
421 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
422 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
423 pipelineLayout, // VkPipelineLayout layout;
424 renderPass, // VkRenderPass renderPass;
425 subpass, // deUint32 subpass;
426 basePipeline, // VkPipeline basePipelineHandle;
427 -1, // deInt32 basePipelineIndex;
428 };
429
430 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
431 }
432
433 //! Make a render pass with one subpass per color attachment and depth/stencil attachment (if used).
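//! Color attachments occupy framebuffer slots [0, numLayers) and, when a depth/stencil format is
//! used, depth/stencil attachments occupy slots [numLayers, 2*numLayers); subpass i references
//! color attachment i and depth/stencil attachment i + numLayers.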
434 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk,
435 const VkDevice device,
436 const VkFormat colorFormat,
437 const VkFormat depthStencilFormat,
438 const deUint32 numLayers,
439 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
440 const VkImageLayout initialDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED)
441 {
442 const VkAttachmentDescription colorAttachmentDescription =
443 {
444 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
445 colorFormat, // VkFormat format;
446 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
447 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
448 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
449 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
450 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
451 initialColorImageLayout, // VkImageLayout initialLayout;
452 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
453 };
454 vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription);
455
456 const VkAttachmentDescription depthStencilAttachmentDescription =
457 {
458 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
459 depthStencilFormat, // VkFormat format;
460 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
461 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
462 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
463 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp stencilLoadOp;
464 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
465 initialDepthStencilImageLayout, // VkImageLayout initialLayout;
466 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
467 };
468
469 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
470 attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription);
471
472 // Create a subpass for each attachment (each attachment is a layer of an arrayed image).
473 vector<VkAttachmentReference> colorAttachmentReferences (numLayers);
474 vector<VkAttachmentReference> depthStencilAttachmentReferences(numLayers);
475 vector<VkSubpassDescription> subpasses;
476
477 // Ordering here must match the framebuffer attachments
478 for (deUint32 i = 0; i < numLayers; ++i)
479 {
480 const VkAttachmentReference attachmentRef =
481 {
482 i, // deUint32 attachment;
483 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
484 };
485 const VkAttachmentReference depthStencilAttachmentRef =
486 {
487 i + numLayers, // deUint32 attachment;
488 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout;
489 };
490
491 colorAttachmentReferences[i] = attachmentRef;
492 depthStencilAttachmentReferences[i] = depthStencilAttachmentRef;
493
494 const VkAttachmentReference* pDepthStencilAttachment = (depthStencilFormat != VK_FORMAT_UNDEFINED ? &depthStencilAttachmentReferences[i] : DE_NULL);
495 const VkSubpassDescription subpassDescription =
496 {
497 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
498 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
499 0u, // deUint32 inputAttachmentCount;
500 DE_NULL, // const VkAttachmentReference* pInputAttachments;
501 1u, // deUint32 colorAttachmentCount;
502 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments;
503 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
504 pDepthStencilAttachment, // const VkAttachmentReference* pDepthStencilAttachment;
505 0u, // deUint32 preserveAttachmentCount;
506 DE_NULL // const deUint32* pPreserveAttachments;
507 };
508 subpasses.push_back(subpassDescription);
509 }
510
511 const VkRenderPassCreateInfo renderPassInfo =
512 {
513 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
514 DE_NULL, // const void* pNext;
515 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
516 static_cast<deUint32>(attachmentDescriptions.size()), // deUint32 attachmentCount;
517 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments;
518 static_cast<deUint32>(subpasses.size()), // deUint32 subpassCount;
519 &subpasses[0], // const VkSubpassDescription* pSubpasses;
520 0u, // deUint32 dependencyCount;
521 DE_NULL // const VkSubpassDependency* pDependencies;
522 };
523
524 return createRenderPass(vk, device, &renderPassInfo);
525 }
526
527 Move<VkImage> makeImage (const DeviceInterface& vk,
528 const VkDevice device,
529 VkImageCreateFlags flags,
530 VkImageType imageType,
531 const VkFormat format,
532 const IVec3& size,
533 const deUint32 numMipLevels,
534 const deUint32 numLayers,
535 const VkImageUsageFlags usage)
536 {
537 const VkImageCreateInfo imageParams =
538 {
539 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
540 DE_NULL, // const void* pNext;
541 flags, // VkImageCreateFlags flags;
542 imageType, // VkImageType imageType;
543 format, // VkFormat format;
544 makeExtent3D(size), // VkExtent3D extent;
545 numMipLevels, // deUint32 mipLevels;
546 numLayers, // deUint32 arrayLayers;
547 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
548 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
549 usage, // VkImageUsageFlags usage;
550 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
551 0u, // deUint32 queueFamilyIndexCount;
552 DE_NULL, // const deUint32* pQueueFamilyIndices;
553 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
554 };
555 return createImage(vk, device, &imageParams);
556 }
557
558 inline VkImageSubresourceRange makeColorSubresourceRange (const int baseArrayLayer, const int layerCount)
559 {
560 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<deUint32>(baseArrayLayer), static_cast<deUint32>(layerCount));
561 }
562
563 //! Get a reference clear value based on color format.
564 VkClearValue getClearValue (const VkFormat format)
565 {
566 if (isUintFormat(format) || isIntFormat(format))
567 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE);
568 else
569 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f);
570 }
571
572 std::string getColorFormatStr (const int numComponents, const bool isUint, const bool isSint)
573 {
574 std::ostringstream str;
575 if (numComponents == 1)
576 str << (isUint ? "uint" : isSint ? "int" : "float");
577 else
578 str << (isUint ? "u" : isSint ? "i" : "") << "vec" << numComponents;
579
580 return str.str();
581 }
582
583 //! A half-viewport quad. Use with TRIANGLE_STRIP topology.
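//! The quad covers x in [0, 1] and y in [-1, 1] in clip space, i.e. the right half of the viewport,
//! leaving the left half with the clear color. One quad (4 vertices) is generated per subpass,
//! cycling through COLOR_TABLE.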
584 vector<Vertex4RGBA> genFullQuadVertices (const int subpassCount)
585 {
586 vector<Vertex4RGBA> vectorData;
587 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
588 {
589 Vertex4RGBA data =
590 {
591 Vec4(0.0f, -1.0f, 0.0f, 1.0f),
592 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)],
593 };
594 vectorData.push_back(data);
595 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
596 vectorData.push_back(data);
597 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f);
598 vectorData.push_back(data);
599 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f);
600 vectorData.push_back(data);
601 }
602 return vectorData;
603 }
604
605 VkImageType getImageType (const VkImageViewType viewType)
606 {
607 switch (viewType)
608 {
609 case VK_IMAGE_VIEW_TYPE_1D:
610 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
611 return VK_IMAGE_TYPE_1D;
612
613 case VK_IMAGE_VIEW_TYPE_2D:
614 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
615 case VK_IMAGE_VIEW_TYPE_CUBE:
616 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
617 return VK_IMAGE_TYPE_2D;
618
619 case VK_IMAGE_VIEW_TYPE_3D:
620 return VK_IMAGE_TYPE_3D;
621
622 default:
623 DE_ASSERT(0);
624 return VK_IMAGE_TYPE_LAST;
625 }
626 }
627
628 //! ImageViewType for accessing a single layer/slice of an image
629 VkImageViewType getImageViewSliceType (const VkImageViewType viewType)
630 {
631 switch (viewType)
632 {
633 case VK_IMAGE_VIEW_TYPE_1D:
634 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
635 return VK_IMAGE_VIEW_TYPE_1D;
636
637 case VK_IMAGE_VIEW_TYPE_2D:
638 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
639 case VK_IMAGE_VIEW_TYPE_CUBE:
640 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
641 case VK_IMAGE_VIEW_TYPE_3D:
642 return VK_IMAGE_VIEW_TYPE_2D;
643
644 default:
645 DE_ASSERT(0);
646 return VK_IMAGE_VIEW_TYPE_LAST;
647 }
648 }
649
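// Rendering to individual slices of a 3D image through 2D views requires the image to be created
// with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR (VK_KHR_maintenance1); cube views require
// VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT.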
650 VkImageCreateFlags getImageCreateFlags (const VkImageViewType viewType)
651 {
652 VkImageCreateFlags flags = (VkImageCreateFlags)0;
653
654 if (viewType == VK_IMAGE_VIEW_TYPE_3D) flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR;
655 if (isCube(viewType)) flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
656
657 return flags;
658 }
659
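//! Build the reference image: every slice starts as the clear color, and the right half of each
//! slice (x >= width/2) is overwritten with the per-slice color from COLOR_TABLE, mirroring the
//! half-viewport quads drawn by the test.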
660 void generateExpectedImage (const tcu::PixelBufferAccess& outputImage, const IVec2& renderSize, const int colorDepthOffset)
661 {
662 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type);
663 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
664 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat()));
665
666 if (isInt)
667 tcu::clear(outputImage, IVec4(clearValue.color.int32));
668 else
669 tcu::clear(outputImage, Vec4(clearValue.color.float32));
670
671 for (int z = 0; z < outputImage.getDepth(); ++z)
672 {
673 const Vec4& setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)];
674 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<deInt32>();
675
676 for (int y = 0; y < renderSize.y(); ++y)
677 for (int x = renderSize.x()/2; x < renderSize.x(); ++x)
678 {
679 if (isInt)
680 outputImage.setPixel(setColorInt, x, y, z);
681 else
682 outputImage.setPixel(setColor, x, y, z);
683 }
684 }
685 }
686
687 deUint32 selectMatchingMemoryType (const VkPhysicalDeviceMemoryProperties& deviceMemProps, deUint32 allowedMemTypeBits, MemoryRequirement requirement)
688 {
689 const deUint32 compatibleTypes = getCompatibleMemoryTypes(deviceMemProps, requirement);
690 const deUint32 candidates = allowedMemTypeBits & compatibleTypes;
691
692 if (candidates == 0)
693 TCU_THROW(NotSupportedError, "No compatible memory type found");
694
695 return (deUint32)deCtz32(candidates);
696 }
697
698 IVec4 getMaxImageSize (const VkImageViewType viewType, const IVec4& sizeHint)
699 {
700 // Default limits are the minimum values guaranteed by the Vulkan specification (maxImageDimension1D/2D = 4096, maxImageDimension3D = 256, maxImageArrayLayers = 256)
701 IVec4 size = IVec4(
702 sizeHint.x() != MAX_SIZE ? sizeHint.x() : 4096,
703 sizeHint.y() != MAX_SIZE ? sizeHint.y() : 4096,
704 sizeHint.z() != MAX_SIZE ? sizeHint.z() : 256,
705 sizeHint.w() != MAX_SIZE ? sizeHint.w() : 256);
706
707 switch (viewType)
708 {
709 case VK_IMAGE_VIEW_TYPE_1D:
710 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
711 size.x() = deMin32(4096, size.x());
712 break;
713
714 case VK_IMAGE_VIEW_TYPE_2D:
715 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
716 size.x() = deMin32(4096, size.x());
717 size.y() = deMin32(4096, size.y());
718 break;
719
720 case VK_IMAGE_VIEW_TYPE_3D:
721 size.x() = deMin32(256, size.x());
722 size.y() = deMin32(256, size.y());
723 break;
724
725 case VK_IMAGE_VIEW_TYPE_CUBE:
726 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
727 size.x() = deMin32(4096, size.x());
728 size.y() = deMin32(4096, size.y());
729 size.w() = deMin32(252, size.w());
730 size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES); // round down to 6 faces
731 break;
732
733 default:
734 DE_ASSERT(0);
735 return IVec4();
736 }
737
738 return size;
739 }
740
741 deUint32 getMemoryTypeNdx (Context& context, const CaseDef& caseDef)
742 {
743 const DeviceInterface& vk = context.getDeviceInterface();
744 const InstanceInterface& vki = context.getInstanceInterface();
745 const VkDevice device = context.getDevice();
746 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
747
748 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
749 Move<VkImage> colorImage;
750 VkMemoryRequirements memReqs;
751
752 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
753 const IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
754
755 // Create the image without binding any memory to it; we only need its memory requirements
756 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
757 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
758
759 vk.getImageMemoryRequirements(device, *colorImage, &memReqs);
760 return selectMatchingMemoryType(memoryProperties, memReqs.memoryTypeBits, MemoryRequirement::Any);
761 }
762
763 VkDeviceSize getMaxDeviceHeapSize (Context& context, const CaseDef& caseDef)
764 {
765 const InstanceInterface& vki = context.getInstanceInterface();
766 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
767 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
768 const deUint32 memoryTypeNdx = getMemoryTypeNdx (context, caseDef);
769
770 return memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size;
771 }
772
773 //! Get a smaller image size. Returns a vector of zeroes if the size can't be reduced further.
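//! For example (hypothetical sizes): a cube-view size of (4096, 4096, 1, 6) halves both width and
//! height to (2048, 2048, 1, 6), while a 2D-array size of (4096, 4096, 1, 256) halves only its
//! largest component, giving (2048, 4096, 1, 256).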
774 IVec4 getReducedImageSize (const CaseDef& caseDef, IVec4 size)
775 {
776 const int maxIndex = findIndexOfMaxComponent(size);
777 const int reducedSize = size[maxIndex] >> 1;
778
779 switch (caseDef.viewType)
780 {
781 case VK_IMAGE_VIEW_TYPE_CUBE:
782 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
783 if (maxIndex < 2)
784 size.x() = size.y() = reducedSize;
785 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES)
786 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6
787 else
788 size = IVec4(0);
789 break;
790
791 default:
792 size[maxIndex] = reducedSize;
793 break;
794 }
795
796 if (reducedSize == 0)
797 size = IVec4(0);
798
799 return size;
800 }
801
802 bool isDepthStencilFormatSupported (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat format)
803 {
804 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format);
805 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
806 }
807
808 VkImageAspectFlags getFormatAspectFlags (const VkFormat format)
809 {
810 if (format == VK_FORMAT_UNDEFINED)
811 return 0;
812
813 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order;
814
815 switch (order)
816 {
817 case tcu::TextureFormat::DS: return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
818 case tcu::TextureFormat::D: return VK_IMAGE_ASPECT_DEPTH_BIT;
819 case tcu::TextureFormat::S: return VK_IMAGE_ASPECT_STENCIL_BIT;
820 default: return VK_IMAGE_ASPECT_COLOR_BIT;
821 }
822 }
823
824 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
825 {
826 const int numComponents = getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order);
827 const bool isUint = isUintFormat(caseDef.colorFormat);
828 const bool isSint = isIntFormat(caseDef.colorFormat);
829
830 // Vertex shader
831 {
832 std::ostringstream src;
833 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
834 << "\n"
835 << "layout(location = 0) in vec4 in_position;\n"
836 << "layout(location = 1) in vec4 in_color;\n"
837 << "layout(location = 0) out vec4 out_color;\n"
838 << "\n"
839 << "out gl_PerVertex {\n"
840 << " vec4 gl_Position;\n"
841 << "};\n"
842 << "\n"
843 << "void main(void)\n"
844 << "{\n"
845 << " gl_Position = in_position;\n"
846 << " out_color = in_color;\n"
847 << "}\n";
848
849 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
850 }
851
852 // Fragment shader
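// For example, a 4-component unsigned integer color format results in
//   o_color = uvec4(in_color * uvec4(125));
// whereas a plain float format results in o_color = vec4(in_color); with no scaling.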
853 {
854 std::ostringstream colorValue;
855 colorValue << REFERENCE_COLOR_VALUE;
856 const std::string colorFormat = getColorFormatStr(numComponents, isUint, isSint);
857 const std::string colorInteger = (isUint || isSint ? " * "+colorFormat+"("+colorValue.str()+")" :"");
858
859 std::ostringstream src;
860 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
861 << "\n"
862 << "layout(location = 0) in vec4 in_color;\n"
863 << "layout(location = 0) out " << colorFormat << " o_color;\n"
864 << "\n"
865 << "void main(void)\n"
866 << "{\n"
867 << " o_color = " << colorFormat << "("
868 << (numComponents == 1 ? "in_color.r" :
869 numComponents == 2 ? "in_color.rg" :
870 numComponents == 3 ? "in_color.rgb" : "in_color")
871 << colorInteger
872 << ");\n"
873 << "}\n";
874
875 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
876 }
877 }
878
879 //! See testAttachmentSize() description
880 tcu::TestStatus testWithSizeReduction (Context& context, const CaseDef& caseDef)
881 {
882 const DeviceInterface& vk = context.getDeviceInterface();
883 const InstanceInterface& vki = context.getInstanceInterface();
884 const VkDevice device = context.getDevice();
885 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
886 const VkQueue queue = context.getUniversalQueue();
887 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
888 Allocator& allocator = context.getDefaultAllocator();
889
890 // Device memory might be too small to allocate the largest possible attachment, so try to account for that.
891 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
892
893 IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
894 VkDeviceSize colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
895 VkDeviceSize depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
896
897 const VkDeviceSize reserveForChecking = 500ull * 1024ull; // reserve ~500 KiB for the verification buffer
898 const float additionalMemory = 1.15f; // leave some free memory on the device (15%)
899 VkDeviceSize neededMemory = static_cast<VkDeviceSize>(static_cast<float>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
900 VkDeviceSize maxMemory = getMaxDeviceHeapSize(context, caseDef) >> 2;
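// The memory budget is capped at a quarter of the selected heap and, below, at the platform's
// reported total system memory, so that huge-attachment cases can shrink to fit smaller devices.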
901
902 vk::PlatformMemoryLimits memoryLimits;
903 context.getTestContext().getPlatform().getVulkanPlatform().getMemoryLimits(memoryLimits);
904 maxMemory = std::min(maxMemory, VkDeviceSize(memoryLimits.totalSystemMemory));
905
906 const VkDeviceSize deviceMemoryBudget = std::min(neededMemory, maxMemory);
907 bool allocationPossible = false;
908
909 // Keep reducing the size while the image doesn't fit in the memory budget
910 while (neededMemory > deviceMemoryBudget)
911 {
912 imageSize = getReducedImageSize(caseDef, imageSize);
913
914 if (imageSize == IVec4())
915 return tcu::TestStatus::fail("Couldn't create an image with required size");
916
917 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
918 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
919 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory);
920 }
921
922 // Keep reducing the size while allocation fails with an out-of-memory error
923 while (!allocationPossible)
924 {
925 VkDeviceMemory object = 0;
926 const VkMemoryAllocateInfo allocateInfo =
927 {
928 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, //VkStructureType sType;
929 DE_NULL, //const void* pNext;
930 neededMemory, //VkDeviceSize allocationSize;
931 getMemoryTypeNdx(context, caseDef) //deUint32 memoryTypeIndex;
932 };
933
934 const VkResult result = vk.allocateMemory(device, &allocateInfo, DE_NULL, &object);
935
936 if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
937 {
938 imageSize = getReducedImageSize(caseDef, imageSize);
939
940 if (imageSize == IVec4())
941 return tcu::TestStatus::fail("Couldn't create an image with required size");
942
943 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
944 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
945 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
946 }
947 else if (VK_SUCCESS != result)
948 {
949 return tcu::TestStatus::fail("Couldn't allocate memory");
950 }
951 else
952 {
953 // Free the test allocation by handing it to a Move pointer, whose destructor releases it
954 Move<VkDeviceMemory> memoryAllocated (check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device, DE_NULL));
955 allocationPossible = true;
956 }
957 }
958
959 context.getTestContext().getLog()
960 << tcu::TestLog::Message << "Using an image with size (width, height, depth, layers) = " << imageSize << tcu::TestLog::EndMessage;
961
962 // "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
963 const deInt32 numSlices = maxLayersOrDepth(imageSize);
964
965 // Determine the verification bounds. The checked region will be in the center of the rendered image
966 const IVec4 checkSize = tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE,
967 MAX_VERIFICATION_REGION_SIZE,
968 MAX_VERIFICATION_REGION_DEPTH,
969 MAX_VERIFICATION_REGION_DEPTH));
970 const IVec4 checkOffset = (imageSize - checkSize) / 2;
971
972 // Only make enough space for the check region
973 const VkDeviceSize colorBufferSize = product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
974 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
975 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
976
977 {
978 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
979 flushAlloc(vk, device, *colorBufferAlloc);
980 }
981
982 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
983 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
984 const Unique<VkRenderPass> renderPass (makeRenderPass (vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
985 (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
986 : VK_IMAGE_LAYOUT_UNDEFINED));
987 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
988 vector<SharedPtrVkPipeline> pipelines;
989
990 Move<VkImage> colorImage;
991 MovePtr<Allocation> colorImageAlloc;
992 vector<SharedPtrVkImageView> colorAttachments;
993 Move<VkImage> depthStencilImage;
994 MovePtr<Allocation> depthStencilImageAlloc;
995 vector<SharedPtrVkImageView> depthStencilAttachments;
996 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
997 Move<VkBuffer> vertexBuffer;
998 MovePtr<Allocation> vertexBufferAlloc;
999 Move<VkFramebuffer> framebuffer;
1000
1001 // Create a color image
1002 {
1003 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1004
1005 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
1006 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
1007 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1008 }
1009
1010 // Create a depth/stencil image (always a 2D image, optionally layered)
1011 if (useDepthStencil)
1012 {
1013 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1014
1015 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1016 IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, imageUsage);
1017 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1018 }
1019
1020 // Create a vertex buffer
1021 {
1022 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1023 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1024
1025 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1026 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1027
1028 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1029 flushAlloc(vk, device, *vertexBufferAlloc);
1030 }
1031
1032 // Prepare color image upfront for rendering to individual slices. 3D slices aren't separate subresources, so they shouldn't be transitioned
1033 // during each subpass like array layers.
1034 if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
1035 {
1036 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1037 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1038
1039 beginCommandBuffer(vk, *cmdBuffer);
1040
1041 const VkImageMemoryBarrier imageBarrier =
1042 {
1043 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1044 DE_NULL, // const void* pNext;
1045 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1046 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1047 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1048 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1049 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1050 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1051 *colorImage, // VkImage image;
1052 { // VkImageSubresourceRange subresourceRange;
1053 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1054 0u, // uint32_t baseMipLevel;
1055 1u, // uint32_t levelCount;
1056 0u, // uint32_t baseArrayLayer;
1057 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1058 }
1059 };
1060
1061 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u,
1062 0u, DE_NULL, 0u, DE_NULL, 1u, &imageBarrier);
1063
1064 endCommandBuffer(vk, *cmdBuffer);
1065 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1066 }
1067
1068 // For each image layer or slice (3D), create an attachment and a pipeline
1069 {
1070 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1071 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1072 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1073 VkPipeline basePipeline = DE_NULL;
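// The first pipeline is created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT and then serves as the
// base for the remaining pipelines, which use VK_PIPELINE_CREATE_DERIVATIVE_BIT (see the flags
// selection in makeGraphicsPipeline()).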
1074
1075 // Color attachments are first in the framebuffer
1076 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1077 {
1078 colorAttachments.push_back(makeSharedPtr(
1079 makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
1080 attachmentHandles.push_back(**colorAttachments.back());
1081
1082 // We also have to create pipelines for each subpass
1083 pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
1084 vk, device, basePipeline, *pipelineLayout, *renderPass, *vertexModule, *fragmentModule, imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1085 static_cast<deUint32>(subpassNdx), useDepth, useStencil)));
1086
1087 basePipeline = **pipelines.front();
1088 }
1089
1090 // Then D/S attachments, if any
1091 if (useDepthStencil)
1092 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1093 {
1094 depthStencilAttachments.push_back(makeSharedPtr(
1095 makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat, makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
1096 attachmentHandles.push_back(**depthStencilAttachments.back());
1097 }
1098 }
1099
1100 framebuffer = makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0], static_cast<deUint32>(imageSize.x()), static_cast<deUint32>(imageSize.y()));
1101
1102 {
1103 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1104 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1105
1106 beginCommandBuffer(vk, *cmdBuffer);
1107 {
1108 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1109
1110 if (useDepthStencil)
1111 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1112
1113 const VkDeviceSize vertexBufferOffset = 0ull;
1114
1115 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, imageSize.x(), imageSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1116 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1117 }
1118
1119 // Draw
1120 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1121 {
1122 if (subpassNdx != 0)
1123 vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1124
1125 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
1126 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1127 }
1128
1129 endRenderPass(vk, *cmdBuffer);
1130
1131 // Copy colorImage -> host visible colorBuffer
1132 {
1133 const VkImageMemoryBarrier imageBarriers[] =
1134 {
1135 {
1136 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1137 DE_NULL, // const void* pNext;
1138 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1139 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1140 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1141 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1142 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1143 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1144 *colorImage, // VkImage image;
1145 makeColorSubresourceRange(0, imageSize.w()) // VkImageSubresourceRange subresourceRange;
1146 }
1147 };
1148
1149 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1150 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1151
1152 // Copy the checked region rather than the whole image
1153 const VkImageSubresourceLayers subresource =
1154 {
1155 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1156 0u, // uint32_t mipLevel;
1157 static_cast<deUint32>(checkOffset.w()), // uint32_t baseArrayLayer;
1158 static_cast<deUint32>(checkSize.w()), // uint32_t layerCount;
1159 };
1160
1161 const VkBufferImageCopy region =
1162 {
1163 0ull, // VkDeviceSize bufferOffset;
1164 0u, // uint32_t bufferRowLength;
1165 0u, // uint32_t bufferImageHeight;
1166 subresource, // VkImageSubresourceLayers imageSubresource;
1167 makeOffset3D(checkOffset.x(), checkOffset.y(), checkOffset.z()), // VkOffset3D imageOffset;
1168 makeExtent3D(checkSize.swizzle(0, 1, 2)), // VkExtent3D imageExtent;
1169 };
1170
1171 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ®ion);
1172
1173 const VkBufferMemoryBarrier bufferBarriers[] =
1174 {
1175 {
1176 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1177 DE_NULL, // const void* pNext;
1178 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1179 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1180 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1181 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1182 *colorBuffer, // VkBuffer buffer;
1183 0ull, // VkDeviceSize offset;
1184 VK_WHOLE_SIZE, // VkDeviceSize size;
1185 },
1186 };
1187
1188 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1189 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1190 }
1191
1192 endCommandBuffer(vk, *cmdBuffer);
1193 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1194 }
1195
1196 // Verify results
1197 {
1198 invalidateAlloc(vk, device, *colorBufferAlloc);
1199
1200 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1201 const int checkDepth = maxLayersOrDepth(checkSize);
1202 const int depthOffset = maxLayersOrDepth(checkOffset);
1203 const tcu::ConstPixelBufferAccess resultImage (format, checkSize.x(), checkSize.y(), checkDepth, colorBufferAlloc->getHostPtr());
1204 tcu::TextureLevel textureLevel (format, checkSize.x(), checkSize.y(), checkDepth);
1205 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1206 bool ok = false;
1207
1208 generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);
1209
1210 if (isFloatFormat(caseDef.colorFormat))
1211 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1212 else
1213 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1214
1215 return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1216 }
1217 }
1218
1219 void checkImageViewTypeRequirements (Context& context, const VkImageViewType viewType)
1220 {
1221 if (viewType == VK_IMAGE_VIEW_TYPE_3D)
1222 {
1223 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1224 !context.getPortabilitySubsetFeatures().imageView2DOn3DImage)
1225 {
1226 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Implementation does not support 2D or 2D array image view to be created on a 3D VkImage");
1227 }
1228
1229 context.requireDeviceFunctionality("VK_KHR_maintenance1");
1230 }
1231
1232 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1233 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1234 }
1235
1236 void checkSupportAttachmentSize (Context& context, const CaseDef caseDef)
1237 {
1238 checkImageViewTypeRequirements(context, caseDef.viewType);
1239
1240 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1241 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1242
1243 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED && !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.depthStencilFormat))
1244 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1245 }
1246
1247 //! A test that can exercise very big color and depth/stencil attachment sizes.
1248 //! If the total memory consumed by the images is too large, or if the implementation returns an OUT_OF_MEMORY error somewhere,
1249 //! the attachment size is reduced step by step until the images fit into memory.
1250 tcu::TestStatus testAttachmentSize (Context& context, const CaseDef caseDef)
1251 {
1252 return testWithSizeReduction(context, caseDef);
1253 // Never reached
1254 }
1255
1256 vector<IVec4> getMipLevelSizes (IVec4 baseSize)
1257 {
1258 vector<IVec4> levels;
1259 levels.push_back(baseSize);
1260
1261 while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
1262 {
1263 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
1264 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
1265 baseSize.z() = deMax32(baseSize.z() >> 1, 1);
1266 levels.push_back(baseSize);
1267 }
1268
1269 return levels;
1270 }
1271
1272 //! Compute memory consumed by each mip level, including all layers. Sizes include a padding for alignment.
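//! For example (hypothetical numbers): a base level of (8, 8, 1, 1) with a 4-byte pixel format gives
//! levels (8,8,1,1), (4,4,1,1), (2,2,1,1), (1,1,1,1) and per-level storage sizes of 256, 64, 16 and
//! 16 bytes after aligning each level to 16 bytes.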
1273 vector<VkDeviceSize> getPerMipLevelStorageSize (const vector<IVec4>& mipLevelSizes, const VkDeviceSize pixelSize)
1274 {
1275 const deInt64 levelAlignment = 16;
1276 vector<VkDeviceSize> storageSizes;
1277
1278 for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
1279 storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));
1280
1281 return storageSizes;
1282 }
1283
1284 void drawToMipLevel (const Context& context,
1285 const CaseDef& caseDef,
1286 const int mipLevel,
1287 const IVec4& mipSize,
1288 const int numSlices,
1289 const VkImage colorImage,
1290 const VkImage depthStencilImage,
1291 const VkBuffer vertexBuffer,
1292 const VkPipelineLayout pipelineLayout,
1293 const VkShaderModule vertexModule,
1294 const VkShaderModule fragmentModule)
1295 {
1296 const DeviceInterface& vk = context.getDeviceInterface();
1297 const VkDevice device = context.getDevice();
1298 const VkQueue queue = context.getUniversalQueue();
1299 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1300 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1301 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1302 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1303 const Unique<VkRenderPass> renderPass (makeRenderPass(vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
1304 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1305 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
1306 vector<SharedPtrVkPipeline> pipelines;
1307 vector<SharedPtrVkImageView> colorAttachments;
1308 vector<SharedPtrVkImageView> depthStencilAttachments;
1309 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
1310
1311 // For each image layer or slice (3D), create an attachment and a pipeline
1312 {
1313 VkPipeline basePipeline = DE_NULL;
1314
1315 // Color attachments are first in the framebuffer
1316 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1317 {
1318 colorAttachments.push_back(makeSharedPtr(makeImageView(
1319 vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
1320 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
1321 attachmentHandles.push_back(**colorAttachments.back());
1322
1323 			// We also have to create a pipeline for each subpass
1324 pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
1325 vk, device, basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule, mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1326 static_cast<deUint32>(subpassNdx), useDepth, useStencil)));
1327
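			// The first pipeline created is passed as the base pipeline when creating the remaining ones.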
1328 basePipeline = **pipelines.front();
1329 }
1330
1331 // Then D/S attachments, if any
1332 if (useDepth || useStencil)
1333 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1334 {
1335 depthStencilAttachments.push_back(makeSharedPtr(makeImageView(
1336 vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
1337 makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
1338 attachmentHandles.push_back(**depthStencilAttachments.back());
1339 }
1340 }
1341
1342 const Unique<VkFramebuffer> framebuffer (makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0],
1343 static_cast<deUint32>(mipSize.x()), static_cast<deUint32>(mipSize.y())));
1344
1345 {
1346 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1347 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1348
1349 beginCommandBuffer(vk, *cmdBuffer);
1350 {
1351 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1352
1353 if (useDepth || useStencil)
1354 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1355
1356 const VkDeviceSize vertexBufferOffset = 0ull;
1357
1358 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, mipSize.x(), mipSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1359 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1360 }
1361
1362 // Draw
1363 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1364 {
1365 if (subpassNdx != 0)
1366 vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1367
1368 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
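			// Each slice draws its own quad: 4 vertices starting at offset subpassNdx * 4 in the vertex buffer.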
1369 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1370 }
1371
1372 endRenderPass(vk, *cmdBuffer);
1373
1374 endCommandBuffer(vk, *cmdBuffer);
1375 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1376 }
1377 }
1378
1379 void checkSupportRenderToMipMaps (Context& context, const CaseDef caseDef)
1380 {
1381 checkImageViewTypeRequirements(context, caseDef.viewType);
1382
1383 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1384 context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
1385
1386 if (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED && !isDepthStencilFormatSupported(context.getInstanceInterface(), context.getPhysicalDevice(), caseDef.depthStencilFormat))
1387 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1388 }
1389
1390 //! Use image mip levels as attachments
1391 tcu::TestStatus testRenderToMipMaps (Context& context, const CaseDef caseDef)
1392 {
1393 const DeviceInterface& vk = context.getDeviceInterface();
1394 const InstanceInterface& vki = context.getInstanceInterface();
1395 const VkDevice device = context.getDevice();
1396 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1397 const VkQueue queue = context.getUniversalQueue();
1398 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1399 Allocator& allocator = context.getDefaultAllocator();
1400
1401 const IVec4 imageSize = caseDef.imageSizeHint; // MAX_SIZE is not used in this test
1402 const deInt32 numSlices = maxLayersOrDepth(imageSize);
1403 const vector<IVec4> mipLevelSizes = getMipLevelSizes(imageSize);
1404 const vector<VkDeviceSize> mipLevelStorageSizes = getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat)));
1405 const int numMipLevels = static_cast<int>(mipLevelSizes.size());
1406 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
1407
1408 // Create a color buffer big enough to hold all layers and mip levels
1409 const VkDeviceSize colorBufferSize = sum(mipLevelStorageSizes);
1410 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1411 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
1412
1413 {
1414 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
1415 flushAlloc(vk, device, *colorBufferAlloc);
1416 }
1417
1418 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
1419 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
1420 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
1421
1422 Move<VkImage> colorImage;
1423 MovePtr<Allocation> colorImageAlloc;
1424 Move<VkImage> depthStencilImage;
1425 MovePtr<Allocation> depthStencilImageAlloc;
1426 Move<VkBuffer> vertexBuffer;
1427 MovePtr<Allocation> vertexBufferAlloc;
1428
1429 // Create a color image
1430 {
1431 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1432
1433 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
1434 imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage);
1435 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1436 }
1437
1438 // Create a depth/stencil image (always a 2D image, optionally layered)
1439 if (useDepthStencil)
1440 {
1441 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1442
1443 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1444 IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage);
1445 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1446 }
1447
1448 // Create a vertex buffer
1449 {
1450 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1451 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1452
1453 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1454 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1455
1456 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1457 flushAlloc(vk, device, *vertexBufferAlloc);
1458 }
1459
1460 // Prepare images
1461 {
1462 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1463 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1464
1465 beginCommandBuffer(vk, *cmdBuffer);
1466
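		// Transition all mip levels and layers of the attachment images from UNDEFINED to the appropriate attachment-optimal layout before rendering.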
1467 const VkImageMemoryBarrier imageBarriers[] =
1468 {
1469 {
1470 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1471 DE_NULL, // const void* pNext;
1472 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1473 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1474 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1475 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1476 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1477 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1478 *colorImage, // VkImage image;
1479 { // VkImageSubresourceRange subresourceRange;
1480 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1481 0u, // uint32_t baseMipLevel;
1482 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1483 0u, // uint32_t baseArrayLayer;
1484 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1485 },
1486 },
1487 {
1488 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1489 DE_NULL, // const void* pNext;
1490 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1491 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1492 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1493 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1494 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1495 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1496 *depthStencilImage, // VkImage image;
1497 { // VkImageSubresourceRange subresourceRange;
1498 getFormatAspectFlags(caseDef.depthStencilFormat), // VkImageAspectFlags aspectMask;
1499 0u, // uint32_t baseMipLevel;
1500 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1501 0u, // uint32_t baseArrayLayer;
1502 static_cast<deUint32>(numSlices), // uint32_t layerCount;
1503 },
1504 }
1505 };
1506
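		// Omit the depth/stencil barrier (the last array element) when no depth/stencil attachment is used.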
1507 const deUint32 numImageBarriers = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1));
1508
1509 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 0u,
1510 0u, DE_NULL, 0u, DE_NULL, numImageBarriers, imageBarriers);
1511
1512 endCommandBuffer(vk, *cmdBuffer);
1513 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1514 }
1515
1516 // Draw
1517 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1518 {
1519 const IVec4& mipSize = mipLevelSizes[mipLevel];
1520 const int levelSlices = maxLayersOrDepth(mipSize);
1521
1522 drawToMipLevel (context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer, *pipelineLayout,
1523 *vertexModule, *fragmentModule);
1524 }
1525
1526 // Copy results: colorImage -> host visible colorBuffer
1527 {
1528 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1529 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1530
1531 beginCommandBuffer(vk, *cmdBuffer);
1532
1533 {
1534 const VkImageMemoryBarrier imageBarriers[] =
1535 {
1536 {
1537 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1538 DE_NULL, // const void* pNext;
1539 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1540 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1541 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1542 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1543 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1544 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1545 *colorImage, // VkImage image;
1546 { // VkImageSubresourceRange subresourceRange;
1547 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1548 0u, // uint32_t baseMipLevel;
1549 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1550 0u, // uint32_t baseArrayLayer;
1551 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1552 },
1553 }
1554 };
1555
1556 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1557 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1558 }
1559 {
1560 vector<VkBufferImageCopy> regions;
1561 VkDeviceSize levelOffset = 0ull;
1562 VkBufferImageCopy workRegion =
1563 {
1564 0ull, // VkDeviceSize bufferOffset;
1565 0u, // uint32_t bufferRowLength;
1566 0u, // uint32_t bufferImageHeight;
1567 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, imageSize.w()), // VkImageSubresourceLayers imageSubresource;
1568 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1569 makeExtent3D(0, 0, 0), // VkExtent3D imageExtent;
1570 };
1571
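			// One copy region per mip level; the buffer offset advances by the aligned per-level storage size.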
1572 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1573 {
1574 workRegion.bufferOffset = levelOffset;
1575 workRegion.imageSubresource.mipLevel = static_cast<deUint32>(mipLevel);
1576 workRegion.imageExtent = makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 1, 2));
1577
1578 regions.push_back(workRegion);
1579
1580 levelOffset += mipLevelStorageSizes[mipLevel];
1581 }
1582
1583 			vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, static_cast<deUint32>(regions.size()), &regions[0]);
1584 }
1585 {
1586 const VkBufferMemoryBarrier bufferBarriers[] =
1587 {
1588 {
1589 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1590 DE_NULL, // const void* pNext;
1591 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1592 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1593 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1594 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1595 *colorBuffer, // VkBuffer buffer;
1596 0ull, // VkDeviceSize offset;
1597 VK_WHOLE_SIZE, // VkDeviceSize size;
1598 },
1599 };
1600
1601 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1602 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1603 }
1604
1605 endCommandBuffer(vk, *cmdBuffer);
1606 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1607 }
1608
1609 // Verify results (per mip level)
1610 {
1611 invalidateAlloc(vk, device, *colorBufferAlloc);
1612
1613 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1614
1615 VkDeviceSize levelOffset = 0ull;
1616 bool allOk = true;
1617
1618 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1619 {
1620 const IVec4& mipSize = mipLevelSizes[mipLevel];
1621 const void* const pLevelData = static_cast<const deUint8*>(colorBufferAlloc->getHostPtr()) + levelOffset;
1622 const int levelDepth = maxLayersOrDepth(mipSize);
1623 const tcu::ConstPixelBufferAccess resultImage (format, mipSize.x(), mipSize.y(), levelDepth, pLevelData);
1624 tcu::TextureLevel textureLevel (format, mipSize.x(), mipSize.y(), levelDepth);
1625 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1626 const std::string comparisonName = "Mip level " + de::toString(mipLevel);
1627 bool ok = false;
1628
1629 generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0);
1630
1631 if (isFloatFormat(caseDef.colorFormat))
1632 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1633 else
1634 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1635
1636 allOk = allOk && ok; // keep testing all levels, even if we know it's a fail overall
1637 levelOffset += mipLevelStorageSizes[mipLevel];
1638 }
1639
1640 return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1641 }
1642 }
1643
1644 std::string getSizeDescription (const IVec4& size)
1645 {
1646 std::ostringstream str;
1647
1648 const char* const description[4] =
1649 {
1650 "width", "height", "depth", "layers"
1651 };
1652
1653 int numMaxComponents = 0;
1654
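	// Build the name from the dimensions that use MAX_SIZE (e.g. "width_layers"); a size without any MAX_SIZE component is called "small".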
1655 for (int i = 0; i < 4; ++i)
1656 {
1657 if (size[i] == MAX_SIZE)
1658 {
1659 if (numMaxComponents > 0)
1660 str << "_";
1661
1662 str << description[i];
1663 ++numMaxComponents;
1664 }
1665 }
1666
1667 if (numMaxComponents == 0)
1668 str << "small";
1669
1670 return str.str();
1671 }
1672
1673 inline std::string getFormatString (const VkFormat format)
1674 {
1675 std::string name(getFormatName(format));
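	// Strip the "VK_FORMAT_" prefix (10 characters) and lowercase the remainder.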
1676 return de::toLower(name.substr(10));
1677 }
1678
1679 std::string getFormatString (const VkFormat colorFormat, const VkFormat depthStencilFormat)
1680 {
1681 std::ostringstream str;
1682 str << getFormatString(colorFormat);
1683 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
1684 str << "_" << getFormatString(depthStencilFormat);
1685 return str.str();
1686 }
1687
1688 std::string getShortImageViewTypeName (const VkImageViewType imageViewType)
1689 {
1690 std::string s(getImageViewTypeName(imageViewType));
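	// Strip the "VK_IMAGE_VIEW_TYPE_" prefix (19 characters) and lowercase the remainder.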
1691 return de::toLower(s.substr(19));
1692 }
1693
1694 inline BVec4 bvecFromMask (deUint32 mask)
1695 {
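	// Expand the low four bits of the mask into per-component booleans (width, height, depth, layers).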
1696 return BVec4((mask >> 0) & 1,
1697 (mask >> 1) & 1,
1698 (mask >> 2) & 1,
1699 (mask >> 3) & 1);
1700 }
1701
1702 vector<IVec4> genSizeCombinations (const IVec4& baselineSize, const deUint32 sizeMask, const VkImageViewType imageViewType)
1703 {
1704 vector<IVec4> sizes;
1705 std::set<deUint32> masks;
1706
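	// Each bit allowed by sizeMask marks a dimension that is replaced with the symbolic MAX_SIZE; the zero mask keeps the baseline size.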
1707 for (deUint32 i = 0; i < (1u << 4); ++i)
1708 {
1709 // Cube images have square faces
1710 if (isCube(imageViewType) && ((i & MASK_WH) != 0))
1711 i |= MASK_WH;
1712
1713 masks.insert(i & sizeMask);
1714 }
1715
1716 for (std::set<deUint32>::const_iterator it = masks.begin(); it != masks.end(); ++it)
1717 sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it)));
1718
1719 return sizes;
1720 }
1721
1722 void addTestCasesWithFunctions (tcu::TestCaseGroup* group, AllocationKind allocationKind)
1723 {
1724 const struct
1725 {
1726 VkImageViewType viewType;
1727 IVec4 baselineSize; //!< image size: (dimX, dimY, dimZ, arraySize)
1728 deUint32 sizeMask; //!< if a dimension is masked, generate a huge size case for it
1729 } testCase[] =
1730 {
1731 { VK_IMAGE_VIEW_TYPE_1D, IVec4(54, 1, 1, 1), MASK_W },
1732 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, IVec4(54, 1, 1, 4), MASK_W_LAYERS },
1733 { VK_IMAGE_VIEW_TYPE_2D, IVec4(44, 23, 1, 1), MASK_WH },
1734 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, IVec4(44, 23, 1, 4), MASK_WH_LAYERS },
1735 { VK_IMAGE_VIEW_TYPE_3D, IVec4(22, 31, 7, 1), MASK_WHD },
1736 { VK_IMAGE_VIEW_TYPE_CUBE, IVec4(35, 35, 1, 6), MASK_WH },
1737 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, IVec4(35, 35, 1, 2*6), MASK_WH_LAYERS },
1738 };
1739
1740 const VkFormat format[] =
1741 {
1742 VK_FORMAT_R8G8B8A8_UNORM,
1743 VK_FORMAT_R32_UINT,
1744 VK_FORMAT_R16G16_SINT,
1745 VK_FORMAT_R32G32B32A32_SFLOAT,
1746 VK_FORMAT_A1R5G5B5_UNORM_PACK16,
1747 VK_FORMAT_R5G6B5_UNORM_PACK16,
1748 VK_FORMAT_A2B10G10R10_UINT_PACK32,
1749 VK_FORMAT_A2B10G10R10_UNORM_PACK32
1750 };
1751
1752 const VkFormat depthStencilFormat[] =
1753 {
1754 VK_FORMAT_UNDEFINED, // don't use a depth/stencil attachment
1755 VK_FORMAT_D16_UNORM,
1756 VK_FORMAT_S8_UINT,
1757 VK_FORMAT_D24_UNORM_S8_UINT, // one of the following mixed formats must be supported
1758 VK_FORMAT_D32_SFLOAT_S8_UINT,
1759 };
1760
1761 for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx)
1762 {
1763 MovePtr<tcu::TestCaseGroup> imageGroup(new tcu::TestCaseGroup(group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str(), ""));
1764
1765 // Generate attachment size cases
1766 {
1767 const vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask, testCase[caseNdx].viewType);
1768
1769 MovePtr<tcu::TestCaseGroup> smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small", ""));
1770 MovePtr<tcu::TestCaseGroup> hugeGroup (new tcu::TestCaseGroup(group->getTestContext(), "huge", ""));
1771
1772 imageGroup->addChild(smallGroup.get());
1773 imageGroup->addChild(hugeGroup.get());
1774
1775 for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter)
1776 {
1777 // The first size is the baseline size, put it in a dedicated group
1778 if (sizeIter == sizes.begin())
1779 {
1780 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1781 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1782 {
1783 const CaseDef caseDef =
1784 {
1785 					testCase[caseNdx].viewType, // VkImageViewType viewType;
1786 *sizeIter, // IVec4 imageSizeHint;
1787 format[formatNdx], // VkFormat colorFormat;
1788 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1789 allocationKind // AllocationKind allocationKind;
1790 };
1791 addFunctionCaseWithPrograms(smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
1792 }
1793 }
1794 else // All huge cases go into a separate group
1795 {
1796 if (allocationKind != ALLOCATION_KIND_DEDICATED)
1797 {
1798 MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str(), ""));
1799 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
1800
1801 // Use the same color format for all cases, to reduce the number of permutations
1802 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1803 {
1804 const CaseDef caseDef =
1805 {
1806 testCase[caseNdx].viewType, // VkImageViewType viewType;
1807 *sizeIter, // IVec4 imageSizeHint;
1808 colorFormat, // VkFormat colorFormat;
1809 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1810 allocationKind // AllocationKind allocationKind;
1811 };
1812 addFunctionCaseWithPrograms(sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]), "", checkSupportAttachmentSize, initPrograms, testAttachmentSize, caseDef);
1813 }
1814 hugeGroup->addChild(sizeGroup.release());
1815 }
1816 }
1817 }
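			// The framework's addChild() took ownership of the groups above; release the local pointers so they are not deleted twice.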
1818 smallGroup.release();
1819 hugeGroup.release();
1820 }
1821
1822 // Generate mip map cases
1823 {
1824 MovePtr<tcu::TestCaseGroup> mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap", ""));
1825
1826 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1827 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1828 {
1829 const CaseDef caseDef =
1830 {
1831 					testCase[caseNdx].viewType, // VkImageViewType viewType;
1832 testCase[caseNdx].baselineSize, // IVec4 imageSizeHint;
1833 format[formatNdx], // VkFormat colorFormat;
1834 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1835 allocationKind // AllocationKind allocationKind;
1836 };
1837 addFunctionCaseWithPrograms(mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", checkSupportRenderToMipMaps, initPrograms, testRenderToMipMaps, caseDef);
1838 }
1839 imageGroup->addChild(mipmapGroup.release());
1840 }
1841
1842 group->addChild(imageGroup.release());
1843 }
1844 }
1845
1846 void addCoreRenderToImageTests (tcu::TestCaseGroup* group)
1847 {
1848 addTestCasesWithFunctions(group, ALLOCATION_KIND_SUBALLOCATED);
1849 }
1850
1851 void addDedicatedAllocationRenderToImageTests (tcu::TestCaseGroup* group)
1852 {
1853 addTestCasesWithFunctions(group, ALLOCATION_KIND_DEDICATED);
1854 }
1855
1856 } // anonymous ns
1857
1858 tcu::TestCaseGroup* createRenderToImageTests (tcu::TestContext& testCtx)
1859 {
1860 de::MovePtr<tcu::TestCaseGroup> renderToImageTests (new tcu::TestCaseGroup(testCtx, "render_to_image", "Render to image tests"));
1861
1862 renderToImageTests->addChild(createTestGroup(testCtx, "core", "Core render to image tests", addCoreRenderToImageTests));
1863 renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation", "Render to image tests for dedicated memory allocation", addDedicatedAllocationRenderToImageTests));
1864
1865 return renderToImageTests.release();
1866 }
1867
1868 } // pipeline
1869 } // vkt
1870