1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file vktPipelineRenderToImageTests.cpp
21 * \brief Render to image tests
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktPipelineRenderToImageTests.hpp"
25 #include "vktPipelineMakeUtil.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vktPipelineVertexUtil.hpp"
29 #include "vktTestGroupUtil.hpp"
30
31 #include "vkMemUtil.hpp"
32 #include "vkQueryUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkRefUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkPrograms.hpp"
37 #include "vkImageUtil.hpp"
38 #include "vkCmdUtil.hpp"
39
40 #include "tcuTextureUtil.hpp"
41 #include "tcuImageCompare.hpp"
42 #include "tcuTestLog.hpp"
43
44 #include "deUniquePtr.hpp"
45 #include "deSharedPtr.hpp"
46
47 #include <string>
48 #include <vector>
49 #include <set>
50
51 namespace vkt
52 {
53 namespace pipeline
54 {
55 namespace
56 {
57 using namespace vk;
58 using de::UniquePtr;
59 using de::MovePtr;
60 using de::SharedPtr;
61 using tcu::IVec3;
62 using tcu::Vec4;
63 using tcu::UVec4;
64 using tcu::IVec2;
65 using tcu::IVec4;
66 using tcu::BVec4;
67 using std::vector;
68
69 typedef SharedPtr<Unique<VkImageView> > SharedPtrVkImageView;
70 typedef SharedPtr<Unique<VkPipeline> > SharedPtrVkPipeline;
71
72 enum Constants
73 {
74 NUM_CUBE_FACES = 6,
75 REFERENCE_COLOR_VALUE = 125,
76 REFERENCE_STENCIL_VALUE = 42,
77 MAX_SIZE = -1, //!< Should be queried at runtime and replaced with max possible value
78 MAX_VERIFICATION_REGION_SIZE = 32, //!< Limit the checked area to a small size, especially for huge images
79 MAX_VERIFICATION_REGION_DEPTH = 8,
80
81 MASK_W = (1 | 0 | 0 | 0),
82 MASK_W_LAYERS = (1 | 0 | 0 | 8),
83 MASK_WH = (1 | 2 | 0 | 0),
84 MASK_WH_LAYERS = (1 | 2 | 0 | 8),
85 MASK_WHD = (1 | 2 | 4 | 0),
86 };
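// Note: the MASK_* values above pack one bit per size component (width = 1, height = 2, depth = 4, layers = 8).
// Their consumers are outside this excerpt; presumably they select which components of an image size hint are
// driven to MAX_SIZE when test cases are generated.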
87
88 enum AllocationKind
89 {
90 ALLOCATION_KIND_SUBALLOCATED = 0,
91 ALLOCATION_KIND_DEDICATED,
92 };
93
94 static const float REFERENCE_DEPTH_VALUE = 1.0f;
95 static const Vec4 COLOR_TABLE[] =
96 {
97 Vec4(0.9f, 0.0f, 0.0f, 1.0f),
98 Vec4(0.6f, 1.0f, 0.0f, 1.0f),
99 Vec4(0.3f, 0.0f, 1.0f, 1.0f),
100 Vec4(0.1f, 1.0f, 1.0f, 1.0f),
101 Vec4(0.8f, 1.0f, 0.0f, 1.0f),
102 Vec4(0.5f, 0.0f, 1.0f, 1.0f),
103 Vec4(0.2f, 0.0f, 0.0f, 1.0f),
104 Vec4(1.0f, 1.0f, 0.0f, 1.0f),
105 };
106
107 struct CaseDef
108 {
109 VkImageViewType viewType;
110 IVec4 imageSizeHint; //!< (w, h, d, layers), a component may have a symbolic value MAX_SIZE
111 VkFormat colorFormat;
112 VkFormat depthStencilFormat; //!< A depth/stencil format, or UNDEFINED if not used
113 AllocationKind allocationKind;
114 };
115
116 template<typename T>
117 inline SharedPtr<Unique<T> > makeSharedPtr (Move<T> move)
118 {
119 return SharedPtr<Unique<T> >(new Unique<T>(move));
120 }
121
122 template<typename T>
123 inline VkDeviceSize sizeInBytes (const vector<T>& vec)
124 {
125 return vec.size() * sizeof(vec[0]);
126 }
127
128 inline bool isCube (const VkImageViewType viewType)
129 {
130 return (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
131 }
132
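//! Product of all four components, e.g. the total texel count of a (width, height, depth, layers) size vector.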
133 inline VkDeviceSize product (const IVec4& v)
134 {
135 return ((static_cast<VkDeviceSize>(v.x()) * v.y()) * v.z()) * v.w();
136 }
137
138 template<typename T>
139 inline T sum (const vector<T>& v)
140 {
141 T total = static_cast<T>(0);
142 for (typename vector<T>::const_iterator it = v.begin(); it != v.end(); ++it)
143 total += *it;
144 return total;
145 }
146
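//! Index of the largest component in a vector; on ties the first occurrence wins.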
147 template <typename T, int Size>
148 int findIndexOfMaxComponent (const tcu::Vector<T, Size>& vec)
149 {
150 int index = 0;
151 T value = vec[0];
152
153 for (int i = 1; i < Size; ++i)
154 {
155 if (vec[i] > value)
156 {
157 index = i;
158 value = vec[i];
159 }
160 }
161
162 return index;
163 }
164
165 inline int maxLayersOrDepth (const IVec4& size)
166 {
167 // This is safe because 3D images must have layers (w) = 1
168 return deMax32(size.z(), size.w());
169 }
170
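//! Allocate memory for a buffer and bind it, either by suballocating from the allocator or with a dedicated allocation.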
171 de::MovePtr<Allocation> bindBuffer (const InstanceInterface& vki,
172 const DeviceInterface& vkd,
173 const VkPhysicalDevice& physDevice,
174 const VkDevice device,
175 const VkBuffer& buffer,
176 const MemoryRequirement requirement,
177 Allocator& allocator,
178 AllocationKind allocationKind)
179 {
180 switch (allocationKind)
181 {
182 case ALLOCATION_KIND_SUBALLOCATED:
183 {
184 return ::vkt::pipeline::bindBuffer(vkd, device, allocator, buffer, requirement);
185 }
186
187 case ALLOCATION_KIND_DEDICATED:
188 {
189 return bindBufferDedicated(vki, vkd, physDevice, device, buffer, requirement);
190 }
191
192 default:
193 {
194 TCU_THROW(InternalError, "Invalid allocation kind");
195 }
196 }
197 }
198
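//! Allocate memory for an image and bind it, either by suballocating from the allocator or with a dedicated allocation.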
199 de::MovePtr<Allocation> bindImage (const InstanceInterface& vki,
200 const DeviceInterface& vkd,
201 const VkPhysicalDevice& physDevice,
202 const VkDevice device,
203 const VkImage& image,
204 const MemoryRequirement requirement,
205 Allocator& allocator,
206 AllocationKind allocationKind)
207 {
208 switch (allocationKind)
209 {
210 case ALLOCATION_KIND_SUBALLOCATED:
211 {
212 return ::vkt::pipeline::bindImage(vkd, device, allocator, image, requirement);
213 }
214
215 case ALLOCATION_KIND_DEDICATED:
216 {
217 return bindImageDedicated(vki, vkd, physDevice, device, image, requirement);
218 }
219
220 default:
221 {
222 TCU_THROW(InternalError, "Invalid allocation kind");
223 }
224 }
225 }
226
227 // This is very test specific, so be careful if you want to reuse this code.
228 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
229 const VkDevice device,
230 const VkPipeline basePipeline, // for derivatives
231 const VkPipelineLayout pipelineLayout,
232 const VkRenderPass renderPass,
233 const VkShaderModule vertexModule,
234 const VkShaderModule fragmentModule,
235 const IVec2& renderSize,
236 const VkPrimitiveTopology topology,
237 const deUint32 subpass,
238 const bool useDepth,
239 const bool useStencil)
240 {
241 const VkVertexInputBindingDescription vertexInputBindingDescription =
242 {
243 0u, // uint32_t binding;
244 sizeof(Vertex4RGBA), // uint32_t stride;
245 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
246 };
247
248 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[] =
249 {
250 {
251 0u, // uint32_t location;
252 0u, // uint32_t binding;
253 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
254 0u, // uint32_t offset;
255 },
256 {
257 1u, // uint32_t location;
258 0u, // uint32_t binding;
259 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
260 sizeof(Vec4), // uint32_t offset;
261 }
262 };
263
264 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
265 {
266 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
267 DE_NULL, // const void* pNext;
268 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
269 1u, // uint32_t vertexBindingDescriptionCount;
270 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
271 DE_LENGTH_OF_ARRAY(vertexInputAttributeDescriptions), // uint32_t vertexAttributeDescriptionCount;
272 vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
273 };
274
275 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
276 {
277 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
278 DE_NULL, // const void* pNext;
279 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
280 topology, // VkPrimitiveTopology topology;
281 VK_FALSE, // VkBool32 primitiveRestartEnable;
282 };
283
284 const VkViewport viewport = makeViewport(renderSize);
285 const VkRect2D scissor = makeRect2D(renderSize);
286
287 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
288 {
289 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
290 DE_NULL, // const void* pNext;
291 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
292 1u, // uint32_t viewportCount;
293 &viewport, // const VkViewport* pViewports;
294 1u, // uint32_t scissorCount;
295 &scissor, // const VkRect2D* pScissors;
296 };
297
298 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
299 {
300 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
301 DE_NULL, // const void* pNext;
302 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
303 VK_FALSE, // VkBool32 depthClampEnable;
304 VK_FALSE, // VkBool32 rasterizerDiscardEnable;
305 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
306 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
307 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
308 VK_FALSE, // VkBool32 depthBiasEnable;
309 0.0f, // float depthBiasConstantFactor;
310 0.0f, // float depthBiasClamp;
311 0.0f, // float depthBiasSlopeFactor;
312 1.0f, // float lineWidth;
313 };
314
315 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
316 {
317 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
318 DE_NULL, // const void* pNext;
319 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
320 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
321 VK_FALSE, // VkBool32 sampleShadingEnable;
322 0.0f, // float minSampleShading;
323 DE_NULL, // const VkSampleMask* pSampleMask;
324 VK_FALSE, // VkBool32 alphaToCoverageEnable;
325 VK_FALSE // VkBool32 alphaToOneEnable;
326 };
327
328 const VkStencilOpState stencilOpState = makeStencilOpState(
329 VK_STENCIL_OP_KEEP, // stencil fail
330 VK_STENCIL_OP_KEEP, // depth & stencil pass
331 VK_STENCIL_OP_KEEP, // depth only fail
332 VK_COMPARE_OP_EQUAL, // compare op
333 ~0u, // compare mask
334 ~0u, // write mask
335 static_cast<deUint32>(REFERENCE_STENCIL_VALUE)); // reference
336
337 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
338 {
339 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
340 DE_NULL, // const void* pNext;
341 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
342 useDepth, // VkBool32 depthTestEnable;
343 VK_FALSE, // VkBool32 depthWriteEnable;
344 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
345 VK_FALSE, // VkBool32 depthBoundsTestEnable;
346 useStencil, // VkBool32 stencilTestEnable;
347 stencilOpState, // VkStencilOpState front;
348 stencilOpState, // VkStencilOpState back;
349 0.0f, // float minDepthBounds;
350 1.0f, // float maxDepthBounds;
351 };
352
353 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
354 // Number of blend attachments must equal the number of color attachments during any subpass.
355 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
356 {
357 VK_FALSE, // VkBool32 blendEnable;
358 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
359 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
360 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
361 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
362 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
363 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
364 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
365 };
366
367 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
368 {
369 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
370 DE_NULL, // const void* pNext;
371 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
372 VK_FALSE, // VkBool32 logicOpEnable;
373 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
374 1u, // deUint32 attachmentCount;
375 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
376 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
377 };
378
379 const VkPipelineShaderStageCreateInfo pShaderStages[] =
380 {
381 {
382 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
383 DE_NULL, // const void* pNext;
384 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
385 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
386 vertexModule, // VkShaderModule module;
387 "main", // const char* pName;
388 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
389 },
390 {
391 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
392 DE_NULL, // const void* pNext;
393 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
394 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
395 fragmentModule, // VkShaderModule module;
396 "main", // const char* pName;
397 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
398 }
399 };
400
401 const VkPipelineCreateFlags flags = (basePipeline == DE_NULL ? VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
402 : VK_PIPELINE_CREATE_DERIVATIVE_BIT);
403
404 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
405 {
406 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
407 DE_NULL, // const void* pNext;
408 flags, // VkPipelineCreateFlags flags;
409 DE_LENGTH_OF_ARRAY(pShaderStages), // deUint32 stageCount;
410 pShaderStages, // const VkPipelineShaderStageCreateInfo* pStages;
411 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
412 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
413 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
414 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
415 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
416 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
417 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
418 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
419 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
420 pipelineLayout, // VkPipelineLayout layout;
421 renderPass, // VkRenderPass renderPass;
422 subpass, // deUint32 subpass;
423 basePipeline, // VkPipeline basePipelineHandle;
424 -1, // deInt32 basePipelineIndex;
425 };
426
427 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
428 }
429
430 //! Make a render pass with one subpass per color attachment; each subpass also uses its own depth/stencil attachment (if a depth/stencil format is used).
431 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk,
432 const VkDevice device,
433 const VkFormat colorFormat,
434 const VkFormat depthStencilFormat,
435 const deUint32 numLayers,
436 const VkImageLayout initialColorImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
437 const VkImageLayout initialDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED)
438 {
439 const VkAttachmentDescription colorAttachmentDescription =
440 {
441 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
442 colorFormat, // VkFormat format;
443 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
444 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
445 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
446 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
447 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
448 initialColorImageLayout, // VkImageLayout initialLayout;
449 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
450 };
451 vector<VkAttachmentDescription> attachmentDescriptions(numLayers, colorAttachmentDescription);
452
453 const VkAttachmentDescription depthStencilAttachmentDescription =
454 {
455 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
456 depthStencilFormat, // VkFormat format;
457 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
458 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
459 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
460 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp stencilLoadOp;
461 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
462 initialDepthStencilImageLayout, // VkImageLayout initialLayout;
463 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
464 };
465
466 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
467 attachmentDescriptions.insert(attachmentDescriptions.end(), numLayers, depthStencilAttachmentDescription);
468
469 // Create a subpass for each attachment (each attachment is a layer of an arrayed image).
470 vector<VkAttachmentReference> colorAttachmentReferences (numLayers);
471 vector<VkAttachmentReference> depthStencilAttachmentReferences(numLayers);
472 vector<VkSubpassDescription> subpasses;
473
474 // Ordering here must match the framebuffer attachments
475 for (deUint32 i = 0; i < numLayers; ++i)
476 {
477 const VkAttachmentReference attachmentRef =
478 {
479 i, // deUint32 attachment;
480 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
481 };
482 const VkAttachmentReference depthStencilAttachmentRef =
483 {
484 i + numLayers, // deUint32 attachment;
485 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout;
486 };
487
488 colorAttachmentReferences[i] = attachmentRef;
489 depthStencilAttachmentReferences[i] = depthStencilAttachmentRef;
490
491 const VkAttachmentReference* pDepthStencilAttachment = (depthStencilFormat != VK_FORMAT_UNDEFINED ? &depthStencilAttachmentReferences[i] : DE_NULL);
492 const VkSubpassDescription subpassDescription =
493 {
494 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
495 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
496 0u, // deUint32 inputAttachmentCount;
497 DE_NULL, // const VkAttachmentReference* pInputAttachments;
498 1u, // deUint32 colorAttachmentCount;
499 &colorAttachmentReferences[i], // const VkAttachmentReference* pColorAttachments;
500 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
501 pDepthStencilAttachment, // const VkAttachmentReference* pDepthStencilAttachment;
502 0u, // deUint32 preserveAttachmentCount;
503 DE_NULL // const deUint32* pPreserveAttachments;
504 };
505 subpasses.push_back(subpassDescription);
506 }
507
508 const VkRenderPassCreateInfo renderPassInfo =
509 {
510 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
511 DE_NULL, // const void* pNext;
512 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
513 static_cast<deUint32>(attachmentDescriptions.size()), // deUint32 attachmentCount;
514 &attachmentDescriptions[0], // const VkAttachmentDescription* pAttachments;
515 static_cast<deUint32>(subpasses.size()), // deUint32 subpassCount;
516 &subpasses[0], // const VkSubpassDescription* pSubpasses;
517 0u, // deUint32 dependencyCount;
518 DE_NULL // const VkSubpassDependency* pDependencies;
519 };
520
521 return createRenderPass(vk, device, &renderPassInfo);
522 }
523
524 Move<VkImage> makeImage (const DeviceInterface& vk,
525 const VkDevice device,
526 VkImageCreateFlags flags,
527 VkImageType imageType,
528 const VkFormat format,
529 const IVec3& size,
530 const deUint32 numMipLevels,
531 const deUint32 numLayers,
532 const VkImageUsageFlags usage)
533 {
534 const VkImageCreateInfo imageParams =
535 {
536 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
537 DE_NULL, // const void* pNext;
538 flags, // VkImageCreateFlags flags;
539 imageType, // VkImageType imageType;
540 format, // VkFormat format;
541 makeExtent3D(size), // VkExtent3D extent;
542 numMipLevels, // deUint32 mipLevels;
543 numLayers, // deUint32 arrayLayers;
544 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
545 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
546 usage, // VkImageUsageFlags usage;
547 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
548 0u, // deUint32 queueFamilyIndexCount;
549 DE_NULL, // const deUint32* pQueueFamilyIndices;
550 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
551 };
552 return createImage(vk, device, &imageParams);
553 }
554
555 inline Move<VkBuffer> makeBuffer (const DeviceInterface& vk, const VkDevice device, const VkDeviceSize bufferSize, const VkBufferUsageFlags usage)
556 {
557 const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(bufferSize, usage);
558 return createBuffer(vk, device, &bufferCreateInfo);
559 }
560
561 inline VkImageSubresourceRange makeColorSubresourceRange (const int baseArrayLayer, const int layerCount)
562 {
563 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, static_cast<deUint32>(baseArrayLayer), static_cast<deUint32>(layerCount));
564 }
565
566 //! Get a reference clear value based on color format.
567 VkClearValue getClearValue (const VkFormat format)
568 {
569 if (isUintFormat(format) || isIntFormat(format))
570 return makeClearValueColorU32(REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE, REFERENCE_COLOR_VALUE);
571 else
572 return makeClearValueColorF32(1.0f, 1.0f, 1.0f, 1.0f);
573 }
574
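//! GLSL type name matching the color format, e.g. "vec4", "uvec2", or plain "float"/"int"/"uint" for a single component.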
575 std::string getColorFormatStr (const int numComponents, const bool isUint, const bool isSint)
576 {
577 std::ostringstream str;
578 if (numComponents == 1)
579 str << (isUint ? "uint" : isSint ? "int" : "float");
580 else
581 str << (isUint ? "u" : isSint ? "i" : "") << "vec" << numComponents;
582
583 return str.str();
584 }
585
586 //! A quad covering the right half of the viewport (X in [0, 1] in NDC). Use with TRIANGLE_STRIP topology.
587 vector<Vertex4RGBA> genFullQuadVertices (const int subpassCount)
588 {
589 vector<Vertex4RGBA> vectorData;
590 for (int subpassNdx = 0; subpassNdx < subpassCount; ++subpassNdx)
591 {
592 Vertex4RGBA data =
593 {
594 Vec4(0.0f, -1.0f, 0.0f, 1.0f),
595 COLOR_TABLE[subpassNdx % DE_LENGTH_OF_ARRAY(COLOR_TABLE)],
596 };
597 vectorData.push_back(data);
598 data.position = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
599 vectorData.push_back(data);
600 data.position = Vec4(1.0f, -1.0f, 0.0f, 1.0f);
601 vectorData.push_back(data);
602 data.position = Vec4(1.0f, 1.0f, 0.0f, 1.0f);
603 vectorData.push_back(data);
604 }
605 return vectorData;
606 }
607
608 VkImageType getImageType (const VkImageViewType viewType)
609 {
610 switch (viewType)
611 {
612 case VK_IMAGE_VIEW_TYPE_1D:
613 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
614 return VK_IMAGE_TYPE_1D;
615
616 case VK_IMAGE_VIEW_TYPE_2D:
617 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
618 case VK_IMAGE_VIEW_TYPE_CUBE:
619 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
620 return VK_IMAGE_TYPE_2D;
621
622 case VK_IMAGE_VIEW_TYPE_3D:
623 return VK_IMAGE_TYPE_3D;
624
625 default:
626 DE_ASSERT(0);
627 return VK_IMAGE_TYPE_LAST;
628 }
629 }
630
631 //! ImageViewType for accessing a single layer/slice of an image
632 VkImageViewType getImageViewSliceType (const VkImageViewType viewType)
633 {
634 switch (viewType)
635 {
636 case VK_IMAGE_VIEW_TYPE_1D:
637 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
638 return VK_IMAGE_VIEW_TYPE_1D;
639
640 case VK_IMAGE_VIEW_TYPE_2D:
641 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
642 case VK_IMAGE_VIEW_TYPE_CUBE:
643 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
644 case VK_IMAGE_VIEW_TYPE_3D:
645 return VK_IMAGE_VIEW_TYPE_2D;
646
647 default:
648 DE_ASSERT(0);
649 return VK_IMAGE_VIEW_TYPE_LAST;
650 }
651 }
652
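//! Image create flags required by the view type: 2D-array-compatible for 3D images (so individual slices can be rendered to) and cube-compatible for cube views.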
653 VkImageCreateFlags getImageCreateFlags (const VkImageViewType viewType)
654 {
655 VkImageCreateFlags flags = (VkImageCreateFlags)0;
656
657 if (viewType == VK_IMAGE_VIEW_TYPE_3D) flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR;
658 if (isCube(viewType)) flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
659
660 return flags;
661 }
662
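//! Build the reference image: clear color everywhere, with the per-slice quad color filling the right half of every slice.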
663 void generateExpectedImage (const tcu::PixelBufferAccess& outputImage, const IVec2& renderSize, const int colorDepthOffset)
664 {
665 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(outputImage.getFormat().type);
666 const bool isInt = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
667 const VkClearValue clearValue = getClearValue(mapTextureFormat(outputImage.getFormat()));
668
669 if (isInt)
670 tcu::clear(outputImage, IVec4(clearValue.color.int32));
671 else
672 tcu::clear(outputImage, Vec4(clearValue.color.float32));
673
674 for (int z = 0; z < outputImage.getDepth(); ++z)
675 {
676 const Vec4& setColor = COLOR_TABLE[(z + colorDepthOffset) % DE_LENGTH_OF_ARRAY(COLOR_TABLE)];
677 const IVec4 setColorInt = (static_cast<float>(REFERENCE_COLOR_VALUE) * setColor).cast<deInt32>();
678
679 for (int y = 0; y < renderSize.y(); ++y)
680 for (int x = renderSize.x()/2; x < renderSize.x(); ++x)
681 {
682 if (isInt)
683 outputImage.setPixel(setColorInt, x, y, z);
684 else
685 outputImage.setPixel(setColor, x, y, z);
686 }
687 }
688 }
689
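//! Lowest-index memory type that is both allowed by the resource and compatible with the given requirement.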
690 deUint32 selectMatchingMemoryType (const VkPhysicalDeviceMemoryProperties& deviceMemProps, deUint32 allowedMemTypeBits, MemoryRequirement requirement)
691 {
692 const deUint32 compatibleTypes = getCompatibleMemoryTypes(deviceMemProps, requirement);
693 const deUint32 candidates = allowedMemTypeBits & compatibleTypes;
694
695 if (candidates == 0)
696 TCU_THROW(NotSupportedError, "No compatible memory type found");
697
698 return (deUint32)deCtz32(candidates);
699 }
700
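//! Resolve MAX_SIZE components of the size hint, clamping each dimension to the minimum limits guaranteed by the Vulkan specification for the view type.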
701 IVec4 getMaxImageSize (const VkImageViewType viewType, const IVec4& sizeHint)
702 {
703 // Limits have been taken from the Vulkan specification
704 IVec4 size = IVec4(
705 sizeHint.x() != MAX_SIZE ? sizeHint.x() : 4096,
706 sizeHint.y() != MAX_SIZE ? sizeHint.y() : 4096,
707 sizeHint.z() != MAX_SIZE ? sizeHint.z() : 256,
708 sizeHint.w() != MAX_SIZE ? sizeHint.w() : 256);
709
710 switch (viewType)
711 {
712 case VK_IMAGE_VIEW_TYPE_1D:
713 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
714 size.x() = deMin32(4096, size.x());
715 break;
716
717 case VK_IMAGE_VIEW_TYPE_2D:
718 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
719 size.x() = deMin32(4096, size.x());
720 size.y() = deMin32(4096, size.y());
721 break;
722
723 case VK_IMAGE_VIEW_TYPE_3D:
724 size.x() = deMin32(256, size.x());
725 size.y() = deMin32(256, size.y());
726 break;
727
728 case VK_IMAGE_VIEW_TYPE_CUBE:
729 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
730 size.x() = deMin32(4096, size.x());
731 size.y() = deMin32(4096, size.y());
732 size.w() = deMin32(252, size.w());
733 size.w() = NUM_CUBE_FACES * (size.w() / NUM_CUBE_FACES); // round down to 6 faces
734 break;
735
736 default:
737 DE_ASSERT(0);
738 return IVec4();
739 }
740
741 return size;
742 }
743
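//! Memory type index that a maximally-sized color image for this case would use; the image is created only to query requirements and is never bound.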
744 deUint32 getMemoryTypeNdx (Context& context, const CaseDef& caseDef)
745 {
746 const DeviceInterface& vk = context.getDeviceInterface();
747 const InstanceInterface& vki = context.getInstanceInterface();
748 const VkDevice device = context.getDevice();
749 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
750
751 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
752 Move<VkImage> colorImage;
753 VkMemoryRequirements memReqs;
754
755 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
756 const IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
757
758 // Create the image, but don't bind any memory to it
759 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
760 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
761
762 vk.getImageMemoryRequirements(device, *colorImage, &memReqs);
763 return selectMatchingMemoryType(memoryProperties, memReqs.memoryTypeBits, MemoryRequirement::Any);
764 }
765
766 VkDeviceSize getMaxDeviceHeapSize (Context& context, const CaseDef& caseDef)
767 {
768 const InstanceInterface& vki = context.getInstanceInterface();
769 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
770 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physDevice);
771 const deUint32 memoryTypeNdx = getMemoryTypeNdx (context, caseDef);
772
773 return memoryProperties.memoryHeaps[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].size;
774 }
775
776 //! Get a smaller image size. Returns a vector of zeroes, if it can't reduce more.
777 IVec4 getReducedImageSize (const CaseDef& caseDef, IVec4 size)
778 {
779 const int maxIndex = findIndexOfMaxComponent(size);
780 const int reducedSize = size[maxIndex] >> 1;
781
782 switch (caseDef.viewType)
783 {
784 case VK_IMAGE_VIEW_TYPE_CUBE:
785 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
786 if (maxIndex < 2)
787 size.x() = size.y() = reducedSize;
788 else if (maxIndex == 3 && reducedSize >= NUM_CUBE_FACES)
789 size.w() = NUM_CUBE_FACES * (reducedSize / NUM_CUBE_FACES); // round down to a multiple of 6
790 else
791 size = IVec4(0);
792 break;
793
794 default:
795 size[maxIndex] = reducedSize;
796 break;
797 }
798
799 if (reducedSize == 0)
800 size = IVec4(0);
801
802 return size;
803 }
804
805 bool isDepthStencilFormatSupported (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const VkFormat format)
806 {
807 const VkFormatProperties properties = getPhysicalDeviceFormatProperties(vki, physDevice, format);
808 return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0;
809 }
810
811 VkImageAspectFlags getFormatAspectFlags (const VkFormat format)
812 {
813 if (format == VK_FORMAT_UNDEFINED)
814 return 0;
815
816 const tcu::TextureFormat::ChannelOrder order = mapVkFormat(format).order;
817
818 switch (order)
819 {
820 case tcu::TextureFormat::DS: return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
821 case tcu::TextureFormat::D: return VK_IMAGE_ASPECT_DEPTH_BIT;
822 case tcu::TextureFormat::S: return VK_IMAGE_ASPECT_STENCIL_BIT;
823 default: return VK_IMAGE_ASPECT_COLOR_BIT;
824 }
825 }
826
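//! GLSL sources: a pass-through vertex shader and a fragment shader that writes the interpolated color, scaled by REFERENCE_COLOR_VALUE for integer formats.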
827 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
828 {
829 const int numComponents = getNumUsedChannels(mapVkFormat(caseDef.colorFormat).order);
830 const bool isUint = isUintFormat(caseDef.colorFormat);
831 const bool isSint = isIntFormat(caseDef.colorFormat);
832
833 // Vertex shader
834 {
835 std::ostringstream src;
836 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
837 << "\n"
838 << "layout(location = 0) in vec4 in_position;\n"
839 << "layout(location = 1) in vec4 in_color;\n"
840 << "layout(location = 0) out vec4 out_color;\n"
841 << "\n"
842 << "out gl_PerVertex {\n"
843 << " vec4 gl_Position;\n"
844 << "};\n"
845 << "\n"
846 << "void main(void)\n"
847 << "{\n"
848 << " gl_Position = in_position;\n"
849 << " out_color = in_color;\n"
850 << "}\n";
851
852 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
853 }
854
855 // Fragment shader
856 {
857 std::ostringstream colorValue;
858 colorValue << REFERENCE_COLOR_VALUE;
859 const std::string colorFormat = getColorFormatStr(numComponents, isUint, isSint);
860 const std::string colorInteger = (isUint || isSint ? " * "+colorFormat+"("+colorValue.str()+")" :"");
861
862 std::ostringstream src;
863 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
864 << "\n"
865 << "layout(location = 0) in vec4 in_color;\n"
866 << "layout(location = 0) out " << colorFormat << " o_color;\n"
867 << "\n"
868 << "void main(void)\n"
869 << "{\n"
870 << " o_color = " << colorFormat << "("
871 << (numComponents == 1 ? "in_color.r" :
872 numComponents == 2 ? "in_color.rg" :
873 numComponents == 3 ? "in_color.rgb" : "in_color")
874 << colorInteger
875 << ");\n"
876 << "}\n";
877
878 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
879 }
880 }
881
882 //! See testAttachmentSize() description
883 tcu::TestStatus testWithSizeReduction (Context& context, const CaseDef& caseDef)
884 {
885 const DeviceInterface& vk = context.getDeviceInterface();
886 const InstanceInterface& vki = context.getInstanceInterface();
887 const VkDevice device = context.getDevice();
888 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
889 const VkQueue queue = context.getUniversalQueue();
890 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
891 Allocator& allocator = context.getDefaultAllocator();
892
893 // The memory might be too small to allocate the largest possible attachment, so try to account for that.
894 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
895
896 IVec4 imageSize = getMaxImageSize(caseDef.viewType, caseDef.imageSizeHint);
897 VkDeviceSize colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
898 VkDeviceSize depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
899
900 const VkDeviceSize reserveForChecking = 500ull * 1024ull; // leave ~500 KiB for the verification buffer
901 const float additionalMemory = 1.15f; // leave some free memory on the device (15%)
902 VkDeviceSize neededMemory = static_cast<VkDeviceSize>(static_cast<float>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
903 VkDeviceSize maxMemory = getMaxDeviceHeapSize(context, caseDef) >> 2;
904
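// The budget is the smaller of what the attachments need and a quarter of the selected device heap.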
905 const VkDeviceSize deviceMemoryBudget = std::min(neededMemory, maxMemory);
906 bool allocationPossible = false;
907
908 // Keep reducing the size if the image size is too big
909 while (neededMemory > deviceMemoryBudget)
910 {
911 imageSize = getReducedImageSize(caseDef, imageSize);
912
913 if (imageSize == IVec4())
914 return tcu::TestStatus::fail("Couldn't create an image with required size");
915
916 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
917 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
918 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory);
919 }
920
921 // Keep reducing the size if the allocation returns an out-of-memory error
922 while (!allocationPossible)
923 {
924 VkDeviceMemory object = 0;
925 const VkMemoryAllocateInfo allocateInfo =
926 {
927 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, //VkStructureType sType;
928 DE_NULL, //const void* pNext;
929 neededMemory, //VkDeviceSize allocationSize;
930 getMemoryTypeNdx(context, caseDef) //deUint32 memoryTypeIndex;
931 };
932
933 const VkResult result = vk.allocateMemory(device, &allocateInfo, DE_NULL, &object);
934
935 if (VK_ERROR_OUT_OF_DEVICE_MEMORY == result || VK_ERROR_OUT_OF_HOST_MEMORY == result)
936 {
937 imageSize = getReducedImageSize(caseDef, imageSize);
938
939 if (imageSize == IVec4())
940 return tcu::TestStatus::fail("Couldn't create an image with required size");
941
942 colorSize = product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
943 depthStencilSize = (useDepthStencil ? product(imageSize) * tcu::getPixelSize(mapVkFormat(caseDef.depthStencilFormat)) : 0ull);
944 neededMemory = static_cast<VkDeviceSize>(static_cast<double>(colorSize + depthStencilSize) * additionalMemory) + reserveForChecking;
945 }
946 else if (VK_SUCCESS != result)
947 {
948 return tcu::TestStatus::fail("Couldn't allocate memory");
949 }
950 else
951 {
952 // Free the memory automatically when the Move pointer goes out of scope
953 Move<VkDeviceMemory> memoryAllocated (check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device, DE_NULL));
954 allocationPossible = true;
955 }
956 }
957
958 context.getTestContext().getLog()
959 << tcu::TestLog::Message << "Using an image with size (width, height, depth, layers) = " << imageSize << tcu::TestLog::EndMessage;
960
961 // "Slices" is either the depth of a 3D image, or the number of layers of an arrayed image
962 const deInt32 numSlices = maxLayersOrDepth(imageSize);
963
964
965 if (useDepthStencil && !isDepthStencilFormatSupported(vki, physDevice, caseDef.depthStencilFormat))
966 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
967
968 // Determine the verification bounds. The checked region will be in the center of the rendered image
969 const IVec4 checkSize = tcu::min(imageSize, IVec4(MAX_VERIFICATION_REGION_SIZE,
970 MAX_VERIFICATION_REGION_SIZE,
971 MAX_VERIFICATION_REGION_DEPTH,
972 MAX_VERIFICATION_REGION_DEPTH));
973 const IVec4 checkOffset = (imageSize - checkSize) / 2;
974
975 // Only make enough space for the check region
976 const VkDeviceSize colorBufferSize = product(checkSize) * tcu::getPixelSize(mapVkFormat(caseDef.colorFormat));
977 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
978 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
979
980 {
981 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
982 flushAlloc(vk, device, *colorBufferAlloc);
983 }
984
985 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
986 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
987 const Unique<VkRenderPass> renderPass (makeRenderPass (vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
988 (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D) ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
989 : VK_IMAGE_LAYOUT_UNDEFINED));
990 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
991 vector<SharedPtrVkPipeline> pipelines;
992
993 Move<VkImage> colorImage;
994 MovePtr<Allocation> colorImageAlloc;
995 vector<SharedPtrVkImageView> colorAttachments;
996 Move<VkImage> depthStencilImage;
997 MovePtr<Allocation> depthStencilImageAlloc;
998 vector<SharedPtrVkImageView> depthStencilAttachments;
999 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
1000 Move<VkBuffer> vertexBuffer;
1001 MovePtr<Allocation> vertexBufferAlloc;
1002 Move<VkFramebuffer> framebuffer;
1003
1004 // Create a color image
1005 {
1006 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1007
1008 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
1009 imageSize.swizzle(0, 1, 2), 1u, imageSize.w(), imageUsage);
1010 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1011 }
1012
1013 // Create a depth/stencil image (always a 2D image, optionally layered)
1014 if (useDepthStencil)
1015 {
1016 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1017
1018 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1019 IVec3(imageSize.x(), imageSize.y(), 1), 1u, numSlices, imageUsage);
1020 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1021 }
1022
1023 // Create a vertex buffer
1024 {
1025 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1026 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1027
1028 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1029 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1030
1031 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1032 flushAlloc(vk, device, *vertexBufferAlloc);
1033 }
1034
1035 // Prepare color image upfront for rendering to individual slices. 3D slices aren't separate subresources, so they shouldn't be transitioned
1036 // during each subpass like array layers.
1037 if (caseDef.viewType == VK_IMAGE_VIEW_TYPE_3D)
1038 {
1039 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1040 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1041
1042 beginCommandBuffer(vk, *cmdBuffer);
1043
1044 const VkImageMemoryBarrier imageBarrier =
1045 {
1046 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1047 DE_NULL, // const void* pNext;
1048 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1049 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1050 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1051 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1052 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1053 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1054 *colorImage, // VkImage image;
1055 { // VkImageSubresourceRange subresourceRange;
1056 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1057 0u, // uint32_t baseMipLevel;
1058 1u, // uint32_t levelCount;
1059 0u, // uint32_t baseArrayLayer;
1060 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1061 }
1062 };
1063
1064 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u,
1065 0u, DE_NULL, 0u, DE_NULL, 1u, &imageBarrier);
1066
1067 endCommandBuffer(vk, *cmdBuffer);
1068 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1069 }
1070
1071 // For each image layer or slice (3D), create an attachment and a pipeline
1072 {
1073 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1074 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1075 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1076 VkPipeline basePipeline = DE_NULL;
1077
1078 // Color attachments are first in the framebuffer
1079 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1080 {
1081 colorAttachments.push_back(makeSharedPtr(
1082 makeImageView(vk, device, *colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat, makeColorSubresourceRange(subpassNdx, 1))));
1083 attachmentHandles.push_back(**colorAttachments.back());
1084
1085 // We also have to create pipelines for each subpass
1086 pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
1087 vk, device, basePipeline, *pipelineLayout, *renderPass, *vertexModule, *fragmentModule, imageSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1088 static_cast<deUint32>(subpassNdx), useDepth, useStencil)));
1089
1090 basePipeline = **pipelines.front();
1091 }
1092
1093 // Then D/S attachments, if any
1094 if (useDepthStencil)
1095 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1096 {
1097 depthStencilAttachments.push_back(makeSharedPtr(
1098 makeImageView(vk, device, *depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat, makeImageSubresourceRange(depthStencilAspect, 0u, 1u, subpassNdx, 1u))));
1099 attachmentHandles.push_back(**depthStencilAttachments.back());
1100 }
1101 }
1102
1103 framebuffer = makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0], static_cast<deUint32>(imageSize.x()), static_cast<deUint32>(imageSize.y()));
1104
1105 {
1106 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1107 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1108
1109 beginCommandBuffer(vk, *cmdBuffer);
1110 {
1111 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1112
1113 if (useDepthStencil)
1114 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1115
1116 const VkDeviceSize vertexBufferOffset = 0ull;
1117
1118 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, imageSize.x(), imageSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1119 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
1120 }
1121
1122 // Draw
1123 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1124 {
1125 if (subpassNdx != 0)
1126 vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1127
1128 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
1129 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1130 }
1131
1132 endRenderPass(vk, *cmdBuffer);
1133
1134 // Copy colorImage -> host visible colorBuffer
1135 {
1136 const VkImageMemoryBarrier imageBarriers[] =
1137 {
1138 {
1139 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1140 DE_NULL, // const void* pNext;
1141 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1142 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1143 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1144 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1145 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1146 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1147 *colorImage, // VkImage image;
1148 makeColorSubresourceRange(0, imageSize.w()) // VkImageSubresourceRange subresourceRange;
1149 }
1150 };
1151
1152 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1153 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1154
1155 // Copy the checked region rather than the whole image
1156 const VkImageSubresourceLayers subresource =
1157 {
1158 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1159 0u, // uint32_t mipLevel;
1160 static_cast<deUint32>(checkOffset.w()), // uint32_t baseArrayLayer;
1161 static_cast<deUint32>(checkSize.w()), // uint32_t layerCount;
1162 };
1163
1164 const VkBufferImageCopy region =
1165 {
1166 0ull, // VkDeviceSize bufferOffset;
1167 0u, // uint32_t bufferRowLength;
1168 0u, // uint32_t bufferImageHeight;
1169 subresource, // VkImageSubresourceLayers imageSubresource;
1170 makeOffset3D(checkOffset.x(), checkOffset.y(), checkOffset.z()), // VkOffset3D imageOffset;
1171 makeExtent3D(checkSize.swizzle(0, 1, 2)), // VkExtent3D imageExtent;
1172 };
1173
1174 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ®ion);
1175
1176 const VkBufferMemoryBarrier bufferBarriers[] =
1177 {
1178 {
1179 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1180 DE_NULL, // const void* pNext;
1181 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1182 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1183 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1184 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1185 *colorBuffer, // VkBuffer buffer;
1186 0ull, // VkDeviceSize offset;
1187 VK_WHOLE_SIZE, // VkDeviceSize size;
1188 },
1189 };
1190
1191 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1192 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1193 }
1194
1195 endCommandBuffer(vk, *cmdBuffer);
1196 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1197 }
1198
1199 // Verify results
1200 {
1201 invalidateAlloc(vk, device, *colorBufferAlloc);
1202
1203 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1204 const int checkDepth = maxLayersOrDepth(checkSize);
1205 const int depthOffset = maxLayersOrDepth(checkOffset);
1206 const tcu::ConstPixelBufferAccess resultImage (format, checkSize.x(), checkSize.y(), checkDepth, colorBufferAlloc->getHostPtr());
1207 tcu::TextureLevel textureLevel (format, checkSize.x(), checkSize.y(), checkDepth);
1208 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1209 bool ok = false;
1210
1211 generateExpectedImage(expectedImage, checkSize.swizzle(0, 1), depthOffset);
1212
1213 if (isFloatFormat(caseDef.colorFormat))
1214 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1215 else
1216 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", "", expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1217
1218 return ok ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1219 }
1220 }
1221
1222 void checkImageViewTypeRequirements (Context& context, const VkImageViewType viewType)
1223 {
1224 if (viewType == VK_IMAGE_VIEW_TYPE_3D &&
1225 (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_maintenance1")))
1226 TCU_THROW(NotSupportedError, "Extension VK_KHR_maintenance1 not supported");
1227
1228 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !context.getDeviceFeatures().imageCubeArray)
1229 TCU_THROW(NotSupportedError, "Missing feature: imageCubeArray");
1230 }
1231
1232 //! A test that can exercise very big color and depth/stencil attachment sizes.
1233 //! If the total memory consumed by the images is too large, or if the implementation returns an OUT_OF_MEMORY error somewhere,
1234 //! the test can be retried with the next increment of the size reduction index, making the attachments smaller.
1235 tcu::TestStatus testAttachmentSize (Context& context, const CaseDef caseDef)
1236 {
1237 checkImageViewTypeRequirements(context, caseDef.viewType);
1238
1239 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1240 {
1241 if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
1242 TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported");
1243 }
1244
1245 return testWithSizeReduction(context, caseDef);
1247 }
1248
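//! Sizes of the full mip chain down to 1x1x1; the layer count (w component) stays unchanged across levels.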
1249 vector<IVec4> getMipLevelSizes (IVec4 baseSize)
1250 {
1251 vector<IVec4> levels;
1252 levels.push_back(baseSize);
1253
1254 while (baseSize.x() != 1 || baseSize.y() != 1 || baseSize.z() != 1)
1255 {
1256 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
1257 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
1258 baseSize.z() = deMax32(baseSize.z() >> 1, 1);
1259 levels.push_back(baseSize);
1260 }
1261
1262 return levels;
1263 }
1264
1265 //! Compute memory consumed by each mip level, including all layers. Sizes include padding for alignment.
1266 vector<VkDeviceSize> getPerMipLevelStorageSize (const vector<IVec4>& mipLevelSizes, const VkDeviceSize pixelSize)
1267 {
1268 const deInt64 levelAlignment = 16;
1269 vector<VkDeviceSize> storageSizes;
1270
1271 for (vector<IVec4>::const_iterator it = mipLevelSizes.begin(); it != mipLevelSizes.end(); ++it)
1272 storageSizes.push_back(deAlign64(pixelSize * product(*it), levelAlignment));
1273
1274 return storageSizes;
1275 }
1276
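//! Render the half-viewport quads into a single mip level, using one subpass per layer/slice.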
1277 void drawToMipLevel (const Context& context,
1278 const CaseDef& caseDef,
1279 const int mipLevel,
1280 const IVec4& mipSize,
1281 const int numSlices,
1282 const VkImage colorImage,
1283 const VkImage depthStencilImage,
1284 const VkBuffer vertexBuffer,
1285 const VkPipelineLayout pipelineLayout,
1286 const VkShaderModule vertexModule,
1287 const VkShaderModule fragmentModule)
1288 {
1289 const DeviceInterface& vk = context.getDeviceInterface();
1290 const VkDevice device = context.getDevice();
1291 const VkQueue queue = context.getUniversalQueue();
1292 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1293 const VkImageAspectFlags depthStencilAspect = getFormatAspectFlags(caseDef.depthStencilFormat);
1294 const bool useDepth = (depthStencilAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0;
1295 const bool useStencil = (depthStencilAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
1296 const Unique<VkRenderPass> renderPass (makeRenderPass(vk, device, caseDef.colorFormat, caseDef.depthStencilFormat, static_cast<deUint32>(numSlices),
1297 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1298 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));
1299 vector<SharedPtrVkPipeline> pipelines;
1300 vector<SharedPtrVkImageView> colorAttachments;
1301 vector<SharedPtrVkImageView> depthStencilAttachments;
1302 vector<VkImageView> attachmentHandles; // all attachments (color and d/s)
1303
1304 // For each image layer or slice (3D), create an attachment and a pipeline
1305 {
1306 VkPipeline basePipeline = DE_NULL;
1307
1308 // Color attachments are first in the framebuffer
1309 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1310 {
1311 colorAttachments.push_back(makeSharedPtr(makeImageView(
1312 vk, device, colorImage, getImageViewSliceType(caseDef.viewType), caseDef.colorFormat,
1313 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, subpassNdx, 1u))));
1314 attachmentHandles.push_back(**colorAttachments.back());
1315
1316 // We also have to create pipelines for each subpass
1317 pipelines.push_back(makeSharedPtr(makeGraphicsPipeline(
1318 vk, device, basePipeline, pipelineLayout, *renderPass, vertexModule, fragmentModule, mipSize.swizzle(0, 1), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
1319 static_cast<deUint32>(subpassNdx), useDepth, useStencil)));
1320
1321 basePipeline = **pipelines.front();
1322 }
1323
1324 // Then D/S attachments, if any
1325 if (useDepth || useStencil)
1326 for (int subpassNdx = 0; subpassNdx < numSlices; ++subpassNdx)
1327 {
1328 depthStencilAttachments.push_back(makeSharedPtr(makeImageView(
1329 vk, device, depthStencilImage, VK_IMAGE_VIEW_TYPE_2D, caseDef.depthStencilFormat,
1330 makeImageSubresourceRange(depthStencilAspect, mipLevel, 1u, subpassNdx, 1u))));
1331 attachmentHandles.push_back(**depthStencilAttachments.back());
1332 }
1333 }
1334
1335 const Unique<VkFramebuffer> framebuffer (makeFramebuffer(vk, device, *renderPass, static_cast<deUint32>(attachmentHandles.size()), &attachmentHandles[0],
1336 static_cast<deUint32>(mipSize.x()), static_cast<deUint32>(mipSize.y())));
1337
1338 {
1339 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1340 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1341
1342 beginCommandBuffer(vk, *cmdBuffer);
1343 {
1344 vector<VkClearValue> clearValues (numSlices, getClearValue(caseDef.colorFormat));
1345
1346 if (useDepth || useStencil)
1347 clearValues.insert(clearValues.end(), numSlices, makeClearValueDepthStencil(REFERENCE_DEPTH_VALUE, REFERENCE_STENCIL_VALUE));
1348
1349 const VkDeviceSize vertexBufferOffset = 0ull;
1350
1351 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, mipSize.x(), mipSize.y()), (deUint32)clearValues.size(), &clearValues[0]);
1352 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1353 }
1354
1355 // Draw
1356 for (deUint32 subpassNdx = 0; subpassNdx < static_cast<deUint32>(numSlices); ++subpassNdx)
1357 {
1358 if (subpassNdx != 0)
1359 vk.cmdNextSubpass(*cmdBuffer, VK_SUBPASS_CONTENTS_INLINE);
1360
1361 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, **pipelines[subpassNdx]);
1362 vk.cmdDraw(*cmdBuffer, 4u, 1u, subpassNdx*4u, 0u);
1363 }
1364
1365 endRenderPass(vk, *cmdBuffer);
1366
1367 endCommandBuffer(vk, *cmdBuffer);
1368 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1369 }
1370 }
1371
1372 //! Use image mip levels as attachments
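//! Overview: a color image is created with a full mip chain, each level is rendered to with
//! drawToMipLevel() (one subpass per layer/slice), all levels are then copied into a single
//! host-visible buffer, and each level is compared against a generated reference image.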
1373 tcu::TestStatus testRenderToMipMaps (Context& context, const CaseDef caseDef)
1374 {
1375 checkImageViewTypeRequirements(context, caseDef.viewType);
1376
1377 const DeviceInterface& vk = context.getDeviceInterface();
1378 const InstanceInterface& vki = context.getInstanceInterface();
1379 const VkDevice device = context.getDevice();
1380 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
1381 const VkQueue queue = context.getUniversalQueue();
1382 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1383 Allocator& allocator = context.getDefaultAllocator();
1384
1385 const IVec4 imageSize = caseDef.imageSizeHint; // MAX_SIZE is not used in this test
1386 const deInt32 numSlices = maxLayersOrDepth(imageSize);
1387 const vector<IVec4> mipLevelSizes = getMipLevelSizes(imageSize);
1388 const vector<VkDeviceSize> mipLevelStorageSizes = getPerMipLevelStorageSize(mipLevelSizes, tcu::getPixelSize(mapVkFormat(caseDef.colorFormat)));
1389 const int numMipLevels = static_cast<int>(mipLevelSizes.size());
1390 const bool useDepthStencil = (caseDef.depthStencilFormat != VK_FORMAT_UNDEFINED);
1391
1392 if (caseDef.allocationKind == ALLOCATION_KIND_DEDICATED)
1393 {
1394 if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
1395 TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported");
1396 }
1397
1398 if (useDepthStencil && !isDepthStencilFormatSupported(vki, physDevice, caseDef.depthStencilFormat))
1399 TCU_THROW(NotSupportedError, "Unsupported depth/stencil format");
1400
1401 // Create a color buffer big enough to hold all layers and mip levels
1402 const VkDeviceSize colorBufferSize = sum(mipLevelStorageSizes);
1403 const Unique<VkBuffer> colorBuffer (makeBuffer(vk, device, colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1404 const UniquePtr<Allocation> colorBufferAlloc (bindBuffer(vki, vk, physDevice, device, *colorBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind));
1405
1406 {
1407 deMemset(colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(colorBufferSize));
1408 flushAlloc(vk, device, *colorBufferAlloc);
1409 }
1410
1411 const Unique<VkShaderModule> vertexModule (createShaderModule (vk, device, context.getBinaryCollection().get("vert"), 0u));
1412 const Unique<VkShaderModule> fragmentModule (createShaderModule (vk, device, context.getBinaryCollection().get("frag"), 0u));
1413 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
1414
1415 Move<VkImage> colorImage;
1416 MovePtr<Allocation> colorImageAlloc;
1417 Move<VkImage> depthStencilImage;
1418 MovePtr<Allocation> depthStencilImageAlloc;
1419 Move<VkBuffer> vertexBuffer;
1420 MovePtr<Allocation> vertexBufferAlloc;
1421
1422 // Create a color image
1423 {
1424 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1425
1426 colorImage = makeImage(vk, device, getImageCreateFlags(caseDef.viewType), getImageType(caseDef.viewType), caseDef.colorFormat,
1427 imageSize.swizzle(0, 1, 2), numMipLevels, imageSize.w(), imageUsage);
1428 colorImageAlloc = bindImage(vki, vk, physDevice, device, *colorImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1429 }
1430
1431 // Create a depth/stencil image (always a 2D image, optionally layered)
1432 if (useDepthStencil)
1433 {
1434 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
1435
1436 depthStencilImage = makeImage(vk, device, (VkImageCreateFlags)0, VK_IMAGE_TYPE_2D, caseDef.depthStencilFormat,
1437 IVec3(imageSize.x(), imageSize.y(), 1), numMipLevels, numSlices, imageUsage);
1438 depthStencilImageAlloc = bindImage(vki, vk, physDevice, device, *depthStencilImage, MemoryRequirement::Any, allocator, caseDef.allocationKind);
1439 }
1440
1441 // Create a vertex buffer
1442 {
1443 const vector<Vertex4RGBA> vertices = genFullQuadVertices(numSlices);
1444 const VkDeviceSize vertexBufferSize = sizeInBytes(vertices);
1445
1446 vertexBuffer = makeBuffer(vk, device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1447 vertexBufferAlloc = bindBuffer(vki, vk, physDevice, device, *vertexBuffer, MemoryRequirement::HostVisible, allocator, caseDef.allocationKind);
1448
1449 deMemcpy(vertexBufferAlloc->getHostPtr(), &vertices[0], static_cast<std::size_t>(vertexBufferSize));
1450 flushAlloc(vk, device, *vertexBufferAlloc);
1451 }
1452
1453 // Prepare images
1454 {
1455 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1456 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1457
1458 beginCommandBuffer(vk, *cmdBuffer);
1459
1460 const VkImageMemoryBarrier imageBarriers[] =
1461 {
1462 {
1463 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1464 DE_NULL, // const void* pNext;
1465 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1466 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1467 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1468 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1469 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1470 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1471 *colorImage, // VkImage image;
1472 { // VkImageSubresourceRange subresourceRange;
1473 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1474 0u, // uint32_t baseMipLevel;
1475 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1476 0u, // uint32_t baseArrayLayer;
1477 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1478 },
1479 },
1480 {
1481 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1482 DE_NULL, // const void* pNext;
1483 (VkAccessFlags)0, // VkAccessFlags srcAccessMask;
1484 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1485 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1486 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1487 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1488 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1489 *depthStencilImage, // VkImage image;
1490 { // VkImageSubresourceRange subresourceRange;
1491 getFormatAspectFlags(caseDef.depthStencilFormat), // VkImageAspectFlags aspectMask;
1492 0u, // uint32_t baseMipLevel;
1493 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1494 0u, // uint32_t baseArrayLayer;
1495 static_cast<deUint32>(numSlices), // uint32_t layerCount;
1496 },
1497 }
1498 };
1499
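		// The depth/stencil barrier is the last array element; drop it from the submitted count
		// when no depth/stencil attachment is used.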
1500 const deUint32 numImageBarriers = static_cast<deUint32>(DE_LENGTH_OF_ARRAY(imageBarriers) - (useDepthStencil ? 0 : 1));
1501
1502 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 0u,
1503 0u, DE_NULL, 0u, DE_NULL, numImageBarriers, imageBarriers);
1504
1505 endCommandBuffer(vk, *cmdBuffer);
1506 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1507 }
1508
1509 // Draw
1510 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1511 {
1512 const IVec4& mipSize = mipLevelSizes[mipLevel];
1513 const int levelSlices = maxLayersOrDepth(mipSize);
1514
1515 drawToMipLevel (context, caseDef, mipLevel, mipSize, levelSlices, *colorImage, *depthStencilImage, *vertexBuffer, *pipelineLayout,
1516 *vertexModule, *fragmentModule);
1517 }
1518
1519 // Copy results: colorImage -> host visible colorBuffer
1520 {
1521 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1522 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
1523
1524 beginCommandBuffer(vk, *cmdBuffer);
1525
1526 {
1527 const VkImageMemoryBarrier imageBarriers[] =
1528 {
1529 {
1530 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1531 DE_NULL, // const void* pNext;
1532 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
1533 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1534 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1535 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1536 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1537 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1538 *colorImage, // VkImage image;
1539 { // VkImageSubresourceRange subresourceRange;
1540 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1541 0u, // uint32_t baseMipLevel;
1542 static_cast<deUint32>(numMipLevels), // uint32_t levelCount;
1543 0u, // uint32_t baseArrayLayer;
1544 static_cast<deUint32>(imageSize.w()), // uint32_t layerCount;
1545 },
1546 }
1547 };
1548
1549 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
1550 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(imageBarriers), imageBarriers);
1551 }
1552 {
1553 vector<VkBufferImageCopy> regions;
1554 VkDeviceSize levelOffset = 0ull;
1555 VkBufferImageCopy workRegion =
1556 {
1557 0ull, // VkDeviceSize bufferOffset;
1558 0u, // uint32_t bufferRowLength;
1559 0u, // uint32_t bufferImageHeight;
1560 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, imageSize.w()), // VkImageSubresourceLayers imageSubresource;
1561 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1562 makeExtent3D(0, 0, 0), // VkExtent3D imageExtent;
1563 };
1564
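			// One copy region per mip level: bufferRowLength/bufferImageHeight of zero mean tightly
			// packed data, and bufferOffset advances by the aligned per-level storage size computed earlier.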
1565 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1566 {
1567 workRegion.bufferOffset = levelOffset;
1568 workRegion.imageSubresource.mipLevel = static_cast<deUint32>(mipLevel);
1569 workRegion.imageExtent = makeExtent3D(mipLevelSizes[mipLevel].swizzle(0, 1, 2));
1570
1571 regions.push_back(workRegion);
1572
1573 levelOffset += mipLevelStorageSizes[mipLevel];
1574 }
1575
1576 			vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, static_cast<deUint32>(regions.size()), &regions[0]);
1577 }
1578 {
1579 const VkBufferMemoryBarrier bufferBarriers[] =
1580 {
1581 {
1582 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1583 DE_NULL, // const void* pNext;
1584 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1585 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1586 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1587 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1588 *colorBuffer, // VkBuffer buffer;
1589 0ull, // VkDeviceSize offset;
1590 VK_WHOLE_SIZE, // VkDeviceSize size;
1591 },
1592 };
1593
1594 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
1595 0u, DE_NULL, DE_LENGTH_OF_ARRAY(bufferBarriers), bufferBarriers, 0u, DE_NULL);
1596 }
1597
1598 endCommandBuffer(vk, *cmdBuffer);
1599 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
1600 }
1601
1602 // Verify results (per mip level)
1603 {
1604 invalidateAlloc(vk, device, *colorBufferAlloc);
1605
1606 const tcu::TextureFormat format = mapVkFormat(caseDef.colorFormat);
1607
1608 VkDeviceSize levelOffset = 0ull;
1609 bool allOk = true;
1610
1611 for (int mipLevel = 0; mipLevel < numMipLevels; ++mipLevel)
1612 {
1613 const IVec4& mipSize = mipLevelSizes[mipLevel];
1614 const void* const pLevelData = static_cast<const deUint8*>(colorBufferAlloc->getHostPtr()) + levelOffset;
1615 const int levelDepth = maxLayersOrDepth(mipSize);
1616 const tcu::ConstPixelBufferAccess resultImage (format, mipSize.x(), mipSize.y(), levelDepth, pLevelData);
1617 tcu::TextureLevel textureLevel (format, mipSize.x(), mipSize.y(), levelDepth);
1618 const tcu::PixelBufferAccess expectedImage = textureLevel.getAccess();
1619 const std::string comparisonName = "Mip level " + de::toString(mipLevel);
1620 bool ok = false;
1621
1622 generateExpectedImage(expectedImage, mipSize.swizzle(0, 1), 0);
1623
1624 if (isFloatFormat(caseDef.colorFormat))
1625 ok = tcu::floatThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
1626 else
1627 ok = tcu::intThresholdCompare(context.getTestContext().getLog(), "Image Comparison", comparisonName.c_str(), expectedImage, resultImage, tcu::UVec4(2), tcu::COMPARE_LOG_RESULT);
1628
1629 allOk = allOk && ok; // keep testing all levels, even if we know it's a fail overall
1630 levelOffset += mipLevelStorageSizes[mipLevel];
1631 }
1632
1633 return allOk ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("Fail");
1634 }
1635 }
1636
1637 std::string getSizeDescription (const IVec4& size)
1638 {
1639 std::ostringstream str;
1640
1641 const char* const description[4] =
1642 {
1643 "width", "height", "depth", "layers"
1644 };
1645
1646 int numMaxComponents = 0;
1647
1648 for (int i = 0; i < 4; ++i)
1649 {
1650 if (size[i] == MAX_SIZE)
1651 {
1652 if (numMaxComponents > 0)
1653 str << "_";
1654
1655 str << description[i];
1656 ++numMaxComponents;
1657 }
1658 }
1659
1660 if (numMaxComponents == 0)
1661 str << "small";
1662
1663 return str.str();
1664 }
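// E.g. (MAX_SIZE, 23, 1, MAX_SIZE) yields "width_layers", while a size with no MAX_SIZE components yields "small".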
1665
1666 inline std::string getFormatString (const VkFormat format)
1667 {
1668 std::string name(getFormatName(format));
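	// Strip the "VK_FORMAT_" prefix (10 characters) and lowercase the rest, e.g. "r8g8b8a8_unorm".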
1669 return de::toLower(name.substr(10));
1670 }
1671
1672 std::string getFormatString (const VkFormat colorFormat, const VkFormat depthStencilFormat)
1673 {
1674 std::ostringstream str;
1675 str << getFormatString(colorFormat);
1676 if (depthStencilFormat != VK_FORMAT_UNDEFINED)
1677 str << "_" << getFormatString(depthStencilFormat);
1678 return str.str();
1679 }
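// E.g. (VK_FORMAT_R32_UINT, VK_FORMAT_D24_UNORM_S8_UINT) yields "r32_uint_d24_unorm_s8_uint".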
1680
1681 std::string getShortImageViewTypeName (const VkImageViewType imageViewType)
1682 {
1683 std::string s(getImageViewTypeName(imageViewType));
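	// Strip the "VK_IMAGE_VIEW_TYPE_" prefix (19 characters), e.g. "cube_array" for VK_IMAGE_VIEW_TYPE_CUBE_ARRAY.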
1684 return de::toLower(s.substr(19));
1685 }
1686
1687 inline BVec4 bvecFromMask (deUint32 mask)
1688 {
1689 return BVec4((mask >> 0) & 1,
1690 (mask >> 1) & 1,
1691 (mask >> 2) & 1,
1692 (mask >> 3) & 1);
1693 }
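// E.g. MASK_WH_LAYERS = (1 | 2 | 0 | 8) maps to BVec4(true, true, false, true).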
1694
1695 vector<IVec4> genSizeCombinations (const IVec4& baselineSize, const deUint32 sizeMask, const VkImageViewType imageViewType)
1696 {
1697 vector<IVec4> sizes;
1698 std::set<deUint32> masks;
1699
1700 for (deUint32 i = 0; i < (1u << 4); ++i)
1701 {
1702 // Cube images have square faces
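		// (setting both bits on the loop counter masks width and height together, so W-only and
		//  H-only variants collapse into one; the std::set below removes the resulting duplicates)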
1703 if (isCube(imageViewType) && ((i & MASK_WH) != 0))
1704 i |= MASK_WH;
1705
1706 masks.insert(i & sizeMask);
1707 }
1708
1709 for (std::set<deUint32>::const_iterator it = masks.begin(); it != masks.end(); ++it)
1710 sizes.push_back(tcu::select(IVec4(MAX_SIZE), baselineSize, bvecFromMask(*it)));
1711
1712 return sizes;
1713 }
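// E.g. for baseline (44, 23, 1, 4) and MASK_WH_LAYERS this produces the baseline itself (mask 0,
// always first in the ordered set) plus every combination where width, height and/or the layer
// count is replaced by MAX_SIZE.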
1714
1715 void addTestCasesWithFunctions (tcu::TestCaseGroup* group, AllocationKind allocationKind)
1716 {
1717 const struct
1718 {
1719 VkImageViewType viewType;
1720 IVec4 baselineSize; //!< image size: (dimX, dimY, dimZ, arraySize)
1721 deUint32 sizeMask; //!< if a dimension is masked, generate a huge size case for it
1722 } testCase[] =
1723 {
1724 { VK_IMAGE_VIEW_TYPE_1D, IVec4(54, 1, 1, 1), MASK_W },
1725 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, IVec4(54, 1, 1, 4), MASK_W_LAYERS },
1726 { VK_IMAGE_VIEW_TYPE_2D, IVec4(44, 23, 1, 1), MASK_WH },
1727 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, IVec4(44, 23, 1, 4), MASK_WH_LAYERS },
1728 { VK_IMAGE_VIEW_TYPE_3D, IVec4(22, 31, 7, 1), MASK_WHD },
1729 { VK_IMAGE_VIEW_TYPE_CUBE, IVec4(35, 35, 1, 6), MASK_WH },
1730 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, IVec4(35, 35, 1, 2*6), MASK_WH_LAYERS },
1731 };
1732
1733 const VkFormat format[] =
1734 {
1735 VK_FORMAT_R8G8B8A8_UNORM,
1736 VK_FORMAT_R32_UINT,
1737 VK_FORMAT_R16G16_SINT,
1738 VK_FORMAT_R32G32B32A32_SFLOAT,
1739 };
1740
1741 const VkFormat depthStencilFormat[] =
1742 {
1743 VK_FORMAT_UNDEFINED, // don't use a depth/stencil attachment
1744 VK_FORMAT_D16_UNORM,
1745 VK_FORMAT_S8_UINT,
1746 VK_FORMAT_D24_UNORM_S8_UINT, // one of the following mixed formats must be supported
1747 VK_FORMAT_D32_SFLOAT_S8_UINT,
1748 };
1749
1750 for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(testCase); ++caseNdx)
1751 {
1752 MovePtr<tcu::TestCaseGroup> imageGroup(new tcu::TestCaseGroup(group->getTestContext(), getShortImageViewTypeName(testCase[caseNdx].viewType).c_str(), ""));
1753
1754 // Generate attachment size cases
1755 {
1756 const vector<IVec4> sizes = genSizeCombinations(testCase[caseNdx].baselineSize, testCase[caseNdx].sizeMask, testCase[caseNdx].viewType);
1757
1758 MovePtr<tcu::TestCaseGroup> smallGroup(new tcu::TestCaseGroup(group->getTestContext(), "small", ""));
1759 MovePtr<tcu::TestCaseGroup> hugeGroup (new tcu::TestCaseGroup(group->getTestContext(), "huge", ""));
1760
1761 imageGroup->addChild(smallGroup.get());
1762 imageGroup->addChild(hugeGroup.get());
1763
1764 for (vector<IVec4>::const_iterator sizeIter = sizes.begin(); sizeIter != sizes.end(); ++sizeIter)
1765 {
1766 // The first size is the baseline size, put it in a dedicated group
1767 if (sizeIter == sizes.begin())
1768 {
1769 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1770 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1771 {
1772 const CaseDef caseDef =
1773 {
1774 testCase[caseNdx].viewType, // VkImageViewType imageType;
1775 *sizeIter, // IVec4 imageSizeHint;
1776 format[formatNdx], // VkFormat colorFormat;
1777 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1778 allocationKind // AllocationKind allocationKind;
1779 };
1780 addFunctionCaseWithPrograms(smallGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", initPrograms, testAttachmentSize, caseDef);
1781 }
1782 }
1783 else // All huge cases go into a separate group
1784 {
1785 if (allocationKind != ALLOCATION_KIND_DEDICATED)
1786 {
1787 MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(group->getTestContext(), getSizeDescription(*sizeIter).c_str(), ""));
1788 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
1789
1790 // Use the same color format for all cases, to reduce the number of permutations
1791 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1792 {
1793 const CaseDef caseDef =
1794 {
1795 testCase[caseNdx].viewType, // VkImageViewType viewType;
1796 *sizeIter, // IVec4 imageSizeHint;
1797 colorFormat, // VkFormat colorFormat;
1798 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1799 allocationKind // AllocationKind allocationKind;
1800 };
1801 addFunctionCaseWithPrograms(sizeGroup.get(), getFormatString(colorFormat, depthStencilFormat[dsFormatNdx]), "", initPrograms, testAttachmentSize, caseDef);
1802 }
1803 hugeGroup->addChild(sizeGroup.release());
1804 }
1805 }
1806 }
1807 smallGroup.release();
1808 hugeGroup.release();
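			// The small/huge groups were added to imageGroup with get(); releasing the MovePtrs here
			// only gives up their ownership so the children are not deleted twice.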
1809 }
1810
1811 // Generate mip map cases
1812 {
1813 MovePtr<tcu::TestCaseGroup> mipmapGroup(new tcu::TestCaseGroup(group->getTestContext(), "mipmap", ""));
1814
1815 for (int dsFormatNdx = 0; dsFormatNdx < DE_LENGTH_OF_ARRAY(depthStencilFormat); ++dsFormatNdx)
1816 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(format); ++formatNdx)
1817 {
1818 const CaseDef caseDef =
1819 {
1820 testCase[caseNdx].viewType, // VkImageViewType imageType;
1821 testCase[caseNdx].baselineSize, // IVec4 imageSizeHint;
1822 format[formatNdx], // VkFormat colorFormat;
1823 depthStencilFormat[dsFormatNdx], // VkFormat depthStencilFormat;
1824 allocationKind // AllocationKind allocationKind;
1825 };
1826 addFunctionCaseWithPrograms(mipmapGroup.get(), getFormatString(format[formatNdx], depthStencilFormat[dsFormatNdx]), "", initPrograms, testRenderToMipMaps, caseDef);
1827 }
1828 imageGroup->addChild(mipmapGroup.release());
1829 }
1830
1831 group->addChild(imageGroup.release());
1832 }
1833 }
1834
1835 void addCoreRenderToImageTests (tcu::TestCaseGroup* group)
1836 {
1837 addTestCasesWithFunctions(group, ALLOCATION_KIND_SUBALLOCATED);
1838 }
1839
1840 void addDedicatedAllocationRenderToImageTests (tcu::TestCaseGroup* group)
1841 {
1842 addTestCasesWithFunctions(group, ALLOCATION_KIND_DEDICATED);
1843 }
1844
1845 } // anonymous ns
1846
1847 tcu::TestCaseGroup* createRenderToImageTests (tcu::TestContext& testCtx)
1848 {
1849 de::MovePtr<tcu::TestCaseGroup> renderToImageTests (new tcu::TestCaseGroup(testCtx, "render_to_image", "Render to image tests"));
1850
1851 renderToImageTests->addChild(createTestGroup(testCtx, "core", "Core render to image tests", addCoreRenderToImageTests));
1852 renderToImageTests->addChild(createTestGroup(testCtx, "dedicated_allocation", "Render to image tests for dedicated memory allocation", addDedicatedAllocationRenderToImageTests));
1853
1854 return renderToImageTests.release();
1855 }
1856
1857 } // pipeline
1858 } // vkt
1859