1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2022 The Khronos Group Inc.
6 * Copyright (c) 2022 Valve Corporation.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
 * \brief Tests for sliced image views of 3D images (VK_EXT_image_sliced_view_of_3d).
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktPipelineImageSlicedViewOf3DTests.hpp"
26 #include "vktTestCase.hpp"
27
28 #include "vkImageUtil.hpp"
29 #include "vkTypeUtil.hpp"
30 #include "vkObjUtil.hpp"
31 #include "vkCmdUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkImageWithMemory.hpp"
34 #include "vkBufferWithMemory.hpp"
35 #include "vkBarrierUtil.hpp"
36
37 #include "tcuTexture.hpp"
38 #include "tcuImageCompare.hpp"
39 #include "tcuTextureUtil.hpp"
40
41 #include "deRandom.hpp"
42
43 #include <sstream>
44 #include <vector>
45 #include <tuple>
46 #include <set>
47 #include <limits>
48 #include <string>
49 #include <algorithm>
50
51 namespace vkt
52 {
53 namespace pipeline
54 {
55
56 using namespace vk;
57
58 namespace
59 {
60
// Base framebuffer/image dimensions and format shared by the test variants.
constexpr uint32_t kWidth = 8u;
constexpr uint32_t kHeight = 8u;
constexpr VkFormat kFormat = VK_FORMAT_R8G8B8A8_UINT;
constexpr uint32_t kVertexCount = 3u; // Single full-screen triangle.
constexpr auto kUsageLayout = VK_IMAGE_LAYOUT_GENERAL;

// Whether the sliced view is the source (LOAD) or the destination (STORE) of the tested image operation.
enum class TestType
{
    LOAD = 0,
    STORE,
};
72
// Parameters for a single sliced-view-of-3D-image test case.
struct TestParams
{
    TestType testType;           // Load from the sliced view or store into it.
    VkShaderStageFlagBits stage; // Stage doing the image access (compute or fragment only).
    uint32_t width;              // Full image dimensions at mip level 0.
    uint32_t height;
    uint32_t depth;
    uint32_t offset;             // First slice (Z offset) covered by the sliced view.

private:
    // We want to test both normal ranges and VK_REMAINING_3D_SLICES_EXT, but in the latter case we cannot blindly use the range
    // value for some operations. See getActualRange() and getSlicedViewRange().
    uint32_t range;

public:
    tcu::Maybe<uint32_t> mipLevel; // If set, the image is mipmapped and this is the level under test.
    bool sampleImg;                // If true, additionally sample the whole level for verification.

    TestParams(TestType testType_, VkShaderStageFlagBits stage_, uint32_t width_, uint32_t height_, uint32_t depth_,
               uint32_t offset_, uint32_t range_, const tcu::Maybe<uint32_t> &mipLevel_, bool sampleImg_)
        : testType(testType_)
        , stage(stage_)
        , width(width_)
        , height(height_)
        , depth(depth_)
        , offset(offset_)
        , range(range_)
        , mipLevel(mipLevel_)
        , sampleImg(sampleImg_)
    {
        DE_ASSERT(stage == VK_SHADER_STAGE_COMPUTE_BIT || stage == VK_SHADER_STAGE_FRAGMENT_BIT);
        DE_ASSERT(range > 0u); // Note: VK_REMAINING_3D_SLICES_EXT (~0u) also satisfies this.

        const auto selectedLevel = getSelectedLevel();

        if (useMipMaps())
        {
            // To simplify things.
            DE_ASSERT(width == height && width == depth);

            const auto maxMipLevelCount = getMaxMipLevelCount();
            DE_ASSERT(selectedLevel < maxMipLevelCount);
            DE_UNREF(maxMipLevelCount); // For release builds.
        }

        // The offset/range pair must fit in the depth of the selected mip level.
        const uint32_t selectedLevelDepth = (depth >> selectedLevel);
        DE_UNREF(selectedLevelDepth); // For release builds.

        if (!useRemainingSlices())
            DE_ASSERT(offset + range <= selectedLevelDepth);
        else
            DE_ASSERT(offset < selectedLevelDepth);
    }

    // Mip level under test (0 when mipmaps are not used).
    uint32_t getSelectedLevel(void) const
    {
        return (useMipMaps() ? mipLevel.get() : 0u);
    }

    // Number of mip levels the full image is created with.
    uint32_t getFullImageLevels(void) const
    {
        return (useMipMaps() ? getMaxMipLevelCount() : 1u);
    }

    // Concrete number of slices in the view, resolving VK_REMAINING_3D_SLICES_EXT against the level depth.
    uint32_t getActualRange(void) const
    {
        const auto levelDepth = (depth >> getSelectedLevel());
        DE_ASSERT(levelDepth > 0u);

        return (useRemainingSlices() ? (levelDepth - offset) : range);
    }

    // Raw range value as passed in VkImageViewSlicedCreateInfoEXT (may be VK_REMAINING_3D_SLICES_EXT).
    uint32_t getSlicedViewRange(void) const
    {
        return range;
    }

    // Extent of the sliced portion at the selected mip level.
    VkExtent3D getSliceExtent(void) const
    {
        const auto selectedLevel = getSelectedLevel();
        const auto extent = makeExtent3D((width >> selectedLevel), (height >> selectedLevel), getActualRange());

        DE_ASSERT(extent.width > 0u);
        DE_ASSERT(extent.height > 0u);
        DE_ASSERT(extent.depth > 0u);
        return extent;
    }

    // Full extent of the selected mip level.
    VkExtent3D getFullLevelExtent(void) const
    {
        const auto selectedLevel = getSelectedLevel();
        const auto extent = makeExtent3D((width >> selectedLevel), (height >> selectedLevel), (depth >> selectedLevel));

        DE_ASSERT(extent.width > 0u);
        DE_ASSERT(extent.height > 0u);
        DE_ASSERT(extent.depth > 0u);
        return extent;
    }

    // Levels in a full mip chain for an image of the given size: floor(log2(size)) + 1.
    static uint32_t getMaxMipLevelCountForSize(uint32_t size)
    {
        DE_ASSERT(size <= static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
        return static_cast<uint32_t>(deLog2Floor32(static_cast<int32_t>(size)) + 1);
    }

private:
    uint32_t getMaxMipLevelCount(void) const
    {
        return getMaxMipLevelCountForSize(depth);
    }

    bool useMipMaps(void) const
    {
        return static_cast<bool>(mipLevel);
    }

    bool useRemainingSlices(void) const
    {
        return (range == VK_REMAINING_3D_SLICES_EXT);
    }
};
194
// Test case: generates the shaders, checks support and creates the load or store instance.
class SlicedViewTestCase : public vkt::TestCase
{
public:
    SlicedViewTestCase(tcu::TestContext &testCtx, const std::string &name, const TestParams &params)
        : vkt::TestCase(testCtx, name)
        , m_params(params)
    {
    }
    virtual ~SlicedViewTestCase(void)
    {
    }

    void initPrograms(vk::SourceCollections &programCollection) const override;
    TestInstance *createInstance(Context &context) const override;
    void checkSupport(Context &context) const override;

protected:
    const TestParams m_params;
};
214
// Base instance holding the machinery shared by the load and store variants.
class SlicedViewTestInstance : public vkt::TestInstance
{
public:
    SlicedViewTestInstance(Context &context, const TestParams &params) : vkt::TestInstance(context), m_params(params)
    {
    }
    virtual ~SlicedViewTestInstance(void)
    {
    }

protected:
    // Builds the descriptor set for both storage images and dispatches to the graphics or compute runner.
    virtual void runPipeline(const DeviceInterface &vkd, const VkDevice device, const VkCommandBuffer cmdBuffer,
                             const VkImageView slicedImage, const VkImageView auxiliarImage);
    virtual void runGraphicsPipeline(const DeviceInterface &vkd, const VkDevice device,
                                     const VkCommandBuffer cmdBuffer);
    virtual void runComputePipeline(const DeviceInterface &vkd, const VkDevice device, const VkCommandBuffer cmdBuffer);
    // Samples through slicedView and compares the result against the contents of fullImage's selected level.
    bool runSamplingPipeline(const VkImage fullImage, const VkImageView slicedView, const VkExtent3D &levelExtent);

    const TestParams m_params;

    Move<VkDescriptorSetLayout> m_setLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    Move<VkDescriptorSet> m_descriptorSet;
    Move<VkPipelineLayout> m_pipelineLayout;

    // Only for graphics pipelines.
    Move<VkRenderPass> m_renderPass;
    Move<VkFramebuffer> m_framebuffer;

    Move<VkPipeline> m_pipeline;
};
246
// Variant that loads texels from the sliced view (verification data ends up in the auxiliary image).
class SlicedViewLoadTestInstance : public SlicedViewTestInstance
{
public:
    SlicedViewLoadTestInstance(Context &context, const TestParams &params) : SlicedViewTestInstance(context, params)
    {
    }
    virtual ~SlicedViewLoadTestInstance(void)
    {
    }

    tcu::TestStatus iterate(void);
};
259
// Variant that stores texels into the sliced view (source data comes from the auxiliary image).
class SlicedViewStoreTestInstance : public SlicedViewTestInstance
{
public:
    SlicedViewStoreTestInstance(Context &context, const TestParams &params) : SlicedViewTestInstance(context, params)
    {
    }
    virtual ~SlicedViewStoreTestInstance(void)
    {
    }

    tcu::TestStatus iterate(void);
};
272
checkSupport(Context & context) const273 void SlicedViewTestCase::checkSupport(Context &context) const
274 {
275 context.requireDeviceFunctionality(VK_EXT_IMAGE_SLICED_VIEW_OF_3D_EXTENSION_NAME);
276
277 if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
278 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_FRAGMENT_STORES_AND_ATOMICS);
279 }
280
// Generates the shaders: either a compute shader or a vert/frag pair that copies texels between the
// sliced image and the auxiliary image (direction depends on the test type), plus an optional compute
// shader that samples the whole level when m_params.sampleImg is set.
void SlicedViewTestCase::initPrograms(vk::SourceCollections &programCollection) const
{
    // Both images are bound as rgba8ui storage images; must match runPipeline().
    const std::string bindings = "layout (rgba8ui, set=0, binding=0) uniform uimage3D slicedImage;\n"
                                 "layout (rgba8ui, set=0, binding=1) uniform uimage3D auxiliarImage;\n";

    std::string loadFrom;
    std::string storeTo;

    // We may need to load stuff from the sliced image into an auxiliary image if we're testing load, or we may need to store stuff
    // to the sliced image, read from the auxiliary image if we're testing stores.
    if (m_params.testType == TestType::LOAD)
    {
        loadFrom = "slicedImage";
        storeTo = "auxiliarImage";
    }
    else if (m_params.testType == TestType::STORE)
    {
        loadFrom = "auxiliarImage";
        storeTo = "slicedImage";
    }
    else
        DE_ASSERT(false);

    std::ostringstream mainOperation;

    // Note: "coords" will vary depending on the shader stage.
    // A zero "bad color" is stored when imageSize() on the sliced image does not report the expected
    // slice count, so the later buffer comparison will fail in that case.
    mainOperation << " const ivec3 size = imageSize(slicedImage);\n"
                  << " const uvec4 badColor = uvec4(0, 0, 0, 0);\n"
                  << " const uvec4 goodColor = imageLoad(" << loadFrom << ", coords);\n"
                  << " const uvec4 storedColor = ((size.z == " << m_params.getActualRange()
                  << ") ? goodColor : badColor);\n"
                  << " imageStore(" << storeTo << ", coords, storedColor);\n";

    if (m_params.stage == VK_SHADER_STAGE_COMPUTE_BIT)
    {
        // For compute, we'll launch as many workgroups as slices, and each invocation will handle one pixel.
        const auto sliceExtent = m_params.getSliceExtent();
        std::ostringstream comp;
        comp << "#version 460\n"
             << "layout (local_size_x=" << sliceExtent.width << ", local_size_y=" << sliceExtent.height
             << ", local_size_z=1) in;\n"
             << bindings << "void main (void) {\n"
             << " const ivec3 coords = ivec3(ivec2(gl_LocalInvocationID.xy), int(gl_WorkGroupID.x));\n"
             << mainOperation.str() << "}\n";
        programCollection.glslSources.add("comp") << glu::ComputeSource(comp.str());
    }
    else if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
    {
        // For fragment, we'll draw as many instances as slices, and each draw will use a full-screen triangle to generate as many
        // fragment shader invocations as pixels in the image (the framebuffer needs to have the same size as the storage images).
        std::ostringstream frag;
        frag << "#version 460\n"
             << "layout (location=0) in flat int zCoord;\n"
             << bindings << "void main (void) {\n"
             << " const ivec3 coords = ivec3(ivec2(gl_FragCoord.xy), zCoord);\n"
             << mainOperation.str() << "}\n";

        // Full-screen triangle; the instance index selects the slice (Z coordinate).
        std::ostringstream vert;
        vert << "#version 460\n"
             << "layout (location=0) out flat int zCoord;\n"
             << "vec2 positions[3] = vec2[](\n"
             << " vec2(-1.0, -1.0),\n"
             << " vec2( 3.0, -1.0),\n"
             << " vec2(-1.0, 3.0)\n"
             << ");\n"
             << "void main() {\n"
             << " gl_Position = vec4(positions[gl_VertexIndex % 3], 0.0, 1.0);\n"
             << " zCoord = int(gl_InstanceIndex);\n"
             << "}\n";

        programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
        programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
    }
    else
    {
        DE_ASSERT(false);
    }

    if (m_params.sampleImg)
    {
        // Prepare a compute shader that will sample the whole level to verify it's available.
        const auto levelExtent = m_params.getFullLevelExtent();

        std::ostringstream comp;
        comp << "#version 460\n"
             << "layout (local_size_x=" << levelExtent.width << ", local_size_y=" << levelExtent.height
             << ", local_size_z=" << levelExtent.depth << ") in;\n"
             << "layout (set=0, binding=0) uniform usampler3D combinedSampler;\n" // The image being tested.
             << "layout (set=0, binding=1, rgba8ui) uniform uimage3D auxiliarImage;\n" // Verification storage image.
             << "void main() {\n"
             << " const vec3 levelExtent = vec3(" << levelExtent.width << ", " << levelExtent.height << ", "
             << levelExtent.depth << ");\n"
             // Sample at texel centers so nearest filtering fetches each texel exactly.
             << " const vec3 sampleCoords = vec3(\n"
             << " (float(gl_LocalInvocationID.x) + 0.5) / levelExtent.x,\n"
             << " (float(gl_LocalInvocationID.y) + 0.5) / levelExtent.y,\n"
             << " (float(gl_LocalInvocationID.z) + 0.5) / levelExtent.z);\n"
             << " const ivec3 storeCoords = ivec3(int(gl_LocalInvocationID.x), int(gl_LocalInvocationID.y), "
                "int(gl_LocalInvocationID.z));\n"
             << " const uvec4 sampledColor = texture(combinedSampler, sampleCoords);\n"
             << " imageStore(auxiliarImage, storeCoords, sampledColor);\n"
             << "}\n";
        programCollection.glslSources.add("compSample") << glu::ComputeSource(comp.str());
    }
}
385
createInstance(Context & context) const386 TestInstance *SlicedViewTestCase::createInstance(Context &context) const
387 {
388 if (m_params.testType == TestType::LOAD)
389 return new SlicedViewLoadTestInstance(context, m_params);
390 if (m_params.testType == TestType::STORE)
391 return new SlicedViewStoreTestInstance(context, m_params);
392
393 DE_ASSERT(false);
394 return nullptr;
395 }
396
makeIVec3(uint32_t width,uint32_t height,uint32_t depth)397 tcu::IVec3 makeIVec3(uint32_t width, uint32_t height, uint32_t depth)
398 {
399 return tcu::IVec3(static_cast<int>(width), static_cast<int>(height), static_cast<int>(depth));
400 }
401
makePixelBufferAccess(const BufferWithMemory & buffer,const tcu::IVec3 & size,const tcu::TextureFormat & format)402 de::MovePtr<tcu::PixelBufferAccess> makePixelBufferAccess(const BufferWithMemory &buffer, const tcu::IVec3 &size,
403 const tcu::TextureFormat &format)
404 {
405 de::MovePtr<tcu::PixelBufferAccess> bufferImage(
406 new tcu::PixelBufferAccess(format, size, buffer.getAllocation().getHostPtr()));
407 return bufferImage;
408 }
409
makeTransferBuffer(const VkExtent3D & extent,const tcu::TextureFormat & format,const DeviceInterface & vkd,const VkDevice device,Allocator & alloc)410 de::MovePtr<BufferWithMemory> makeTransferBuffer(const VkExtent3D &extent, const tcu::TextureFormat &format,
411 const DeviceInterface &vkd, const VkDevice device, Allocator &alloc)
412 {
413 DE_ASSERT(extent.width > 0u);
414 DE_ASSERT(extent.height > 0u);
415 DE_ASSERT(extent.depth > 0u);
416
417 const auto pixelSizeBytes = tcu::getPixelSize(format);
418 const auto pixelCount = extent.width * extent.height * extent.depth;
419 const auto bufferSize = static_cast<VkDeviceSize>(pixelCount) * static_cast<VkDeviceSize>(pixelSizeBytes);
420 const auto bufferUsage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
421 const auto bufferCreateInfo = makeBufferCreateInfo(bufferSize, bufferUsage);
422
423 de::MovePtr<BufferWithMemory> buffer(
424 new BufferWithMemory(vkd, device, alloc, bufferCreateInfo, MemoryRequirement::HostVisible));
425 return buffer;
426 }
427
makeAndFillTransferBuffer(const VkExtent3D & extent,const tcu::TextureFormat & format,const DeviceInterface & vkd,const VkDevice device,Allocator & alloc)428 de::MovePtr<BufferWithMemory> makeAndFillTransferBuffer(const VkExtent3D &extent, const tcu::TextureFormat &format,
429 const DeviceInterface &vkd, const VkDevice device,
430 Allocator &alloc)
431 {
432 DE_ASSERT(tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
433
434 auto buffer = makeTransferBuffer(extent, format, vkd, device, alloc);
435 const auto size = makeIVec3(extent.width, extent.height, extent.depth);
436 auto bufferImg = makePixelBufferAccess(*buffer, size, format);
437
438 // Fill image with predefined pattern.
439 for (int z = 0; z < size.z(); ++z)
440 for (int y = 0; y < size.y(); ++y)
441 for (int x = 0; x < size.x(); ++x)
442 {
443 const tcu::UVec4 color(static_cast<uint32_t>(0x80 | x), static_cast<uint32_t>(0x80 | y),
444 static_cast<uint32_t>(0x80 | z), 1u);
445 bufferImg->setPixel(color, x, y, z);
446 }
447
448 return buffer;
449 }
450
// Creates an optimally-tiled 3D image usable as storage image and transfer source/destination,
// plus sampled image when requested.
de::MovePtr<ImageWithMemory> make3DImage(const DeviceInterface &vkd, const VkDevice device, Allocator &alloc,
                                         const VkFormat format, const VkExtent3D &extent, uint32_t mipLevels,
                                         const bool sampling)
{
    const VkImageUsageFlags imageUsage =
        (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
         (sampling ? VK_IMAGE_USAGE_SAMPLED_BIT : static_cast<VkImageUsageFlagBits>(0)));

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                             // const void* pNext;
        0u,                                  // VkImageCreateFlags flags;
        VK_IMAGE_TYPE_3D,                    // VkImageType imageType;
        format,                              // VkFormat format;
        extent,                              // VkExtent3D extent;
        mipLevels,                           // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
        VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
        imageUsage,                          // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        0u,                                  // uint32_t queueFamilyIndexCount;
        nullptr,                             // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };

    de::MovePtr<ImageWithMemory> image(
        new ImageWithMemory(vkd, device, alloc, imageCreateInfo, MemoryRequirement::Any));
    return image;
}
481
makeCommonImageSubresourceRange(uint32_t baseLevel,uint32_t levelCount)482 VkImageSubresourceRange makeCommonImageSubresourceRange(uint32_t baseLevel, uint32_t levelCount)
483 {
484 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, baseLevel, levelCount, 0u, 1u);
485 }
486
makeCommonImageSubresourceLayers(uint32_t mipLevel)487 VkImageSubresourceLayers makeCommonImageSubresourceLayers(uint32_t mipLevel)
488 {
489 return makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0u, 1u);
490 }
491
// Creates a 3D image view. When "slices" is provided (x=offset, y=range), a
// VkImageViewSlicedCreateInfoEXT structure is chained so the view only covers that Z range;
// the range component may be VK_REMAINING_3D_SLICES_EXT.
Move<VkImageView> make3DImageView(const DeviceInterface &vkd, const VkDevice device, const VkImage image,
                                  const VkFormat format, const tcu::Maybe<tcu::UVec2> &slices /*x=offset, y=range)*/,
                                  uint32_t mipLevel, uint32_t levelCount)
{
    const bool subSlice = static_cast<bool>(slices);

    // Must stay alive until createImageView() below: it is referenced through pNext.
    VkImageViewSlicedCreateInfoEXT sliceCreateInfo = initVulkanStructure();

    if (subSlice)
    {
        sliceCreateInfo.sliceOffset = slices->x();
        sliceCreateInfo.sliceCount = slices->y();
    }

    const VkImageViewCreateInfo viewCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,              // VkStructureType sType;
        (subSlice ? &sliceCreateInfo : nullptr),               // const void* pNext;
        0u,                                                    // VkImageViewCreateFlags flags;
        image,                                                 // VkImage image;
        VK_IMAGE_VIEW_TYPE_3D,                                 // VkImageViewType viewType;
        format,                                                // VkFormat format;
        makeComponentMappingRGBA(),                            // VkComponentMapping components;
        makeCommonImageSubresourceRange(mipLevel, levelCount), // VkImageSubresourceRange subresourceRange;
    };

    return createImageView(vkd, device, &viewCreateInfo);
}
519
makePipelineStage(VkShaderStageFlagBits shaderStage)520 VkPipelineStageFlagBits makePipelineStage(VkShaderStageFlagBits shaderStage)
521 {
522 if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
523 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
524 if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
525 return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
526
527 DE_ASSERT(false);
528 return VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM;
529 }
530
// Common setup for both flavors: creates the descriptor set layout, pool, set and pipeline layout
// for the two storage images, writes the given views into the set, and delegates to the graphics
// or compute runner depending on the configured stage.
void SlicedViewTestInstance::runPipeline(const DeviceInterface &vkd, const VkDevice device,
                                         const VkCommandBuffer cmdBuffer, const VkImageView slicedImage,
                                         const VkImageView auxiliarImage)
{
    // The layouts created and used here must match the shaders.
    const auto descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;

    DescriptorSetLayoutBuilder layoutBuilder;
    layoutBuilder.addSingleBinding(descriptorType, m_params.stage); // binding=0: slicedImage.
    layoutBuilder.addSingleBinding(descriptorType, m_params.stage); // binding=1: auxiliarImage.
    m_setLayout = layoutBuilder.build(vkd, device);

    DescriptorPoolBuilder poolBuilder;
    poolBuilder.addType(descriptorType, 2u);
    m_descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

    m_descriptorSet = makeDescriptorSet(vkd, device, m_descriptorPool.get(), m_setLayout.get());
    m_pipelineLayout = makePipelineLayout(vkd, device, m_setLayout.get());

    DescriptorSetUpdateBuilder updateBuilder;
    const auto slicedImageDescInfo = makeDescriptorImageInfo(VK_NULL_HANDLE, slicedImage, kUsageLayout);
    const auto auxiliarImageDescInfo = makeDescriptorImageInfo(VK_NULL_HANDLE, auxiliarImage, kUsageLayout);
    updateBuilder.writeSingle(m_descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType,
                              &slicedImageDescInfo);
    updateBuilder.writeSingle(m_descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType,
                              &auxiliarImageDescInfo);
    updateBuilder.update(vkd, device);

    if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
        runGraphicsPipeline(vkd, device, cmdBuffer);
    else if (m_params.stage == VK_SHADER_STAGE_COMPUTE_BIT)
        runComputePipeline(vkd, device, cmdBuffer);
    else
        DE_ASSERT(false);
}
566
// Records a render pass drawing one full-screen-triangle instance per slice. The framebuffer has no
// attachments (results go through the storage images), so it only fixes the render area dimensions.
void SlicedViewTestInstance::runGraphicsPipeline(const DeviceInterface &vkd, const VkDevice device,
                                                 const VkCommandBuffer cmdBuffer)
{
    const auto sliceExtent = m_params.getSliceExtent();
    const auto &binaries = m_context.getBinaryCollection();
    const auto vertShader = createShaderModule(vkd, device, binaries.get("vert"));
    const auto fragShader = createShaderModule(vkd, device, binaries.get("frag"));
    const auto extent = makeExtent3D(sliceExtent.width, sliceExtent.height, 1u);
    const auto bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;

    m_renderPass = makeRenderPass(vkd, device);
    m_framebuffer =
        makeFramebuffer(vkd, device, m_renderPass.get(), 0u, nullptr, sliceExtent.width, sliceExtent.height);

    const std::vector<VkViewport> viewports(1u, makeViewport(extent));
    const std::vector<VkRect2D> scissors(1u, makeRect2D(extent));

    // No vertex buffers: positions are generated in the vertex shader.
    const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = initVulkanStructure();

    m_pipeline =
        makeGraphicsPipeline(vkd, device, m_pipelineLayout.get(), vertShader.get(), VK_NULL_HANDLE, VK_NULL_HANDLE,
                             VK_NULL_HANDLE, fragShader.get(), m_renderPass.get(), viewports, scissors,
                             VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, 0u, &vertexInputStateCreateInfo);

    beginRenderPass(vkd, cmdBuffer, m_renderPass.get(), m_framebuffer.get(), scissors.at(0u));
    vkd.cmdBindPipeline(cmdBuffer, bindPoint, m_pipeline.get());
    vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, m_pipelineLayout.get(), 0u, 1u, &m_descriptorSet.get(), 0u,
                              nullptr);
    // One instance per slice; the vertex shader forwards gl_InstanceIndex as the Z coordinate.
    vkd.cmdDraw(cmdBuffer, kVertexCount, sliceExtent.depth, 0u, 0u);
    endRenderPass(vkd, cmdBuffer);
}
598
runComputePipeline(const DeviceInterface & vkd,const VkDevice device,const VkCommandBuffer cmdBuffer)599 void SlicedViewTestInstance::runComputePipeline(const DeviceInterface &vkd, const VkDevice device,
600 const VkCommandBuffer cmdBuffer)
601 {
602 const auto bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
603 const auto compShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("comp"));
604
605 m_pipeline = makeComputePipeline(vkd, device, m_pipelineLayout.get(), compShader.get());
606
607 vkd.cmdBindPipeline(cmdBuffer, bindPoint, m_pipeline.get());
608 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, m_pipelineLayout.get(), 0u, 1u, &m_descriptorSet.get(), 0u,
609 nullptr);
610 vkd.cmdDispatch(cmdBuffer, m_params.getActualRange(), 1u, 1u);
611 }
612
// Verifies that sampling through the given view still sees the whole selected level: a compute shader
// samples every texel of the level through the view into an auxiliary storage image, and the result is
// compared against the actual contents of the full image's level (both copied to host-visible buffers).
// Returns true when both match exactly.
// Note: restores the original `&copyRegion` argument that had been mangled into an HTML entity.
bool SlicedViewTestInstance::runSamplingPipeline(const VkImage fullImage, const VkImageView slicedView,
                                                 const VkExtent3D &levelExtent)
{
    const auto &vkd = m_context.getDeviceInterface();
    const auto device = m_context.getDevice();
    const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
    const auto queue = m_context.getUniversalQueue();
    auto &alloc = m_context.getDefaultAllocator();

    const auto bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
    const auto shaderStage = VK_SHADER_STAGE_COMPUTE_BIT;
    const auto pipelineStage = makePipelineStage(shaderStage);

    // Command pool and buffer.
    const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
    const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const auto cmdBuffer = cmdBufferPtr.get();

    // Descriptor set layout and pipeline layout.
    DescriptorSetLayoutBuilder setLayoutBuilder;
    setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, shaderStage);
    setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, shaderStage);
    const auto setLayout = setLayoutBuilder.build(vkd, device);
    const auto pipelineLayout = makePipelineLayout(vkd, device, setLayout.get());

    // Pipeline.
    const auto compShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("compSample"));
    const auto pipeline = makeComputePipeline(vkd, device, pipelineLayout.get(), compShader.get());

    // Descriptor pool and set.
    DescriptorPoolBuilder poolBuilder;
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
    const auto descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    const auto descriptorSet = makeDescriptorSet(vkd, device, descriptorPool.get(), setLayout.get());

    // Update descriptor set.
    // Nearest filtering with normalized coordinates; the shader samples at texel centers.
    const VkSamplerCreateInfo samplerCreateInfo = {
        VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,   // VkStructureType sType;
        nullptr,                                 // const void* pNext;
        0u,                                      // VkSamplerCreateFlags flags;
        VK_FILTER_NEAREST,                       // VkFilter magFilter;
        VK_FILTER_NEAREST,                       // VkFilter minFilter;
        VK_SAMPLER_MIPMAP_MODE_NEAREST,          // VkSamplerMipmapMode mipmapMode;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,          // VkSamplerAddressMode addressModeU;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,          // VkSamplerAddressMode addressModeV;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,          // VkSamplerAddressMode addressModeW;
        0.0f,                                    // float mipLodBias;
        VK_FALSE,                                // VkBool32 anisotropyEnable;
        1.0f,                                    // float maxAnisotropy;
        VK_FALSE,                                // VkBool32 compareEnable;
        VK_COMPARE_OP_NEVER,                     // VkCompareOp compareOp;
        0.0f,                                    // float minLod;
        0.0f,                                    // float maxLod;
        VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,   // VkBorderColor borderColor;
        VK_FALSE,                                // VkBool32 unnormalizedCoordinates;
    };
    const auto sampler = createSampler(vkd, device, &samplerCreateInfo);

    // This will be used as a storage image to verify the sampling results.
    // It has the same size as the full level extent, but only a single level and not sliced.
    const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, levelExtent, 1u, false /*sampling*/);
    const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);

    DescriptorSetUpdateBuilder updateBuilder;
    const auto sampledImageInfo = makeDescriptorImageInfo(sampler.get(), slicedView, kUsageLayout);
    const auto storageImageInfo = makeDescriptorImageInfo(VK_NULL_HANDLE, auxiliarView.get(), kUsageLayout);
    updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u),
                              VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &sampledImageInfo);
    updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u),
                              VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &storageImageInfo);
    updateBuilder.update(vkd, device);

    const auto tcuFormat = mapVkFormat(kFormat);
    const auto verifBuffer = makeTransferBuffer(levelExtent, tcuFormat, vkd, device, alloc);
    const auto refBuffer = makeTransferBuffer(levelExtent, tcuFormat, vkd, device, alloc);

    beginCommandBuffer(vkd, cmdBuffer);

    // Move auxiliar image to the proper layout.
    const auto shaderAccess = (VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
    const auto colorSRR = makeCommonImageSubresourceRange(0u, 1u);
    const auto preDispatchBarrier = makeImageMemoryBarrier(0u, shaderAccess, VK_IMAGE_LAYOUT_UNDEFINED,
                                                           VK_IMAGE_LAYOUT_GENERAL, auxiliarImage->get(), colorSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, pipelineStage,
                                  &preDispatchBarrier);

    // Single workgroup covering the full level (see the compSample shader's local size).
    vkd.cmdBindPipeline(cmdBuffer, bindPoint, pipeline.get());
    vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
    vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);

    // Sync shader writes before copying to verification buffer.
    const auto preCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preCopyBarrier);

    // Copy storage image to verification buffer.
    const auto colorSRL = makeCommonImageSubresourceLayers(0u);
    const auto copyRegion = makeBufferImageCopy(levelExtent, colorSRL);
    vkd.cmdCopyImageToBuffer(cmdBuffer, auxiliarImage->get(), kUsageLayout, verifBuffer->get(), 1u, &copyRegion);

    // Copy full level from the original full image to the reference buffer to compare them.
    const auto refSRL = makeCommonImageSubresourceLayers(m_params.getSelectedLevel());
    const auto refCopy = makeBufferImageCopy(levelExtent, refSRL);
    vkd.cmdCopyImageToBuffer(cmdBuffer, fullImage, kUsageLayout, refBuffer->get(), 1u, &refCopy);

    // Sync copies to host.
    const auto postCopyBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                             &postCopyBarrier);

    endCommandBuffer(vkd, cmdBuffer);
    submitCommandsAndWait(vkd, device, queue, cmdBuffer);

    // Compare both buffers.
    auto &verifBufferAlloc = verifBuffer->getAllocation();
    auto &refBufferAlloc = refBuffer->getAllocation();
    invalidateAlloc(vkd, device, verifBufferAlloc);
    invalidateAlloc(vkd, device, refBufferAlloc);

    const auto iExtent = makeIVec3(levelExtent.width, levelExtent.height, levelExtent.depth);
    const tcu::ConstPixelBufferAccess verifAcces(tcuFormat, iExtent, verifBufferAlloc.getHostPtr());
    const tcu::ConstPixelBufferAccess refAccess(tcuFormat, iExtent, refBufferAlloc.getHostPtr());

    auto &log = m_context.getTestContext().getLog();
    const tcu::UVec4 threshold(0u, 0u, 0u, 0u); // Integer format: require an exact match.
    return tcu::intThresholdCompare(log, "SamplingResult", "", refAccess, verifAcces, threshold,
                                    tcu::COMPARE_LOG_ON_ERROR);
}
741
// Load variant: reference data is uploaded into a sub-range of slices of a full 3D image, a shader then reads every
// texel through a sliced view of that range and stores it into a small auxiliar image, and finally the auxiliar
// image is copied to a host-visible buffer and compared against the original reference data.
tcu::TestStatus SlicedViewLoadTestInstance::iterate(void)
{
    const auto &vkd = m_context.getDeviceInterface();
    const auto device = m_context.getDevice();
    auto &alloc = m_context.getDefaultAllocator();
    const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
    const auto queue = m_context.getUniversalQueue();

    const auto mipLevel = m_params.getSelectedLevel();
    const auto fullExtent = makeExtent3D(m_params.width, m_params.height, m_params.depth);
    const auto sliceExtent = m_params.getSliceExtent();
    const auto tcuFormat = mapVkFormat(kFormat);
    // auxiliarBuffer holds the reference contents; verifBuffer will receive the shader results.
    const auto auxiliarBuffer = makeAndFillTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
    const auto verifBuffer = makeTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
    const auto fullImage =
        make3DImage(vkd, device, alloc, kFormat, fullExtent, m_params.getFullImageLevels(), m_params.sampleImg);
    const auto fullSRR = makeCommonImageSubresourceRange(0u, VK_REMAINING_MIP_LEVELS);
    const auto singleSRR = makeCommonImageSubresourceRange(0u, 1u);
    const auto targetLevelSRL = makeCommonImageSubresourceLayers(mipLevel);
    const auto baseLevelSRL = makeCommonImageSubresourceLayers(0u);
    const auto clearColor = makeClearValueColorU32(0u, 0u, 0u, 0u);
    const auto pipelineStage = makePipelineStage(m_params.stage);

    const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
    const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const auto cmdBuffer = cmdBufferPtr.get();

    beginCommandBuffer(vkd, cmdBuffer);

    // Zero-out full image.
    const auto preClearBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preClearBarrier);
    vkd.cmdClearColorImage(cmdBuffer, fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColor.color, 1u,
                           &fullSRR);

    // Copy reference buffer to full image at the right offset.
    // Transfer-to-transfer barrier: the clear must complete before the buffer-to-image copy overwrites the slices.
    const auto preCopyBarrier = makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
                                                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  &preCopyBarrier);

    // The destination region starts at slice m_params.offset in the selected mip level.
    const VkBufferImageCopy sliceCopy = {
        0ull, // VkDeviceSize bufferOffset;
        0u, // uint32_t bufferRowLength;
        0u, // uint32_t bufferImageHeight;
        targetLevelSRL, // VkImageSubresourceLayers imageSubresource;
        makeOffset3D(0, 0, static_cast<int32_t>(m_params.offset)), // VkOffset3D imageOffset;
        sliceExtent, // VkExtent3D imageExtent;
    };
    vkd.cmdCopyBufferToImage(cmdBuffer, auxiliarBuffer->get(), fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             1u, &sliceCopy);

    // Move full image to the general layout to be able to read from or write to it from the shader.
    // Note: read-only optimal is not a valid layout for this.
    const auto postCopyBarrier =
        makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, fullImage->get(), fullSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &postCopyBarrier);

    // Create sliced view of the full image.
    const auto slicedView =
        make3DImageView(vkd, device, fullImage->get(), kFormat,
                        tcu::just(tcu::UVec2(m_params.offset, m_params.getSlicedViewRange())), mipLevel, 1u);

    // Create storage image and view with reduced size (this will be the destination image in the shader).
    const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, sliceExtent, 1u, false /*sampling*/);
    const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);

    // Move the auxiliar image to the general layout for writing.
    const auto preWriteBarrier = makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                        kUsageLayout, auxiliarImage->get(), singleSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, pipelineStage, &preWriteBarrier);

    // Run load operation.
    runPipeline(vkd, device, cmdBuffer, slicedView.get(), auxiliarView.get());

    // Copy auxiliar image (result) to verification buffer.
    const auto preVerifCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preVerifCopyBarrier);
    const auto verifCopyRegion = makeBufferImageCopy(sliceExtent, baseLevelSRL);
    vkd.cmdCopyImageToBuffer(cmdBuffer, auxiliarImage->get(), kUsageLayout, verifBuffer->get(), 1u, &verifCopyRegion);

    // Sync verification buffer with host reads.
    const auto preHostBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                             &preHostBarrier);

    endCommandBuffer(vkd, cmdBuffer);
    submitCommandsAndWait(vkd, device, queue, cmdBuffer);

    const auto sliceExtentIV3 = makeIVec3(sliceExtent.width, sliceExtent.height, sliceExtent.depth);
    auto &auxiliarBufferAlloc = auxiliarBuffer->getAllocation();
    auto &verifBufferAlloc = verifBuffer->getAllocation();

    // Invalidate verification buffer allocation.
    // Note: auxiliarBuffer was filled by the host before submission, so only verifBuffer needs invalidation here.
    invalidateAlloc(vkd, device, verifBufferAlloc);

    // Compare auxiliar buffer and verification buffer.
    const tcu::ConstPixelBufferAccess initialImage(tcuFormat, sliceExtentIV3, auxiliarBufferAlloc.getHostPtr());
    const tcu::ConstPixelBufferAccess finalImage(tcuFormat, sliceExtentIV3, verifBufferAlloc.getHostPtr());

    auto &log = m_context.getTestContext().getLog();
    // Integer format (R8G8B8A8_UINT): results must match exactly, so the threshold is zero.
    const tcu::UVec4 threshold(0u, 0u, 0u, 0u);

    if (!tcu::intThresholdCompare(log, "Comparison", "Comparison of reference and result", initialImage, finalImage,
                                  threshold, tcu::COMPARE_LOG_ON_ERROR))
        return tcu::TestStatus::fail("Image comparison failed; check log for details");

    // Optionally verify the full selected level can also be sampled correctly.
    if (m_params.sampleImg && !runSamplingPipeline(fullImage->get(), slicedView.get(), m_params.getFullLevelExtent()))
        return tcu::TestStatus::fail("Sampling full level failed; check log for details");

    return tcu::TestStatus::pass("Pass");
}
858
// Store variant: reference data is uploaded into a small auxiliar image, a shader then writes each texel through a
// sliced view into the matching slice range of a full (cleared) 3D image, and finally that slice range is copied out
// of the full image and compared against the original reference data.
tcu::TestStatus SlicedViewStoreTestInstance::iterate(void)
{
    const auto &vkd = m_context.getDeviceInterface();
    const auto device = m_context.getDevice();
    auto &alloc = m_context.getDefaultAllocator();
    const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
    const auto queue = m_context.getUniversalQueue();

    const auto mipLevel = m_params.getSelectedLevel();
    const auto fullExtent = makeExtent3D(m_params.width, m_params.height, m_params.depth);
    const auto sliceExtent = m_params.getSliceExtent();
    const auto tcuFormat = mapVkFormat(kFormat);
    // auxiliarBuffer holds the reference contents; verifBuffer will receive the shader results.
    const auto auxiliarBuffer = makeAndFillTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
    const auto verifBuffer = makeTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
    const auto fullImage =
        make3DImage(vkd, device, alloc, kFormat, fullExtent, m_params.getFullImageLevels(), m_params.sampleImg);
    const auto fullSRR = makeCommonImageSubresourceRange(0u, VK_REMAINING_MIP_LEVELS);
    const auto singleSRR = makeCommonImageSubresourceRange(0u, 1u);
    const auto targetLevelSRL = makeCommonImageSubresourceLayers(mipLevel);
    const auto baseLevelSRL = makeCommonImageSubresourceLayers(0u);
    const auto clearColor = makeClearValueColorU32(0u, 0u, 0u, 0u);
    const auto pipelineStage = makePipelineStage(m_params.stage);

    const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
    const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const auto cmdBuffer = cmdBufferPtr.get();

    beginCommandBuffer(vkd, cmdBuffer);

    // Zero-out full image.
    const auto preClearBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preClearBarrier);
    vkd.cmdClearColorImage(cmdBuffer, fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColor.color, 1u,
                           &fullSRR);

    // Create sliced view of the full image.
    const auto slicedView =
        make3DImageView(vkd, device, fullImage->get(), kFormat,
                        tcu::just(tcu::UVec2(m_params.offset, m_params.getSlicedViewRange())), mipLevel, 1u);

    // Create storage image and view with reduced size (this will be the source image in the shader).
    const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, sliceExtent, 1u, false /*sampling*/);
    const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);

    // Copy reference buffer into auxiliar image.
    const auto preCopyBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, auxiliarImage->get(), singleSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preCopyBarrier);
    const auto sliceCopy = makeBufferImageCopy(sliceExtent, baseLevelSRL);
    vkd.cmdCopyBufferToImage(cmdBuffer, auxiliarBuffer->get(), auxiliarImage->get(),
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &sliceCopy);

    // Move both images to the general layout for reading and writing.
    // Note: read-only optimal is not a valid layout for the read image.
    const auto preShaderBarrierAux =
        makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, auxiliarImage->get(), singleSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &preShaderBarrierAux);
    const auto preShaderBarrierFull =
        makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, fullImage->get(), fullSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &preShaderBarrierFull);

    // Run store operation.
    runPipeline(vkd, device, cmdBuffer, slicedView.get(), auxiliarView.get());

    // Copy the right section of the full image (result) to verification buffer.
    const auto preVerifCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preVerifCopyBarrier);

    // The source region starts at slice m_params.offset in the selected mip level, matching where the shader wrote.
    const VkBufferImageCopy verifCopy = {
        0ull, // VkDeviceSize bufferOffset;
        0u, // uint32_t bufferRowLength;
        0u, // uint32_t bufferImageHeight;
        targetLevelSRL, // VkImageSubresourceLayers imageSubresource;
        makeOffset3D(0, 0, static_cast<int32_t>(m_params.offset)), // VkOffset3D imageOffset;
        sliceExtent, // VkExtent3D imageExtent;
    };
    vkd.cmdCopyImageToBuffer(cmdBuffer, fullImage->get(), kUsageLayout, verifBuffer->get(), 1u, &verifCopy);

    // Sync verification buffer with host reads.
    const auto preHostBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                             &preHostBarrier);

    endCommandBuffer(vkd, cmdBuffer);
    submitCommandsAndWait(vkd, device, queue, cmdBuffer);

    const auto sliceExtentIV3 = makeIVec3(sliceExtent.width, sliceExtent.height, sliceExtent.depth);
    auto &auxiliarBufferAlloc = auxiliarBuffer->getAllocation();
    auto &verifBufferAlloc = verifBuffer->getAllocation();

    // Invalidate verification buffer allocation.
    // Note: auxiliarBuffer was filled by the host before submission, so only verifBuffer needs invalidation here.
    invalidateAlloc(vkd, device, verifBufferAlloc);

    // Compare auxiliar buffer and verification buffer.
    const tcu::ConstPixelBufferAccess initialImage(tcuFormat, sliceExtentIV3, auxiliarBufferAlloc.getHostPtr());
    const tcu::ConstPixelBufferAccess finalImage(tcuFormat, sliceExtentIV3, verifBufferAlloc.getHostPtr());

    auto &log = m_context.getTestContext().getLog();
    // Integer format (R8G8B8A8_UINT): results must match exactly, so the threshold is zero.
    const tcu::UVec4 threshold(0u, 0u, 0u, 0u);

    if (!tcu::intThresholdCompare(log, "Comparison", "Comparison of reference and result", initialImage, finalImage,
                                  threshold, tcu::COMPARE_LOG_ON_ERROR))
        return tcu::TestStatus::fail("Image comparison failed; check log for details");

    // Optionally verify the full selected level can also be sampled correctly.
    if (m_params.sampleImg && !runSamplingPipeline(fullImage->get(), slicedView.get(), m_params.getFullLevelExtent()))
        return tcu::TestStatus::fail("Sampling full level failed; check log for details");

    return tcu::TestStatus::pass("Pass");
}
973
974 using TestCaseGroupPtr = de::MovePtr<tcu::TestCaseGroup>;
975
976 } // namespace
977
createImageSlicedViewOf3DTests(tcu::TestContext & testCtx)978 tcu::TestCaseGroup *createImageSlicedViewOf3DTests(tcu::TestContext &testCtx)
979 {
980 TestCaseGroupPtr imageTests(new tcu::TestCaseGroup(testCtx, "sliced_view_of_3d_image"));
981
982 const struct
983 {
984 VkShaderStageFlagBits stage;
985 const char *name;
986 } stageCases[] = {
987 {VK_SHADER_STAGE_COMPUTE_BIT, "comp"},
988 {VK_SHADER_STAGE_FRAGMENT_BIT, "frag"},
989 };
990
991 const struct
992 {
993 TestType testType;
994 const char *name;
995 } testTypeCases[] = {
996 {TestType::LOAD, "load"},
997 {TestType::STORE, "store"},
998 };
999
1000 const struct
1001 {
1002 bool sampleImg;
1003 const char *suffix;
1004 } samplingCases[] = {
1005 {false, ""},
1006 {true, "_with_sampling"},
1007 };
1008
1009 const uint32_t seed = 1667817299u;
1010 de::Random rnd(seed);
1011
1012 // Basic tests with 2 slices and a view of the first or second slice.
1013 {
1014 const uint32_t basicDepth = 2u;
1015 const uint32_t basicRange = 1u;
1016
1017 TestCaseGroupPtr basicTests(new tcu::TestCaseGroup(testCtx, "basic"));
1018
1019 for (const auto &testTypeCase : testTypeCases)
1020 {
1021 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1022
1023 for (const auto &stageCase : stageCases)
1024 {
1025 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1026
1027 for (uint32_t offset = 0u; offset < basicDepth; ++offset)
1028 {
1029 for (const auto &samplingCase : samplingCases)
1030 {
1031 const auto testName = "offset_" + std::to_string(offset) + samplingCase.suffix;
1032 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, basicDepth, offset,
1033 basicRange, tcu::Nothing, samplingCase.sampleImg);
1034
1035 stageGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1036 }
1037 }
1038
1039 testTypeGroup->addChild(stageGroup.release());
1040 }
1041
1042 basicTests->addChild(testTypeGroup.release());
1043 }
1044
1045 imageTests->addChild(basicTests.release());
1046 }
1047
1048 // Full slice tests.
1049 {
1050 const uint32_t fullDepth = 4u;
1051
1052 TestCaseGroupPtr fullSliceTests(new tcu::TestCaseGroup(testCtx, "full_slice"));
1053
1054 for (const auto &testTypeCase : testTypeCases)
1055 {
1056 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1057
1058 for (const auto &stageCase : stageCases)
1059 {
1060 for (const auto &samplingCase : samplingCases)
1061 {
1062 const auto testName = std::string(stageCase.name) + samplingCase.suffix;
1063 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, fullDepth, 0u, fullDepth,
1064 tcu::Nothing, samplingCase.sampleImg);
1065 testTypeGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1066 }
1067 }
1068
1069 fullSliceTests->addChild(testTypeGroup.release());
1070 }
1071
1072 imageTests->addChild(fullSliceTests.release());
1073 }
1074
1075 // Pseudorandom test cases.
1076 {
1077 using CaseId = std::tuple<uint32_t, uint32_t, uint32_t>; // depth, offset, range
1078 using CaseIdSet = std::set<CaseId>;
1079
1080 const uint32_t depthCases = 5u;
1081 const uint32_t rangeCases = 5u;
1082 const int minDepth = 10u;
1083 const int maxDepth = 32u;
1084
1085 TestCaseGroupPtr randomTests(new tcu::TestCaseGroup(testCtx, "random"));
1086
1087 for (const auto &testTypeCase : testTypeCases)
1088 {
1089 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1090
1091 for (const auto &stageCase : stageCases)
1092 {
1093 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1094
1095 CaseIdSet generatedCases;
1096
1097 for (uint32_t i = 0u; i < depthCases; ++i)
1098 {
1099 const uint32_t depth = static_cast<uint32_t>(rnd.getInt(minDepth, maxDepth));
1100
1101 for (uint32_t j = 0u; j < rangeCases; ++j)
1102 {
1103 uint32_t offset = 0u;
1104 uint32_t range = 0u;
1105
1106 for (;;)
1107 {
1108 DE_ASSERT(depth > 0u);
1109 offset = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(depth - 1u)));
1110
1111 DE_ASSERT(offset < depth);
1112 range = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(depth - offset)));
1113
1114 // 0 is interpreted as VK_REMAINING_3D_SLICES_EXT.
1115 if (range == 0u)
1116 range = VK_REMAINING_3D_SLICES_EXT;
1117
1118 // The current seed may generate duplicate cases with non-unique names, so we filter those out.
1119 const CaseId currentCase(depth, offset, range);
1120 if (de::contains(begin(generatedCases), end(generatedCases), currentCase))
1121 continue;
1122
1123 generatedCases.insert(currentCase);
1124 break;
1125 }
1126
1127 const auto rangeStr =
1128 ((range == VK_REMAINING_3D_SLICES_EXT) ? "remaining_3d_slices" : std::to_string(range));
1129 const auto testName = "depth_" + std::to_string(depth) + "_offset_" + std::to_string(offset) +
1130 "_range_" + rangeStr;
1131 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, depth, offset, range,
1132 tcu::Nothing, false);
1133
1134 stageGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1135 }
1136 }
1137
1138 testTypeGroup->addChild(stageGroup.release());
1139 }
1140
1141 randomTests->addChild(testTypeGroup.release());
1142 }
1143
1144 imageTests->addChild(randomTests.release());
1145 }
1146
1147 // Mip level test cases.
1148 {
1149 using CaseId = std::tuple<uint32_t, uint32_t>; // depth, offset, range
1150 using CaseIdSet = std::set<CaseId>;
1151
1152 const uint32_t casesPerLevel = 2u;
1153 const uint32_t width = kWidth;
1154 const uint32_t height = kWidth;
1155 const uint32_t depth = kWidth;
1156 const uint32_t maxLevels = TestParams::getMaxMipLevelCountForSize(kWidth);
1157
1158 TestCaseGroupPtr mipLevelTests(new tcu::TestCaseGroup(testCtx, "mip_level"));
1159
1160 for (const auto &testTypeCase : testTypeCases)
1161 {
1162 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1163
1164 for (const auto &stageCase : stageCases)
1165 {
1166 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1167
1168 for (uint32_t level = 0u; level < maxLevels; ++level)
1169 {
1170 const auto levelSize = (depth >> level);
1171 const auto groupName = "level_" + std::to_string(level);
1172 CaseIdSet generatedCases;
1173
1174 DE_ASSERT(levelSize > 0u);
1175
1176 TestCaseGroupPtr levelGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
1177
1178 // Generate a few pseudorandom cases per mip level.
1179 for (uint32_t i = 0u; i < casesPerLevel; ++i)
1180 {
1181 uint32_t offset = 0u;
1182 uint32_t range = 0u;
1183
1184 for (;;)
1185 {
1186 offset = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(levelSize - 1u)));
1187 DE_ASSERT(offset < levelSize);
1188
1189 range = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(levelSize - offset)));
1190
1191 // 0 is interpreted as VK_REMAINING_3D_SLICES_EXT.
1192 if (range == 0u)
1193 range = VK_REMAINING_3D_SLICES_EXT;
1194
1195 const CaseId currentCase(offset, range);
1196 if (de::contains(begin(generatedCases), end(generatedCases), currentCase))
1197 continue;
1198
1199 generatedCases.insert(currentCase);
1200 break;
1201 }
1202
1203 const auto rangeStr =
1204 ((range == VK_REMAINING_3D_SLICES_EXT) ? "remaining_3d_slices" : std::to_string(range));
1205 const auto testName = "offset_" + std::to_string(offset) + "_range_" + rangeStr;
1206 TestParams params(testTypeCase.testType, stageCase.stage, width, height, depth, offset, range,
1207 tcu::just(level), false);
1208
1209 levelGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1210 }
1211
1212 stageGroup->addChild(levelGroup.release());
1213 }
1214
1215 testTypeGroup->addChild(stageGroup.release());
1216 }
1217
1218 mipLevelTests->addChild(testTypeGroup.release());
1219 }
1220
1221 imageTests->addChild(mipLevelTests.release());
1222 }
1223
1224 return imageTests.release();
1225 }
1226
1227 } // namespace pipeline
1228 } // namespace vkt
1229