1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2022 The Khronos Group Inc.
6 * Copyright (c) 2022 NVIDIA Corporation.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Ray Query Opacity Micromap Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktRayQueryOpacityMicromapTests.hpp"
26 #include "vktTestCase.hpp"
27
28 #include "vkRayTracingUtil.hpp"
29 #include "vkObjUtil.hpp"
30 #include "vkCmdUtil.hpp"
31 #include "vkBufferWithMemory.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkBarrierUtil.hpp"
35 #include "vktTestGroupUtil.hpp"
36
37 #include "deUniquePtr.hpp"
38 #include "deRandom.hpp"
39
40 #include <sstream>
41 #include <vector>
42 #include <iostream>
43
44 namespace vkt
45 {
46 namespace RayQuery
47 {
48
49 namespace
50 {
51
52 using namespace vk;
53
54 enum ShaderSourcePipeline
55 {
56 SSP_GRAPHICS_PIPELINE,
57 SSP_COMPUTE_PIPELINE,
58 SSP_RAY_TRACING_PIPELINE
59 };
60
61 enum ShaderSourceType
62 {
63 SST_VERTEX_SHADER,
64 SST_COMPUTE_SHADER,
65 SST_RAY_GENERATION_SHADER,
66 };
67
68 enum TestFlagBits
69 {
70 TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE = 1U << 0,
71 TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG = 1U << 1,
72 TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE = 1U << 2,
73 TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE = 1U << 3,
74 TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG = 1U << 4,
75 TEST_FLAG_BIT_LAST = 1U << 5,
76 };
77
78 std::vector<std::string> testFlagBitNames = {
79 "force_opaque_instance", "force_opaque_ray_flag", "disable_opacity_micromap_instance",
80 "force_2_state_instance", "force_2_state_ray_flag",
81 };
82
83 enum CopyType
84 {
85 CT_NONE,
86 CT_FIRST_ACTIVE,
87 CT_CLONE = CT_FIRST_ACTIVE,
88 CT_COMPACT,
89 CT_NUM_COPY_TYPES,
90 };
91
92 std::vector<std::string> copyTypeNames{
93 "None",
94 "Clone",
95 "Compact",
96 };
97
98 struct TestParams
99 {
100 ShaderSourceType shaderSourceType;
101 ShaderSourcePipeline shaderSourcePipeline;
102 bool useSpecialIndex;
103 uint32_t testFlagMask;
104 uint32_t subdivisionLevel; // Must be 0 for useSpecialIndex
105 uint32_t mode; // Special index value if useSpecialIndex, 2 or 4 for number of states otherwise
106 uint32_t seed;
107 CopyType copyType;
108 bool useMaintenance5;
109 };
110
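// Each invocation of the generated shader loops over rays with stride kNumThreadsAtOnce
// (see mainLoop in initPrograms), so a single launch of kNumThreadsAtOnce threads covers
// every subtriangle regardless of the subdivision level.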
111 static constexpr uint32_t kNumThreadsAtOnce = 1024;
112 static constexpr uint32_t kWorkGroupCount = 8;
113 static constexpr uint32_t kLocalSize = 128;
114 DE_STATIC_ASSERT(kWorkGroupCount * kLocalSize == kNumThreadsAtOnce);
115
116 class OpacityMicromapCase : public TestCase
117 {
118 public:
119 OpacityMicromapCase(tcu::TestContext &testCtx, const std::string &name, const TestParams &params);
120 virtual ~OpacityMicromapCase(void)
121 {
122 }
123
124 virtual void checkSupport(Context &context) const;
125 virtual void initPrograms(vk::SourceCollections &programCollection) const;
126 virtual TestInstance *createInstance(Context &context) const;
127
128 protected:
129 TestParams m_params;
130 };
131
132 class OpacityMicromapInstance : public TestInstance
133 {
134 public:
135 OpacityMicromapInstance(Context &context, const TestParams &params);
136 virtual ~OpacityMicromapInstance(void)
137 {
138 }
139
140 virtual tcu::TestStatus iterate(void);
141
142 protected:
143 TestParams m_params;
144 };
145
146 OpacityMicromapCase::OpacityMicromapCase(tcu::TestContext &testCtx, const std::string &name, const TestParams &params)
147 : TestCase(testCtx, name)
148 , m_params(params)
149 {
150 }
151
152 void OpacityMicromapCase::checkSupport(Context &context) const
153 {
154 context.requireDeviceFunctionality("VK_KHR_ray_query");
155 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
156 context.requireDeviceFunctionality("VK_EXT_opacity_micromap");
157
158 if (m_params.useMaintenance5)
159 context.requireDeviceFunctionality("VK_KHR_maintenance5");
160
161 const VkPhysicalDeviceRayQueryFeaturesKHR &rayQueryFeaturesKHR = context.getRayQueryFeatures();
162 if (rayQueryFeaturesKHR.rayQuery == false)
163 TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayQueryFeaturesKHR.rayQuery");
164
165 const VkPhysicalDeviceAccelerationStructureFeaturesKHR &accelerationStructureFeaturesKHR =
166 context.getAccelerationStructureFeatures();
167 if (accelerationStructureFeaturesKHR.accelerationStructure == false)
168 TCU_THROW(TestError,
169 "VK_KHR_ray_query requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure");
170
171 const VkPhysicalDeviceOpacityMicromapFeaturesEXT &opacityMicromapFeaturesEXT =
172 context.getOpacityMicromapFeaturesEXT();
173 if (opacityMicromapFeaturesEXT.micromap == false)
174 TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceOpacityMicromapFeaturesEXT.micromap");
175
176 if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
177 {
178 context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
179
180 const VkPhysicalDeviceRayTracingPipelineFeaturesKHR &rayTracingPipelineFeaturesKHR =
181 context.getRayTracingPipelineFeatures();
182
183 if (rayTracingPipelineFeaturesKHR.rayTracingPipeline == false)
184 TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipeline");
185 }
186
187 switch (m_params.shaderSourceType)
188 {
189 case SST_VERTEX_SHADER:
190 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
191 break;
192 default:
193 break;
194 }
195
196 const VkPhysicalDeviceOpacityMicromapPropertiesEXT &opacityMicromapPropertiesEXT =
197 context.getOpacityMicromapPropertiesEXT();
198
199 if (!m_params.useSpecialIndex)
200 {
201 switch (m_params.mode)
202 {
203 case 2:
204 if (m_params.subdivisionLevel > opacityMicromapPropertiesEXT.maxOpacity2StateSubdivisionLevel)
205 TCU_THROW(NotSupportedError, "Requires a higher supported 2 state subdivision level");
206 break;
207 case 4:
208 if (m_params.subdivisionLevel > opacityMicromapPropertiesEXT.maxOpacity4StateSubdivisionLevel)
209 TCU_THROW(NotSupportedError, "Requires a higher supported 4 state subdivision level");
210 break;
211 default:
212 DE_ASSERT(false);
213 break;
214 }
215 }
216 }
217
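// Subdivision level N produces 4^N subtriangles: each level splits every triangle into four.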
218 static uint32_t levelToSubtriangles(uint32_t level)
219 {
220 return 1 << (2 * level);
221 }
222
223 void OpacityMicromapCase::initPrograms(vk::SourceCollections &programCollection) const
224 {
225 const vk::ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);
226
227 uint32_t numRays = levelToSubtriangles(m_params.subdivisionLevel);
228
229 std::string flagsString =
230 (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG) ? "gl_RayFlagsOpaqueEXT" : "gl_RayFlagsNoneEXT";
231
232 if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG)
233 flagsString += " | gl_RayFlagsForceOpacityMicromap2StateEXT";
234
235 std::ostringstream sharedHeader;
236 sharedHeader << "#version 460 core\n"
237 << "#extension GL_EXT_ray_query : require\n"
238 << "#extension GL_EXT_opacity_micromap : require\n"
239 << "\n"
240 << "layout(set=0, binding=0) uniform accelerationStructureEXT topLevelAS;\n"
241 << "layout(set=0, binding=1, std430) buffer RayOrigins {\n"
242 << " vec4 values[" << numRays << "];\n"
243 << "} origins;\n"
244 << "layout(set=0, binding=2, std430) buffer OutputModes {\n"
245 << " uint values[" << numRays << "];\n"
246 << "} modes;\n";
247
248 std::ostringstream mainLoop;
249 mainLoop
250 << " while (index < " << numRays << ") {\n"
251 << " const uint cullMask = 0xFF;\n"
252 << " const vec3 origin = origins.values[index].xyz;\n"
253 << " const vec3 direction = vec3(0.0, 0.0, -1.0);\n"
254 << " const float tMin = 0.0f;\n"
255 << " const float tMax = 2.0f;\n"
256 << " uint outputVal = 0;\n" // 0 for miss, 1 for non-opaque, 2 for opaque
257 << " rayQueryEXT rq;\n"
258 << " rayQueryInitializeEXT(rq, topLevelAS, " << flagsString
259 << ", cullMask, origin, tMin, direction, tMax);\n"
260 << " while (rayQueryProceedEXT(rq)) {\n"
261 << " if (rayQueryGetIntersectionTypeEXT(rq, false) == gl_RayQueryCandidateIntersectionTriangleEXT) {\n"
262 << " outputVal = 1;\n"
263 << " }\n"
264 << " }\n"
265 << " if (rayQueryGetIntersectionTypeEXT(rq, true) == gl_RayQueryCommittedIntersectionTriangleEXT) {\n"
266 << " outputVal = 2;\n"
267 << " }\n"
268 << " modes.values[index] = outputVal;\n"
269 << " index += " << kNumThreadsAtOnce << ";\n"
270 << " }\n";
271
272 if (m_params.shaderSourceType == SST_VERTEX_SHADER)
273 {
274 std::ostringstream vert;
275 vert << sharedHeader.str() << "void main()\n"
276 << "{\n"
277 << " uint index = gl_VertexIndex.x;\n"
278 << mainLoop.str() << " gl_PointSize = 1.0f;\n"
279 << "}\n";
280
281 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str()) << buildOptions;
282 }
283 else if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
284 {
285 std::ostringstream rgen;
286 rgen << sharedHeader.str() << "#extension GL_EXT_ray_tracing : require\n"
287 << "void main()\n"
288 << "{\n"
289 << " uint index = gl_LaunchIDEXT.x;\n"
290 << mainLoop.str() << "}\n";
291
292 programCollection.glslSources.add("rgen")
293 << glu::RaygenSource(updateRayTracingGLSL(rgen.str())) << buildOptions;
294 }
295 else
296 {
297 DE_ASSERT(m_params.shaderSourceType == SST_COMPUTE_SHADER);
298 std::ostringstream comp;
299 comp << sharedHeader.str() << "layout(local_size_x=" << kLocalSize << ", local_size_y=1, local_size_z=1) in;\n"
300 << "\n"
301 << "void main()\n"
302 << "{\n"
303 << " uint index = gl_GlobalInvocationID.x;\n"
304 << mainLoop.str() << "}\n";
305
306 programCollection.glslSources.add("comp")
307 << glu::ComputeSource(updateRayTracingGLSL(comp.str())) << buildOptions;
308 }
309 }
310
311 TestInstance *OpacityMicromapCase::createInstance(Context &context) const
312 {
313 return new OpacityMicromapInstance(context, m_params);
314 }
315
316 OpacityMicromapInstance::OpacityMicromapInstance(Context &context, const TestParams &params)
317 : TestInstance(context)
318 , m_params(params)
319 {
320 }
321
322 tcu::Vec2 calcSubtriangleCentroid(const uint32_t index, const uint32_t subdivisionLevel)
323 {
324 if (subdivisionLevel == 0)
325 {
326 return tcu::Vec2(1.0f / 3.0f, 1.0f / 3.0f);
327 }
328
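// The bit manipulation below appears to invert the opacity micromap space-filling
// ("bird") curve: it decodes the linear subtriangle index into discrete barycentric
// (iu, iv, iw) coordinates, from which the subtriangle centroid is derived.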
329 uint32_t d = index;
330
331 d = ((d >> 1) & 0x22222222u) | ((d << 1) & 0x44444444u) | (d & 0x99999999u);
332 d = ((d >> 2) & 0x0c0c0c0cu) | ((d << 2) & 0x30303030u) | (d & 0xc3c3c3c3u);
333 d = ((d >> 4) & 0x00f000f0u) | ((d << 4) & 0x0f000f00u) | (d & 0xf00ff00fu);
334 d = ((d >> 8) & 0x0000ff00u) | ((d << 8) & 0x00ff0000u) | (d & 0xff0000ffu);
335
336 uint32_t f = (d & 0xffffu) | ((d << 16) & ~d);
337
338 f ^= (f >> 1) & 0x7fff7fffu;
339 f ^= (f >> 2) & 0x3fff3fffu;
340 f ^= (f >> 4) & 0x0fff0fffu;
341 f ^= (f >> 8) & 0x00ff00ffu;
342
343 uint32_t t = (f ^ d) >> 16;
344
345 uint32_t iu = ((f & ~t) | (d & ~t) | (~d & ~f & t)) & 0xffffu;
346 uint32_t iv = ((f >> 16) ^ d) & 0xffffu;
347 uint32_t iw = ((~f & ~t) | (d & ~t) | (~d & f & t)) & ((1 << subdivisionLevel) - 1);
348
349 const float scale = 1.0f / float(1 << subdivisionLevel);
350
351 float u = (1.0f / 3.0f) * scale;
352 float v = (1.0f / 3.0f) * scale;
353
354 // we need to only look at "subdivisionLevel" bits
355 iu = iu & ((1 << subdivisionLevel) - 1);
356 iv = iv & ((1 << subdivisionLevel) - 1);
357 iw = iw & ((1 << subdivisionLevel) - 1);
358
359 bool upright = (iu & 1) ^ (iv & 1) ^ (iw & 1);
360 if (!upright)
361 {
362 iu = iu + 1;
363 iv = iv + 1;
364 }
365
366 if (upright)
367 {
368 return tcu::Vec2(u + (float)iu * scale, v + (float)iv * scale);
369 }
370 else
371 {
372 return tcu::Vec2((float)iu * scale - u, (float)iv * scale - v);
373 }
374 }
375
376 static Move<VkRenderPass> makeEmptyRenderPass(const DeviceInterface &vk, const VkDevice device)
377 {
378 std::vector<VkSubpassDescription> subpassDescriptions;
379 std::vector<VkSubpassDependency> subpassDependencies;
380
381 const VkSubpassDescription description = {
382 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
383 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
384 0u, // uint32_t inputAttachmentCount;
385 DE_NULL, // const VkAttachmentReference* pInputAttachments;
386 0u, // uint32_t colorAttachmentCount;
387 DE_NULL, // const VkAttachmentReference* pColorAttachments;
388 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
389 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
390 0, // uint32_t preserveAttachmentCount;
391 DE_NULL // const uint32_t* pPreserveAttachments;
392 };
393 subpassDescriptions.push_back(description);
394
395 const VkSubpassDependency dependency = {
396 0u, // uint32_t srcSubpass;
397 0u, // uint32_t dstSubpass;
398 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, // VkPipelineStageFlags srcStageMask;
399 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, // VkPipelineStageFlags dstStageMask;
400 VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags srcAccessMask;
401 VK_ACCESS_MEMORY_READ_BIT, // VkAccessFlags dstAccessMask;
402 0u // VkDependencyFlags dependencyFlags;
403 };
404 subpassDependencies.push_back(dependency);
405
406 const VkRenderPassCreateInfo renderPassInfo = {
407 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
408 DE_NULL, // const void* pNext;
409 static_cast<VkRenderPassCreateFlags>(0u), // VkRenderPassCreateFlags flags;
410 0u, // uint32_t attachmentCount;
411 DE_NULL, // const VkAttachmentDescription* pAttachments;
412 static_cast<uint32_t>(subpassDescriptions.size()), // uint32_t subpassCount;
413 &subpassDescriptions[0], // const VkSubpassDescription* pSubpasses;
414 static_cast<uint32_t>(subpassDependencies.size()), // uint32_t dependencyCount;
415 subpassDependencies.size() > 0 ? &subpassDependencies[0] : DE_NULL // const VkSubpassDependency* pDependencies;
416 };
417
418 return createRenderPass(vk, device, &renderPassInfo);
419 }
420
421 Move<VkPipeline> makeGraphicsPipeline(const DeviceInterface &vk, const VkDevice device,
422 const VkPipelineLayout pipelineLayout, const VkRenderPass renderPass,
423 const VkShaderModule vertexModule, const uint32_t subpass)
424 {
425 VkExtent2D renderSize{256, 256};
426 VkViewport viewport = makeViewport(renderSize);
427 VkRect2D scissor = makeRect2D(renderSize);
428
429 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
430 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
431 DE_NULL, // const void* pNext
432 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
433 1u, // uint32_t viewportCount
434 &viewport, // const VkViewport* pViewports
435 1u, // uint32_t scissorCount
436 &scissor // const VkRect2D* pScissors
437 };
438
439 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
440 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType
441 DE_NULL, // const void* pNext
442 0u, // VkPipelineInputAssemblyStateCreateFlags flags
443 VK_PRIMITIVE_TOPOLOGY_POINT_LIST, // VkPrimitiveTopology topology
444 VK_FALSE // VkBool32 primitiveRestartEnable
445 };
446
447 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
448 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType
449 DE_NULL, // const void* pNext
450 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags
451 0u, // uint32_t vertexBindingDescriptionCount
452 DE_NULL, // const VkVertexInputBindingDescription* pVertexBindingDescriptions
453 0u, // uint32_t vertexAttributeDescriptionCount
454 DE_NULL, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions
455 };
456
457 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
458 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType
459 DE_NULL, // const void* pNext
460 0u, // VkPipelineRasterizationStateCreateFlags flags
461 VK_FALSE, // VkBool32 depthClampEnable
462 VK_TRUE, // VkBool32 rasterizerDiscardEnable
463 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode
464 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode
465 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace
466 VK_FALSE, // VkBool32 depthBiasEnable
467 0.0f, // float depthBiasConstantFactor
468 0.0f, // float depthBiasClamp
469 0.0f, // float depthBiasSlopeFactor
470 1.0f // float lineWidth
471 };
472
473 return makeGraphicsPipeline(
474 vk, // const DeviceInterface& vk
475 device, // const VkDevice device
476 pipelineLayout, // const VkPipelineLayout pipelineLayout
477 vertexModule, // const VkShaderModule vertexShaderModule
478 DE_NULL, // const VkShaderModule tessellationControlModule
479 DE_NULL, // const VkShaderModule tessellationEvalModule
480 DE_NULL, // const VkShaderModule geometryShaderModule
481 DE_NULL, // const VkShaderModule fragmentShaderModule
482 renderPass, // const VkRenderPass renderPass
483 subpass, // const uint32_t subpass
484 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
485 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* inputAssemblyStateCreateInfo
486 DE_NULL, // const VkPipelineTessellationStateCreateInfo* tessStateCreateInfo
487 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* viewportStateCreateInfo
488 &rasterizationStateCreateInfo); // const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
489 }
490
491 tcu::TestStatus OpacityMicromapInstance::iterate(void)
492 {
493 const auto &vkd = m_context.getDeviceInterface();
494 const auto device = m_context.getDevice();
495 auto &alloc = m_context.getDefaultAllocator();
496 const auto qIndex = m_context.getUniversalQueueFamilyIndex();
497 const auto queue = m_context.getUniversalQueue();
498
499 // Command pool and buffer.
500 const auto cmdPool = makeCommandPool(vkd, device, qIndex);
501 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
502 const auto cmdBuffer = cmdBufferPtr.get();
503
504 beginCommandBuffer(vkd, cmdBuffer);
505
506 // Build acceleration structures.
507 auto topLevelAS = makeTopLevelAccelerationStructure();
508 auto bottomLevelAS = makeBottomLevelAccelerationStructure();
509
510 uint32_t numSubtriangles = levelToSubtriangles(m_params.subdivisionLevel);
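// 2-state maps use 1 bit per subtriangle and 4-state maps use 2 bits; the byte counts
// below are conservative upper bounds on the data actually consumed.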
511 uint32_t opacityMicromapBytes = (m_params.mode == 2) ? (numSubtriangles + 3) / 4 : (numSubtriangles + 1) / 2;
512
513 // Generate random micromap data
514 std::vector<uint8_t> opacityMicromapData;
515
516 de::Random rnd(m_params.seed);
517
518 while (opacityMicromapData.size() < opacityMicromapBytes)
519 {
520 opacityMicromapData.push_back(rnd.getUint8());
521 }
522
523 // Build a micromap (ignore infrastructure for now)
524 // Create the buffer with the mask and index data
525 // Allocate a fairly conservative bound for now
526 VkBufferUsageFlags2CreateInfoKHR bufferUsageFlags2 = initVulkanStructure();
528 const auto micromapDataBufferSize = static_cast<VkDeviceSize>(1024 + opacityMicromapBytes);
529 auto micromapDataBufferCreateInfo =
530 makeBufferCreateInfo(micromapDataBufferSize, VK_BUFFER_USAGE_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT |
531 VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
532 if (m_params.useMaintenance5)
533 {
534 bufferUsageFlags2.usage = (VkBufferUsageFlagBits2KHR)micromapDataBufferCreateInfo.usage;
535 micromapDataBufferCreateInfo.pNext = &bufferUsageFlags2;
536 micromapDataBufferCreateInfo.usage = 0;
537 }
538
539 BufferWithMemory micromapDataBuffer(vkd, device, alloc, micromapDataBufferCreateInfo,
540 MemoryRequirement::HostVisible | MemoryRequirement::DeviceAddress);
541 auto &micromapDataBufferAlloc = micromapDataBuffer.getAllocation();
542 void *micromapDataBufferData = micromapDataBufferAlloc.getHostPtr();
543
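// Layout of the host-visible staging buffer: VkMicromapTriangleEXT entries at offset 0,
// the per-triangle index buffer at offset 256, and the raw opacity data at offset 512.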
544 const int TriangleOffset = 0;
545 const int IndexOffset = 256;
546 const int DataOffset = 512;
547
548 // Fill out VkMicromapUsageEXT with size information
549 VkMicromapUsageEXT mmUsage = {};
550 mmUsage.count = 1;
551 mmUsage.subdivisionLevel = m_params.subdivisionLevel;
552 mmUsage.format =
553 m_params.mode == 2 ? VK_OPACITY_MICROMAP_FORMAT_2_STATE_EXT : VK_OPACITY_MICROMAP_FORMAT_4_STATE_EXT;
554
555 {
556 uint8_t *data = static_cast<uint8_t *>(micromapDataBufferData);
557
558 deMemset(data, 0, size_t(micromapDataBufferCreateInfo.size));
559
560 DE_STATIC_ASSERT(sizeof(VkMicromapTriangleEXT) == 8);
561
562 // Triangle information
563 VkMicromapTriangleEXT *tri = (VkMicromapTriangleEXT *)(&data[TriangleOffset]);
564 tri->dataOffset = 0;
565 tri->subdivisionLevel = uint16_t(mmUsage.subdivisionLevel);
566 tri->format = uint16_t(mmUsage.format);
567
568 // Micromap data
569 {
570 for (size_t i = 0; i < opacityMicromapData.size(); i++)
571 {
572 data[DataOffset + i] = opacityMicromapData[i];
573 }
574 }
575
576 // Index information
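// An index value >= 0 selects a VkMicromapTriangleEXT entry; a negative value is one of
// the special indices, overriding the micromap for the whole triangle. When
// useSpecialIndex is set, m_params.mode already holds that special-index bit pattern.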
577 *((uint32_t *)&data[IndexOffset]) = m_params.useSpecialIndex ? m_params.mode : 0;
578 }
579
580 // Query the size from the build info
581 VkMicromapBuildInfoEXT mmBuildInfo = {
582 VK_STRUCTURE_TYPE_MICROMAP_BUILD_INFO_EXT, // VkStructureType sType;
583 DE_NULL, // const void* pNext;
584 VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT, // VkMicromapTypeEXT type;
585 0, // VkBuildMicromapFlagsEXT flags;
586 VK_BUILD_MICROMAP_MODE_BUILD_EXT, // VkBuildMicromapModeEXT mode;
587 DE_NULL, // VkMicromapEXT dstMicromap;
588 1, // uint32_t usageCountsCount;
589 &mmUsage, // const VkMicromapUsageEXT* pUsageCounts;
590 DE_NULL, // const VkMicromapUsageEXT* const* ppUsageCounts;
591 makeDeviceOrHostAddressConstKHR(DE_NULL), // VkDeviceOrHostAddressConstKHR data;
592 makeDeviceOrHostAddressKHR(DE_NULL), // VkDeviceOrHostAddressKHR scratchData;
593 makeDeviceOrHostAddressConstKHR(DE_NULL), // VkDeviceOrHostAddressConstKHR triangleArray;
594 0, // VkDeviceSize triangleArrayStride;
595 };
596
597 VkMicromapBuildSizesInfoEXT sizeInfo = {
598 VK_STRUCTURE_TYPE_MICROMAP_BUILD_SIZES_INFO_EXT, // VkStructureType sType;
599 DE_NULL, // const void* pNext;
600 0, // VkDeviceSize micromapSize;
601 0, // VkDeviceSize buildScratchSize;
602 false, // VkBool32 discardable;
603 };
604
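// The address fields can stay null for the size query; they are filled in right before
// the actual build below.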
605 vkd.getMicromapBuildSizesEXT(device, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &mmBuildInfo, &sizeInfo);
606
607 // Create the backing and scratch storage
608 const auto micromapBackingBufferCreateInfo = makeBufferCreateInfo(
609 sizeInfo.micromapSize, VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
610 BufferWithMemory micromapBackingBuffer(vkd, device, alloc, micromapBackingBufferCreateInfo,
611 MemoryRequirement::Local | MemoryRequirement::DeviceAddress);
612
613 auto micromapScratchBufferCreateInfo =
614 makeBufferCreateInfo(sizeInfo.buildScratchSize,
615 VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
616 if (m_params.useMaintenance5)
617 {
618 bufferUsageFlags2.usage = (VkBufferUsageFlagBits2KHR)micromapScratchBufferCreateInfo.usage;
619 micromapScratchBufferCreateInfo.pNext = &bufferUsageFlags2;
620 micromapScratchBufferCreateInfo.usage = 0;
621 }
622 BufferWithMemory micromapScratchBuffer(vkd, device, alloc, micromapScratchBufferCreateInfo,
623 MemoryRequirement::Local | MemoryRequirement::DeviceAddress);
624
625 de::MovePtr<BufferWithMemory> copyMicromapBackingBuffer;
626
627 // Create the micromap itself
628 VkMicromapCreateInfoEXT maCreateInfo = {
629 VK_STRUCTURE_TYPE_MICROMAP_CREATE_INFO_EXT, // VkStructureType sType;
630 DE_NULL, // const void* pNext;
631 0, // VkMicromapCreateFlagsEXT createFlags;
632 micromapBackingBuffer.get(), // VkBuffer buffer;
633 0, // VkDeviceSize offset;
634 sizeInfo.micromapSize, // VkDeviceSize size;
635 VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT, // VkMicromapTypeEXT type;
636 0ull // VkDeviceAddress deviceAddress;
637 };
638
639 VkMicromapEXT micromap = VK_NULL_HANDLE, origMicromap = VK_NULL_HANDLE;
640
641 VK_CHECK(vkd.createMicromapEXT(device, &maCreateInfo, nullptr, &micromap));
642
643 // Do the build
644 mmBuildInfo.dstMicromap = micromap;
645 mmBuildInfo.data = makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(), DataOffset);
646 mmBuildInfo.triangleArray = makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(), TriangleOffset);
647 mmBuildInfo.scratchData = makeDeviceOrHostAddressKHR(vkd, device, micromapScratchBuffer.get(), 0);
648
649 vkd.cmdBuildMicromapsEXT(cmdBuffer, 1, &mmBuildInfo);
650
651 {
652 VkMemoryBarrier2 memoryBarrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
653 NULL,
654 VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT,
655 VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT,
656 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
657 VK_ACCESS_2_MICROMAP_READ_BIT_EXT};
658 VkDependencyInfoKHR dependencyInfo = {
659 VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType;
660 DE_NULL, // const void* pNext;
661 0u, // VkDependencyFlags dependencyFlags;
662 1u, // uint32_t memoryBarrierCount;
663 &memoryBarrier, // const VkMemoryBarrier2KHR* pMemoryBarriers;
664 0u, // uint32_t bufferMemoryBarrierCount;
665 DE_NULL, // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers;
666 0u, // uint32_t imageMemoryBarrierCount;
667 DE_NULL, // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers;
668 };
669
670 vkd.cmdPipelineBarrier2(cmdBuffer, &dependencyInfo);
671 }
672
673 if (m_params.copyType != CT_NONE)
674 {
675 copyMicromapBackingBuffer = de::MovePtr<BufferWithMemory>(
676 new BufferWithMemory(vkd, device, alloc, micromapBackingBufferCreateInfo,
677 MemoryRequirement::Local | MemoryRequirement::DeviceAddress));
678
679 origMicromap = micromap;
680
681 maCreateInfo.buffer = copyMicromapBackingBuffer->get();
682
683 VK_CHECK(vkd.createMicromapEXT(device, &maCreateInfo, nullptr, &micromap));
684
685 VkCopyMicromapInfoEXT copyMicromapInfo = {
686 VK_STRUCTURE_TYPE_COPY_MICROMAP_INFO_EXT, // VkStructureType sType;
687 DE_NULL, // const void* pNext;
688 origMicromap, // VkMicromapEXT src;
689 micromap, // VkMicromapEXT dst;
690 VK_COPY_MICROMAP_MODE_CLONE_EXT // VkCopyMicromapModeEXT mode;
691 };
692
693 vkd.cmdCopyMicromapEXT(cmdBuffer, &copyMicromapInfo);
694
695 {
696 VkMemoryBarrier2 memoryBarrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
697 NULL,
698 VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT,
699 VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT,
700 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
701 VK_ACCESS_2_MICROMAP_READ_BIT_EXT};
702 VkDependencyInfoKHR dependencyInfo = {
703 VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType;
704 DE_NULL, // const void* pNext;
705 0u, // VkDependencyFlags dependencyFlags;
706 1u, // uint32_t memoryBarrierCount;
707 &memoryBarrier, // const VkMemoryBarrier2KHR* pMemoryBarriers;
708 0u, // uint32_t bufferMemoryBarrierCount;
709 DE_NULL, // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers;
710 0u, // uint32_t imageMemoryBarrierCount;
711 DE_NULL, // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers;
712 };
713
716
717 vkd.cmdPipelineBarrier2(cmdBuffer, &dependencyInfo);
718 }
719 }
720
721 // Attach the micromap to the geometry
722 VkAccelerationStructureTrianglesOpacityMicromapEXT opacityGeometryMicromap = {
723 VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT, //VkStructureType sType;
724 DE_NULL, //void* pNext;
725 VK_INDEX_TYPE_UINT32, //VkIndexType indexType;
726 makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(),
727 IndexOffset), //VkDeviceOrHostAddressConstKHR indexBuffer;
728 0u, //VkDeviceSize indexStride;
729 0u, //uint32_t baseTriangle;
730 1u, //uint32_t usageCountsCount;
731 &mmUsage, //const VkMicromapUsageEXT* pUsageCounts;
732 DE_NULL, //const VkMicromapUsageEXT* const* ppUsageCounts;
733 micromap //VkMicromapEXT micromap;
734 };
735
736 const std::vector<tcu::Vec3> triangle = {
737 tcu::Vec3(0.0f, 0.0f, 0.0f),
738 tcu::Vec3(1.0f, 0.0f, 0.0f),
739 tcu::Vec3(0.0f, 1.0f, 0.0f),
740 };
741
742 bottomLevelAS->addGeometry(triangle, true /*is triangles*/, 0, &opacityGeometryMicromap);
743 if (m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE)
744 bottomLevelAS->setBuildFlags(VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_EXT);
745 bottomLevelAS->createAndBuild(vkd, device, cmdBuffer, alloc);
746 de::SharedPtr<BottomLevelAccelerationStructure> blasSharedPtr(bottomLevelAS.release());
747
748 VkGeometryInstanceFlagsKHR instanceFlags = 0;
749
750 if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE)
751 instanceFlags |= VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_EXT;
752 if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE)
753 instanceFlags |= VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR;
754 if (m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE)
755 instanceFlags |= VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_EXT;
756
757 topLevelAS->setInstanceCount(1);
758 topLevelAS->addInstance(blasSharedPtr, identityMatrix3x4, 0, 0xFFu, 0u, instanceFlags);
759 topLevelAS->createAndBuild(vkd, device, cmdBuffer, alloc);
760
761 // One ray per subtriangle for this test
762 uint32_t numRays = numSubtriangles;
763
764 // SSBO buffer for origins.
765 const auto originsBufferSize = static_cast<VkDeviceSize>(sizeof(tcu::Vec4) * numRays);
766 auto originsBufferInfo = makeBufferCreateInfo(originsBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
767 if (m_params.useMaintenance5)
768 {
769 bufferUsageFlags2.usage = (VkBufferUsageFlagBits2KHR)originsBufferInfo.usage;
770 originsBufferInfo.pNext = &bufferUsageFlags2;
771 originsBufferInfo.usage = 0;
772 }
773 BufferWithMemory originsBuffer(vkd, device, alloc, originsBufferInfo, MemoryRequirement::HostVisible);
774 auto &originsBufferAlloc = originsBuffer.getAllocation();
775 void *originsBufferData = originsBufferAlloc.getHostPtr();
776
777 std::vector<tcu::Vec4> origins;
778 std::vector<uint32_t> expectedOutputModes;
779 origins.reserve(numRays);
780 expectedOutputModes.reserve(numRays);
781
782 // Fill in vector of expected outputs
783 for (uint32_t index = 0; index < numRays; index++)
784 {
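// Baseline when the micromap contribution is ignored: the triangle is fully opaque if an
// instance or ray flag forces opacity, otherwise it behaves as ordinary non-opaque
// geometry (modelled here as FULLY_UNKNOWN_OPAQUE).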
785 uint32_t state =
786 m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE | TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG) ?
787 VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT :
788 VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT;
789
790 if (!(m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE))
791 {
792 if (m_params.useSpecialIndex)
793 {
794 state = m_params.mode;
795 }
796 else
797 {
798 if (m_params.mode == 2)
799 {
800 uint8_t byte = opacityMicromapData[index / 8];
801 state = (byte >> (index % 8)) & 0x1;
802 }
803 else
804 {
805 DE_ASSERT(m_params.mode == 4);
806 uint8_t byte = opacityMicromapData[index / 4];
807 state = (byte >> 2 * (index % 4)) & 0x3;
808 }
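// The bitwise complement maps opacity values onto the special indices (assuming the EXT
// enum values): 0 -> FULLY_TRANSPARENT (-1), 1 -> FULLY_OPAQUE (-2),
// 2 -> FULLY_UNKNOWN_TRANSPARENT (-3), 3 -> FULLY_UNKNOWN_OPAQUE (-4).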
809 // Process in SPECIAL_INDEX number space
810 state = ~state;
811 }
812
813 if (m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE | TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG))
814 {
815 if (state == uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_TRANSPARENT_EXT))
816 state = uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT);
817 if (state == uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT))
818 state = uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT);
819 }
820 }
821
822 if (state != uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT))
823 {
824 if (m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE | TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG))
825 {
826 state = uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT);
827 }
828 else if (state != uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT))
829 {
830 state = uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT);
831 }
832 }
833
834 if (state == uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT))
835 {
836 expectedOutputModes.push_back(0);
837 }
838 else if (state == uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT))
839 {
840 expectedOutputModes.push_back(1);
841 }
842 else if (state == uint32_t(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT))
843 {
844 expectedOutputModes.push_back(2);
845 }
846 else
847 {
848 DE_ASSERT(false);
849 }
850 }
851
852 for (uint32_t index = 0; index < numRays; index++)
853 {
854 tcu::Vec2 centroid = calcSubtriangleCentroid(index, m_params.subdivisionLevel);
855 origins.push_back(tcu::Vec4(centroid.x(), centroid.y(), 1.0, 0.0));
856 }
857
858 const auto originsBufferSizeSz = static_cast<size_t>(originsBufferSize);
859 deMemcpy(originsBufferData, origins.data(), originsBufferSizeSz);
860 flushAlloc(vkd, device, originsBufferAlloc);
861
862 // Storage buffer for output modes
863 const auto outputModesBufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t) * numRays);
864 const auto outputModesBufferInfo = makeBufferCreateInfo(outputModesBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
865 BufferWithMemory outputModesBuffer(vkd, device, alloc, outputModesBufferInfo, MemoryRequirement::HostVisible);
866 auto &outputModesBufferAlloc = outputModesBuffer.getAllocation();
867 void *outputModesBufferData = outputModesBufferAlloc.getHostPtr();
868 deMemset(outputModesBufferData, 0xFF, static_cast<size_t>(outputModesBufferSize));
869 flushAlloc(vkd, device, outputModesBufferAlloc);
870
871 // Descriptor set layout.
872 DescriptorSetLayoutBuilder dsLayoutBuilder;
873 dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, VK_SHADER_STAGE_ALL);
874 dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
875 dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
876 const auto setLayout = dsLayoutBuilder.build(vkd, device);
877
878 // Pipeline layout.
879 const auto pipelineLayout = makePipelineLayout(vkd, device, setLayout.get());
880
881 // Descriptor pool and set.
882 DescriptorPoolBuilder poolBuilder;
883 poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
884 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
885 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
886 const auto descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
887 const auto descriptorSet = makeDescriptorSet(vkd, device, descriptorPool.get(), setLayout.get());
888
889 // Update descriptor set.
890 {
891 const VkWriteDescriptorSetAccelerationStructureKHR accelDescInfo = {
892 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,
893 nullptr,
894 1u,
895 topLevelAS.get()->getPtr(),
896 };
897 const auto inStorageBufferInfo = makeDescriptorBufferInfo(originsBuffer.get(), 0ull, VK_WHOLE_SIZE);
898 const auto storageBufferInfo = makeDescriptorBufferInfo(outputModesBuffer.get(), 0ull, VK_WHOLE_SIZE);
899
900 DescriptorSetUpdateBuilder updateBuilder;
901 updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u),
902 VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, &accelDescInfo);
903 updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u),
904 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inStorageBufferInfo);
905 updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(2u),
906 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &storageBufferInfo);
907 updateBuilder.update(vkd, device);
908 }
909
910 Move<VkPipeline> pipeline;
911 de::MovePtr<BufferWithMemory> raygenSBT;
912 Move<VkRenderPass> renderPass;
913 Move<VkFramebuffer> framebuffer;
914
915 if (m_params.shaderSourceType == SST_VERTEX_SHADER)
916 {
917 auto vertexModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("vert"), 0);
918
919 renderPass = makeEmptyRenderPass(vkd, device);
920 framebuffer = makeFramebuffer(vkd, device, *renderPass, 0u, DE_NULL, 32, 32);
921 pipeline = makeGraphicsPipeline(vkd, device, *pipelineLayout, *renderPass, *vertexModule, 0);
922
923 beginRenderPass(vkd, cmdBuffer, *renderPass, *framebuffer, makeRect2D(32u, 32u));
924 vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.get());
925 vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout.get(), 0u, 1u,
926 &descriptorSet.get(), 0u, nullptr);
927 vkd.cmdDraw(cmdBuffer, kNumThreadsAtOnce, 1, 0, 0);
928 endRenderPass(vkd, cmdBuffer);
929 }
930 else if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
931 {
932 const auto &vki = m_context.getInstanceInterface();
933 const auto physDev = m_context.getPhysicalDevice();
934
935 // Shader module.
936 auto rgenModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0);
937
938 // Get some ray tracing properties.
939 uint32_t shaderGroupHandleSize = 0u;
940 uint32_t shaderGroupBaseAlignment = 1u;
941 {
942 const auto rayTracingPropertiesKHR = makeRayTracingProperties(vki, physDev);
943 shaderGroupHandleSize = rayTracingPropertiesKHR->getShaderGroupHandleSize();
944 shaderGroupBaseAlignment = rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
945 }
946
947 auto raygenSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
948 auto unusedSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
949
950 {
951 const auto rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
952 rayTracingPipeline->setCreateFlags(VK_PIPELINE_CREATE_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT);
953 if (m_params.useMaintenance5)
954 rayTracingPipeline->setCreateFlags2(VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT);
955 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, 0);
956
957 pipeline = rayTracingPipeline->createPipeline(vkd, device, pipelineLayout.get());
958
959 raygenSBT = rayTracingPipeline->createShaderBindingTable(
960 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
961 raygenSBTRegion = makeStridedDeviceAddressRegionKHR(
962 getBufferDeviceAddress(vkd, device, raygenSBT->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
963 }
964
965 // Trace rays.
966 vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipeline.get());
967 vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipelineLayout.get(), 0u, 1u,
968 &descriptorSet.get(), 0u, nullptr);
969 vkd.cmdTraceRaysKHR(cmdBuffer, &raygenSBTRegion, &unusedSBTRegion, &unusedSBTRegion, &unusedSBTRegion,
970 kNumThreadsAtOnce, 1u, 1u);
971 }
972 else
973 {
974 DE_ASSERT(m_params.shaderSourceType == SST_COMPUTE_SHADER);
975 // Shader module.
976 const auto compModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("comp"), 0);
977
978 // Pipeline.
979 const VkPipelineShaderStageCreateInfo shaderInfo = {
980 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
981 nullptr, // const void* pNext;
982 0u, // VkPipelineShaderStageCreateFlags flags;
983 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
984 compModule.get(), // VkShaderModule module;
985 "main", // const char* pName;
986 nullptr, // const VkSpecializationInfo* pSpecializationInfo;
987 };
988 const VkComputePipelineCreateInfo pipelineInfo = {
989 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
990 nullptr, // const void* pNext;
991 0u, // VkPipelineCreateFlags flags;
992 shaderInfo, // VkPipelineShaderStageCreateInfo stage;
993 pipelineLayout.get(), // VkPipelineLayout layout;
994 DE_NULL, // VkPipeline basePipelineHandle;
995 0, // int32_t basePipelineIndex;
996 };
997 pipeline = createComputePipeline(vkd, device, DE_NULL, &pipelineInfo);
998
999 // Dispatch work with ray queries.
1000 vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.get());
1001 vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout.get(), 0u, 1u,
1002 &descriptorSet.get(), 0u, nullptr);
1003 vkd.cmdDispatch(cmdBuffer, kWorkGroupCount, 1u, 1u);
1004 }
1005
1006 // Barrier for the output buffer.
1007 const auto bufferBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
1008 vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
1009 &bufferBarrier, 0u, nullptr, 0u, nullptr);
1010
1011 endCommandBuffer(vkd, cmdBuffer);
1012 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
1013
1014 if (micromap != VK_NULL_HANDLE)
1015 vkd.destroyMicromapEXT(device, micromap, DE_NULL);
1016 if (origMicromap != VK_NULL_HANDLE)
1017 vkd.destroyMicromapEXT(device, origMicromap, DE_NULL);
1018
1019 // Verify results.
1020 std::vector<uint32_t> outputData(expectedOutputModes.size());
1021 const auto outputModesBufferSizeSz = static_cast<size_t>(outputModesBufferSize);
1022
1023 invalidateAlloc(vkd, device, outputModesBufferAlloc);
1024 DE_ASSERT(de::dataSize(outputData) == outputModesBufferSizeSz);
1025 deMemcpy(outputData.data(), outputModesBufferData, outputModesBufferSizeSz);
1026
1027 for (size_t i = 0; i < outputData.size(); ++i)
1028 {
1029 const auto &outVal = outputData[i];
1030 const auto &expectedVal = expectedOutputModes[i];
1031
1032 if (outVal != expectedVal)
1033 {
1034 std::ostringstream msg;
1035 msg << "Unexpected value found for ray " << i << ": expected " << expectedVal << " and found " << outVal
1036 << ";";
1037 TCU_FAIL(msg.str());
1038 }
1039 #if 0
1040 else
1041 {
1042 std::ostringstream msg;
1043 msg << "Expected value found for ray " << i << ": expected " << expectedVal << " and found " << outVal << ";\n"; // XXX Debug remove
1044 std::cout << msg.str();
1045 }
1046 #endif
1047 }
1048
1049 return tcu::TestStatus::pass("Pass");
1050 }
1051
1052 } // namespace
1053
1054 constexpr uint32_t kMaxSubdivisionLevel = 15;
1055
1056 void addBasicTests(tcu::TestCaseGroup *group)
1057 {
1058 uint32_t seed = 1614674687u;
1059
1060 const struct
1061 {
1062 ShaderSourceType shaderSourceType;
1063 ShaderSourcePipeline shaderSourcePipeline;
1064 std::string name;
1065 } shaderSourceTypes[] = {
1066 {SST_VERTEX_SHADER, SSP_GRAPHICS_PIPELINE, "vertex_shader"},
1067 {
1068 SST_COMPUTE_SHADER,
1069 SSP_COMPUTE_PIPELINE,
1070 "compute_shader",
1071 },
1072 {
1073 SST_RAY_GENERATION_SHADER,
1074 SSP_RAY_TRACING_PIPELINE,
1075 "rgen_shader",
1076 },
1077 };
1078
1079 const struct
1080 {
1081 bool useSpecialIndex;
1082 std::string name;
1083 } specialIndexUse[] = {
1084 {false, "map_value"},
1085 {true, "special_index"},
1086 };
1087
1088 auto &testCtx = group->getTestContext();
1089
1090 for (size_t shaderSourceNdx = 0; shaderSourceNdx < DE_LENGTH_OF_ARRAY(shaderSourceTypes); ++shaderSourceNdx)
1091 {
1092 de::MovePtr<tcu::TestCaseGroup> sourceTypeGroup(
1093 new tcu::TestCaseGroup(group->getTestContext(), shaderSourceTypes[shaderSourceNdx].name.c_str()));
1094
1095 for (uint32_t testFlagMask = 0; testFlagMask < TEST_FLAG_BIT_LAST; testFlagMask++)
1096 {
1097 std::string maskName = "";
1098
1099 for (uint32_t bit = 0; bit < testFlagBitNames.size(); bit++)
1100 {
1101 if (testFlagMask & (1 << bit))
1102 {
1103 if (maskName != "")
1104 maskName += "_";
1105 maskName += testFlagBitNames[bit];
1106 }
1107 }
1108 if (maskName == "")
1109 maskName = "NoFlags";
1110
1111 de::MovePtr<tcu::TestCaseGroup> testFlagGroup(
1112 new tcu::TestCaseGroup(sourceTypeGroup->getTestContext(), maskName.c_str()));
1113
1114 for (size_t specialIndexNdx = 0; specialIndexNdx < DE_LENGTH_OF_ARRAY(specialIndexUse); ++specialIndexNdx)
1115 {
1116 de::MovePtr<tcu::TestCaseGroup> specialGroup(new tcu::TestCaseGroup(
1117 testFlagGroup->getTestContext(), specialIndexUse[specialIndexNdx].name.c_str()));
1118
1119 if (specialIndexUse[specialIndexNdx].useSpecialIndex)
1120 {
1121 for (uint32_t specialIndex = 0; specialIndex < 4; specialIndex++)
1122 {
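// mode stores ~specialIndex, i.e. the bit patterns 0xFFFFFFFF..0xFFFFFFFC, which are the
// negative special-index values -1..-4 defined by VK_EXT_opacity_micromap.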
1123 TestParams testParams{
1124 shaderSourceTypes[shaderSourceNdx].shaderSourceType,
1125 shaderSourceTypes[shaderSourceNdx].shaderSourcePipeline,
1126 specialIndexUse[specialIndexNdx].useSpecialIndex,
1127 testFlagMask,
1128 0,
1129 ~specialIndex,
1130 seed++,
1131 CT_NONE,
1132 false,
1133 };
1134
1135 std::stringstream css;
1136 css << specialIndex;
1137
1138 specialGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), testParams));
1139 }
1140 testFlagGroup->addChild(specialGroup.release());
1141 }
1142 else
1143 {
1144 struct
1145 {
1146 uint32_t mode;
1147 std::string name;
1148 } modes[] = {{2, "2"}, {4, "4"}};
1149 for (uint32_t modeNdx = 0; modeNdx < DE_LENGTH_OF_ARRAY(modes); ++modeNdx)
1150 {
1151 de::MovePtr<tcu::TestCaseGroup> modeGroup(
1152 new tcu::TestCaseGroup(testFlagGroup->getTestContext(), modes[modeNdx].name.c_str()));
1153
1154 for (uint32_t level = 0; level <= kMaxSubdivisionLevel; level++)
1155 {
1156 TestParams testParams{
1157 shaderSourceTypes[shaderSourceNdx].shaderSourceType,
1158 shaderSourceTypes[shaderSourceNdx].shaderSourcePipeline,
1159 specialIndexUse[specialIndexNdx].useSpecialIndex,
1160 testFlagMask,
1161 level,
1162 modes[modeNdx].mode,
1163 seed++,
1164 CT_NONE,
1165 false,
1166 };
1167
1168 std::stringstream css;
1169 css << "level_" << level;
1170
1171 modeGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), testParams));
1172 }
1173 specialGroup->addChild(modeGroup.release());
1174 }
1175 testFlagGroup->addChild(specialGroup.release());
1176 }
1177 }
1178
1179 sourceTypeGroup->addChild(testFlagGroup.release());
1180 }
1181
1182 group->addChild(sourceTypeGroup.release());
1183 }
1184 }
1185
1186 void addCopyTests(tcu::TestCaseGroup *group)
1187 {
1188 uint32_t seed = 1614674688u;
1189
1190 auto &testCtx = group->getTestContext();
1191
1192 for (size_t copyTypeNdx = CT_FIRST_ACTIVE; copyTypeNdx < CT_NUM_COPY_TYPES; ++copyTypeNdx)
1193 {
1194 de::MovePtr<tcu::TestCaseGroup> copyTypeGroup(
1195 new tcu::TestCaseGroup(group->getTestContext(), copyTypeNames[copyTypeNdx].c_str()));
1196
1197 struct
1198 {
1199 uint32_t mode;
1200 std::string name;
1201 } modes[] = {{2, "2"}, {4, "4"}};
1202 for (uint32_t modeNdx = 0; modeNdx < DE_LENGTH_OF_ARRAY(modes); ++modeNdx)
1203 {
1204 de::MovePtr<tcu::TestCaseGroup> modeGroup(
1205 new tcu::TestCaseGroup(copyTypeGroup->getTestContext(), modes[modeNdx].name.c_str()));
1206
1207 for (uint32_t level = 0; level <= kMaxSubdivisionLevel; level++)
1208 {
1209 TestParams testParams{
1210 SST_COMPUTE_SHADER,
1211 SSP_COMPUTE_PIPELINE,
1212 false,
1213 0,
1214 level,
1215 modes[modeNdx].mode,
1216 seed++,
1217 (CopyType)copyTypeNdx,
1218 false,
1219 };
1220
1221 std::stringstream css;
1222 css << "level_" << level;
1223
1224 modeGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), testParams));
1225 }
1226 copyTypeGroup->addChild(modeGroup.release());
1227 }
1228 group->addChild(copyTypeGroup.release());
1229 }
1230
1231 {
1232 TestParams testParams{
1233 SST_COMPUTE_SHADER, SSP_COMPUTE_PIPELINE, false, 0, 0, 2, 1, CT_FIRST_ACTIVE, true,
1234 };
1235 de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(group->getTestContext(), "misc"));
1236 miscGroup->addChild(new OpacityMicromapCase(testCtx, "maintenance5", testParams));
1237 group->addChild(miscGroup.release());
1238 }
1239 }
1240
1241 tcu::TestCaseGroup *createOpacityMicromapTests(tcu::TestContext &testCtx)
1242 {
1243 // Test acceleration structures using opacity micromap with ray query
1244 de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "opacity_micromap"));
1245
1246 // Test accessing all formats of opacity micromaps
1247 addTestGroup(group.get(), "render", addBasicTests);
1248 // Test copying opacity micromaps
1249 addTestGroup(group.get(), "copy", addCopyTests);
1250
1251 return group.release();
1252 }
1253
1254 } // namespace RayQuery
1255 } // namespace vkt
1256