/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2022 The Khronos Group Inc.
 * Copyright (c) 2022 NVIDIA Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Ray Query Opacity Micromap Tests
 *//*--------------------------------------------------------------------*/

#include "vktRayQueryOpacityMicromapTests.hpp"
#include "vktTestCase.hpp"

#include "vkRayTracingUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkBufferWithMemory.hpp"
#include "vkBuilderUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktTestGroupUtil.hpp"

#include "deUniquePtr.hpp"
#include "deRandom.hpp"

#include <sstream>
#include <vector>
#include <iostream>

namespace vkt
{
namespace RayQuery
{

namespace
{

using namespace vk;

enum ShaderSourcePipeline
{
    SSP_GRAPHICS_PIPELINE,
    SSP_COMPUTE_PIPELINE,
    SSP_RAY_TRACING_PIPELINE
};

enum ShaderSourceType
{
    SST_VERTEX_SHADER,
    SST_COMPUTE_SHADER,
    SST_RAY_GENERATION_SHADER,
};

enum TestFlagBits
{
    TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE             = 1U << 0,
    TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG             = 1U << 1,
    TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE = 1U << 2,
    TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE            = 1U << 3,
    TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG            = 1U << 4,
    TEST_FLAG_BIT_LAST                              = 1U << 5,
};

std::vector<std::string> testFlagBitNames =
{
    "force_opaque_instance",
    "force_opaque_ray_flag",
    "disable_opacity_micromap_instance",
    "force_2_state_instance",
    "force_2_state_ray_flag",
};
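// Note: the entries above must stay in TestFlagBits order; addBasicTests() builds each test
// group name by checking the flag mask bit by bit and appending the matching name.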

enum CopyType {
    CT_NONE,
    CT_FIRST_ACTIVE,
    CT_CLONE = CT_FIRST_ACTIVE,
    CT_COMPACT,
    CT_NUM_COPY_TYPES,
};

std::vector<std::string> copyTypeNames
{
    "None",
    "Clone",
    "Compact",
};

struct TestParams
{
    ShaderSourceType     shaderSourceType;
    ShaderSourcePipeline shaderSourcePipeline;
    bool                 useSpecialIndex;
    deUint32             testFlagMask;
    deUint32             subdivisionLevel; // Must be 0 for useSpecialIndex
    deUint32             mode;             // Special index value if useSpecialIndex, 2 or 4 for number of states otherwise
    deUint32             seed;
    CopyType             copyType;
};

static constexpr deUint32 kNumThreadsAtOnce = 1024;
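// Number of shader invocations launched per test (vertices, compute invocations or rays,
// depending on the pipeline); each invocation walks over the rays with this stride.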


class OpacityMicromapCase : public TestCase
{
public:
    OpacityMicromapCase          (tcu::TestContext& testCtx, const std::string& name, const std::string& description, const TestParams& params);
    virtual ~OpacityMicromapCase (void) {}

    virtual void          checkSupport   (Context& context) const;
    virtual void          initPrograms   (vk::SourceCollections& programCollection) const;
    virtual TestInstance* createInstance (Context& context) const;

protected:
    TestParams m_params;
};

class OpacityMicromapInstance : public TestInstance
{
public:
    OpacityMicromapInstance          (Context& context, const TestParams& params);
    virtual ~OpacityMicromapInstance (void) {}

    virtual tcu::TestStatus iterate (void);

protected:
    TestParams m_params;
};

OpacityMicromapCase::OpacityMicromapCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description, const TestParams& params)
    : TestCase (testCtx, name, description)
    , m_params (params)
{}

void OpacityMicromapCase::checkSupport (Context& context) const
{
    context.requireDeviceFunctionality("VK_KHR_ray_query");
    context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
    context.requireDeviceFunctionality("VK_EXT_opacity_micromap");

    const VkPhysicalDeviceRayQueryFeaturesKHR& rayQueryFeaturesKHR = context.getRayQueryFeatures();
    if (rayQueryFeaturesKHR.rayQuery == DE_FALSE)
        TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayQueryFeaturesKHR.rayQuery");

    const VkPhysicalDeviceAccelerationStructureFeaturesKHR& accelerationStructureFeaturesKHR = context.getAccelerationStructureFeatures();
    if (accelerationStructureFeaturesKHR.accelerationStructure == DE_FALSE)
        TCU_THROW(TestError, "VK_KHR_ray_query requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure");

    const VkPhysicalDeviceOpacityMicromapFeaturesEXT& opacityMicromapFeaturesEXT = context.getOpacityMicromapFeaturesEXT();
    if (opacityMicromapFeaturesEXT.micromap == DE_FALSE)
        TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceOpacityMicromapFeaturesEXT.micromap");

    if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
    {
        context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");

        const VkPhysicalDeviceRayTracingPipelineFeaturesKHR& rayTracingPipelineFeaturesKHR = context.getRayTracingPipelineFeatures();

        if (rayTracingPipelineFeaturesKHR.rayTracingPipeline == DE_FALSE)
            TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipeline");
    }

    switch (m_params.shaderSourceType)
    {
    case SST_VERTEX_SHADER:
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
        break;
    default:
        break;
    }

    const VkPhysicalDeviceOpacityMicromapPropertiesEXT& opacityMicromapPropertiesEXT = context.getOpacityMicromapPropertiesEXT();

    if (!m_params.useSpecialIndex)
    {
        switch (m_params.mode)
        {
        case 2:
            if (m_params.subdivisionLevel > opacityMicromapPropertiesEXT.maxOpacity2StateSubdivisionLevel)
                TCU_THROW(NotSupportedError, "Requires a higher supported 2 state subdivision level");
            break;
        case 4:
            if (m_params.subdivisionLevel > opacityMicromapPropertiesEXT.maxOpacity4StateSubdivisionLevel)
                TCU_THROW(NotSupportedError, "Requires a higher supported 4 state subdivision level");
            break;
        default:
            DE_ASSERT(false);
            break;
        }
    }
}

static deUint32 levelToSubtriangles (deUint32 level)
{
    return 1 << (2 * level);
}
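// Each subdivision level splits every (sub)triangle into four, so level N yields
// 4^N microtriangles (e.g. level 3 -> 64).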

void OpacityMicromapCase::initPrograms (vk::SourceCollections& programCollection) const
{
    const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);

    deUint32 numRays = levelToSubtriangles(m_params.subdivisionLevel);

    std::string flagsString = (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG) ? "gl_RayFlagsOpaqueEXT" : "gl_RayFlagsNoneEXT";

    if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG)
        flagsString += " | gl_RayFlagsForceOpacityMicromap2StateEXT";

    std::ostringstream sharedHeader;
    sharedHeader
        << "#version 460 core\n"
        << "#extension GL_EXT_ray_query : require\n"
        << "#extension GL_EXT_opacity_micromap : require\n"
        << "\n"
        << "layout(set=0, binding=0) uniform accelerationStructureEXT topLevelAS;\n"
        << "layout(set=0, binding=1, std430) buffer RayOrigins {\n"
        << "  vec4 values[" << numRays << "];\n"
        << "} origins;\n"
        << "layout(set=0, binding=2, std430) buffer OutputModes {\n"
        << "  uint values[" << numRays << "];\n"
        << "} modes;\n";

    std::ostringstream mainLoop;
    mainLoop
        << "  while (index < " << numRays << ") {\n"
        << "    const uint cullMask = 0xFF;\n"
        << "    const vec3 origin = origins.values[index].xyz;\n"
        << "    const vec3 direction = vec3(0.0, 0.0, -1.0);\n"
        << "    const float tMin = 0.0f;\n"
        << "    const float tMax = 2.0f;\n"
        << "    uint outputVal = 0;\n" // 0 for miss, 1 for non-opaque, 2 for opaque
        << "    rayQueryEXT rq;\n"
        << "    rayQueryInitializeEXT(rq, topLevelAS, " << flagsString << ", cullMask, origin, tMin, direction, tMax);\n"
        << "    while (rayQueryProceedEXT(rq)) {\n"
        << "      if (rayQueryGetIntersectionTypeEXT(rq, false) == gl_RayQueryCandidateIntersectionTriangleEXT) {\n"
        << "        outputVal = 1;\n"
        << "      }\n"
        << "    }\n"
        << "    if (rayQueryGetIntersectionTypeEXT(rq, true) == gl_RayQueryCommittedIntersectionTriangleEXT) {\n"
        << "      outputVal = 2;\n"
        << "    }\n"
        << "    modes.values[index] = outputVal;\n"
        << "    index += " << kNumThreadsAtOnce << ";\n"
        << "  }\n";
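    // Every invocation strides through the rays by kNumThreadsAtOnce. Output encoding:
    // 0 = miss (fully transparent), 1 = the hit only ever appeared as a non-opaque candidate
    // (the query never confirms it, so nothing is committed), 2 = an opaque hit was committed.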

    if (m_params.shaderSourceType == SST_VERTEX_SHADER) {
        std::ostringstream vert;
        vert
            << sharedHeader.str()
            << "void main()\n"
            << "{\n"
            << "  uint index = gl_VertexIndex.x;\n"
            << mainLoop.str()
            << "}\n"
            ;

        programCollection.glslSources.add("vert") << glu::VertexSource(vert.str()) << buildOptions;
    }
    else if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
    {
        std::ostringstream rgen;
        rgen
            << sharedHeader.str()
            << "#extension GL_EXT_ray_tracing : require\n"
            << "void main()\n"
            << "{\n"
            << "  uint index = gl_LaunchIDEXT.x;\n"
            << mainLoop.str()
            << "}\n"
            ;

        programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(rgen.str())) << buildOptions;
    }
    else
    {
        DE_ASSERT(m_params.shaderSourceType == SST_COMPUTE_SHADER);
        std::ostringstream comp;
        comp
            << sharedHeader.str()
            << "layout(local_size_x=1024, local_size_y=1, local_size_z=1) in;\n"
            << "\n"
            << "void main()\n"
            << "{\n"
            << "  uint index = gl_LocalInvocationID.x;\n"
            << mainLoop.str()
            << "}\n"
            ;

        programCollection.glslSources.add("comp") << glu::ComputeSource(updateRayTracingGLSL(comp.str())) << buildOptions;
    }
}

TestInstance* OpacityMicromapCase::createInstance (Context& context) const
{
    return new OpacityMicromapInstance(context, m_params);
}

OpacityMicromapInstance::OpacityMicromapInstance (Context& context, const TestParams& params)
    : TestInstance (context)
    , m_params     (params)
{}

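// Map a microtriangle index to the barycentric centroid of that subtriangle. The bit
// manipulation below inverts the hierarchical space-filling-curve ordering that
// VK_EXT_opacity_micromap uses for microtriangles, recovering discrete (u, v, w)
// coordinates and the subtriangle's orientation (upright or inverted) before converting
// them to a 2D centroid in barycentric space.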
tcu::Vec2 calcSubtriangleCentroid (const deUint32 index, const deUint32 subdivisionLevel)
{
    if (subdivisionLevel == 0) {
        return tcu::Vec2(1.0f/3.0f, 1.0f/3.0f);
    }

    deUint32 d = index;

    d = ((d >> 1) & 0x22222222u) | ((d << 1) & 0x44444444u) | (d & 0x99999999u);
    d = ((d >> 2) & 0x0c0c0c0cu) | ((d << 2) & 0x30303030u) | (d & 0xc3c3c3c3u);
    d = ((d >> 4) & 0x00f000f0u) | ((d << 4) & 0x0f000f00u) | (d & 0xf00ff00fu);
    d = ((d >> 8) & 0x0000ff00u) | ((d << 8) & 0x00ff0000u) | (d & 0xff0000ffu);

    deUint32 f = (d & 0xffffu) | ((d << 16) & ~d);

    f ^= (f >> 1) & 0x7fff7fffu;
    f ^= (f >> 2) & 0x3fff3fffu;
    f ^= (f >> 4) & 0x0fff0fffu;
    f ^= (f >> 8) & 0x00ff00ffu;

    deUint32 t = (f ^ d) >> 16;

    deUint32 iu = ((f & ~t) | (d & ~t) | (~d & ~f & t)) & 0xffffu;
    deUint32 iv = ((f >> 16) ^ d) & 0xffffu;
    deUint32 iw = ((~f & ~t) | (d & ~t) | (~d & f & t)) & ((1 << subdivisionLevel) - 1);

    const float scale = 1.0f / float(1 << subdivisionLevel);

    float u = (1.0f / 3.0f) * scale;
    float v = (1.0f / 3.0f) * scale;

    // we need to only look at "subdivisionLevel" bits
    iu = iu & ((1 << subdivisionLevel) - 1);
    iv = iv & ((1 << subdivisionLevel) - 1);
    iw = iw & ((1 << subdivisionLevel) - 1);

    bool upright = (iu & 1) ^ (iv & 1) ^ (iw & 1);
    if (!upright)
    {
        iu = iu + 1;
        iv = iv + 1;
    }

    if (upright)
    {
        return tcu::Vec2(
            u + (float)iu * scale,
            v + (float)iv * scale
        );
    }
    else
    {
        return tcu::Vec2(
            (float)iu * scale - u,
            (float)iv * scale - v
        );
    }
}

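// Render pass with no attachments: the vertex-shader variant writes its results through the
// storage buffer, so only an execution/memory dependency from vertex-shader writes to host
// reads is recorded here.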
static Move<VkRenderPass> makeEmptyRenderPass (const DeviceInterface& vk,
                                               const VkDevice         device)
{
    std::vector<VkSubpassDescription> subpassDescriptions;
    std::vector<VkSubpassDependency>  subpassDependencies;

    const VkSubpassDescription description =
    {
        (VkSubpassDescriptionFlags)0,    // VkSubpassDescriptionFlags flags;
        VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
        0u,                              // deUint32 inputAttachmentCount;
        DE_NULL,                         // const VkAttachmentReference* pInputAttachments;
        0u,                              // deUint32 colorAttachmentCount;
        DE_NULL,                         // const VkAttachmentReference* pColorAttachments;
        DE_NULL,                         // const VkAttachmentReference* pResolveAttachments;
        DE_NULL,                         // const VkAttachmentReference* pDepthStencilAttachment;
        0,                               // deUint32 preserveAttachmentCount;
        DE_NULL                          // const deUint32* pPreserveAttachments;
    };
    subpassDescriptions.push_back(description);

    const VkSubpassDependency dependency =
    {
        0u,                                  // deUint32 srcSubpass;
        0u,                                  // deUint32 dstSubpass;
        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, // VkPipelineStageFlags srcStageMask;
        VK_PIPELINE_STAGE_HOST_BIT,          // VkPipelineStageFlags dstStageMask;
        VK_ACCESS_SHADER_WRITE_BIT,          // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT,             // VkAccessFlags dstAccessMask;
        0u                                   // VkDependencyFlags dependencyFlags;
    };
    subpassDependencies.push_back(dependency);

    const VkRenderPassCreateInfo renderPassInfo =
    {
        VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,                         // VkStructureType sType;
        DE_NULL,                                                           // const void* pNext;
        static_cast<VkRenderPassCreateFlags>(0u),                          // VkRenderPassCreateFlags flags;
        0u,                                                                // deUint32 attachmentCount;
        DE_NULL,                                                           // const VkAttachmentDescription* pAttachments;
        static_cast<deUint32>(subpassDescriptions.size()),                 // deUint32 subpassCount;
        &subpassDescriptions[0],                                           // const VkSubpassDescription* pSubpasses;
        static_cast<deUint32>(subpassDependencies.size()),                 // deUint32 dependencyCount;
        subpassDependencies.size() > 0 ? &subpassDependencies[0] : DE_NULL // const VkSubpassDependency* pDependencies;
    };

    return createRenderPass(vk, device, &renderPassInfo);
}

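// Minimal graphics pipeline for the vertex-shader variant: point-list topology, no vertex
// inputs and rasterizerDiscardEnable set, since the draw only exists to run the ray-query
// loop in the vertex shader.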
Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
                                       const VkDevice         device,
                                       const VkPipelineLayout pipelineLayout,
                                       const VkRenderPass     renderPass,
                                       const VkShaderModule   vertexModule,
                                       const deUint32         subpass)
{
    VkExtent2D renderSize { 256, 256 };
    VkViewport viewport = makeViewport(renderSize);
    VkRect2D   scissor  = makeRect2D(renderSize);

    const VkPipelineViewportStateCreateInfo viewportStateCreateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
        DE_NULL,                                               // const void* pNext
        (VkPipelineViewportStateCreateFlags)0,                 // VkPipelineViewportStateCreateFlags flags
        1u,                                                    // deUint32 viewportCount
        &viewport,                                             // const VkViewport* pViewports
        1u,                                                    // deUint32 scissorCount
        &scissor                                               // const VkRect2D* pScissors
    };

    const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType
        DE_NULL,                                                     // const void* pNext
        0u,                                                          // VkPipelineInputAssemblyStateCreateFlags flags
        VK_PRIMITIVE_TOPOLOGY_POINT_LIST,                            // VkPrimitiveTopology topology
        VK_FALSE                                                     // VkBool32 primitiveRestartEnable
    };

    const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType
        DE_NULL,                                                   // const void* pNext
        (VkPipelineVertexInputStateCreateFlags)0,                  // VkPipelineVertexInputStateCreateFlags flags
        0u,                                                        // deUint32 vertexBindingDescriptionCount
        DE_NULL,                                                   // const VkVertexInputBindingDescription* pVertexBindingDescriptions
        0u,                                                        // deUint32 vertexAttributeDescriptionCount
        DE_NULL,                                                   // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions
    };

    const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo =
    {
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType
        DE_NULL,                                                    // const void* pNext
        0u,                                                         // VkPipelineRasterizationStateCreateFlags flags
        VK_FALSE,                                                   // VkBool32 depthClampEnable
        VK_TRUE,                                                    // VkBool32 rasterizerDiscardEnable
        VK_POLYGON_MODE_FILL,                                       // VkPolygonMode polygonMode
        VK_CULL_MODE_NONE,                                          // VkCullModeFlags cullMode
        VK_FRONT_FACE_COUNTER_CLOCKWISE,                            // VkFrontFace frontFace
        VK_FALSE,                                                   // VkBool32 depthBiasEnable
        0.0f,                                                       // float depthBiasConstantFactor
        0.0f,                                                       // float depthBiasClamp
        0.0f,                                                       // float depthBiasSlopeFactor
        1.0f                                                        // float lineWidth
    };

    return makeGraphicsPipeline(vk,                            // const DeviceInterface& vk
                                device,                        // const VkDevice device
                                pipelineLayout,                // const VkPipelineLayout pipelineLayout
                                vertexModule,                  // const VkShaderModule vertexShaderModule
                                DE_NULL,                       // const VkShaderModule tessellationControlModule
                                DE_NULL,                       // const VkShaderModule tessellationEvalModule
                                DE_NULL,                       // const VkShaderModule geometryShaderModule
                                DE_NULL,                       // const VkShaderModule fragmentShaderModule
                                renderPass,                    // const VkRenderPass renderPass
                                subpass,                       // const deUint32 subpass
                                &vertexInputStateCreateInfo,   // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
                                &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* inputAssemblyStateCreateInfo
                                DE_NULL,                       // const VkPipelineTessellationStateCreateInfo* tessStateCreateInfo
                                &viewportStateCreateInfo,      // const VkPipelineViewportStateCreateInfo* viewportStateCreateInfo
                                &rasterizationStateCreateInfo); // const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
}

tcu::TestStatus OpacityMicromapInstance::iterate (void)
{
    const auto& vkd    = m_context.getDeviceInterface();
    const auto  device = m_context.getDevice();
    auto&       alloc  = m_context.getDefaultAllocator();
    const auto  qIndex = m_context.getUniversalQueueFamilyIndex();
    const auto  queue  = m_context.getUniversalQueue();

    // Command pool and buffer.
    const auto cmdPool      = makeCommandPool(vkd, device, qIndex);
    const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const auto cmdBuffer    = cmdBufferPtr.get();

    beginCommandBuffer(vkd, cmdBuffer);

    // Build acceleration structures.
    auto topLevelAS    = makeTopLevelAccelerationStructure();
    auto bottomLevelAS = makeBottomLevelAccelerationStructure();

    deUint32 numSubtriangles      = levelToSubtriangles(m_params.subdivisionLevel);
    deUint32 opacityMicromapBytes = (m_params.mode == 2) ? (numSubtriangles + 3) / 4 : (numSubtriangles + 1) / 2;
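    // 2-state micromaps store one bit per microtriangle and 4-state micromaps store two bits,
    // hence the packed byte counts above.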

    // Generate random micromap data
    std::vector<deUint8> opacityMicromapData;

    de::Random rnd(m_params.seed);

    while (opacityMicromapData.size() < opacityMicromapBytes) {
        opacityMicromapData.push_back(rnd.getUint8());
    }

    // Build a micromap (ignore infrastructure for now)
    // Create the buffer with the mask and index data
    // Allocate a fairly conservative bound for now
    const auto micromapDataBufferSize       = static_cast<VkDeviceSize>(1024 + opacityMicromapBytes);
    const auto micromapDataBufferCreateInfo = makeBufferCreateInfo(micromapDataBufferSize,
        VK_BUFFER_USAGE_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
    BufferWithMemory micromapDataBuffer(vkd, device, alloc, micromapDataBufferCreateInfo, MemoryRequirement::HostVisible | MemoryRequirement::DeviceAddress);
    auto& micromapDataBufferAlloc = micromapDataBuffer.getAllocation();
    void* micromapDataBufferData  = micromapDataBufferAlloc.getHostPtr();

    const int TriangleOffset = 0;
    const int IndexOffset    = 256;
    const int DataOffset     = 512;
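    // Layout of the single host-visible input buffer: the VkMicromapTriangleEXT record at
    // offset 0, the per-triangle index data at offset 256 and the raw opacity values at
    // offset 512; the 1024-byte headroom in the allocation above covers all three regions.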

    // Fill out VkMicromapUsageEXT with size information
    VkMicromapUsageEXT mmUsage = {};
    mmUsage.count            = 1;
    mmUsage.subdivisionLevel = m_params.subdivisionLevel;
    mmUsage.format           = m_params.mode == 2 ? VK_OPACITY_MICROMAP_FORMAT_2_STATE_EXT : VK_OPACITY_MICROMAP_FORMAT_4_STATE_EXT;

    {
        deUint8* data = static_cast<deUint8*>(micromapDataBufferData);

        deMemset(data, 0, size_t(micromapDataBufferCreateInfo.size));

        DE_STATIC_ASSERT(sizeof(VkMicromapTriangleEXT) == 8);

        // Triangle information
        VkMicromapTriangleEXT* tri = (VkMicromapTriangleEXT*)(&data[TriangleOffset]);
        tri->dataOffset       = 0;
        tri->subdivisionLevel = uint16_t(mmUsage.subdivisionLevel);
        tri->format           = uint16_t(mmUsage.format);

        // Micromap data
        {
            for (size_t i = 0; i < opacityMicromapData.size(); i++) {
                data[DataOffset + i] = opacityMicromapData[i];
            }
        }

        // Index information
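        // A single 32-bit index follows: 0 selects the only triangle in the micromap, while in
        // special-index mode the value is m_params.mode, which already holds the negative
        // VK_OPACITY_MICROMAP_SPECIAL_INDEX_*_EXT encoding (~0u == FULLY_TRANSPARENT == -1, etc.).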
        *((deUint32*)&data[IndexOffset]) = m_params.useSpecialIndex ? m_params.mode : 0;
    }

    // Query the size from the build info
    VkMicromapBuildInfoEXT mmBuildInfo = {
        VK_STRUCTURE_TYPE_MICROMAP_BUILD_INFO_EXT, // VkStructureType sType;
        DE_NULL,                                   // const void* pNext;
        VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT,     // VkMicromapTypeEXT type;
        0,                                         // VkBuildMicromapFlagsEXT flags;
        VK_BUILD_MICROMAP_MODE_BUILD_EXT,          // VkBuildMicromapModeEXT mode;
        DE_NULL,                                   // VkMicromapEXT dstMicromap;
        1,                                         // uint32_t usageCountsCount;
        &mmUsage,                                  // const VkMicromapUsageEXT* pUsageCounts;
        DE_NULL,                                   // const VkMicromapUsageEXT* const* ppUsageCounts;
        makeDeviceOrHostAddressConstKHR(DE_NULL),  // VkDeviceOrHostAddressConstKHR data;
        makeDeviceOrHostAddressKHR(DE_NULL),       // VkDeviceOrHostAddressKHR scratchData;
        makeDeviceOrHostAddressConstKHR(DE_NULL),  // VkDeviceOrHostAddressConstKHR triangleArray;
        0,                                         // VkDeviceSize triangleArrayStride;
    };

    VkMicromapBuildSizesInfoEXT sizeInfo = {
        VK_STRUCTURE_TYPE_MICROMAP_BUILD_SIZES_INFO_EXT, // VkStructureType sType;
        DE_NULL,                                         // const void* pNext;
        0,                                               // VkDeviceSize micromapSize;
        0,                                               // VkDeviceSize buildScratchSize;
        DE_FALSE,                                        // VkBool32 discardable;
    };

    vkd.getMicromapBuildSizesEXT(device, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &mmBuildInfo, &sizeInfo);
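    // Only the usage counts (plus type and flags) should influence the returned sizes here; the
    // data, scratch and triangle-array addresses are filled in just before the actual build below.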

    // Create the backing and scratch storage
    const auto micromapBackingBufferCreateInfo = makeBufferCreateInfo(sizeInfo.micromapSize,
        VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
    BufferWithMemory micromapBackingBuffer(vkd, device, alloc, micromapBackingBufferCreateInfo, MemoryRequirement::Local | MemoryRequirement::DeviceAddress);

    const auto micromapScratchBufferCreateInfo = makeBufferCreateInfo(sizeInfo.buildScratchSize,
        VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
    BufferWithMemory micromapScratchBuffer(vkd, device, alloc, micromapScratchBufferCreateInfo, MemoryRequirement::Local | MemoryRequirement::DeviceAddress);

    de::MovePtr<BufferWithMemory> copyMicromapBackingBuffer;

    // Create the micromap itself
    VkMicromapCreateInfoEXT maCreateInfo = {
        VK_STRUCTURE_TYPE_MICROMAP_CREATE_INFO_EXT, // VkStructureType sType;
        DE_NULL,                                    // const void* pNext;
        0,                                          // VkMicromapCreateFlagsEXT createFlags;
        micromapBackingBuffer.get(),                // VkBuffer buffer;
        0,                                          // VkDeviceSize offset;
        sizeInfo.micromapSize,                      // VkDeviceSize size;
        VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT,      // VkMicromapTypeEXT type;
        0ull                                        // VkDeviceAddress deviceAddress;
    };

    VkMicromapEXT micromap, origMicromap;

    VK_CHECK(vkd.createMicromapEXT(device, &maCreateInfo, nullptr, &micromap));

    // Do the build
    mmBuildInfo.dstMicromap   = micromap;
    mmBuildInfo.data          = makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(), DataOffset);
    mmBuildInfo.triangleArray = makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(), TriangleOffset);
    mmBuildInfo.scratchData   = makeDeviceOrHostAddressKHR(vkd, device, micromapScratchBuffer.get(), 0);

    vkd.cmdBuildMicromapsEXT(cmdBuffer, 1, &mmBuildInfo);

    {
        VkMemoryBarrier2 memoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER_2, NULL,
            VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT, VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT,
            VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, VK_ACCESS_2_MICROMAP_READ_BIT_EXT };
        VkDependencyInfoKHR dependencyInfo = {
            VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType;
            DE_NULL,                               // const void* pNext;
            0u,                                    // VkDependencyFlags dependencyFlags;
            1u,                                    // uint32_t memoryBarrierCount;
            &memoryBarrier,                        // const VkMemoryBarrier2KHR* pMemoryBarriers;
            0u,                                    // uint32_t bufferMemoryBarrierCount;
            DE_NULL,                               // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers;
            0u,                                    // uint32_t imageMemoryBarrierCount;
            DE_NULL,                               // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers;
        };

        vkd.cmdPipelineBarrier2(cmdBuffer, &dependencyInfo);
    }

    if (m_params.copyType != CT_NONE) {
        copyMicromapBackingBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
            vkd, device, alloc, micromapBackingBufferCreateInfo, MemoryRequirement::Local));

        origMicromap = micromap;

        maCreateInfo.buffer = copyMicromapBackingBuffer->get();

        VK_CHECK(vkd.createMicromapEXT(device, &maCreateInfo, nullptr, &micromap));

        VkCopyMicromapInfoEXT copyMicromapInfo = {
            VK_STRUCTURE_TYPE_COPY_MICROMAP_INFO_EXT, // VkStructureType sType;
            DE_NULL,                                  // const void* pNext;
            origMicromap,                             // VkMicromapEXT src;
            micromap,                                 // VkMicromapEXT dst;
            VK_COPY_MICROMAP_MODE_CLONE_EXT           // VkCopyMicromapModeEXT mode;
        };

        vkd.cmdCopyMicromapEXT(cmdBuffer, &copyMicromapInfo);

        {
            VkMemoryBarrier2 memoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER_2, NULL,
                VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT, VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT,
                VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, VK_ACCESS_2_MICROMAP_READ_BIT_EXT };
            VkDependencyInfoKHR dependencyInfo = {
                VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR, // VkStructureType sType;
                DE_NULL,                               // const void* pNext;
                0u,                                    // VkDependencyFlags dependencyFlags;
                1u,                                    // uint32_t memoryBarrierCount;
                &memoryBarrier,                        // const VkMemoryBarrier2KHR* pMemoryBarriers;
                0u,                                    // uint32_t bufferMemoryBarrierCount;
                DE_NULL,                               // const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers;
                0u,                                    // uint32_t imageMemoryBarrierCount;
                DE_NULL,                               // const VkImageMemoryBarrier2KHR* pImageMemoryBarriers;
            };

            dependencyInfo.memoryBarrierCount = 1;
            dependencyInfo.pMemoryBarriers    = &memoryBarrier;

            vkd.cmdPipelineBarrier2(cmdBuffer, &dependencyInfo);
        }
    }

    // Attach the micromap to the geometry
    VkAccelerationStructureTrianglesOpacityMicromapEXT opacityGeometryMicromap = {
        VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT,             // VkStructureType sType;
        DE_NULL,                                                                             // void* pNext;
        VK_INDEX_TYPE_UINT32,                                                                // VkIndexType indexType;
        makeDeviceOrHostAddressConstKHR(vkd, device, micromapDataBuffer.get(), IndexOffset), // VkDeviceOrHostAddressConstKHR indexBuffer;
        0u,                                                                                  // VkDeviceSize indexStride;
        0u,                                                                                  // uint32_t baseTriangle;
        1u,                                                                                  // uint32_t usageCountsCount;
        &mmUsage,                                                                            // const VkMicromapUsageEXT* pUsageCounts;
        DE_NULL,                                                                             // const VkMicromapUsageEXT* const* ppUsageCounts;
        micromap                                                                             // VkMicromapEXT micromap;
    };
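    // Each triangle in the BLAS geometry reads one 32-bit entry from indexBuffer (written at
    // IndexOffset above): either 0, selecting the single triangle of the micromap, or a negative
    // special-index value when useSpecialIndex is set.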

    const std::vector<tcu::Vec3> triangle =
    {
        tcu::Vec3(0.0f, 0.0f, 0.0f),
        tcu::Vec3(1.0f, 0.0f, 0.0f),
        tcu::Vec3(0.0f, 1.0f, 0.0f),
    };

    bottomLevelAS->addGeometry(triangle, true/*is triangles*/, 0, &opacityGeometryMicromap);
    if (m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE)
        bottomLevelAS->setBuildFlags(VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_EXT);
    bottomLevelAS->createAndBuild(vkd, device, cmdBuffer, alloc);
    de::SharedPtr<BottomLevelAccelerationStructure> blasSharedPtr (bottomLevelAS.release());

    VkGeometryInstanceFlagsKHR instanceFlags = 0;

    if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE)
        instanceFlags |= VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_EXT;
    if (m_params.testFlagMask & TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE)
        instanceFlags |= VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR;
    if (m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE)
        instanceFlags |= VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_EXT;

    topLevelAS->setInstanceCount(1);
    topLevelAS->addInstance(blasSharedPtr, identityMatrix3x4, 0, 0xFFu, 0u, instanceFlags);
    topLevelAS->createAndBuild(vkd, device, cmdBuffer, alloc);

    // One ray per subtriangle for this test
    deUint32 numRays = numSubtriangles;

    // SSBO buffer for origins.
    const auto originsBufferSize = static_cast<VkDeviceSize>(sizeof(tcu::Vec4) * numRays);
    const auto originsBufferInfo = makeBufferCreateInfo(originsBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
    BufferWithMemory originsBuffer (vkd, device, alloc, originsBufferInfo, MemoryRequirement::HostVisible);
    auto& originsBufferAlloc = originsBuffer.getAllocation();
    void* originsBufferData  = originsBufferAlloc.getHostPtr();

    std::vector<tcu::Vec4> origins;
    std::vector<deUint32>  expectedOutputModes;
    origins.reserve(numRays);
    expectedOutputModes.reserve(numRays);

    // Fill in vector of expected outputs
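    // Opacity values are read as: 0 = transparent, 1 = opaque and, for 4-state maps,
    // 2 = unknown transparent, 3 = unknown opaque. The loop below folds each value into the
    // (negative) special-index number space via ~state, then applies the overrides the test
    // enables: disabling the micromap leaves the triangle as ordinary non-opaque geometry
    // (unknown opaque here), force-2-state collapses the unknown states, and force-opaque
    // promotes any non-transparent result to opaque.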
    for (deUint32 index = 0; index < numRays; index++) {
        deUint32 state = m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE | TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG) ?
            VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT : VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT;

        if (!(m_params.testFlagMask & TEST_FLAG_BIT_DISABLE_OPACITY_MICROMAP_INSTANCE))
        {
            if (m_params.useSpecialIndex)
            {
                state = m_params.mode;
            }
            else
            {
                if (m_params.mode == 2) {
                    deUint8 byte = opacityMicromapData[index / 8];
                    state = (byte >> (index % 8)) & 0x1;
                } else {
                    DE_ASSERT(m_params.mode == 4);
                    deUint8 byte = opacityMicromapData[index / 4];
                    state = (byte >> 2*(index % 4)) & 0x3;
                }
                // Process in SPECIAL_INDEX number space
                state = ~state;
            }

            if (m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_2_STATE_INSTANCE | TEST_FLAG_BIT_FORCE_2_STATE_RAY_FLAG))
            {
                if (state == deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_TRANSPARENT_EXT))
                    state = deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT);
                if (state == deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT))
                    state = deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT);
            }
        }

        if (state != deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT))
        {
            if (m_params.testFlagMask & (TEST_FLAG_BIT_FORCE_OPAQUE_INSTANCE | TEST_FLAG_BIT_FORCE_OPAQUE_RAY_FLAG))
            {
                state = deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT);
            } else if (state != deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT)) {
                state = deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT);
            }
        }

        if (state == deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT))
        {
            expectedOutputModes.push_back(0);
        }
        else if (state == deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT))
        {
            expectedOutputModes.push_back(1);
        }
        else if (state == deUint32(VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT))
        {
            expectedOutputModes.push_back(2);
        }
        else
        {
            DE_ASSERT(false);
        }
    }

    for (deUint32 index = 0; index < numRays; index++) {
        tcu::Vec2 centroid = calcSubtriangleCentroid(index, m_params.subdivisionLevel);
        origins.push_back(tcu::Vec4(centroid.x(), centroid.y(), 1.0, 0.0));
    }
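    // Each ray starts at its subtriangle centroid at z = 1 and the shader traces along -Z
    // (tMin = 0, tMax = 2) toward the triangle in the z = 0 plane, so every ray tests exactly
    // one microtriangle.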

    const auto originsBufferSizeSz = static_cast<size_t>(originsBufferSize);
    deMemcpy(originsBufferData, origins.data(), originsBufferSizeSz);
    flushAlloc(vkd, device, originsBufferAlloc);

    // Storage buffer for output modes
    const auto outputModesBufferSize = static_cast<VkDeviceSize>(sizeof(deUint32) * numRays);
    const auto outputModesBufferInfo = makeBufferCreateInfo(outputModesBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
    BufferWithMemory outputModesBuffer (vkd, device, alloc, outputModesBufferInfo, MemoryRequirement::HostVisible);
    auto& outputModesBufferAlloc = outputModesBuffer.getAllocation();
    void* outputModesBufferData  = outputModesBufferAlloc.getHostPtr();
    deMemset(outputModesBufferData, 0xFF, static_cast<size_t>(outputModesBufferSize));
    flushAlloc(vkd, device, outputModesBufferAlloc);

    // Descriptor set layout.
    DescriptorSetLayoutBuilder dsLayoutBuilder;
    dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, VK_SHADER_STAGE_ALL);
    dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
    dsLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
    const auto setLayout = dsLayoutBuilder.build(vkd, device);

    // Pipeline layout.
    const auto pipelineLayout = makePipelineLayout(vkd, device, setLayout.get());

    // Descriptor pool and set.
    DescriptorPoolBuilder poolBuilder;
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    const auto descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    const auto descriptorSet  = makeDescriptorSet(vkd, device, descriptorPool.get(), setLayout.get());

    // Update descriptor set.
    {
        const VkWriteDescriptorSetAccelerationStructureKHR accelDescInfo =
        {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,
            nullptr,
            1u,
            topLevelAS.get()->getPtr(),
        };
        const auto inStorageBufferInfo = makeDescriptorBufferInfo(originsBuffer.get(), 0ull, VK_WHOLE_SIZE);
        const auto storageBufferInfo   = makeDescriptorBufferInfo(outputModesBuffer.get(), 0ull, VK_WHOLE_SIZE);

        DescriptorSetUpdateBuilder updateBuilder;
        updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, &accelDescInfo);
        updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inStorageBufferInfo);
        updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(2u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &storageBufferInfo);
        updateBuilder.update(vkd, device);
    }

    Move<VkPipeline>              pipeline;
    de::MovePtr<BufferWithMemory> raygenSBT;
    Move<VkRenderPass>            renderPass;

    if (m_params.shaderSourceType == SST_VERTEX_SHADER)
    {
        auto vertexModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("vert"), 0);

        renderPass = makeEmptyRenderPass(vkd, device);
        pipeline   = makeGraphicsPipeline(vkd, device, *pipelineLayout, *renderPass, *vertexModule, 0);

        vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.get());
        vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
        vkd.cmdDraw(cmdBuffer, kNumThreadsAtOnce, 1, 0, 0);
    }
    else if (m_params.shaderSourceType == SST_RAY_GENERATION_SHADER)
    {
        const auto& vki     = m_context.getInstanceInterface();
        const auto  physDev = m_context.getPhysicalDevice();

        // Shader module.
        auto rgenModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0);

        // Get some ray tracing properties.
        deUint32 shaderGroupHandleSize    = 0u;
        deUint32 shaderGroupBaseAlignment = 1u;
        {
            const auto rayTracingPropertiesKHR = makeRayTracingProperties(vki, physDev);
            shaderGroupHandleSize    = rayTracingPropertiesKHR->getShaderGroupHandleSize();
            shaderGroupBaseAlignment = rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
        }

        auto raygenSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
        auto unusedSBTRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);

        {
            const auto rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
            rayTracingPipeline->setCreateFlags(VK_PIPELINE_CREATE_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT);
            rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, 0);

            pipeline = rayTracingPipeline->createPipeline(vkd, device, pipelineLayout.get());

            raygenSBT       = rayTracingPipeline->createShaderBindingTable(vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1);
            raygenSBTRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenSBT->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
        }

        // Trace rays.
        vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipeline.get());
        vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
        vkd.cmdTraceRaysKHR(cmdBuffer, &raygenSBTRegion, &unusedSBTRegion, &unusedSBTRegion, &unusedSBTRegion, kNumThreadsAtOnce, 1u, 1u);
    }
    else
    {
        DE_ASSERT(m_params.shaderSourceType == SST_COMPUTE_SHADER);
        // Shader module.
        const auto compModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get("comp"), 0);

        // Pipeline.
        const VkPipelineShaderStageCreateInfo shaderInfo =
        {
            VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                             // const void* pNext;
            0u,                                                  // VkPipelineShaderStageCreateFlags flags;
            VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
            compModule.get(),                                    // VkShaderModule module;
            "main",                                              // const char* pName;
            nullptr,                                             // const VkSpecializationInfo* pSpecializationInfo;
        };
        const VkComputePipelineCreateInfo pipelineInfo =
        {
            VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                        // const void* pNext;
            0u,                                             // VkPipelineCreateFlags flags;
            shaderInfo,                                     // VkPipelineShaderStageCreateInfo stage;
            pipelineLayout.get(),                           // VkPipelineLayout layout;
            DE_NULL,                                        // VkPipeline basePipelineHandle;
            0,                                              // deInt32 basePipelineIndex;
        };
        pipeline = createComputePipeline(vkd, device, DE_NULL, &pipelineInfo);

        // Dispatch work with ray queries.
        vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.get());
        vkd.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
        vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
    }

    // Barrier for the output buffer.
    const auto bufferBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
    vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u, &bufferBarrier, 0u, nullptr, 0u, nullptr);

    endCommandBuffer(vkd, cmdBuffer);
    submitCommandsAndWait(vkd, device, queue, cmdBuffer);

    // Verify results.
    std::vector<deUint32> outputData (expectedOutputModes.size());
    const auto outputModesBufferSizeSz = static_cast<size_t>(outputModesBufferSize);

    invalidateAlloc(vkd, device, outputModesBufferAlloc);
    DE_ASSERT(de::dataSize(outputData) == outputModesBufferSizeSz);
    deMemcpy(outputData.data(), outputModesBufferData, outputModesBufferSizeSz);

    for (size_t i = 0; i < outputData.size(); ++i)
    {
        const auto& outVal      = outputData[i];
        const auto& expectedVal = expectedOutputModes[i];

        if (outVal != expectedVal)
        {
            std::ostringstream msg;
            msg << "Unexpected value found for ray " << i << ": expected " << expectedVal << " and found " << outVal << ";";
            TCU_FAIL(msg.str());
        }
#if 0
        else
        {
            std::ostringstream msg;
            msg << "Expected value found for ray " << i << ": expected " << expectedVal << " and found " << outVal << ";\n"; // XXX Debug remove
            std::cout << msg.str();
        }
#endif
    }

    return tcu::TestStatus::pass("Pass");
}

} // anonymous

constexpr deUint32 kMaxSubdivisionLevel = 15;
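// Subdivision levels above the device's reported maxOpacity{2,4}StateSubdivisionLevel are
// rejected in checkSupport(), so generating cases up to level 15 only yields NotSupported
// results on devices with lower limits.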

void addBasicTests (tcu::TestCaseGroup* group)
{
    deUint32 seed = 1614674687u;

    const struct
    {
        ShaderSourceType     shaderSourceType;
        ShaderSourcePipeline shaderSourcePipeline;
        std::string          name;
    } shaderSourceTypes[] =
    {
        { SST_VERTEX_SHADER,         SSP_GRAPHICS_PIPELINE,    "vertex_shader"  },
        { SST_COMPUTE_SHADER,        SSP_COMPUTE_PIPELINE,     "compute_shader" },
        { SST_RAY_GENERATION_SHADER, SSP_RAY_TRACING_PIPELINE, "rgen_shader"    },
    };

    const struct
    {
        bool        useSpecialIndex;
        std::string name;
    } specialIndexUse[] =
    {
        { false, "map_value"     },
        { true,  "special_index" },
    };

    auto& testCtx = group->getTestContext();

    for (size_t shaderSourceNdx = 0; shaderSourceNdx < DE_LENGTH_OF_ARRAY(shaderSourceTypes); ++shaderSourceNdx)
    {
        de::MovePtr<tcu::TestCaseGroup> sourceTypeGroup(new tcu::TestCaseGroup(group->getTestContext(), shaderSourceTypes[shaderSourceNdx].name.c_str(), ""));

        for (deUint32 testFlagMask = 0; testFlagMask < TEST_FLAG_BIT_LAST; testFlagMask++)
        {
            std::string maskName = "";

            for (deUint32 bit = 0; bit < testFlagBitNames.size(); bit++)
            {
                if (testFlagMask & (1 << bit))
                {
                    if (maskName != "")
                        maskName += "_";
                    maskName += testFlagBitNames[bit];
                }
            }
            if (maskName == "")
                maskName = "NoFlags";

            de::MovePtr<tcu::TestCaseGroup> testFlagGroup(new tcu::TestCaseGroup(sourceTypeGroup->getTestContext(), maskName.c_str(), ""));

            for (size_t specialIndexNdx = 0; specialIndexNdx < DE_LENGTH_OF_ARRAY(specialIndexUse); ++specialIndexNdx)
            {
                de::MovePtr<tcu::TestCaseGroup> specialGroup(new tcu::TestCaseGroup(testFlagGroup->getTestContext(), specialIndexUse[specialIndexNdx].name.c_str(), ""));

                if (specialIndexUse[specialIndexNdx].useSpecialIndex)
                {
                    for (deUint32 specialIndex = 0; specialIndex < 4; specialIndex++) {
                        TestParams testParams
                        {
                            shaderSourceTypes[shaderSourceNdx].shaderSourceType,
                            shaderSourceTypes[shaderSourceNdx].shaderSourcePipeline,
                            specialIndexUse[specialIndexNdx].useSpecialIndex,
                            testFlagMask,
                            0,
                            ~specialIndex,
                            seed++,
                            CT_NONE,
                        };

                        std::stringstream css;
                        css << specialIndex;

                        specialGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), "", testParams));
                    }
                    testFlagGroup->addChild(specialGroup.release());
                }
                else
                {
                    struct {
                        deUint32    mode;
                        std::string name;
                    } modes[] =
                    {
                        { 2, "2" },
                        { 4, "4" }
                    };
                    for (deUint32 modeNdx = 0; modeNdx < DE_LENGTH_OF_ARRAY(modes); ++modeNdx)
                    {
                        de::MovePtr<tcu::TestCaseGroup> modeGroup(new tcu::TestCaseGroup(testFlagGroup->getTestContext(), modes[modeNdx].name.c_str(), ""));

                        for (deUint32 level = 0; level <= kMaxSubdivisionLevel; level++)
                        {
                            TestParams testParams
                            {
                                shaderSourceTypes[shaderSourceNdx].shaderSourceType,
                                shaderSourceTypes[shaderSourceNdx].shaderSourcePipeline,
                                specialIndexUse[specialIndexNdx].useSpecialIndex,
                                testFlagMask,
                                level,
                                modes[modeNdx].mode,
                                seed++,
                                CT_NONE,
                            };

                            std::stringstream css;
                            css << "level_" << level;

                            modeGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), "", testParams));
                        }
                        specialGroup->addChild(modeGroup.release());
                    }
                    testFlagGroup->addChild(specialGroup.release());
                }
            }

            sourceTypeGroup->addChild(testFlagGroup.release());
        }

        group->addChild(sourceTypeGroup.release());
    }
}

void addCopyTests (tcu::TestCaseGroup* group)
{
    deUint32 seed = 1614674688u;

    auto& testCtx = group->getTestContext();

    for (size_t copyTypeNdx = CT_FIRST_ACTIVE; copyTypeNdx < CT_NUM_COPY_TYPES; ++copyTypeNdx)
    {
        de::MovePtr<tcu::TestCaseGroup> copyTypeGroup(new tcu::TestCaseGroup(group->getTestContext(), copyTypeNames[copyTypeNdx].c_str(), ""));

        struct {
            deUint32    mode;
            std::string name;
        } modes[] =
        {
            { 2, "2" },
            { 4, "4" }
        };
        for (deUint32 modeNdx = 0; modeNdx < DE_LENGTH_OF_ARRAY(modes); ++modeNdx)
        {
            de::MovePtr<tcu::TestCaseGroup> modeGroup(new tcu::TestCaseGroup(copyTypeGroup->getTestContext(), modes[modeNdx].name.c_str(), ""));

            for (deUint32 level = 0; level <= kMaxSubdivisionLevel; level++)
            {
                TestParams testParams
                {
                    SST_COMPUTE_SHADER,
                    SSP_COMPUTE_PIPELINE,
                    false,
                    0,
                    level,
                    modes[modeNdx].mode,
                    seed++,
                    (CopyType)copyTypeNdx,
                };

                std::stringstream css;
                css << "level_" << level;

                modeGroup->addChild(new OpacityMicromapCase(testCtx, css.str().c_str(), "", testParams));
            }
            copyTypeGroup->addChild(modeGroup.release());
        }
        group->addChild(copyTypeGroup.release());
    }
}

tcu::TestCaseGroup* createOpacityMicromapTests (tcu::TestContext& testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "opacity_micromap", "Test acceleration structures using opacity micromap with ray query"));

    addTestGroup(group.get(), "render", "Test accessing all formats of opacity micromaps", addBasicTests);
    addTestGroup(group.get(), "copy", "Test copying opacity micromaps", addCopyTests);

    return group.release();
}

} // RayQuery
} // vkt