/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2019 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief SPIR-V assembly tests for shaders compiled from HLSL (cbuffer packing).
 *//*--------------------------------------------------------------------*/

#include "vktSpvAsmFromHlslTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkPrograms.hpp"
#include "vkObjUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkCmdUtil.hpp"

namespace vkt
{
namespace SpirVAssembly
{

namespace
{

using namespace vk;

enum TestType
{
	TT_CBUFFER_PACKING = 0,
};

struct TestConfig
{
	TestType type;
};

struct Programs
{
	void init (vk::SourceCollections& dst, TestConfig config) const
	{
		if (config.type == TT_CBUFFER_PACKING)
		{
			// HLSL shaders have a packing corner case that GLSL shaders cannot exhibit.
			// In the shader below, foo has an ArrayStride of 16, which leaves bar effectively
			// 'within' the end of the foo array. This is entirely valid in HLSL and, on Vulkan,
			// when the VK_EXT_scalar_block_layout extension is enabled.
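			// Resulting cbuffer layout (for illustration):
			//   byte  0: foo[0]
			//   byte 16: foo[1]   (ArrayStride 16, so the array nominally spans bytes 0..31)
			//   byte 20: bar      (packoffset c1.y = register c1, component y)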
			std::string source(
				"cbuffer cbIn\n"
				"{\n"
				" int foo[2] : packoffset(c0);\n"
				" int bar : packoffset(c1.y);\n"
				"};\n"
				"RWStructuredBuffer<int> result : register(u1);\n"
				"[numthreads(1, 1, 1)]\n"
				"void main(uint3 dispatchThreadID : SV_DispatchThreadID)\n"
				"{\n"
				" result[0] = bar;\n"
				"}\n");

			dst.hlslSources.add("comp") << glu::ComputeSource(source)
				<< vk::ShaderBuildOptions(dst.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
		}
	}
};

class HlslTest : public TestInstance
{
public:
	HlslTest (Context& context, TestConfig config);
	virtual ~HlslTest (void) = default;

	tcu::TestStatus iterate (void);
};

HlslTest::HlslTest (Context& context, TestConfig config)
	: TestInstance(context)
{
	DE_UNREF(config);
}

tcu::TestStatus HlslTest::iterate (void)
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	const VkQueue queue = m_context.getUniversalQueue();
	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
	Allocator& allocator = m_context.getDefaultAllocator();
	const int testValue = 5;

	// Create an input buffer
	const VkBufferUsageFlags inBufferUsageFlags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
	const VkDeviceSize inBufferSizeBytes = 32; // 2-element array with a 16B stride
	VkBufferCreateInfo inBufferCreateInfo = makeBufferCreateInfo(inBufferSizeBytes, inBufferUsageFlags);
	vk::Move<vk::VkBuffer> inBuffer = createBuffer(vk, device, &inBufferCreateInfo);
	de::MovePtr<vk::Allocation> inAllocation = allocator.allocate(getBufferMemoryRequirements(vk, device, *inBuffer), MemoryRequirement::HostVisible);
	VK_CHECK(vk.bindBufferMemory(device, *inBuffer, inAllocation->getMemory(), inAllocation->getOffset()));

	// Fill the input structure with data - the first member is an array with a 16B stride,
	// so the second member (bar) starts at byte offset 20 (16B + 4B), i.e. int index 5.
	{
		int* bufferPtr = static_cast<int*>(inAllocation->getHostPtr());
		memset(bufferPtr, 0, inBufferSizeBytes);
		bufferPtr[5] = testValue;
		flushAlloc(vk, device, *inAllocation);
	}

	// Create an output buffer
	const VkBufferUsageFlags outBufferUsageFlags = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
	const VkDeviceSize outBufferSizeBytes = sizeof(int);
	VkBufferCreateInfo outBufferCreateInfo = makeBufferCreateInfo(outBufferSizeBytes, outBufferUsageFlags);
	vk::Move<vk::VkBuffer> outBuffer = createBuffer(vk, device, &outBufferCreateInfo);
	de::MovePtr<vk::Allocation> outAllocation = allocator.allocate(getBufferMemoryRequirements(vk, device, *outBuffer), MemoryRequirement::HostVisible);
	VK_CHECK(vk.bindBufferMemory(device, *outBuffer, outAllocation->getMemory(), outAllocation->getOffset()));

	// Create descriptor set
	const VkDescriptorType uniBufDesc = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
	const VkDescriptorType storBufDesc = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
	const Unique<VkDescriptorSetLayout> descriptorSetLayout(
		DescriptorSetLayoutBuilder()
		.addSingleBinding(uniBufDesc, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(storBufDesc, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device));

	const Unique<VkDescriptorPool> descriptorPool(
		DescriptorPoolBuilder()
		.addType(uniBufDesc)
		.addType(storBufDesc)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

	const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

	const VkDescriptorBufferInfo inputBufferDescriptorInfo = makeDescriptorBufferInfo(*inBuffer, 0ull, inBufferSizeBytes);
	const VkDescriptorBufferInfo outputBufferDescriptorInfo = makeDescriptorBufferInfo(*outBuffer, 0ull, outBufferSizeBytes);
	DescriptorSetUpdateBuilder()
		.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), uniBufDesc, &inputBufferDescriptorInfo)
		.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), storBufDesc, &outputBufferDescriptorInfo)
		.update(vk, device);

	// Perform the computation
	const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0u));
	const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));

	const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
	{
		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		DE_NULL,
		static_cast<VkPipelineShaderStageCreateFlags>(0u),
		VK_SHADER_STAGE_COMPUTE_BIT,
		*shaderModule,
		"main",
		DE_NULL,
	};
	const VkComputePipelineCreateInfo pipelineCreateInfo =
	{
		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		DE_NULL,
		static_cast<VkPipelineCreateFlags>(0u),
		pipelineShaderStageParams,
		*pipelineLayout,
		DE_NULL,
		0,
	};
	Unique<VkPipeline> pipeline(createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));
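	// Make the host-written input visible to the compute shader, and the shader-written
	// result visible to the host for readback.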
	const VkBufferMemoryBarrier hostWriteBarrier = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, *inBuffer, 0ull, inBufferSizeBytes);
	const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *outBuffer, 0ull, outBufferSizeBytes);

	const Unique<VkCommandPool> cmdPool(makeCommandPool(vk, device, queueFamilyIndex));
	const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Start recording commands
	beginCommandBuffer(vk, *cmdBuffer);

	vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &hostWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
	vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

	endCommandBuffer(vk, *cmdBuffer);

	// Wait for completion
	submitCommandsAndWait(vk, device, queue, *cmdBuffer);

	// Validate the results
	invalidateAlloc(vk, device, *outAllocation);
	const int* bufferPtr = static_cast<int*>(outAllocation->getHostPtr());
	if (*bufferPtr != testValue)
		return tcu::TestStatus::fail("Fail");
	return tcu::TestStatus::pass("Pass");
}

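// The 20B offset of bar falls inside the tail padding of the foo array, which standard
// uniform buffer layout rules do not allow, so the scalarBlockLayout feature from
// VK_EXT_scalar_block_layout is required.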
void checkSupport (Context& context)
{
	context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
}

} // anonymous

tcu::TestCaseGroup* createHlslComputeGroup (tcu::TestContext& testCtx)
{
	typedef InstanceFactory1WithSupport<HlslTest, TestConfig, FunctionSupport0, Programs> HlslTestInstance;
	de::MovePtr<tcu::TestCaseGroup> hlslCasesGroup(new tcu::TestCaseGroup(testCtx, "hlsl_cases", ""));

	TestConfig testConfig = { TT_CBUFFER_PACKING };
	hlslCasesGroup->addChild(new HlslTestInstance(testCtx, tcu::NODETYPE_SELF_VALIDATE, "cbuffer_packing", "", testConfig, checkSupport));

	return hlslCasesGroup.release();
}

} // SpirVAssembly
} // vkt