// Copyright 2018 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tests/unittests/validation/ValidationTest.h"

#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"

namespace {

class QueueSubmitValidationTest : public ValidationTest {};

// Test that submitting with a mapped buffer is disallowed.
TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
    // Create a map-write buffer.
    const uint64_t kBufferSize = 4;
    wgpu::BufferDescriptor descriptor;
    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
    descriptor.size = kBufferSize;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    // Create a fake copy destination buffer.
    descriptor.usage = wgpu::BufferUsage::CopyDst;
    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);

    // Create a command buffer that reads from the mappable buffer.
    wgpu::CommandBuffer commands;
    {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
        commands = encoder.Finish();
    }

    wgpu::Queue queue = device.GetQueue();

    // Submitting when the buffer has never been mapped should succeed.
    queue.Submit(1, &commands);

    {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
        commands = encoder.Finish();
    }

    // Map the buffer; submitting while the buffer is mapped should fail.
    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);

    // Try submitting before the map callback is fired.
    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));

    WaitForAllOperations(device);

    {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
        commands = encoder.Finish();
    }

    // Try submitting after the map callback is fired.
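    // Once the map callback has fired, the buffer stays mapped until Unmap() is called,
    // so this submit must fail as well.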
    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));

    {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
        commands = encoder.Finish();
    }

    // Unmap the buffer; the queue submit should now succeed.
    buffer.Unmap();
    queue.Submit(1, &commands);
}

// Test that it is invalid to submit a command buffer twice.
TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
    wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
    wgpu::Queue queue = device.GetQueue();

    // Should succeed.
    queue.Submit(1, &commandBuffer);

    // Should fail because the command buffer was already submitted.
    ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
}

// Test resubmitting failed command buffers.
TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
    // Create a map-write buffer.
    const uint64_t kBufferSize = 4;
    wgpu::BufferDescriptor descriptor;
    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
    descriptor.size = kBufferSize;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    // Create a destination buffer for the buffer-to-buffer copy.
    descriptor.usage = wgpu::BufferUsage::CopyDst;
    descriptor.size = kBufferSize;
    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);

    // Create a command buffer that reads from the mappable buffer.
    wgpu::CommandBuffer commands;
    {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
        commands = encoder.Finish();
    }

    wgpu::Queue queue = device.GetQueue();

    // Map the source buffer to force a submit failure.
    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);

    // Submitting a command buffer that uses a mapped buffer should fail.
    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));

    // Unmap the buffer to fix the failure.
    buffer.Unmap();

    // Resubmitting any command buffer, even if the problem was fixed, should fail because
    // command buffers are consumed by their first Submit.
    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
}

// Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
    // Create a buffer for mapping, to run our callback.
    wgpu::BufferDescriptor descriptor;
    descriptor.size = 4;
    descriptor.usage = wgpu::BufferUsage::MapWrite;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    struct CallbackData {
        wgpu::Device device;
        wgpu::Buffer buffer;
    } callbackData = {device, buffer};

    const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);

        data->buffer.Unmap();

        wgpu::Queue queue = data->device.GetQueue();
        queue.Submit(0, nullptr);
    };

    buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);

    WaitForAllOperations(device);
}

// Test that submitting in a render pipeline creation callback doesn't cause re-entrance
// problems.
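// The creation callback fires while the device is being ticked, so the Submit inside it
// re-enters queue code from within device processing.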
TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
    struct CallbackData {
        wgpu::Device device;
    } callbackData = {device};

    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
                             char const* message, void* userdata) {
        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);

        wgpuRenderPipelineRelease(pipeline);

        wgpu::Queue queue = data->device.GetQueue();
        queue.Submit(0, nullptr);
    };

    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
        [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> {
            return vec4<f32>(0.0, 0.0, 0.0, 1.0);
        })");

    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
        [[stage(fragment)]] fn main() -> [[location(0)]] vec4<f32> {
            return vec4<f32>(0.0, 1.0, 0.0, 1.0);
        })");

    utils::ComboRenderPipelineDescriptor descriptor;
    descriptor.vertex.module = vsModule;
    descriptor.cFragment.module = fsModule;
    device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);

    WaitForAllOperations(device);
}

// Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
// problems.
TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
    struct CallbackData {
        wgpu::Device device;
    } callbackData = {device};

    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
                             char const* message, void* userdata) {
        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);

        wgpuComputePipelineRelease(pipeline);

        wgpu::Queue queue = data->device.GetQueue();
        queue.Submit(0, nullptr);
    };

    wgpu::ComputePipelineDescriptor descriptor;
    descriptor.compute.module = utils::CreateShaderModule(device, R"(
        [[stage(compute), workgroup_size(1)]] fn main() {
        })");
    descriptor.compute.entryPoint = "main";
    device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);

    WaitForAllOperations(device);
}

// Test that buffers in unused compute pass bindgroups are still checked for in
// Queue::Submit validation.
TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
    wgpu::Queue queue = device.GetQueue();

    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});

    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});

    // In this test we check that BindGroup 1 is checked; the texture test will check
    // BindGroup 2. This is to provide coverage of the for-loops in the validation code.
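    // Bind group 0 is intentionally left empty so that the storage buffer under test
    // sits at a nonzero bind group index.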
    wgpu::ComputePipelineDescriptor cpDesc;
    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
    cpDesc.compute.entryPoint = "main";
    cpDesc.compute.module =
        utils::CreateShaderModule(device, "[[stage(compute), workgroup_size(1)]] fn main() {}");
    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);

    wgpu::BufferDescriptor bufDesc;
    bufDesc.size = 4;
    bufDesc.usage = wgpu::BufferUsage::Storage;

    // Test that completely unused bindgroups still have their buffers checked.
    for (bool destroy : {true, false}) {
        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(1, unusedBG);
        pass.EndPass();
        wgpu::CommandBuffer commands = encoder.Finish();

        if (destroy) {
            unusedBuffer.Destroy();
            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        } else {
            queue.Submit(1, &commands);
        }
    }

    // Test that bindgroups that are unused because they were replaced still have their
    // buffers checked.
    for (bool destroy : {true, false}) {
        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});

        wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
        wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, usedBuffer}});

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, emptyBG);
        pass.SetBindGroup(1, unusedBG);
        pass.SetBindGroup(1, usedBG);
        pass.SetPipeline(pipeline);
        pass.Dispatch(1);
        pass.EndPass();
        wgpu::CommandBuffer commands = encoder.Finish();

        if (destroy) {
            unusedBuffer.Destroy();
            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        } else {
            queue.Submit(1, &commands);
        }
    }
}

// Test that textures in unused compute pass bindgroups are still checked for in
// Queue::Submit validation.
TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
    wgpu::Queue queue = device.GetQueue();

    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});

    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
        device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});

    wgpu::ComputePipelineDescriptor cpDesc;
    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
    cpDesc.compute.entryPoint = "main";
    cpDesc.compute.module =
        utils::CreateShaderModule(device, "[[stage(compute), workgroup_size(1)]] fn main() {}");
    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);

    wgpu::TextureDescriptor texDesc;
    texDesc.size = {1, 1, 1};
    texDesc.usage = wgpu::TextureUsage::TextureBinding;
    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;

    // Test that completely unused bindgroups still have their textures checked.
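    // As in the buffer variant above, destroying the texture must make the submit fail
    // even though no dispatch ever samples it.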
    for (bool destroy : {true, false}) {
        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
        wgpu::BindGroup unusedBG =
            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(2, unusedBG);
        pass.EndPass();
        wgpu::CommandBuffer commands = encoder.Finish();

        if (destroy) {
            unusedTexture.Destroy();
            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        } else {
            queue.Submit(1, &commands);
        }
    }

    // Test that bindgroups that are unused because they were replaced still have their
    // textures checked.
    for (bool destroy : {true, false}) {
        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
        wgpu::BindGroup unusedBG =
            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});

        wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
        wgpu::BindGroup usedBG =
            utils::MakeBindGroup(device, testBGL, {{0, usedTexture.CreateView()}});

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, emptyBG);
        pass.SetBindGroup(1, emptyBG);
        pass.SetBindGroup(2, unusedBG);
        pass.SetBindGroup(2, usedBG);
        pass.SetPipeline(pipeline);
        pass.Dispatch(1);
        pass.EndPass();
        wgpu::CommandBuffer commands = encoder.Finish();

        if (destroy) {
            unusedTexture.Destroy();
            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        } else {
            queue.Submit(1, &commands);
        }
    }
}

}  // anonymous namespace