// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! Contains `SyncCommandBufferBuilder` and `SyncCommandBuffer`.
//!
//! # How pipeline stages work in Vulkan
//!
//! Imagine you create a command buffer that contains 10 dispatch commands, and submit that command
//! buffer. According to the Vulkan specs, the implementation is free to execute the 10 commands
//! simultaneously.
//!
//! Now imagine that the command buffer contains 10 draw commands instead. Unlike dispatch
//! commands, the draw pipeline contains multiple stages: draw indirect, vertex input, vertex shader,
//! ..., fragment shader, late fragment test, color output. When there are multiple stages, the
//! implementation must start and end the stages in order. In other words, it can start the draw
//! indirect stage of all 10 commands, then start the vertex input stage of all 10 commands, and so
//! on. But it can't, for example, start the fragment shader stage of a command before starting the
//! vertex shader stage of another command. The same goes for ending the stages in the right order.
//!
//! Depending on the type of the command, the pipeline stages are different. Compute shaders use the
//! compute stage, while transfer commands use the transfer stage. The compute and transfer stages
//! aren't ordered.
//!
//! When you submit multiple command buffers to a queue, the implementation doesn't do anything in
//! particular and behaves as if the command buffers were appended to one another. Therefore, if you
//! submit a command buffer with 10 dispatch commands, followed by another command buffer with 5
//! dispatch commands, the implementation can perform the 15 commands simultaneously.
//!
//! ## Introducing barriers
//!
//! In some situations this is not the desired behaviour. If you add a command that writes to a
//! buffer, followed by another command that reads that buffer, you don't want them to execute
//! simultaneously. Instead you want the second one to wait until the first one is finished. This
//! is done by adding a pipeline barrier between the two commands.
//!
//! A pipeline barrier has a source stage and a destination stage (plus various other things).
//! A barrier represents a split in the list of commands. When you add it, the stages of the commands
//! before the barrier corresponding to the source stage of the barrier must finish before the
//! stages of the commands after the barrier corresponding to the destination stage of the barrier
//! can start.
//!
//! For example, if you add a barrier that transitions from the compute stage to the compute stage,
//! then the compute stage of all the commands before the barrier must end before the compute stage
//! of all the commands after the barrier can start. This is appropriate for the example above of
//! writing and then reading the same buffer.
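//!
//! In practice, vulkano inserts these barriers for you when you record commands. As a minimal
//! sketch (the variables `device`, `queue`, `buffer` and `other_buffer` are assumed to already
//! exist; this is an illustration rather than a complete, compilable example):
//!
//! ```ignore
//! let mut builder = AutoCommandBufferBuilder::primary(
//!     device.clone(),
//!     queue.family(),
//!     CommandBufferUsage::OneTimeSubmit,
//! )
//! .unwrap();
//!
//! // Writes `buffer` during the transfer stage.
//! builder.fill_buffer(buffer.clone(), 42).unwrap();
//! // Reads `buffer` during the transfer stage. The synchronization layer described in this
//! // module records a transfer-to-transfer pipeline barrier between the two commands, so that
//! // the copy waits for the fill to finish.
//! builder.copy_buffer(buffer.clone(), other_buffer.clone()).unwrap();
//! ```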
//!
//! ## Batching barriers
//!
//! Since barriers are "expensive" (as the queue must block), vulkano attempts to group as many
//! pipeline barriers as possible into one.
//!
//! Adding a command to a sync command buffer builder does not immediately add it to the underlying
//! command buffer builder. Instead the command is added to a queue, and the builder keeps a
//! prototype of a barrier that must be added before the commands in the queue are flushed.
//!
//! Whenever you add a command, the builder finds out whether a barrier is needed before the
//! command. If so, it tries to merge this barrier with the prototype and adds the command to the
//! queue. If merging isn't possible, the queue is entirely flushed and the command is added to a
//! fresh new queue with a fresh new barrier prototype.
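//!
//! Roughly, in pseudocode (the names below are made up for illustration and do not correspond to
//! the actual implementation):
//!
//! ```ignore
//! fn add_command(&mut self, command: Command) {
//!     if let Some(barrier) = barrier_needed_before(&command) {
//!         if !self.barrier_prototype.try_merge(&barrier) {
//!             // Merging failed: flush the queued commands together with the current prototype
//!             // into the underlying builder, then start over with a fresh queue.
//!             self.flush_pending();
//!             self.barrier_prototype = barrier;
//!         }
//!     }
//!     self.queued_commands.push(command);
//! }
//! ```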

pub use self::builder::SyncCommandBufferBuilder;
pub use self::builder::SyncCommandBufferBuilderBindDescriptorSets;
pub use self::builder::SyncCommandBufferBuilderBindVertexBuffer;
pub use self::builder::SyncCommandBufferBuilderError;
pub use self::builder::SyncCommandBufferBuilderExecuteCommands;
use crate::buffer::BufferAccess;
use crate::command_buffer::sys::UnsafeCommandBuffer;
use crate::command_buffer::sys::UnsafeCommandBufferBuilder;
use crate::command_buffer::CommandBufferExecError;
use crate::command_buffer::ImageUninitializedSafe;
use crate::descriptor_set::DescriptorSet;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::device::Queue;
use crate::image::ImageAccess;
use crate::image::ImageLayout;
use crate::pipeline::{ComputePipelineAbstract, GraphicsPipelineAbstract};
use crate::sync::AccessCheckError;
use crate::sync::AccessError;
use crate::sync::AccessFlags;
use crate::sync::GpuFuture;
use crate::sync::PipelineMemoryAccess;
use crate::sync::PipelineStages;
use fnv::FnvHashMap;
use std::borrow::Cow;
use std::ops::Range;
use std::sync::Arc;

mod builder;

/// Command buffer built from a `SyncCommandBufferBuilder` that provides utilities to handle
/// synchronization.
pub struct SyncCommandBuffer {
    // The actual Vulkan command buffer.
    inner: UnsafeCommandBuffer,

    // List of commands used by the command buffer. Used to hold the various resources that are
    // being used.
    commands: Vec<Arc<dyn Command + Send + Sync>>,

    // Locations within commands that pipeline barriers were inserted. For debugging purposes.
    // TODO: present only in cfg(debug_assertions)?
    barriers: Vec<usize>,

    // State of all the resources used by this command buffer.
    resources: FnvHashMap<ResourceKey, ResourceFinalState>,

    // Resources and their accesses. Used for executing secondary command buffers in a primary.
    buffers: Vec<(ResourceLocation, PipelineMemoryAccess)>,
    images: Vec<(
        ResourceLocation,
        PipelineMemoryAccess,
        ImageLayout,
        ImageLayout,
        ImageUninitializedSafe,
    )>,
}

impl SyncCommandBuffer {
    /// Tries to lock the resources used by the command buffer.
    ///
    /// > **Note**: You should call this in the implementation of the `CommandBuffer` trait.
    pub fn lock_submit(
        &self,
        future: &dyn GpuFuture,
        queue: &Queue,
    ) -> Result<(), CommandBufferExecError> {
        // Number of resources in `self.resources` that have been successfully locked.
        let mut locked_resources = 0;
        // Final return value of this function.
        let mut ret_value = Ok(());

        // Try locking resources. Updates `locked_resources` and `ret_value`, and breaks if an
        // error happens.
        for (key, state) in self.resources.iter() {
            let command = &self.commands[state.command_ids[0]];

            match key {
                ResourceKey::Buffer(..) => {
                    let buf = command.buffer(state.resource_index);

                    // Because try_gpu_lock needs to be called first,
                    // this should never return Ok without first returning Err
                    let prev_err = match future.check_buffer_access(&buf, state.exclusive, queue) {
                        Ok(_) => {
                            unsafe {
                                buf.increase_gpu_lock();
                            }
                            locked_resources += 1;
                            continue;
                        }
                        Err(err) => err,
                    };
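                    // The future couldn't grant the access: fall back to locking the buffer
                    // itself. If that also fails, report either the locking error (when the
                    // future simply didn't know the buffer) or the future's explicit denial.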
                    match (buf.try_gpu_lock(state.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown)
                        | (_, AccessCheckError::Denied(err)) => {
                            ret_value = Err(CommandBufferExecError::AccessError {
                                error: err,
                                command_name: command.name().into(),
                                command_param: command.buffer_name(state.resource_index),
                                command_offset: state.command_ids[0],
                            });
                            break;
                        }
                    };
                }

                ResourceKey::Image(..) => {
                    let img = command.image(state.resource_index);

                    let prev_err = match future.check_image_access(
                        img,
                        state.initial_layout,
                        state.exclusive,
                        queue,
                    ) {
                        Ok(_) => {
                            unsafe {
                                img.increase_gpu_lock();
                            }
                            locked_resources += 1;
                            continue;
                        }
                        Err(err) => err,
                    };

                    match (
                        img.try_gpu_lock(
                            state.exclusive,
                            state.image_uninitialized_safe.is_safe(),
                            state.initial_layout,
                        ),
                        prev_err,
                    ) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown)
                        | (_, AccessCheckError::Denied(err)) => {
                            ret_value = Err(CommandBufferExecError::AccessError {
                                error: err,
                                command_name: command.name().into(),
                                command_param: command.image_name(state.resource_index),
                                command_offset: state.command_ids[0],
                            });
                            break;
                        }
                    };
                }
            }

            locked_resources += 1;
        }

        // If we are going to return an error, we have to unlock all the resources we locked above.
        if let Err(_) = ret_value {
            for (key, state) in self.resources.iter().take(locked_resources) {
                let command = &self.commands[state.command_ids[0]];

                match key {
                    ResourceKey::Buffer(..) => {
                        let buf = command.buffer(state.resource_index);
                        unsafe {
                            buf.unlock();
                        }
                    }

                    ResourceKey::Image(..) => {
                        let command = &self.commands[state.command_ids[0]];
                        let img = command.image(state.resource_index);
                        let trans = if state.final_layout != state.initial_layout {
                            Some(state.final_layout)
                        } else {
                            None
                        };
                        unsafe {
                            img.unlock(trans);
                        }
                    }
                }
            }
        }

        // TODO: pipeline barriers if necessary?

        ret_value
    }

    /// Unlocks the resources used by the command buffer.
    ///
    /// > **Note**: You should call this in the implementation of the `CommandBuffer` trait.
    ///
    /// # Safety
    ///
    /// The command buffer must have been successfully locked with `lock_submit()`.
    ///
    pub unsafe fn unlock(&self) {
        for (key, state) in self.resources.iter() {
            let command = &self.commands[state.command_ids[0]];

            match key {
                ResourceKey::Buffer(..) => {
                    let buf = command.buffer(state.resource_index);
                    buf.unlock();
                }

                ResourceKey::Image(..) => {
                    let img = command.image(state.resource_index);
                    let trans = if state.final_layout != state.initial_layout {
                        Some(state.final_layout)
                    } else {
                        None
                    };
                    img.unlock(trans);
                }
            }
        }
    }

    /// Checks whether this command buffer has access to a buffer.
    ///
    /// > **Note**: Suitable when implementing the `CommandBuffer` trait.
    #[inline]
    pub fn check_buffer_access(
        &self,
        buffer: &dyn BufferAccess,
        exclusive: bool,
        queue: &Queue,
    ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
        // TODO: check the queue family
        if let Some(value) = self.resources.get(&buffer.into()) {
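            // If an exclusive (write) access is requested but this command buffer only used the
            // buffer non-exclusively, it cannot answer the request itself and reports `Unknown`.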
            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Unknown);
            }

            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }

    /// Checks whether this command buffer has access to an image.
    ///
    /// > **Note**: Suitable when implementing the `CommandBuffer` trait.
    #[inline]
    pub fn check_image_access(
        &self,
        image: &dyn ImageAccess,
        layout: ImageLayout,
        exclusive: bool,
        queue: &Queue,
    ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
        // TODO: check the queue family
        if let Some(value) = self.resources.get(&image.into()) {
            if layout != ImageLayout::Undefined && value.final_layout != layout {
                return Err(AccessCheckError::Denied(
                    AccessError::UnexpectedImageLayout {
                        allowed: value.final_layout,
                        requested: layout,
                    },
                ));
            }

            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Unknown);
            }

            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }

    #[inline]
    pub fn num_buffers(&self) -> usize {
        self.buffers.len()
    }

    #[inline]
    pub fn buffer(&self, index: usize) -> Option<(&dyn BufferAccess, PipelineMemoryAccess)> {
        self.buffers.get(index).map(|(location, memory)| {
            let cmd = &self.commands[location.command_id];
            (cmd.buffer(location.resource_index), *memory)
        })
    }

    #[inline]
    pub fn num_images(&self) -> usize {
        self.images.len()
    }

    #[inline]
    pub fn image(
        &self,
        index: usize,
    ) -> Option<(
        &dyn ImageAccess,
        PipelineMemoryAccess,
        ImageLayout,
        ImageLayout,
        ImageUninitializedSafe,
    )> {
        self.images.get(index).map(
            |(location, memory, start_layout, end_layout, image_uninitialized_safe)| {
                let cmd = &self.commands[location.command_id];
                (
                    cmd.image(location.resource_index),
                    *memory,
                    *start_layout,
                    *end_layout,
                    *image_uninitialized_safe,
                )
            },
        )
    }
}

impl AsRef<UnsafeCommandBuffer> for SyncCommandBuffer {
    #[inline]
    fn as_ref(&self) -> &UnsafeCommandBuffer {
        &self.inner
    }
}

unsafe impl DeviceOwned for SyncCommandBuffer {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

// Key that identifies a resource. Implements `PartialEq`, `Eq` and `Hash` so that two resources
// that conflict with each other compare equal.
#[derive(Debug, PartialEq, Eq, Hash)]
enum ResourceKey {
    Buffer((u64, u64)),
    Image(u64, Range<u32>, Range<u32>),
}

impl From<&dyn BufferAccess> for ResourceKey {
    #[inline]
    fn from(buffer: &dyn BufferAccess) -> Self {
        Self::Buffer(buffer.conflict_key())
    }
}

impl From<&dyn ImageAccess> for ResourceKey {
    #[inline]
    fn from(image: &dyn ImageAccess) -> Self {
        Self::Image(
            image.conflict_key(),
            image.current_miplevels_access(),
            image.current_layer_levels_access(),
        )
    }
}

// Usage of a resource in a finished command buffer.
#[derive(Debug, Clone)]
struct ResourceFinalState {
    // Indices of the commands that contain the resource.
    command_ids: Vec<usize>,

    // Index of the resource within the first command in `command_ids`.
    resource_index: usize,

    // Stages of the last command that uses the resource.
    final_stages: PipelineStages,
    // Access for the last command that uses the resource.
    final_access: AccessFlags,

    // True if the resource is used in exclusive mode.
    exclusive: bool,

    // Layout that an image must be in at the start of the command buffer. Can be `Undefined` if we
    // don't care.
    initial_layout: ImageLayout,

    // Layout the image will be in at the end of the command buffer.
    final_layout: ImageLayout, // TODO: maybe wrap in an Option to mean that the layout doesn't change? because of buffers?

    image_uninitialized_safe: ImageUninitializedSafe,
}

// Identifies a resource within the list of commands.
#[derive(Clone, Copy, Debug)]
struct ResourceLocation {
    // Index of the command that holds the resource.
    command_id: usize,
    // Index of the resource within the command.
    resource_index: usize,
}

// Trait for single commands within the list of commands.
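// The resource accessors below default to `panic!()`; each concrete command overrides only the
// accessors that are relevant to it, so calling one on a command that doesn't use the
// corresponding resource will panic.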
trait Command {
    // Returns a user-friendly name for the command, for error reporting purposes.
    fn name(&self) -> &'static str;

    // Sends the command to the `UnsafeCommandBufferBuilder`. Calling this method twice on the same
    // object will likely lead to a panic.
    unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder);

    // Gives access to the `num`th buffer used by the command.
    fn buffer(&self, _num: usize) -> &dyn BufferAccess {
        panic!()
    }

    // Gives access to the `num`th image used by the command.
    fn image(&self, _num: usize) -> &dyn ImageAccess {
        panic!()
    }

    // Returns a user-friendly name for the `num`th buffer used by the command, for error
    // reporting purposes.
    fn buffer_name(&self, _num: usize) -> Cow<'static, str> {
        panic!()
    }

    // Returns a user-friendly name for the `num`th image used by the command, for error
    // reporting purposes.
    fn image_name(&self, _num: usize) -> Cow<'static, str> {
        panic!()
    }

    fn bound_descriptor_set(&self, set_num: u32) -> (&dyn DescriptorSet, &[u32]) {
        panic!()
    }

    fn bound_index_buffer(&self) -> &dyn BufferAccess {
        panic!()
    }

    fn bound_pipeline_compute(&self) -> &dyn ComputePipelineAbstract {
        panic!()
    }

    fn bound_pipeline_graphics(&self) -> &dyn GraphicsPipelineAbstract {
        panic!()
    }

    fn bound_vertex_buffer(&self, binding_num: u32) -> &dyn BufferAccess {
        panic!()
    }
}

impl std::fmt::Debug for dyn Command + Send + Sync {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.name())
    }
}

#[cfg(test)]
mod tests {
    use super::SyncCommandBufferBuilder;
    use super::SyncCommandBufferBuilderError;
    use crate::buffer::BufferUsage;
    use crate::buffer::CpuAccessibleBuffer;
    use crate::buffer::ImmutableBuffer;
    use crate::command_buffer::pool::CommandPool;
    use crate::command_buffer::pool::CommandPoolBuilderAlloc;
    use crate::command_buffer::AutoCommandBufferBuilder;
    use crate::command_buffer::CommandBufferLevel;
    use crate::command_buffer::CommandBufferUsage;
    use crate::descriptor_set::layout::DescriptorDesc;
    use crate::descriptor_set::layout::DescriptorDescTy;
    use crate::descriptor_set::layout::DescriptorSetLayout;
    use crate::descriptor_set::PersistentDescriptorSet;
    use crate::device::Device;
    use crate::pipeline::layout::PipelineLayout;
    use crate::pipeline::shader::ShaderStages;
    use crate::pipeline::PipelineBindPoint;
    use crate::sampler::Sampler;
    use crate::sync::GpuFuture;
    use std::sync::Arc;

    #[test]
    fn basic_creation() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();
            let pool = Device::standard_command_pool(&device, queue.family());
            let pool_builder_alloc = pool.alloc(false, 1).unwrap().next().unwrap();

            assert!(matches!(
                SyncCommandBufferBuilder::new(
                    &pool_builder_alloc.inner(),
                    CommandBufferLevel::primary(),
                    CommandBufferUsage::MultipleSubmit,
                ),
                Ok(_)
            ));
        }
    }

    #[test]
    fn basic_conflict() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let pool = Device::standard_command_pool(&device, queue.family());
            let pool_builder_alloc = pool.alloc(false, 1).unwrap().next().unwrap();
            let mut sync = SyncCommandBufferBuilder::new(
                &pool_builder_alloc.inner(),
                CommandBufferLevel::primary(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();
            let buf =
                CpuAccessibleBuffer::from_data(device, BufferUsage::all(), false, 0u32).unwrap();
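
            // Copying a buffer onto itself: the source and destination conflict, so the builder
            // must refuse the command.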
            assert!(matches!(
                sync.copy_buffer(buf.clone(), buf.clone(), std::iter::once((0, 0, 4))),
                Err(SyncCommandBufferBuilderError::Conflict { .. })
            ));
        }
    }

    #[test]
    fn secondary_conflicting_writes() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            // Create a tiny test buffer
            let (buf, future) = ImmutableBuffer::from_data(
                0u32,
                BufferUsage::transfer_destination(),
                queue.clone(),
            )
            .unwrap();
            future
                .then_signal_fence_and_flush()
                .unwrap()
                .wait(None)
                .unwrap();

            // Two secondary command buffers that both write to the buffer
            let secondary = (0..2)
                .map(|_| {
                    let mut builder = AutoCommandBufferBuilder::secondary_compute(
                        device.clone(),
                        queue.family(),
                        CommandBufferUsage::SimultaneousUse,
                    )
                    .unwrap();
                    builder.fill_buffer(buf.clone(), 42u32).unwrap();
                    Arc::new(builder.build().unwrap())
                })
                .collect::<Vec<_>>();

            let pool = Device::standard_command_pool(&device, queue.family());
            let allocs = pool.alloc(false, 2).unwrap().collect::<Vec<_>>();

            {
                let mut builder = SyncCommandBufferBuilder::new(
                    allocs[0].inner(),
                    CommandBufferLevel::primary(),
                    CommandBufferUsage::SimultaneousUse,
                )
                .unwrap();

                // Add both secondary command buffers using separate execute_commands calls.
                secondary.iter().cloned().for_each(|secondary| {
                    let mut ec = builder.execute_commands();
                    ec.add(secondary);
                    ec.submit().unwrap();
                });

                let primary = builder.build().unwrap();
                let names = primary
                    .commands
                    .iter()
                    .map(|c| c.name())
                    .collect::<Vec<_>>();

                // Ensure that the builder added a barrier between the two writes
                assert_eq!(&names, &["vkCmdExecuteCommands", "vkCmdExecuteCommands"]);
                assert_eq!(&primary.barriers, &[0, 1]);
            }

            {
                let mut builder = SyncCommandBufferBuilder::new(
                    allocs[1].inner(),
                    CommandBufferLevel::primary(),
                    CommandBufferUsage::SimultaneousUse,
                )
                .unwrap();

                // Add a single execute_commands for all secondary command buffers at once
                let mut ec = builder.execute_commands();
                secondary.into_iter().for_each(|secondary| {
                    ec.add(secondary);
                });

                // The two writes can't be split up by a barrier because they are part of the same
                // command. Therefore an error is returned.
                // TODO: Would be nice if SyncCommandBufferBuilder would split the commands
                // automatically in order to insert a barrier.
                assert!(matches!(
                    ec.submit(),
                    Err(SyncCommandBufferBuilderError::Conflict { .. })
                ));
            }
        }
    }

    #[test]
    fn vertex_buffer_binding() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let pool = Device::standard_command_pool(&device, queue.family());
            let pool_builder_alloc = pool.alloc(false, 1).unwrap().next().unwrap();
            let mut sync = SyncCommandBufferBuilder::new(
                &pool_builder_alloc.inner(),
                CommandBufferLevel::primary(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();
            let buf =
                CpuAccessibleBuffer::from_data(device, BufferUsage::all(), false, 0u32).unwrap();
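            // Bind the buffer starting at binding number 1, leaving bindings 0 and 2 unused.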
            let mut buf_builder = sync.bind_vertex_buffers();
            buf_builder.add(buf);
            buf_builder.submit(1).unwrap();

            assert!(sync.bound_vertex_buffer(0).is_none());
            assert!(sync.bound_vertex_buffer(1).is_some());
            assert!(sync.bound_vertex_buffer(2).is_none());
        }
    }

    #[test]
    fn descriptor_set_binding() {
        unsafe {
            let (device, queue) = gfx_dev_and_queue!();

            let pool = Device::standard_command_pool(&device, queue.family());
            let pool_builder_alloc = pool.alloc(false, 1).unwrap().next().unwrap();
            let mut sync = SyncCommandBufferBuilder::new(
                &pool_builder_alloc.inner(),
                CommandBufferLevel::primary(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();
            let set_layout = Arc::new(
                DescriptorSetLayout::new(
                    device.clone(),
                    [Some(DescriptorDesc {
                        ty: DescriptorDescTy::Sampler,
                        array_count: 1,
                        stages: ShaderStages::all(),
                        readonly: true,
                    })],
                )
                .unwrap(),
            );
            let pipeline_layout = Arc::new(
                PipelineLayout::new(device.clone(), [set_layout.clone(), set_layout.clone()], [])
                    .unwrap(),
            );
            let set = Arc::new(
                PersistentDescriptorSet::start(set_layout)
                    .add_sampler(Sampler::simple_repeat_linear(device))
                    .unwrap()
                    .build()
                    .unwrap(),
            );

            let mut set_builder = sync.bind_descriptor_sets();
            set_builder.add(set.clone());
            set_builder
                .submit(PipelineBindPoint::Graphics, pipeline_layout.clone(), 1)
                .unwrap();

            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Compute, 0)
                .is_none());
            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Graphics, 0)
                .is_none());
            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Graphics, 1)
                .is_some());
            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Graphics, 2)
                .is_none());

            let mut set_builder = sync.bind_descriptor_sets();
            set_builder.add(set);
            set_builder
                .submit(PipelineBindPoint::Graphics, pipeline_layout, 0)
                .unwrap();

            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Graphics, 0)
                .is_some());
            assert!(sync
                .bound_descriptor_set(PipelineBindPoint::Graphics, 1)
                .is_none());
        }
    }
}