// Copyright (c) 2022 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

use crate::{
    command_buffer::{
        synced::{Command, SyncCommandBufferBuilder},
        sys::UnsafeCommandBufferBuilder,
    },
    image::ImageLayout,
    sync::{
        event::Event, AccessFlags, BufferMemoryBarrier, DependencyFlags, DependencyInfo,
        ImageMemoryBarrier, MemoryBarrier, PipelineStages,
    },
    Version, VulkanObject,
};
use smallvec::SmallVec;
use std::{ptr, sync::Arc};

impl SyncCommandBufferBuilder {
    /// Calls `vkCmdSetEvent` on the builder.
    #[inline]
    pub unsafe fn set_event(&mut self, event: Arc<Event>, dependency_info: DependencyInfo) {
        struct Cmd {
            event: Arc<Event>,
            dependency_info: DependencyInfo,
        }

        impl Command for Cmd {
            fn name(&self) -> &'static str {
                "set_event"
            }

            unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) {
                out.set_event(&self.event, &self.dependency_info);
            }
        }

        self.commands.push(Box::new(Cmd {
            event,
            dependency_info,
        }));
    }

    /// Calls `vkCmdWaitEvents` on the builder.
    #[inline]
    pub unsafe fn wait_events(
        &mut self,
        events: impl IntoIterator<Item = (Arc<Event>, DependencyInfo)>,
    ) {
        struct Cmd {
            events: SmallVec<[(Arc<Event>, DependencyInfo); 4]>,
        }

        impl Command for Cmd {
            fn name(&self) -> &'static str {
                "wait_events"
            }

            unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) {
                out.wait_events(
                    self.events
                        .iter()
                        .map(|&(ref event, ref dependency_info)| (event.as_ref(), dependency_info)),
                );
            }
        }

        self.commands.push(Box::new(Cmd {
            events: events.into_iter().collect(),
        }));
    }

    /// Calls `vkCmdResetEvent` on the builder.
    #[inline]
    pub unsafe fn reset_event(&mut self, event: Arc<Event>, stages: PipelineStages) {
        struct Cmd {
            event: Arc<Event>,
            stages: PipelineStages,
        }

        impl Command for Cmd {
            fn name(&self) -> &'static str {
                "reset_event"
            }

            unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) {
                out.reset_event(&self.event, self.stages);
            }
        }

        self.commands.push(Box::new(Cmd { event, stages }));
    }
}

impl UnsafeCommandBufferBuilder {
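    /// Calls `vkCmdPipelineBarrier` on the builder.
    ///
    /// A minimal sketch of recording a global memory barrier. It assumes an
    /// already-recording `builder: UnsafeCommandBufferBuilder`, uses
    /// illustrative stage/access flag names, and relies on the `Default`
    /// impls of `MemoryBarrier` and `DependencyInfo`:
    ///
    /// ```ignore
    /// let dependency_info = DependencyInfo {
    ///     memory_barriers: [MemoryBarrier {
    ///         src_stages: PipelineStages::ALL_COMMANDS,
    ///         src_access: AccessFlags::MEMORY_WRITE,
    ///         dst_stages: PipelineStages::ALL_COMMANDS,
    ///         dst_access: AccessFlags::MEMORY_READ,
    ///         ..Default::default()
    ///     }]
    ///     .into_iter()
    ///     .collect(),
    ///     ..Default::default()
    /// };
    ///
    /// // Safety: the command buffer must be in the recording state.
    /// unsafe { builder.pipeline_barrier(&dependency_info) };
    /// ```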
    #[inline]
    pub unsafe fn pipeline_barrier(&mut self, dependency_info: &DependencyInfo) {
        if dependency_info.is_empty() {
            return;
        }

        let DependencyInfo {
            mut dependency_flags,
            memory_barriers,
            buffer_memory_barriers,
            image_memory_barriers,
            _ne: _,
        } = dependency_info;

        // TODO: Is this needed?
        dependency_flags |= DependencyFlags::BY_REGION;

        if self.device.enabled_features().synchronization2 {
            let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &MemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));

                    ash::vk::MemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        ..Default::default()
                    }
                })
                .collect();

            let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &BufferMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        queue_family_ownership_transfer,
                        ref buffer,
                        ref range,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
                    debug_assert!(!range.is_empty());
                    debug_assert!(range.end <= buffer.size());

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::BufferMemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        buffer: buffer.handle(),
                        offset: range.start,
                        size: range.end - range.start,
                        ..Default::default()
                    }
                })
                .collect();
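
            // Unlike buffer barriers, image barriers additionally encode a layout
            // transition (`old_layout` -> `new_layout`) on top of the execution
            // and memory dependency.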
            let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &ImageMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        old_layout,
                        new_layout,
                        queue_family_ownership_transfer,
                        ref image,
                        ref subresource_range,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));

                    debug_assert!(
                        old_layout == new_layout
                            || !matches!(
                                new_layout,
                                ImageLayout::Undefined | ImageLayout::Preinitialized
                            )
                    );
                    debug_assert!(image
                        .format()
                        .unwrap()
                        .aspects()
                        .contains(subresource_range.aspects));
                    debug_assert!(!subresource_range.mip_levels.is_empty());
                    debug_assert!(subresource_range.mip_levels.end <= image.mip_levels());
                    debug_assert!(!subresource_range.array_layers.is_empty());
                    debug_assert!(
                        subresource_range.array_layers.end <= image.dimensions().array_layers()
                    );

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::ImageMemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        old_layout: old_layout.into(),
                        new_layout: new_layout.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        image: image.handle(),
                        subresource_range: subresource_range.clone().into(),
                        ..Default::default()
                    }
                })
                .collect();

            let dependency_info_vk = ash::vk::DependencyInfo {
                dependency_flags: dependency_flags.into(),
                memory_barrier_count: memory_barriers_vk.len() as u32,
                p_memory_barriers: memory_barriers_vk.as_ptr(),
                buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32,
                p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(),
                image_memory_barrier_count: image_memory_barriers_vk.len() as u32,
                p_image_memory_barriers: image_memory_barriers_vk.as_ptr(),
                ..Default::default()
            };

            let fns = self.device.fns();

            if self.device.api_version() >= Version::V1_3 {
                (fns.v1_3.cmd_pipeline_barrier2)(self.handle, &dependency_info_vk);
            } else {
                debug_assert!(self.device.enabled_extensions().khr_synchronization2);
                (fns.khr_synchronization2.cmd_pipeline_barrier2_khr)(
                    self.handle,
                    &dependency_info_vk,
                );
            }
        } else {
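            // Without the `synchronization2` feature, the stage masks are not part
            // of the individual barrier structs; they are passed once, as two
            // arguments of the call itself, so they are accumulated here over all
            // barriers.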
            let mut src_stage_mask = ash::vk::PipelineStageFlags::empty();
            let mut dst_stage_mask = ash::vk::PipelineStageFlags::empty();

            let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &MemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));

                    src_stage_mask |= src_stages.into();
                    dst_stage_mask |= dst_stages.into();

                    ash::vk::MemoryBarrier {
                        src_access_mask: src_access.into(),
                        dst_access_mask: dst_access.into(),
                        ..Default::default()
                    }
                })
                .collect();

            let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &BufferMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        queue_family_ownership_transfer,
                        ref buffer,
                        ref range,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
                    debug_assert!(!range.is_empty());
                    debug_assert!(range.end <= buffer.size());

                    src_stage_mask |= src_stages.into();
                    dst_stage_mask |= dst_stages.into();

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::BufferMemoryBarrier {
                        src_access_mask: src_access.into(),
                        dst_access_mask: dst_access.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        buffer: buffer.handle(),
                        offset: range.start,
                        size: range.end - range.start,
                        ..Default::default()
                    }
                })
                .collect();

            let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &ImageMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        old_layout,
                        new_layout,
                        queue_family_ownership_transfer,
                        ref image,
                        ref subresource_range,
                        _ne: _,
                    } = barrier;

                    debug_assert!(AccessFlags::from(src_stages).contains(src_access));
                    debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
                    debug_assert!(!matches!(
                        new_layout,
                        ImageLayout::Undefined | ImageLayout::Preinitialized
                    ));
                    debug_assert!(image
                        .format()
                        .unwrap()
                        .aspects()
                        .contains(subresource_range.aspects));
                    debug_assert!(!subresource_range.mip_levels.is_empty());
                    debug_assert!(subresource_range.mip_levels.end <= image.mip_levels());
                    debug_assert!(!subresource_range.array_layers.is_empty());
                    debug_assert!(
                        subresource_range.array_layers.end <= image.dimensions().array_layers()
                    );

                    src_stage_mask |= src_stages.into();
                    dst_stage_mask |= dst_stages.into();

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::ImageMemoryBarrier {
                        src_access_mask: src_access.into(),
                        dst_access_mask: dst_access.into(),
                        old_layout: old_layout.into(),
                        new_layout: new_layout.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        image: image.handle(),
                        subresource_range: subresource_range.clone().into(),
                        ..Default::default()
                    }
                })
                .collect();

            if src_stage_mask.is_empty() {
                // "VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT is [...] equivalent to
                // VK_PIPELINE_STAGE_2_NONE in the first scope."
                src_stage_mask |= ash::vk::PipelineStageFlags::TOP_OF_PIPE;
            }

            if dst_stage_mask.is_empty() {
                // "VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT is [...] equivalent to
                // VK_PIPELINE_STAGE_2_NONE in the second scope."
                dst_stage_mask |= ash::vk::PipelineStageFlags::BOTTOM_OF_PIPE;
            }

            let fns = self.device.fns();
            (fns.v1_0.cmd_pipeline_barrier)(
                self.handle,
                src_stage_mask,
                dst_stage_mask,
                dependency_flags.into(),
                memory_barriers_vk.len() as u32,
                memory_barriers_vk.as_ptr(),
                buffer_memory_barriers_vk.len() as u32,
                buffer_memory_barriers_vk.as_ptr(),
                image_memory_barriers_vk.len() as u32,
                image_memory_barriers_vk.as_ptr(),
            );
        }
    }

    /// Calls `vkCmdSetEvent` on the builder.
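    ///
    /// A minimal sketch, assuming a recording `builder: UnsafeCommandBufferBuilder`
    /// and an `event: Event` created on the same device, with an empty
    /// `DependencyInfo` obtained from its `Default` impl:
    ///
    /// ```ignore
    /// // Safety: the command buffer must be in the recording state.
    /// unsafe { builder.set_event(&event, &DependencyInfo::default()) };
    /// ```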
    #[inline]
    pub unsafe fn set_event(&mut self, event: &Event, dependency_info: &DependencyInfo) {
        let &DependencyInfo {
            mut dependency_flags,
            ref memory_barriers,
            ref buffer_memory_barriers,
            ref image_memory_barriers,
            _ne: _,
        } = dependency_info;

        // TODO: Is this needed?
        dependency_flags |= DependencyFlags::BY_REGION;

        let fns = self.device.fns();

        if self.device.enabled_features().synchronization2 {
            let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &MemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        _ne: _,
                    } = barrier;

                    ash::vk::MemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        ..Default::default()
                    }
                })
                .collect();

            let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &BufferMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        queue_family_ownership_transfer,
                        ref buffer,
                        ref range,
                        _ne: _,
                    } = barrier;

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::BufferMemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        buffer: buffer.handle(),
                        offset: range.start,
                        size: range.end - range.start,
                        ..Default::default()
                    }
                })
                .collect();

            let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers
                .into_iter()
                .map(|barrier| {
                    let &ImageMemoryBarrier {
                        src_stages,
                        src_access,
                        dst_stages,
                        dst_access,
                        old_layout,
                        new_layout,
                        queue_family_ownership_transfer,
                        ref image,
                        ref subresource_range,
                        _ne: _,
                    } = barrier;

                    let (src_queue_family_index, dst_queue_family_index) =
                        queue_family_ownership_transfer.map_or(
                            (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                            Into::into,
                        );

                    ash::vk::ImageMemoryBarrier2 {
                        src_stage_mask: src_stages.into(),
                        src_access_mask: src_access.into(),
                        dst_stage_mask: dst_stages.into(),
                        dst_access_mask: dst_access.into(),
                        old_layout: old_layout.into(),
                        new_layout: new_layout.into(),
                        src_queue_family_index,
                        dst_queue_family_index,
                        image: image.handle(),
                        subresource_range: subresource_range.clone().into(),
                        ..Default::default()
                    }
                })
                .collect();

            let dependency_info_vk = ash::vk::DependencyInfo {
                dependency_flags: dependency_flags.into(),
                memory_barrier_count: memory_barriers_vk.len() as u32,
                p_memory_barriers: memory_barriers_vk.as_ptr(),
                buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32,
                p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(),
                image_memory_barrier_count: image_memory_barriers_vk.len() as u32,
                p_image_memory_barriers: image_memory_barriers_vk.as_ptr(),
                ..Default::default()
            };

            if self.device.api_version() >= Version::V1_3 {
                (fns.v1_3.cmd_set_event2)(self.handle, event.handle(), &dependency_info_vk);
            } else {
                debug_assert!(self.device.enabled_extensions().khr_synchronization2);
                (fns.khr_synchronization2.cmd_set_event2_khr)(
                    self.handle,
                    event.handle(),
                    &dependency_info_vk,
                );
            }
        } else {
            // The original function only takes a source stage mask; the rest of the info is
            // provided with `wait_events` instead. Therefore, we condense the source stages
            // here and ignore the rest.
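            //
            // For example (with illustrative flag names): one barrier with
            // `src_stages = TRANSFER` and another with `src_stages = COMPUTE_SHADER`
            // condense into the single mask `TRANSFER | COMPUTE_SHADER`, while their
            // destination scopes are supplied later by the matching `wait_events`.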

            let mut stage_mask = ash::vk::PipelineStageFlags::empty();

            for barrier in memory_barriers {
                stage_mask |= barrier.src_stages.into();
            }

            for barrier in buffer_memory_barriers {
                stage_mask |= barrier.src_stages.into();
            }

            for barrier in image_memory_barriers {
                stage_mask |= barrier.src_stages.into();
            }

            if stage_mask.is_empty() {
                // "VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT is [...] equivalent to
                // VK_PIPELINE_STAGE_2_NONE in the first scope."
                stage_mask |= ash::vk::PipelineStageFlags::TOP_OF_PIPE;
            }

            (fns.v1_0.cmd_set_event)(self.handle, event.handle(), stage_mask);
        }
    }

    /// Calls `vkCmdWaitEvents` on the builder.
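    ///
    /// A minimal sketch, assuming a recording `builder: UnsafeCommandBufferBuilder`,
    /// an `event` that a previous `set_event` call signals, and a
    /// `dependency_info` describing the second half of that dependency:
    ///
    /// ```ignore
    /// // Safety: the command buffer must be in the recording state.
    /// unsafe { builder.wait_events([(&event, &dependency_info)]) };
    /// ```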
    pub unsafe fn wait_events<'a>(
        &mut self,
        events: impl IntoIterator<Item = (&'a Event, &'a DependencyInfo)>,
    ) {
        let fns = self.device.fns();

        if self.device.enabled_features().synchronization2 {
            struct PerDependencyInfo {
                memory_barriers_vk: SmallVec<[ash::vk::MemoryBarrier2; 2]>,
                buffer_memory_barriers_vk: SmallVec<[ash::vk::BufferMemoryBarrier2; 8]>,
                image_memory_barriers_vk: SmallVec<[ash::vk::ImageMemoryBarrier2; 8]>,
            }

            let mut events_vk: SmallVec<[_; 4]> = SmallVec::new();
            let mut dependency_infos_vk: SmallVec<[_; 4]> = SmallVec::new();
            let mut per_dependency_info_vk: SmallVec<[_; 4]> = SmallVec::new();

            for (event, dependency_info) in events {
                let &DependencyInfo {
                    mut dependency_flags,
                    ref memory_barriers,
                    ref buffer_memory_barriers,
                    ref image_memory_barriers,
                    _ne: _,
                } = dependency_info;

                // TODO: Is this needed?
                dependency_flags |= DependencyFlags::BY_REGION;

                let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &MemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            _ne: _,
                        } = barrier;

                        ash::vk::MemoryBarrier2 {
                            src_stage_mask: src_stages.into(),
                            src_access_mask: src_access.into(),
                            dst_stage_mask: dst_stages.into(),
                            dst_access_mask: dst_access.into(),
                            ..Default::default()
                        }
                    })
                    .collect();

                let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &BufferMemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            queue_family_ownership_transfer,
                            ref buffer,
                            ref range,
                            _ne: _,
                        } = barrier;

                        let (src_queue_family_index, dst_queue_family_index) =
                            queue_family_ownership_transfer.map_or(
                                (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                                Into::into,
                            );

                        ash::vk::BufferMemoryBarrier2 {
                            src_stage_mask: src_stages.into(),
                            src_access_mask: src_access.into(),
                            dst_stage_mask: dst_stages.into(),
                            dst_access_mask: dst_access.into(),
                            src_queue_family_index,
                            dst_queue_family_index,
                            buffer: buffer.handle(),
                            offset: range.start,
                            size: range.end - range.start,
                            ..Default::default()
                        }
                    })
                    .collect();

                let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &ImageMemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            old_layout,
                            new_layout,
                            queue_family_ownership_transfer,
                            ref image,
                            ref subresource_range,
                            _ne: _,
                        } = barrier;

                        let (src_queue_family_index, dst_queue_family_index) =
                            queue_family_ownership_transfer.map_or(
                                (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                                Into::into,
                            );

                        ash::vk::ImageMemoryBarrier2 {
                            src_stage_mask: src_stages.into(),
                            src_access_mask: src_access.into(),
                            dst_stage_mask: dst_stages.into(),
                            dst_access_mask: dst_access.into(),
                            old_layout: old_layout.into(),
                            new_layout: new_layout.into(),
                            src_queue_family_index,
                            dst_queue_family_index,
                            image: image.handle(),
                            subresource_range: subresource_range.clone().into(),
                            ..Default::default()
                        }
                    })
                    .collect();

                events_vk.push(event.handle());
                dependency_infos_vk.push(ash::vk::DependencyInfo {
                    dependency_flags: dependency_flags.into(),
                    memory_barrier_count: 0,
                    p_memory_barriers: ptr::null(),
                    buffer_memory_barrier_count: 0,
                    p_buffer_memory_barriers: ptr::null(),
                    image_memory_barrier_count: 0,
                    p_image_memory_barriers: ptr::null(),
                    ..Default::default()
                });
                per_dependency_info_vk.push(PerDependencyInfo {
                    memory_barriers_vk,
                    buffer_memory_barriers_vk,
                    image_memory_barriers_vk,
                });
            }

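            // All barrier vectors now have their final addresses (no further
            // pushes can move them), so pointers into them can safely be
            // written into the corresponding `DependencyInfo` structs.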
            for (
                dependency_info_vk,
                PerDependencyInfo {
                    memory_barriers_vk,
                    buffer_memory_barriers_vk,
                    image_memory_barriers_vk,
                },
            ) in (dependency_infos_vk.iter_mut()).zip(per_dependency_info_vk.iter_mut())
            {
                *dependency_info_vk = ash::vk::DependencyInfo {
                    memory_barrier_count: memory_barriers_vk.len() as u32,
                    p_memory_barriers: memory_barriers_vk.as_ptr(),
                    buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32,
                    p_buffer_memory_barriers: buffer_memory_barriers_vk.as_ptr(),
                    image_memory_barrier_count: image_memory_barriers_vk.len() as u32,
                    p_image_memory_barriers: image_memory_barriers_vk.as_ptr(),
                    ..*dependency_info_vk
                }
            }

            if self.device.api_version() >= Version::V1_3 {
                (fns.v1_3.cmd_wait_events2)(
                    self.handle,
                    events_vk.len() as u32,
                    events_vk.as_ptr(),
                    dependency_infos_vk.as_ptr(),
                );
            } else {
                debug_assert!(self.device.enabled_extensions().khr_synchronization2);
                (fns.khr_synchronization2.cmd_wait_events2_khr)(
                    self.handle,
                    events_vk.len() as u32,
                    events_vk.as_ptr(),
                    dependency_infos_vk.as_ptr(),
                );
            }
        } else {
            // With the original function, you can only specify a single dependency info for all
            // events at once, rather than separately for each event. Therefore, to achieve the
            // same behaviour as the "2" function, we split it up into multiple Vulkan API calls,
            // one per event.

            for (event, dependency_info) in events {
                let events_vk = [event.handle()];

                let &DependencyInfo {
                    dependency_flags: _,
                    ref memory_barriers,
                    ref buffer_memory_barriers,
                    ref image_memory_barriers,
                    _ne: _,
                } = dependency_info;

                let mut src_stage_mask = ash::vk::PipelineStageFlags::empty();
                let mut dst_stage_mask = ash::vk::PipelineStageFlags::empty();

                let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &MemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            _ne: _,
                        } = barrier;

                        src_stage_mask |= src_stages.into();
                        dst_stage_mask |= dst_stages.into();

                        ash::vk::MemoryBarrier {
                            src_access_mask: src_access.into(),
                            dst_access_mask: dst_access.into(),
                            ..Default::default()
                        }
                    })
                    .collect();

                let buffer_memory_barriers_vk: SmallVec<[_; 8]> = buffer_memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &BufferMemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            queue_family_ownership_transfer,
                            ref buffer,
                            ref range,
                            _ne: _,
                        } = barrier;

                        src_stage_mask |= src_stages.into();
                        dst_stage_mask |= dst_stages.into();

                        let (src_queue_family_index, dst_queue_family_index) =
                            queue_family_ownership_transfer.map_or(
                                (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                                Into::into,
                            );

                        ash::vk::BufferMemoryBarrier {
                            src_access_mask: src_access.into(),
                            dst_access_mask: dst_access.into(),
                            src_queue_family_index,
                            dst_queue_family_index,
                            buffer: buffer.handle(),
                            offset: range.start,
                            size: range.end - range.start,
                            ..Default::default()
                        }
                    })
                    .collect();

                let image_memory_barriers_vk: SmallVec<[_; 8]> = image_memory_barriers
                    .into_iter()
                    .map(|barrier| {
                        let &ImageMemoryBarrier {
                            src_stages,
                            src_access,
                            dst_stages,
                            dst_access,
                            old_layout,
                            new_layout,
                            queue_family_ownership_transfer,
                            ref image,
                            ref subresource_range,
                            _ne: _,
                        } = barrier;

                        src_stage_mask |= src_stages.into();
                        dst_stage_mask |= dst_stages.into();

                        let (src_queue_family_index, dst_queue_family_index) =
                            queue_family_ownership_transfer.map_or(
                                (ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
                                Into::into,
                            );

                        ash::vk::ImageMemoryBarrier {
                            src_access_mask: src_access.into(),
                            dst_access_mask: dst_access.into(),
                            old_layout: old_layout.into(),
                            new_layout: new_layout.into(),
                            src_queue_family_index,
                            dst_queue_family_index,
                            image: image.handle(),
                            subresource_range: subresource_range.clone().into(),
                            ..Default::default()
                        }
                    })
                    .collect();

                if src_stage_mask.is_empty() {
                    // "VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT is [...] equivalent to
                    // VK_PIPELINE_STAGE_2_NONE in the first scope."
                    src_stage_mask |= ash::vk::PipelineStageFlags::TOP_OF_PIPE;
                }

                if dst_stage_mask.is_empty() {
                    // "VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT is [...] equivalent to
                    // VK_PIPELINE_STAGE_2_NONE in the second scope."
                    dst_stage_mask |= ash::vk::PipelineStageFlags::BOTTOM_OF_PIPE;
                }

                (fns.v1_0.cmd_wait_events)(
                    self.handle,
                    1,
                    events_vk.as_ptr(),
                    src_stage_mask,
                    dst_stage_mask,
                    memory_barriers_vk.len() as u32,
                    memory_barriers_vk.as_ptr(),
                    buffer_memory_barriers_vk.len() as u32,
                    buffer_memory_barriers_vk.as_ptr(),
                    image_memory_barriers_vk.len() as u32,
                    image_memory_barriers_vk.as_ptr(),
                );
            }
        }
    }

    /// Calls `vkCmdResetEvent` on the builder.
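    ///
    /// A minimal sketch, assuming a recording `builder: UnsafeCommandBufferBuilder`
    /// and an `event: Event` on the same device; the stage flag is illustrative:
    ///
    /// ```ignore
    /// // Safety: the command buffer must be in the recording state, and the
    /// // event must not be in use by a pending `wait_events`.
    /// unsafe { builder.reset_event(&event, PipelineStages::ALL_COMMANDS) };
    /// ```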
    #[inline]
    pub unsafe fn reset_event(&mut self, event: &Event, stages: PipelineStages) {
        debug_assert!(!stages.intersects(PipelineStages::HOST));
        debug_assert_ne!(stages, PipelineStages::empty());

        let fns = self.device.fns();

        if self.device.enabled_features().synchronization2 {
            if self.device.api_version() >= Version::V1_3 {
                (fns.v1_3.cmd_reset_event2)(self.handle, event.handle(), stages.into());
            } else {
                debug_assert!(self.device.enabled_extensions().khr_synchronization2);
                (fns.khr_synchronization2.cmd_reset_event2_khr)(
                    self.handle,
                    event.handle(),
                    stages.into(),
                );
            }
        } else {
            (fns.v1_0.cmd_reset_event)(self.handle, event.handle(), stages.into());
        }
    }

    // TODO: wait_event
}