// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! Traits and types for managing the allocation of command buffers and command pools.
//!
//! In Vulkano, creating a command buffer requires passing an implementation of the
//! [`CommandBufferAllocator`] trait. You can implement this trait yourself, or use the
//! Vulkano-provided [`StandardCommandBufferAllocator`].
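//!
//! # Example
//!
//! A minimal sketch of creating the standard allocator and allocating a single primary command
//! buffer from it. The `device` and `queue_family_index` values are assumed to come from your
//! existing setup, and the paths assume the public `vulkano::command_buffer::allocator` module:
//!
//! ```no_run
//! # use std::sync::Arc;
//! # use vulkano::{
//! #     command_buffer::{
//! #         allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
//! #         CommandBufferLevel,
//! #     },
//! #     device::Device,
//! # };
//! # fn example(device: Arc<Device>, queue_family_index: u32) {
//! // Create one allocator and keep it alive for as long as you allocate, so pools are reused.
//! let allocator = StandardCommandBufferAllocator::new(device, Default::default());
//!
//! // Allocate one primary command buffer; the returned iterator yields builder allocations.
//! let builder_alloc = allocator
//!     .allocate(queue_family_index, CommandBufferLevel::Primary, 1)
//!     .unwrap()
//!     .next()
//!     .unwrap();
//! # }
//! ```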

use super::{
    pool::{
        CommandBufferAllocateInfo, CommandPool, CommandPoolAlloc, CommandPoolCreateInfo,
        CommandPoolCreationError,
    },
    CommandBufferLevel,
};
use crate::{
    device::{Device, DeviceOwned},
    OomError,
};
use crossbeam_queue::ArrayQueue;
use smallvec::{IntoIter, SmallVec};
use std::{
    cell::{Cell, UnsafeCell},
    error::Error,
    fmt::Display,
    marker::PhantomData,
    mem::ManuallyDrop,
    sync::Arc,
    thread,
};
use thread_local::ThreadLocal;

const MAX_POOLS: usize = 32;

/// Types that manage the memory of command buffers.
///
/// # Safety
///
/// A Vulkan command pool must be externally synchronized as if it owned the command buffers that
/// were allocated from it. This includes allocating from the pool, freeing from the pool, resetting
/// the pool or individual command buffers, and most importantly recording commands to command
/// buffers. The implementation of `CommandBufferAllocator` is expected to manage this.
///
/// The destructors of the [`CommandBufferBuilderAlloc`] and the [`CommandBufferAlloc`] are expected
/// to free the command buffer, reset the command buffer, or add it to a pool so that it gets
/// reused. If the implementation frees or resets the command buffer, it must not forget that this
/// operation must be externally synchronized.
pub unsafe trait CommandBufferAllocator: DeviceOwned {
    /// See [`allocate`](Self::allocate).
    type Iter: Iterator<Item = Self::Builder>;

    /// Represents a command buffer that has been allocated and that is currently being built.
    type Builder: CommandBufferBuilderAlloc<Alloc = Self::Alloc>;

    /// Represents a command buffer that has been allocated and that is pending execution or is
    /// being executed.
    type Alloc: CommandBufferAlloc;

    /// Allocates command buffers.
    ///
    /// Returns an iterator that contains the requested number of allocated command buffers.
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, OomError>;
}

/// A command buffer allocated from a pool and that can be recorded.
///
/// # Safety
///
/// See [`CommandBufferAllocator`] for information about safety.
pub unsafe trait CommandBufferBuilderAlloc: DeviceOwned {
    /// Return type of `into_alloc`.
    type Alloc: CommandBufferAlloc;

    /// Returns the internal object that contains the command buffer.
    fn inner(&self) -> &CommandPoolAlloc;

    /// Turns this builder into a command buffer that is pending execution.
    fn into_alloc(self) -> Self::Alloc;

    /// Returns the index of the queue family that the pool targets.
    fn queue_family_index(&self) -> u32;
}

/// A command buffer allocated from a pool that has finished being recorded.
///
/// # Safety
///
/// See [`CommandBufferAllocator`] for information about safety.
pub unsafe trait CommandBufferAlloc: DeviceOwned + Send + Sync + 'static {
    /// Returns the internal object that contains the command buffer.
    fn inner(&self) -> &CommandPoolAlloc;

    /// Returns the index of the queue family that the pool targets.
    fn queue_family_index(&self) -> u32;
}

/// Standard implementation of a command buffer allocator.
///
/// The intended way to use this allocator is to have one that is used globally for the duration of
/// the program, in order to avoid creating and destroying [`CommandPool`]s, as that is expensive.
/// Alternatively, you can have one locally on a thread for the duration of the thread.
///
/// Internally, this allocator keeps one or more `CommandPool`s per queue family index per thread,
/// using Thread-Local Storage. When a thread first allocates, an entry is reserved for the thread
/// and queue family combination. If a thread exits while the allocator has not been dropped yet,
/// its entries are freed, but the pools it used are not dropped. The next time a new thread
/// allocates for the first time, the entries are reused along with the pools. If all threads drop
/// their reference to the allocator, all entries along with the allocator are dropped, even if the
/// threads haven't exited yet. This is why you should keep the allocator alive for as long as you
/// need to allocate, so that the pools can keep being reused.
///
/// This allocator only needs to lock when a thread first allocates or when a thread that
/// previously allocated exits. In all other cases, allocation is lock-free.
///
/// Command buffers can't be moved between threads during the building process, but finished
/// command buffers can. When a command buffer is dropped, it is returned to the pool for reuse.
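///
/// # Example
///
/// A minimal sketch of sharing one allocator between threads by wrapping it in an [`Arc`]
/// (`Arc<StandardCommandBufferAllocator>` also implements [`CommandBufferAllocator`]). The
/// `device` and `queue_family_index` values are assumed to exist already:
///
/// ```no_run
/// # use std::{sync::Arc, thread};
/// # use vulkano::{
/// #     command_buffer::{
/// #         allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
/// #         CommandBufferLevel,
/// #     },
/// #     device::Device,
/// # };
/// # fn example(device: Arc<Device>, queue_family_index: u32) {
/// let allocator = Arc::new(StandardCommandBufferAllocator::new(device, Default::default()));
///
/// let allocator_clone = allocator.clone();
/// thread::spawn(move || {
///     // This thread lazily gets its own `CommandPool` for this queue family.
///     let _builder_alloc = allocator_clone
///         .allocate(queue_family_index, CommandBufferLevel::Primary, 1)
///         .unwrap()
///         .next()
///         .unwrap();
/// })
/// .join()
/// .unwrap();
/// # }
/// ```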
#[derive(Debug)]
pub struct StandardCommandBufferAllocator {
    device: Arc<Device>,
    // Each queue family index points directly to its entry.
    pools: ThreadLocal<SmallVec<[UnsafeCell<Option<Entry>>; 8]>>,
    create_info: StandardCommandBufferAllocatorCreateInfo,
}

impl StandardCommandBufferAllocator {
    /// Creates a new `StandardCommandBufferAllocator`.
    #[inline]
    pub fn new(device: Arc<Device>, create_info: StandardCommandBufferAllocatorCreateInfo) -> Self {
        StandardCommandBufferAllocator {
            device,
            pools: ThreadLocal::new(),
            create_info,
        }
    }

    /// Tries to reset the [`CommandPool`] that's currently in use for the given queue family index
    /// on the current thread.
    ///
    /// If successful, the memory of the pool can be reused again along with all command buffers
    /// allocated from it. This is only possible if all command buffers allocated from the pool
    /// have been dropped.
    ///
    /// This has no effect if the entry wasn't initialized yet or if the entry was [cleared].
    ///
    /// # Panics
    ///
    /// - Panics if `queue_family_index` is not less than the number of queue families.
    ///
    /// [cleared]: Self::clear
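    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `allocator` and `queue_family_index` already exist and every
    /// command buffer allocated from this thread's pool has been dropped:
    ///
    /// ```no_run
    /// # fn example(
    /// #     allocator: &vulkano::command_buffer::allocator::StandardCommandBufferAllocator,
    /// #     queue_family_index: u32,
    /// # ) {
    /// // Reset this thread's pool for the queue family; `false` keeps its resources allocated
    /// // so the command buffers can be reused.
    /// allocator.try_reset_pool(queue_family_index, false).unwrap();
    /// # }
    /// ```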
    #[inline]
    pub fn try_reset_pool(
        &self,
        queue_family_index: u32,
        release_resources: bool,
    ) -> Result<(), CommandPoolResetError> {
        if let Some(entry) = unsafe { &mut *self.entry(queue_family_index) }.as_mut() {
            entry.try_reset_pool(release_resources)
        } else {
            Ok(())
        }
    }

    /// Clears the entry for the given queue family index and the current thread. This does not
    /// mean that the pools are dropped immediately. A pool is kept alive for as long as command
    /// buffers allocated from it exist.
    ///
    /// This has no effect if the entry was not initialized yet.
    ///
    /// # Panics
    ///
    /// - Panics if `queue_family_index` is not less than the number of queue families.
    #[inline]
    pub fn clear(&self, queue_family_index: u32) {
        unsafe { *self.entry(queue_family_index) = None };
    }

    fn entry(&self, queue_family_index: u32) -> *mut Option<Entry> {
        let pools = self.pools.get_or(|| {
            self.device
                .physical_device()
                .queue_family_properties()
                .iter()
                .map(|_| UnsafeCell::new(None))
                .collect()
        });

        pools[queue_family_index as usize].get()
    }
}

unsafe impl CommandBufferAllocator for StandardCommandBufferAllocator {
    type Iter = IntoIter<[StandardCommandBufferBuilderAlloc; 1]>;

    type Builder = StandardCommandBufferBuilderAlloc;

    type Alloc = StandardCommandBufferAlloc;

    /// Allocates command buffers.
    ///
    /// Returns an iterator that contains the requested number of allocated command buffers.
    ///
    /// # Panics
    ///
    /// - Panics if the queue family index is not active on the device.
    /// - Panics if `command_buffer_count` exceeds the count configured for the pool corresponding
    ///   to `level`.
    #[inline]
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, OomError> {
        // VUID-vkCreateCommandPool-queueFamilyIndex-01937
        assert!(self
            .device
            .active_queue_family_indices()
            .contains(&queue_family_index));

        let entry = unsafe { &mut *self.entry(queue_family_index) };
        if entry.is_none() {
            let reserve = Arc::new(ArrayQueue::new(MAX_POOLS));
            *entry = Some(Entry {
                pool: Pool::new(
                    self.device.clone(),
                    queue_family_index,
                    reserve.clone(),
                    &self.create_info,
                )?,
                reserve,
            });
        }
        let entry = entry.as_mut().unwrap();

        // First try to allocate from existing command buffers.
        if let Some(allocs) = entry.pool.allocate(level, command_buffer_count) {
            return Ok(allocs);
        }

        // Else try to reset the pool.
        if entry.try_reset_pool(false).is_err() {
            // If that fails too, try to grab a pool from the reserve.
            entry.pool = if let Some(inner) = entry.reserve.pop() {
                Arc::new(Pool {
                    inner: ManuallyDrop::new(inner),
                    reserve: entry.reserve.clone(),
                })
            } else {
                // Else we are unfortunately forced to create a new pool.
                Pool::new(
                    self.device.clone(),
                    queue_family_index,
                    entry.reserve.clone(),
                    &self.create_info,
                )?
            };
        }

        Ok(entry.pool.allocate(level, command_buffer_count).unwrap())
    }
}

unsafe impl CommandBufferAllocator for Arc<StandardCommandBufferAllocator> {
    type Iter = IntoIter<[StandardCommandBufferBuilderAlloc; 1]>;

    type Builder = StandardCommandBufferBuilderAlloc;

    type Alloc = StandardCommandBufferAlloc;

    #[inline]
    fn allocate(
        &self,
        queue_family_index: u32,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Result<Self::Iter, OomError> {
        (**self).allocate(queue_family_index, level, command_buffer_count)
    }
}

unsafe impl DeviceOwned for StandardCommandBufferAllocator {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

#[derive(Debug)]
struct Entry {
    // Contains the actual Vulkan command pool that is currently in use.
    pool: Arc<Pool>,
    // When a `Pool` is dropped, it returns itself here for reuse.
    reserve: Arc<ArrayQueue<PoolInner>>,
}

// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
// `Send + Sync`. `Pool` is `Send + !Sync` because `CommandPool` is `!Sync`. That's fine, however,
// because we never access the Vulkan command pool concurrently. Same goes for the `Cell`s.
unsafe impl Send for Entry {}

impl Entry {
    fn try_reset_pool(&mut self, release_resources: bool) -> Result<(), CommandPoolResetError> {
        if let Some(pool) = Arc::get_mut(&mut self.pool) {
            unsafe { pool.inner.inner.reset(release_resources) }
                .map_err(|_| CommandPoolResetError::OutOfDeviceMemory)?;
            *pool.inner.primary_allocations.get_mut() = 0;
            *pool.inner.secondary_allocations.get_mut() = 0;

            Ok(())
        } else {
            Err(CommandPoolResetError::InUse)
        }
    }
}

#[derive(Debug)]
struct Pool {
    inner: ManuallyDrop<PoolInner>,
    // Where we return the `PoolInner` in our `Drop` impl.
    reserve: Arc<ArrayQueue<PoolInner>>,
}

#[derive(Debug)]
struct PoolInner {
    // The Vulkan pool specific to a device's queue family.
    inner: CommandPool,
    // List of existing primary command buffers that are available for reuse.
    primary_pool: Option<ArrayQueue<CommandPoolAlloc>>,
    // List of existing secondary command buffers that are available for reuse.
    secondary_pool: Option<ArrayQueue<CommandPoolAlloc>>,
    // How many command buffers have been allocated from `self.primary_pool`.
    primary_allocations: Cell<usize>,
    // How many command buffers have been allocated from `self.secondary_pool`.
    secondary_allocations: Cell<usize>,
}

impl Pool {
    fn new(
        device: Arc<Device>,
        queue_family_index: u32,
        reserve: Arc<ArrayQueue<PoolInner>>,
        create_info: &StandardCommandBufferAllocatorCreateInfo,
    ) -> Result<Arc<Self>, OomError> {
        let inner = CommandPool::new(
            device,
            CommandPoolCreateInfo {
                queue_family_index,
                ..Default::default()
            },
        )
        .map_err(|err| match err {
            CommandPoolCreationError::OomError(err) => err,
            // We check that the provided queue family index is active on the device, so it can't
            // be out of range.
            CommandPoolCreationError::QueueFamilyIndexOutOfRange { .. } => unreachable!(),
        })?;

        let primary_pool = if create_info.primary_buffer_count > 0 {
            let pool = ArrayQueue::new(create_info.primary_buffer_count);

            for alloc in inner.allocate_command_buffers(CommandBufferAllocateInfo {
                level: CommandBufferLevel::Primary,
                command_buffer_count: create_info.primary_buffer_count as u32,
                ..Default::default()
            })? {
                let _ = pool.push(alloc);
            }

            Some(pool)
        } else {
            None
        };

        let secondary_pool = if create_info.secondary_buffer_count > 0 {
            let pool = ArrayQueue::new(create_info.secondary_buffer_count);

            for alloc in inner.allocate_command_buffers(CommandBufferAllocateInfo {
                level: CommandBufferLevel::Secondary,
                command_buffer_count: create_info.secondary_buffer_count as u32,
                ..Default::default()
            })? {
                let _ = pool.push(alloc);
            }

            Some(pool)
        } else {
            None
        };

        Ok(Arc::new(Pool {
            inner: ManuallyDrop::new(PoolInner {
                inner,
                primary_pool,
                secondary_pool,
                primary_allocations: Cell::new(0),
                secondary_allocations: Cell::new(0),
            }),
            reserve,
        }))
    }

    fn allocate(
        self: &Arc<Self>,
        level: CommandBufferLevel,
        command_buffer_count: u32,
    ) -> Option<IntoIter<[StandardCommandBufferBuilderAlloc; 1]>> {
        let command_buffer_count = command_buffer_count as usize;

        match level {
            CommandBufferLevel::Primary => {
                if let Some(pool) = &self.inner.primary_pool {
                    let count = self.inner.primary_allocations.get();
                    if count + command_buffer_count <= pool.capacity() {
                        let mut output = SmallVec::<[_; 1]>::with_capacity(command_buffer_count);
                        for _ in 0..command_buffer_count {
                            output.push(StandardCommandBufferBuilderAlloc {
                                inner: StandardCommandBufferAlloc {
                                    inner: ManuallyDrop::new(pool.pop().unwrap()),
                                    pool: self.clone(),
                                },
                                _marker: PhantomData,
                            });
                        }

                        self.inner
                            .primary_allocations
                            .set(count + command_buffer_count);

                        Some(output.into_iter())
                    } else if command_buffer_count > pool.capacity() {
                        panic!(
                            "command buffer count ({}) exceeds the capacity of the primary command \
                            buffer pool ({})",
                            command_buffer_count, pool.capacity(),
                        );
                    } else {
                        None
                    }
                } else {
                    panic!(
                        "attempted to allocate a primary command buffer when the primary command \
                        buffer pool was configured to be empty",
                    );
                }
            }
            CommandBufferLevel::Secondary => {
                if let Some(pool) = &self.inner.secondary_pool {
                    let count = self.inner.secondary_allocations.get();
                    if count + command_buffer_count <= pool.capacity() {
                        let mut output = SmallVec::<[_; 1]>::with_capacity(command_buffer_count);
                        for _ in 0..command_buffer_count {
                            output.push(StandardCommandBufferBuilderAlloc {
                                inner: StandardCommandBufferAlloc {
                                    inner: ManuallyDrop::new(pool.pop().unwrap()),
                                    pool: self.clone(),
                                },
                                _marker: PhantomData,
                            });
                        }

                        self.inner
                            .secondary_allocations
                            .set(count + command_buffer_count);

                        Some(output.into_iter())
                    } else if command_buffer_count > pool.capacity() {
                        panic!(
                            "command buffer count ({}) exceeds the capacity of the secondary \
                            command buffer pool ({})",
                            command_buffer_count,
                            pool.capacity(),
                        );
                    } else {
                        None
                    }
                } else {
                    panic!(
                        "attempted to allocate a secondary command buffer when the secondary \
                        command buffer pool was configured to be empty",
                    );
                }
            }
        }
    }
}

impl Drop for Pool {
    fn drop(&mut self) {
        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };

        if thread::panicking() {
            return;
        }

        unsafe { inner.inner.reset(false) }.unwrap();
        inner.primary_allocations.set(0);
        inner.secondary_allocations.set(0);

        // If there is not enough space in the reserve, we destroy the pool. The only way this can
        // happen is if something is hogging resources, forcing new pools to be created such that
        // the number exceeds `MAX_POOLS`, and then drops them all at once.
        let _ = self.reserve.push(inner);
    }
}

/// Parameters to create a new [`StandardCommandBufferAllocator`].
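///
/// # Example
///
/// A minimal sketch of overriding the per-pool buffer counts; the values chosen here are purely
/// illustrative:
///
/// ```no_run
/// # use vulkano::command_buffer::allocator::StandardCommandBufferAllocatorCreateInfo;
/// let create_info = StandardCommandBufferAllocatorCreateInfo {
///     // Pre-allocate fewer primary command buffers per pool than the default of 256.
///     primary_buffer_count: 32,
///     // This (hypothetical) application never records secondary command buffers.
///     secondary_buffer_count: 0,
///     ..Default::default()
/// };
/// ```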
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StandardCommandBufferAllocatorCreateInfo {
    /// How many primary command buffers should be allocated per pool.
    ///
    /// Each time a thread allocates using some queue family index, and either no pools were
    /// initialized yet or all pools are full, a new pool is created for that thread and queue
    /// family combination. This option tells the allocator how many primary command buffers should
    /// be allocated for that pool. It always allocates exactly this many command buffers at once
    /// for the pool, as that is more performant than allocating them one-by-one. This means you
    /// shouldn't set the count too high, or you end up wasting memory; but if you set it too low,
    /// the pool has to be reset more often, or more pools have to be created, depending on the
    /// lifetime of the command buffers.
    ///
    /// The default value is `256`.
    pub primary_buffer_count: usize,

    /// Same as `primary_buffer_count` except for secondary command buffers.
    ///
    /// The default value is `256`.
    pub secondary_buffer_count: usize,

    pub _ne: crate::NonExhaustive,
}

impl Default for StandardCommandBufferAllocatorCreateInfo {
    #[inline]
    fn default() -> Self {
        StandardCommandBufferAllocatorCreateInfo {
            primary_buffer_count: 256,
            secondary_buffer_count: 256,
            _ne: crate::NonExhaustive(()),
        }
    }
}

/// Command buffer allocated from a [`StandardCommandBufferAllocator`] that is currently being
/// built.
pub struct StandardCommandBufferBuilderAlloc {
    // The only difference between a `StandardCommandBufferBuilderAlloc` and a
    // `StandardCommandBufferAlloc` is that the former must not implement `Send` and `Sync`.
    // Therefore we just share the structs.
    inner: StandardCommandBufferAlloc,
    // Makes the builder `!Send` and `!Sync`.
    _marker: PhantomData<*const ()>,
}

unsafe impl CommandBufferBuilderAlloc for StandardCommandBufferBuilderAlloc {
    type Alloc = StandardCommandBufferAlloc;

    #[inline]
    fn inner(&self) -> &CommandPoolAlloc {
        self.inner.inner()
    }

    #[inline]
    fn into_alloc(self) -> Self::Alloc {
        self.inner
    }

    #[inline]
    fn queue_family_index(&self) -> u32 {
        self.inner.queue_family_index()
    }
}

unsafe impl DeviceOwned for StandardCommandBufferBuilderAlloc {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

/// Command buffer allocated from a [`StandardCommandBufferAllocator`].
pub struct StandardCommandBufferAlloc {
    // The actual command buffer. Extracted in the `Drop` implementation.
    inner: ManuallyDrop<CommandPoolAlloc>,
    // We hold a reference to the pool for our destructor.
    pool: Arc<Pool>,
}

// It's fine to share `Pool` between threads because we never access the Vulkan command pool
// concurrently. Same goes for the `Cell`s.
unsafe impl Send for StandardCommandBufferAlloc {}
unsafe impl Sync for StandardCommandBufferAlloc {}

unsafe impl CommandBufferAlloc for StandardCommandBufferAlloc {
    #[inline]
    fn inner(&self) -> &CommandPoolAlloc {
        &self.inner
    }

    #[inline]
    fn queue_family_index(&self) -> u32 {
        self.pool.inner.inner.queue_family_index()
    }
}

unsafe impl DeviceOwned for StandardCommandBufferAlloc {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.pool.inner.inner.device()
    }
}

impl Drop for StandardCommandBufferAlloc {
    #[inline]
    fn drop(&mut self) {
        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
        let pool = match inner.level() {
            CommandBufferLevel::Primary => &self.pool.inner.primary_pool,
            CommandBufferLevel::Secondary => &self.pool.inner.secondary_pool,
        };
        // This can't panic, because if an allocation from a particular kind of pool was made, then
        // the pool must exist.
        let _ = pool.as_ref().unwrap().push(inner);
    }
}

/// Error that can be returned when resetting a [`CommandPool`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CommandPoolResetError {
    /// The `CommandPool` is still in use.
    InUse,

    /// Out of device memory.
    OutOfDeviceMemory,
}

impl Error for CommandPoolResetError {}

impl Display for CommandPoolResetError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::InUse => write!(f, "the `CommandPool` is still in use"),
            Self::OutOfDeviceMemory => write!(f, "out of device memory"),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::VulkanObject;
    use std::thread;

    #[test]
    fn threads_use_different_pools() {
        let (device, queue) = gfx_dev_and_queue!();

        let allocator = StandardCommandBufferAllocator::new(device, Default::default());

        let pool1 = allocator
            .allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 1)
            .unwrap()
            .next()
            .unwrap()
            .into_alloc()
            .pool
            .inner
            .inner
            .handle();

        thread::spawn(move || {
            let pool2 = allocator
                .allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 1)
                .unwrap()
                .next()
                .unwrap()
                .into_alloc()
                .pool
                .inner
                .inner
                .handle();
            assert_ne!(pool1, pool2);
        })
        .join()
        .unwrap();
    }
}