// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

use crate::check_errors;
use crate::device::physical::QueueFamily;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::Error;
use crate::OomError;
use crate::Version;
use crate::VulkanObject;
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;

/// Low-level implementation of a command pool.
///
/// A command pool is always tied to a specific queue family. Command buffers allocated from a pool
/// can only be executed on the corresponding queue family.
///
/// This struct doesn't implement the `Sync` trait because Vulkan command pools are not thread
/// safe. In other words, you can only use a pool from one thread at a time.
#[derive(Debug)]
pub struct UnsafeCommandPool {
    pool: ash::vk::CommandPool,
    device: Arc<Device>,

    // Index of the associated queue family in the physical device.
    queue_family_index: u32,

    // We don't want `UnsafeCommandPool` to implement Sync.
    // This marker makes the type neither Send nor Sync, but we manually reimplement Send just
    // below.
    dummy_avoid_sync: PhantomData<*const u8>,
}

unsafe impl Send for UnsafeCommandPool {}

impl UnsafeCommandPool {
    /// Creates a new pool.
    ///
    /// The command buffers created with this pool can only be executed on queues of the given
    /// family.
    ///
    /// Setting `transient` to true is a hint to the implementation that the command buffers will
    /// be short-lived.
    /// Setting `reset_cb` to true means that command buffers can be reset individually.
    ///
    /// # Panics
    ///
    /// - Panics if the queue family doesn't belong to the same physical device as `device`.
    ///
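    /// # Examples
    ///
    /// A minimal sketch (not meant to compile as a doc test); it assumes that a `device` and a
    /// matching `queue_family` have already been obtained during device setup:
    ///
    /// ```ignore
    /// // `device: Arc<Device>` and `queue_family: QueueFamily` come from instance/device
    /// // creation, which is not shown here.
    /// let pool = UnsafeCommandPool::new(device, queue_family, false, false).unwrap();
    /// ```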
    pub fn new(
        device: Arc<Device>,
        queue_family: QueueFamily,
        transient: bool,
        reset_cb: bool,
    ) -> Result<UnsafeCommandPool, OomError> {
        assert_eq!(
            device.physical_device().internal_object(),
            queue_family.physical_device().internal_object(),
            "Device doesn't match physical device when creating a command pool"
        );

        let fns = device.fns();

        let flags = {
            let flag1 = if transient {
                ash::vk::CommandPoolCreateFlags::TRANSIENT
            } else {
                ash::vk::CommandPoolCreateFlags::empty()
            };
            let flag2 = if reset_cb {
                ash::vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER
            } else {
                ash::vk::CommandPoolCreateFlags::empty()
            };
            flag1 | flag2
        };

        let pool = unsafe {
            let infos = ash::vk::CommandPoolCreateInfo {
                flags,
                queue_family_index: queue_family.id(),
                ..Default::default()
            };

            let mut output = MaybeUninit::uninit();
            check_errors(fns.v1_0.create_command_pool(
                device.internal_object(),
                &infos,
                ptr::null(),
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        Ok(UnsafeCommandPool {
            pool,
            device: device.clone(),
            queue_family_index: queue_family.id(),
            dummy_avoid_sync: PhantomData,
        })
    }

    /// Resets the pool, which resets all the command buffers that were allocated from it.
    ///
    /// If `release_resources` is true, it is a hint to the implementation that it should free all
    /// the memory internally allocated for this pool.
    ///
    /// # Safety
    ///
    /// The command buffers allocated from this pool must not be in use by the device. After the
    /// reset, they all return to the initial state.
    ///
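    /// # Examples
    ///
    /// An illustrative sketch, assuming that `pool` was created beforehand and that none of its
    /// command buffers are still executing:
    ///
    /// ```ignore
    /// unsafe {
    ///     // `true` also hints that internally allocated memory should be given back.
    ///     pool.reset(true).unwrap();
    /// }
    /// ```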
    pub unsafe fn reset(&self, release_resources: bool) -> Result<(), OomError> {
        let flags = if release_resources {
            ash::vk::CommandPoolResetFlags::RELEASE_RESOURCES
        } else {
            ash::vk::CommandPoolResetFlags::empty()
        };

        let fns = self.device.fns();
        check_errors(
            fns.v1_0
                .reset_command_pool(self.device.internal_object(), self.pool, flags),
        )?;
        Ok(())
    }

    /// Trims a command pool, which recycles unused internal memory from the command pool back to
    /// the system.
    ///
    /// Command buffers allocated from the pool are not affected by trimming.
    ///
    /// This function is supported only if the device's API version is at least 1.1, or if the
    /// `khr_maintenance1` extension was enabled at device creation. Otherwise an error is
    /// returned.
    /// Since this operation is purely an optimization, it is legitimate to call this function and
    /// simply ignore any possible error.
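    ///
    /// # Examples
    ///
    /// Since trimming is only an optimization, a call site can ignore the error, as in this
    /// sketch (assuming `pool` exists):
    ///
    /// ```ignore
    /// // Best-effort: ignore the error if neither Vulkan 1.1 nor `khr_maintenance1` is available.
    /// let _ = pool.trim();
    /// ```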
    pub fn trim(&self) -> Result<(), CommandPoolTrimError> {
        unsafe {
            if !(self.device.api_version() >= Version::V1_1
                || self.device.enabled_extensions().khr_maintenance1)
            {
                return Err(CommandPoolTrimError::Maintenance1ExtensionNotEnabled);
            }

            let fns = self.device.fns();

            if self.device.api_version() >= Version::V1_1 {
                fns.v1_1.trim_command_pool(
                    self.device.internal_object(),
                    self.pool,
                    ash::vk::CommandPoolTrimFlags::empty(),
                );
            } else {
                fns.khr_maintenance1.trim_command_pool_khr(
                    self.device.internal_object(),
                    self.pool,
                    ash::vk::CommandPoolTrimFlagsKHR::empty(),
                );
            }

            Ok(())
        }
    }

    /// Allocates `count` command buffers.
    ///
    /// If `secondary` is true, allocates secondary command buffers. Otherwise, allocates primary
    /// command buffers.
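    ///
    /// # Examples
    ///
    /// A usage sketch (assuming `pool` was created beforehand):
    ///
    /// ```ignore
    /// // Allocate four primary command buffers and collect the opaque handles.
    /// let allocs: Vec<_> = pool.alloc_command_buffers(false, 4).unwrap().collect();
    /// assert_eq!(allocs.len(), 4);
    /// ```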
    pub fn alloc_command_buffers(
        &self,
        secondary: bool,
        count: u32,
    ) -> Result<UnsafeCommandPoolAllocIter, OomError> {
        if count == 0 {
            return Ok(UnsafeCommandPoolAllocIter {
                device: self.device.clone(),
                list: vec![].into_iter(),
            });
        }

        let infos = ash::vk::CommandBufferAllocateInfo {
            command_pool: self.pool,
            level: if secondary {
                ash::vk::CommandBufferLevel::SECONDARY
            } else {
                ash::vk::CommandBufferLevel::PRIMARY
            },
            command_buffer_count: count,
            ..Default::default()
        };

        unsafe {
            let fns = self.device.fns();
            let mut out = Vec::with_capacity(count as usize);
            check_errors(fns.v1_0.allocate_command_buffers(
                self.device.internal_object(),
                &infos,
                out.as_mut_ptr(),
            ))?;

            out.set_len(count as usize);

            Ok(UnsafeCommandPoolAllocIter {
                device: self.device.clone(),
                list: out.into_iter(),
            })
        }
    }

    /// Frees individual command buffers.
    ///
    /// # Safety
    ///
    /// The command buffers must have been allocated from this pool. They must not be in use.
    ///
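    /// # Examples
    ///
    /// A sketch of allocating and then freeing command buffers (assuming `pool` exists and that
    /// the buffers are not in use):
    ///
    /// ```ignore
    /// let bufs: Vec<_> = pool.alloc_command_buffers(false, 4).unwrap().collect();
    /// unsafe {
    ///     pool.free_command_buffers(bufs.into_iter());
    /// }
    /// ```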
    pub unsafe fn free_command_buffers<I>(&self, command_buffers: I)
    where
        I: Iterator<Item = UnsafeCommandPoolAlloc>,
    {
        let command_buffers: SmallVec<[_; 4]> =
            command_buffers.map(|cb| cb.command_buffer).collect();
        let fns = self.device.fns();
        fns.v1_0.free_command_buffers(
            self.device.internal_object(),
            self.pool,
            command_buffers.len() as u32,
            command_buffers.as_ptr(),
        )
    }

    /// Returns the queue family on which command buffers of this pool can be executed.
    #[inline]
    pub fn queue_family(&self) -> QueueFamily {
        self.device
            .physical_device()
            .queue_family_by_id(self.queue_family_index)
            .unwrap()
    }
}

unsafe impl DeviceOwned for UnsafeCommandPool {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

unsafe impl VulkanObject for UnsafeCommandPool {
    type Object = ash::vk::CommandPool;

    #[inline]
    fn internal_object(&self) -> ash::vk::CommandPool {
        self.pool
    }
}

impl Drop for UnsafeCommandPool {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let fns = self.device.fns();
            fns.v1_0
                .destroy_command_pool(self.device.internal_object(), self.pool, ptr::null());
        }
    }
}

/// Opaque type that represents a command buffer allocated from a pool.
pub struct UnsafeCommandPoolAlloc {
    command_buffer: ash::vk::CommandBuffer,
    device: Arc<Device>,
}

unsafe impl DeviceOwned for UnsafeCommandPoolAlloc {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

unsafe impl VulkanObject for UnsafeCommandPoolAlloc {
    type Object = ash::vk::CommandBuffer;

    #[inline]
    fn internal_object(&self) -> ash::vk::CommandBuffer {
        self.command_buffer
    }
}

/// Iterator for newly-allocated command buffers.
#[derive(Debug)]
pub struct UnsafeCommandPoolAllocIter {
    device: Arc<Device>,
    list: VecIntoIter<ash::vk::CommandBuffer>,
}

impl Iterator for UnsafeCommandPoolAllocIter {
    type Item = UnsafeCommandPoolAlloc;

    #[inline]
    fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
        self.list
            .next()
            .map(|command_buffer| UnsafeCommandPoolAlloc {
                command_buffer,
                device: self.device.clone(),
            })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.list.size_hint()
    }
}

impl ExactSizeIterator for UnsafeCommandPoolAllocIter {}

/// Error that can happen when trimming command pools.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CommandPoolTrimError {
    /// The `KHR_maintenance1` extension was not enabled.
    Maintenance1ExtensionNotEnabled,
}

impl error::Error for CommandPoolTrimError {}

impl fmt::Display for CommandPoolTrimError {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(
            fmt,
            "{}",
            match *self {
                CommandPoolTrimError::Maintenance1ExtensionNotEnabled => {
                    "the `KHR_maintenance1` extension was not enabled"
                }
            }
        )
    }
}

impl From<Error> for CommandPoolTrimError {
    #[inline]
    fn from(err: Error) -> CommandPoolTrimError {
        panic!("unexpected error: {:?}", err)
    }
}

#[cfg(test)]
mod tests {
    use crate::command_buffer::pool::CommandPoolTrimError;
    use crate::command_buffer::pool::UnsafeCommandPool;
    use crate::Version;

    #[test]
    fn basic_create() {
        let (device, queue) = gfx_dev_and_queue!();
        let _ = UnsafeCommandPool::new(device, queue.family(), false, false).unwrap();
    }

    #[test]
    fn queue_family_getter() {
        let (device, queue) = gfx_dev_and_queue!();
        let pool = UnsafeCommandPool::new(device, queue.family(), false, false).unwrap();
        assert_eq!(pool.queue_family().id(), queue.family().id());
    }

    #[test]
    fn panic_if_not_match_family() {
        let (device, _) = gfx_dev_and_queue!();
        let (_, queue) = gfx_dev_and_queue!();

        assert_should_panic!(
            "Device doesn't match physical device when creating a command pool",
            {
                let _ = UnsafeCommandPool::new(device, queue.family(), false, false);
            }
        );
    }

    #[test]
    fn check_maintenance_when_trim() {
        let (device, queue) = gfx_dev_and_queue!();
        let pool = UnsafeCommandPool::new(device.clone(), queue.family(), false, false).unwrap();

        if device.api_version() >= Version::V1_1 {
            match pool.trim() {
                Err(CommandPoolTrimError::Maintenance1ExtensionNotEnabled) => panic!(),
                _ => (),
            }
        } else {
            match pool.trim() {
                Err(CommandPoolTrimError::Maintenance1ExtensionNotEnabled) => (),
                _ => panic!(),
            }
        }
    }

    // TODO: test that trim works if VK_KHR_maintenance1 is enabled; the test macro doesn't
    //       support enabling extensions yet

    #[test]
    fn basic_alloc() {
        let (device, queue) = gfx_dev_and_queue!();
        let pool = UnsafeCommandPool::new(device, queue.family(), false, false).unwrap();
        let iter = pool.alloc_command_buffers(false, 12).unwrap();
        assert_eq!(iter.count(), 12);
    }
}