// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

use crate::check_errors;
use crate::device::physical::MemoryType;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::memory::Content;
use crate::memory::DedicatedAlloc;
use crate::memory::ExternalMemoryHandleType;
use crate::DeviceSize;
use crate::Error;
use crate::OomError;
use crate::Version;
use crate::VulkanObject;
use std::error;
use std::fmt;
#[cfg(any(target_os = "android", target_os = "linux"))]
use std::fs::File;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
#[cfg(any(target_os = "android", target_os = "linux"))]
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::ptr;
use std::sync::Arc;
use std::sync::Mutex;

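/// Matches `VkBaseOutStructure`: the common `sType`/`pNext` header that prefixes every
/// Vulkan structure, used here to walk and splice `pNext` chains generically.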
#[repr(C)]
pub struct BaseOutStructure {
    pub s_type: i32,
    pub p_next: *mut BaseOutStructure,
}

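/// Walks a `pNext` chain starting at `ptr`, yielding a raw pointer to every structure in the
/// chain, `ptr` itself included. The caller must guarantee that `ptr` points to a structure
/// that starts with an `sType`/`pNext` header and that the chain is well formed.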
pub(crate) unsafe fn ptr_chain_iter<T>(ptr: &mut T) -> impl Iterator<Item = *mut BaseOutStructure> {
    let ptr: *mut BaseOutStructure = ptr as *mut T as _;
    (0..).scan(ptr, |p_ptr, _| {
        if p_ptr.is_null() {
            return None;
        }
        let n_ptr = (**p_ptr).p_next as *mut BaseOutStructure;
        let old = *p_ptr;
        *p_ptr = n_ptr;
        Some(old)
    })
}

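/// Marker trait for structures that may legally extend `VkMemoryAllocateInfo` through its
/// `pNext` chain. Implementing it for any other structure would make `push_next` unsound.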
pub unsafe trait ExtendsMemoryAllocateInfo {}
unsafe impl ExtendsMemoryAllocateInfo for ash::vk::MemoryDedicatedAllocateInfoKHR {}
unsafe impl ExtendsMemoryAllocateInfo for ash::vk::ExportMemoryAllocateInfo {}
unsafe impl ExtendsMemoryAllocateInfo for ash::vk::ImportMemoryFdInfoKHR {}

/// Represents memory that has been allocated.
///
/// The destructor of `DeviceMemory` automatically frees the memory.
///
/// # Example
///
/// ```
/// use vulkano::memory::DeviceMemory;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// let mem_ty = device.physical_device().memory_types().next().unwrap();
///
/// // Allocates 1KB of memory.
/// let memory = DeviceMemory::alloc(device.clone(), mem_ty, 1024).unwrap();
/// ```
pub struct DeviceMemory {
    memory: ash::vk::DeviceMemory,
    device: Arc<Device>,
    size: DeviceSize,
    memory_type_index: u32,
    handle_types: ExternalMemoryHandleType,
    mapped: Mutex<bool>,
}

/// Represents a builder for the device memory object.
///
/// # Example
///
/// ```
/// use vulkano::memory::DeviceMemoryBuilder;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// let mem_ty = device.physical_device().memory_types().next().unwrap();
///
/// // Allocates 1KB of memory.
/// let memory = DeviceMemoryBuilder::new(device, mem_ty.id(), 1024).build().unwrap();
/// ```
pub struct DeviceMemoryBuilder<'a> {
    device: Arc<Device>,
    allocate: ash::vk::MemoryAllocateInfo,
    dedicated_info: Option<ash::vk::MemoryDedicatedAllocateInfoKHR>,
    export_info: Option<ash::vk::ExportMemoryAllocateInfo>,
    import_info: Option<ash::vk::ImportMemoryFdInfoKHR>,
    marker: PhantomData<&'a ()>,
}

impl<'a> DeviceMemoryBuilder<'a> {
    /// Returns a new `DeviceMemoryBuilder` given the required device, memory type and size fields.
    /// Validation of parameters is done when the builder is built.
    pub fn new(
        device: Arc<Device>,
        memory_index: u32,
        size: DeviceSize,
    ) -> DeviceMemoryBuilder<'a> {
        let allocate = ash::vk::MemoryAllocateInfo {
            allocation_size: size,
            memory_type_index: memory_index,
            ..Default::default()
        };

        DeviceMemoryBuilder {
            device,
            allocate,
            dedicated_info: None,
            export_info: None,
            import_info: None,
            marker: PhantomData,
        }
    }

    /// Sets an optional field for dedicated allocations in the `DeviceMemoryBuilder`. To maintain
    /// backwards compatibility, this function is a no-op when the device neither supports
    /// Vulkan 1.1 nor has the `khr_dedicated_allocation` extension enabled.
    ///
    /// # Panic
    ///
    /// - Panics if the dedicated allocation info has already been set.
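    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `buffer` is a hypothetical `UnsafeBuffer` living on the
    /// same device (`device` and `mem_ty` as in the builder example above):
    ///
    /// ```ignore
    /// use vulkano::memory::{DedicatedAlloc, DeviceMemoryBuilder};
    ///
    /// let builder = DeviceMemoryBuilder::new(device, mem_ty.id(), 1024)
    ///     .dedicated_info(DedicatedAlloc::Buffer(&buffer));
    /// ```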
    pub fn dedicated_info(mut self, dedicated: DedicatedAlloc<'a>) -> DeviceMemoryBuilder<'a> {
        assert!(self.dedicated_info.is_none());

        if !(self.device.api_version() >= Version::V1_1
            || self.device.enabled_extensions().khr_dedicated_allocation)
        {
            return self;
        }

        let mut dedicated_info = match dedicated {
            DedicatedAlloc::Buffer(buffer) => ash::vk::MemoryDedicatedAllocateInfoKHR {
                image: ash::vk::Image::null(),
                buffer: buffer.internal_object(),
                ..Default::default()
            },
            DedicatedAlloc::Image(image) => ash::vk::MemoryDedicatedAllocateInfoKHR {
                image: image.internal_object(),
                buffer: ash::vk::Buffer::null(),
                ..Default::default()
            },
            DedicatedAlloc::None => return self,
        };

        self = self.push_next(&mut dedicated_info);
        self.dedicated_info = Some(dedicated_info);
        self
    }

    /// Sets an optional field for exportable allocations in the `DeviceMemoryBuilder`.
    ///
    /// # Panic
    ///
    /// - Panics if the export info has already been set.
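    ///
    /// # Example
    ///
    /// A sketch of requesting an allocation that is exportable as an opaque POSIX file
    /// descriptor, assuming the external-memory extensions are enabled on the device:
    ///
    /// ```ignore
    /// use vulkano::memory::{DeviceMemoryBuilder, ExternalMemoryHandleType};
    ///
    /// let builder = DeviceMemoryBuilder::new(device, mem_ty.id(), 1024)
    ///     .export_info(ExternalMemoryHandleType {
    ///         opaque_fd: true,
    ///         ..ExternalMemoryHandleType::none()
    ///     });
    /// ```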
    pub fn export_info(
        mut self,
        handle_types: ExternalMemoryHandleType,
    ) -> DeviceMemoryBuilder<'a> {
        assert!(self.export_info.is_none());

        let mut export_info = ash::vk::ExportMemoryAllocateInfo {
            handle_types: handle_types.into(),
            ..Default::default()
        };

        self = self.push_next(&mut export_info);
        self.export_info = Some(export_info);
        self
    }

    /// Sets an optional field for importable DeviceMemory in the `DeviceMemoryBuilder`.
    ///
    /// # Panic
    ///
    /// - Panics if the import info has already been set.
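    ///
    /// # Example
    ///
    /// A sketch, assuming `file` is a hypothetical `File` wrapping a dma-buf file descriptor
    /// obtained from some other API:
    ///
    /// ```ignore
    /// use vulkano::memory::{DeviceMemoryBuilder, ExternalMemoryHandleType};
    ///
    /// let builder = DeviceMemoryBuilder::new(device, mem_ty.id(), 1024)
    ///     .import_info(file, ExternalMemoryHandleType {
    ///         dma_buf: true,
    ///         ..ExternalMemoryHandleType::none()
    ///     });
    /// ```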
    #[cfg(any(target_os = "android", target_os = "linux"))]
    pub fn import_info(
        mut self,
        fd: File,
        handle_types: ExternalMemoryHandleType,
    ) -> DeviceMemoryBuilder<'a> {
        assert!(self.import_info.is_none());

        let mut import_info = ash::vk::ImportMemoryFdInfoKHR {
            handle_type: handle_types.into(),
            fd: fd.into_raw_fd(),
            ..Default::default()
        };

        self = self.push_next(&mut import_info);
        self.import_info = Some(import_info);
        self
    }

    // Private function adapted from Ash.
    // https://github.com/MaikKlein/ash/blob/4ba8637d018fec6d6e3a90d7fa47d11c085f6b4a/generator/src/lib.rs
    //
    // Note that the setters move the pointed-to structure into `self` after calling this,
    // which invalidates the pointer recorded here; `build` therefore re-links the chain in
    // place before handing it to Vulkan.
    fn push_next<T: ExtendsMemoryAllocateInfo>(mut self, next: &mut T) -> DeviceMemoryBuilder<'a> {
        unsafe {
            // `next` can itself contain a pointer chain, so we must attach the head of the
            // chain to the root and its tail to the rest of the existing chain.
            // For example:
            //
            // next = A -> B
            // Before: `Root -> C -> D -> E`
            // After: `Root -> A -> B -> C -> D -> E`

            // Convert `next` to our pointer structure.
            let next_ptr = next as *mut T as *mut BaseOutStructure;
            // Retrieve the end of the `next` chain.
            let last_next = ptr_chain_iter(next).last().unwrap();
            // Attach the previous head (which can be null) to the tail of `next`...
            (*last_next).p_next = self.allocate.p_next as *mut BaseOutStructure;
            // ...and make `next` the new head of the chain.
            self.allocate.p_next = next_ptr as *const c_void;
        }

        self
    }

    /// Creates a `DeviceMemory` object on success, consuming the `DeviceMemoryBuilder`. An error
    /// is returned if the requested allocation is too large or if the total number of allocations
    /// would exceed per-device limits.
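    ///
    /// # Example
    ///
    /// A sketch of the full builder flow, with an optional info chained in before the final
    /// `build` call:
    ///
    /// ```ignore
    /// use vulkano::memory::{DedicatedAlloc, DeviceMemoryBuilder};
    ///
    /// let memory = DeviceMemoryBuilder::new(device, mem_ty.id(), 1024)
    ///     .dedicated_info(DedicatedAlloc::None)
    ///     .build()
    ///     .unwrap();
    /// ```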
    pub fn build(mut self) -> Result<Arc<DeviceMemory>, DeviceMemoryAllocError> {
        if self.allocate.allocation_size == 0 {
            return Err(DeviceMemoryAllocError::InvalidSize);
        }

        // VUID-vkAllocateMemory-pAllocateInfo-01714: "pAllocateInfo->memoryTypeIndex must be less
        // than VkPhysicalDeviceMemoryProperties::memoryTypeCount as returned by
        // vkGetPhysicalDeviceMemoryProperties for the VkPhysicalDevice that device was created
        // from."
        let memory_type = self
            .device
            .physical_device()
            .memory_type_by_id(self.allocate.memory_type_index)
            .ok_or(DeviceMemoryAllocError::SpecViolation(1714))?;

        if self.device.physical_device().internal_object()
            != memory_type.physical_device().internal_object()
        {
            return Err(DeviceMemoryAllocError::SpecViolation(1714));
        }

        // Note: This check was previously disabled because MoltenVK doesn't report correct
        // heap sizes (which is why a reported heap size of 0 is skipped below). It was
        // re-enabled because Mesa aborts when `size` is very large.

        // VUID-vkAllocateMemory-pAllocateInfo-01713: "pAllocateInfo->allocationSize must be less than
        // or equal to VkPhysicalDeviceMemoryProperties::memoryHeaps[memindex].size where memindex =
        // VkPhysicalDeviceMemoryProperties::memoryTypes[pAllocateInfo->memoryTypeIndex].heapIndex as
        // returned by vkGetPhysicalDeviceMemoryProperties for the VkPhysicalDevice that device was created
        // from".
        let reported_heap_size = memory_type.heap().size();
        if reported_heap_size != 0 && self.allocate.allocation_size > reported_heap_size {
            return Err(DeviceMemoryAllocError::SpecViolation(1713));
        }

        let mut export_handle_bits = ash::vk::ExternalMemoryHandleTypeFlags::empty();

        if self.export_info.is_some() || self.import_info.is_some() {
            // TODO: check exportFromImportedHandleTypes
            export_handle_bits = match self.export_info {
                Some(export_info) => export_info.handle_types,
                None => ash::vk::ExternalMemoryHandleTypeFlags::empty(),
            };

            let import_handle_bits = match self.import_info {
                Some(import_info) => import_info.handle_type,
                None => ash::vk::ExternalMemoryHandleTypeFlags::empty(),
            };

            if !(export_handle_bits & ash::vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT)
                .is_empty()
            {
                if !self.device.enabled_extensions().ext_external_memory_dma_buf {
                    return Err(DeviceMemoryAllocError::MissingExtension(
                        "ext_external_memory_dmabuf",
                    ));
                }
            }

            if !(export_handle_bits & ash::vk::ExternalMemoryHandleTypeFlags::OPAQUE_FD).is_empty()
            {
                if !self.device.enabled_extensions().khr_external_memory_fd {
                    return Err(DeviceMemoryAllocError::MissingExtension(
                        "khr_external_memory_fd",
                    ));
                }
            }

            if !(import_handle_bits & ash::vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT)
                .is_empty()
            {
                if !self.device.enabled_extensions().ext_external_memory_dma_buf {
                    return Err(DeviceMemoryAllocError::MissingExtension(
                        "ext_external_memory_dmabuf",
                    ));
                }
            }

            if !(import_handle_bits & ash::vk::ExternalMemoryHandleTypeFlags::OPAQUE_FD).is_empty()
            {
                if !self.device.enabled_extensions().khr_external_memory_fd {
                    return Err(DeviceMemoryAllocError::MissingExtension(
                        "khr_external_memory_fd",
                    ));
                }
            }
        }

        // The setter methods copied the extension structures into `self`, which invalidates
        // the pointers recorded by `push_next`, so re-link the `pNext` chain in place here.
        // `self` is not moved again until after the allocation call below, so these pointers
        // stay valid.
        self.allocate.p_next = ptr::null();
        if let Some(ref mut info) = self.dedicated_info {
            info.p_next = self.allocate.p_next;
            self.allocate.p_next = info as *const _ as *const c_void;
        }
        if let Some(ref mut info) = self.export_info {
            info.p_next = self.allocate.p_next;
            self.allocate.p_next = info as *const _ as *const c_void;
        }
        if let Some(ref mut info) = self.import_info {
            info.p_next = self.allocate.p_next;
            self.allocate.p_next = info as *const _ as *const c_void;
        }

        let memory = unsafe {
            let physical_device = self.device.physical_device();
            let mut allocation_count = self
                .device
                .allocation_count()
                .lock()
                .expect("Poisoned mutex");

            if *allocation_count
                >= physical_device
                    .properties()
                    .max_memory_allocation_count
            {
                return Err(DeviceMemoryAllocError::TooManyObjects);
            }
            let fns = self.device.fns();

            let mut output = MaybeUninit::uninit();
            check_errors(fns.v1_0.allocate_memory(
                self.device.internal_object(),
                &self.allocate,
                ptr::null(),
                output.as_mut_ptr(),
            ))?;
            *allocation_count += 1;
            output.assume_init()
        };

        Ok(Arc::new(DeviceMemory {
            memory,
            device: self.device,
            size: self.allocate.allocation_size,
            memory_type_index: self.allocate.memory_type_index,
            handle_types: ExternalMemoryHandleType::from(export_handle_bits),
            mapped: Mutex::new(false),
        }))
    }
}

impl DeviceMemory {
    /// Allocates a chunk of memory from the device.
    ///
    /// Some platforms may have a limit on the maximum size of a single allocation. For example,
    /// certain systems may fail to create allocations with a size greater than or equal to 4GB.
    ///
    /// An error is returned if `size` is 0 or if `memory_type` doesn't belong to the same
    /// physical device as `device`.
    #[inline]
    pub fn alloc(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        let memory = DeviceMemoryBuilder::new(device, memory_type.id(), size).build()?;
        // Will never panic because we just built the `DeviceMemory` ourselves, so ours is
        // the only strong reference to it.
        Ok(Arc::try_unwrap(memory).unwrap())
    }

    /// Same as `alloc`, but allows specifying a resource that will be bound to the memory.
    ///
    /// If a buffer or an image is specified in `resource`, then the returned memory must not be
    /// bound to a different buffer or image.
    ///
    /// If the `VK_KHR_dedicated_allocation` extension is enabled on the device, then it will be
    /// used by this method. Otherwise the `resource` parameter will be ignored.
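    ///
    /// A sketch, with `buffer` standing in for a hypothetical `UnsafeBuffer` on the same
    /// device:
    ///
    /// ```ignore
    /// use vulkano::memory::{DedicatedAlloc, DeviceMemory};
    ///
    /// let mem = DeviceMemory::dedicated_alloc(device.clone(), mem_ty, 1024,
    ///     DedicatedAlloc::Buffer(&buffer)).unwrap();
    /// ```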
    #[inline]
    pub fn dedicated_alloc(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
        resource: DedicatedAlloc,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        let memory = DeviceMemoryBuilder::new(device, memory_type.id(), size)
            .dedicated_info(resource)
            .build()?;

        // Will never panic because we just built the `DeviceMemory` ourselves, so ours is
        // the only strong reference to it.
        Ok(Arc::try_unwrap(memory).unwrap())
    }

    /// Allocates a chunk of memory and maps it.
    ///
    /// # Panic
    ///
    /// - Panics if the memory type is not host-visible.
    ///
    /// An error is returned if `memory_type` doesn't belong to the same physical device as
    /// `device`.
    #[inline]
    pub fn alloc_and_map(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        DeviceMemory::dedicated_alloc_and_map(device, memory_type, size, DedicatedAlloc::None)
    }

    /// Equivalent of `dedicated_alloc` for `alloc_and_map`.
    pub fn dedicated_alloc_and_map(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
        resource: DedicatedAlloc,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        assert!(memory_type.is_host_visible());
        let mem = DeviceMemory::dedicated_alloc(device.clone(), memory_type, size, resource)?;

        Self::map_allocation(device, mem)
    }

    /// Same as `alloc`, but allows exporting the allocation as a file descriptor on Linux.
    #[inline]
    #[cfg(target_os = "linux")]
    pub fn alloc_with_exportable_fd(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        let memory = DeviceMemoryBuilder::new(device, memory_type.id(), size)
            .export_info(ExternalMemoryHandleType {
                opaque_fd: true,
                ..ExternalMemoryHandleType::none()
            })
            .build()?;

        // Will never panic because we just built the `DeviceMemory` ourselves, so ours is
        // the only strong reference to it.
        Ok(Arc::try_unwrap(memory).unwrap())
    }

    /// Same as `dedicated_alloc`, but allows exporting the allocation as a file descriptor
    /// on Linux.
    #[inline]
    #[cfg(target_os = "linux")]
    pub fn dedicated_alloc_with_exportable_fd(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
        resource: DedicatedAlloc,
    ) -> Result<DeviceMemory, DeviceMemoryAllocError> {
        let memory = DeviceMemoryBuilder::new(device, memory_type.id(), size)
            .export_info(ExternalMemoryHandleType {
                opaque_fd: true,
                ..ExternalMemoryHandleType::none()
            })
            .dedicated_info(resource)
            .build()?;

        // Will never panic because we just built the `DeviceMemory` ourselves, so ours is
        // the only strong reference to it.
        Ok(Arc::try_unwrap(memory).unwrap())
    }

    /// Same as `alloc_and_map`, but allows exporting the allocation as a file descriptor
    /// on Linux.
    #[inline]
    #[cfg(target_os = "linux")]
    pub fn alloc_and_map_with_exportable_fd(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        DeviceMemory::dedicated_alloc_and_map_with_exportable_fd(
            device,
            memory_type,
            size,
            DedicatedAlloc::None,
        )
    }

    /// Same as `dedicated_alloc_and_map`, but allows exporting the allocation as a file
    /// descriptor on Linux.
    #[inline]
    #[cfg(target_os = "linux")]
    pub fn dedicated_alloc_and_map_with_exportable_fd(
        device: Arc<Device>,
        memory_type: MemoryType,
        size: DeviceSize,
        resource: DedicatedAlloc,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        assert!(memory_type.is_host_visible());
        let mem = DeviceMemory::dedicated_alloc_with_exportable_fd(
            device.clone(),
            memory_type,
            size,
            resource,
        )?;

        Self::map_allocation(device, mem)
    }

    fn map_allocation(
        device: Arc<Device>,
        mem: DeviceMemory,
    ) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
        let fns = device.fns();
        let coherent = mem.memory_type().is_host_coherent();
        let ptr = unsafe {
            let mut output = MaybeUninit::uninit();
            check_errors(fns.v1_0.map_memory(
                device.internal_object(),
                mem.memory,
                0,
                mem.size,
                ash::vk::MemoryMapFlags::empty(),
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        Ok(MappedDeviceMemory {
            memory: mem,
            pointer: ptr,
            coherent,
        })
    }

    /// Returns the memory type this chunk was allocated on.
    #[inline]
    pub fn memory_type(&self) -> MemoryType {
        self.device
            .physical_device()
            .memory_type_by_id(self.memory_type_index)
            .unwrap()
    }

    /// Returns the size in bytes of that memory chunk.
    #[inline]
    pub fn size(&self) -> DeviceSize {
        self.size
    }

    /// Exports the device memory into a Unix file descriptor. The caller owns the returned
    /// `File`, as per the Vulkan spec.
    ///
    /// An error is returned if the requested handle type is not a POSIX file descriptor
    /// handle, or was not requested when the memory was allocated.
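    ///
    /// # Example
    ///
    /// A sketch, assuming `memory` was allocated with a matching `export_info`:
    ///
    /// ```ignore
    /// use vulkano::memory::ExternalMemoryHandleType;
    ///
    /// let file = memory.export_fd(ExternalMemoryHandleType {
    ///     opaque_fd: true,
    ///     ..ExternalMemoryHandleType::none()
    /// }).unwrap();
    /// ```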
    #[inline]
    #[cfg(any(target_os = "android", target_os = "linux"))]
    pub fn export_fd(
        &self,
        handle_type: ExternalMemoryHandleType,
    ) -> Result<File, DeviceMemoryAllocError> {
        let fns = self.device.fns();

        // VUID-VkMemoryGetFdInfoKHR-handleType-00672: "handleType must be defined as a POSIX file
        // descriptor handle".
        let bits = ash::vk::ExternalMemoryHandleTypeFlags::from(handle_type);
        if bits != ash::vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT
            && bits != ash::vk::ExternalMemoryHandleTypeFlags::OPAQUE_FD
        {
            return Err(DeviceMemoryAllocError::SpecViolation(672));
        }

        // VUID-VkMemoryGetFdInfoKHR-handleType-00671: "handleType must have been included in
        // VkExportMemoryAllocateInfo::handleTypes when memory was created".
        if (bits & ash::vk::ExternalMemoryHandleTypeFlags::from(self.handle_types)).is_empty() {
            return Err(DeviceMemoryAllocError::SpecViolation(671));
        }

        let fd = unsafe {
            let info = ash::vk::MemoryGetFdInfoKHR {
                memory: self.memory,
                handle_type: handle_type.into(),
                ..Default::default()
            };

            let mut output = MaybeUninit::uninit();
            check_errors(fns.khr_external_memory_fd.get_memory_fd_khr(
                self.device.internal_object(),
                &info,
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        let file = unsafe { File::from_raw_fd(fd) };
        Ok(file)
    }
}

unsafe impl DeviceOwned for DeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

impl fmt::Debug for DeviceMemory {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("DeviceMemory")
            .field("device", &*self.device)
            .field("memory_type", &self.memory_type())
            .field("size", &self.size)
            .finish()
    }
}

unsafe impl VulkanObject for DeviceMemory {
    type Object = ash::vk::DeviceMemory;

    #[inline]
    fn internal_object(&self) -> ash::vk::DeviceMemory {
        self.memory
    }
}

impl Drop for DeviceMemory {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let fns = self.device.fns();
            fns.v1_0
                .free_memory(self.device.internal_object(), self.memory, ptr::null());
            let mut allocation_count = self
                .device
                .allocation_count()
                .lock()
                .expect("Poisoned mutex");
            *allocation_count -= 1;
        }
    }
}

/// Represents memory that has been allocated and mapped in CPU accessible space.
///
/// Can be obtained with `DeviceMemory::alloc_and_map`. That function will panic if the memory
/// type is not host-visible.
///
/// In order to access the content of the allocated memory, you can use the `read_write` method.
/// This method returns a guard object that derefs to the content.
///
/// # Example
///
/// ```
/// use vulkano::memory::DeviceMemory;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// // The memory type must be mappable.
/// let mem_ty = device.physical_device().memory_types()
///     .filter(|t| t.is_host_visible())
///     .next().unwrap(); // Vk specs guarantee that this can't fail
///
/// // Allocates 1KB of memory.
/// let memory = DeviceMemory::alloc_and_map(device.clone(), mem_ty, 1024).unwrap();
///
/// // Get access to the content. Note that this is very unsafe for two reasons: 1) the content is
/// // uninitialized, and 2) the access is unsynchronized.
/// unsafe {
///     let mut content = memory.read_write::<[u8]>(0..1024);
///     content[12] = 54; // `content` derefs to a `&[u8]` or a `&mut [u8]`
/// }
/// ```
pub struct MappedDeviceMemory {
    memory: DeviceMemory,
    pointer: *mut c_void,
    coherent: bool,
}

// Note that `MappedDeviceMemory` doesn't implement `Drop`, as we don't need to unmap memory before
// freeing it.
//
// Vulkan specs, documentation of `vkFreeMemory`:
// > If a memory object is mapped at the time it is freed, it is implicitly unmapped.

impl MappedDeviceMemory {
    /// Unmaps the memory. It will no longer be accessible from the CPU.
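    ///
    /// A sketch, with `mapped` being any `MappedDeviceMemory`:
    ///
    /// ```ignore
    /// let memory = mapped.unmap();
    /// ```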
    pub fn unmap(self) -> DeviceMemory {
        unsafe {
            let device = self.memory.device();
            let fns = device.fns();
            fns.v1_0
                .unmap_memory(device.internal_object(), self.memory.memory);
        }

        self.memory
    }

    /// Gives access to the content of the memory.
    ///
    /// This function takes care of calling `vkInvalidateMappedMemoryRanges` and
    /// `vkFlushMappedMemoryRanges` on the given range. You are therefore encouraged to use the
    /// smallest possible range, and to not call this function multiple times in a row for
    /// several small changes.
    ///
    /// # Safety
    ///
    /// - Type safety is not checked. You must ensure that `T` corresponds to the content of the
    ///   buffer.
    /// - Accesses are not synchronized. Synchronization must be handled outside of
    ///   the `MappedDeviceMemory`.
    ///
    #[inline]
    pub unsafe fn read_write<T: ?Sized>(&self, range: Range<DeviceSize>) -> CpuAccess<T>
    where
        T: Content,
    {
        let fns = self.memory.device().fns();
        let pointer = T::ref_from_ptr(
            (self.pointer as usize + range.start as usize) as *mut _,
            (range.end - range.start) as usize,
        )
        .unwrap(); // TODO: error

        if !self.coherent {
            let range = ash::vk::MappedMemoryRange {
                memory: self.memory.internal_object(),
                offset: range.start,
                size: range.end - range.start,
                ..Default::default()
            };

            // TODO: return result instead?
            check_errors(fns.v1_0.invalidate_mapped_memory_ranges(
                self.memory.device().internal_object(),
                1,
                &range,
            ))
            .unwrap();
        }

        CpuAccess {
            pointer,
            mem: self,
            coherent: self.coherent,
            range,
        }
    }
}

impl AsRef<DeviceMemory> for MappedDeviceMemory {
    #[inline]
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}

impl AsMut<DeviceMemory> for MappedDeviceMemory {
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}

unsafe impl DeviceOwned for MappedDeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}

unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}

impl fmt::Debug for MappedDeviceMemory {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("MappedDeviceMemory")
            .field(&self.memory)
            .finish()
    }
}

unsafe impl Send for DeviceMemoryMapping {}
unsafe impl Sync for DeviceMemoryMapping {}

/// Represents memory mapped in CPU accessible space.
///
/// Takes an additional reference on the underlying device memory and device.
pub struct DeviceMemoryMapping {
    device: Arc<Device>,
    memory: Arc<DeviceMemory>,
    pointer: *mut c_void,
    coherent: bool,
}

impl DeviceMemoryMapping {
    /// Creates a new `DeviceMemoryMapping` object given the previously allocated `device` and `memory`.
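    ///
    /// A sketch of mapping a whole allocation, assuming `memory` is host-visible (`flags`
    /// must currently be 0):
    ///
    /// ```ignore
    /// use vulkano::memory::DeviceMemoryMapping;
    ///
    /// let mapping = DeviceMemoryMapping::new(device.clone(), memory.clone(), 0,
    ///     memory.size(), 0).unwrap();
    /// ```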
    pub fn new(
        device: Arc<Device>,
        memory: Arc<DeviceMemory>,
        offset: DeviceSize,
        size: DeviceSize,
        flags: u32,
    ) -> Result<DeviceMemoryMapping, DeviceMemoryAllocError> {
        // VUID-vkMapMemory-memory-00678: "memory must not be currently host mapped".
        let mut mapped = memory.mapped.lock().expect("Poisoned mutex");

        if *mapped {
            return Err(DeviceMemoryAllocError::SpecViolation(678));
        }

        // VUID-vkMapMemory-offset-00679: "offset must be less than the size of memory".
        if offset >= memory.size() {
            return Err(DeviceMemoryAllocError::SpecViolation(679));
        }

        // VUID-vkMapMemory-size-00680: "If size is not equal to VK_WHOLE_SIZE, size must be
        // greater than 0".
        if size != ash::vk::WHOLE_SIZE && size == 0 {
            return Err(DeviceMemoryAllocError::SpecViolation(680));
        }

        // VUID-vkMapMemory-size-00681: "If size is not equal to VK_WHOLE_SIZE, size must be less
        // than or equal to the size of the memory minus offset".
        if size != ash::vk::WHOLE_SIZE && size > memory.size() - offset {
            return Err(DeviceMemoryAllocError::SpecViolation(681));
        }

        // VUID-vkMapMemory-memory-00682: "memory must have been created with a memory type
        // that reports VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT".
        if !memory.memory_type().is_host_visible() {
            return Err(DeviceMemoryAllocError::SpecViolation(682));
        }
        let coherent = memory.memory_type().is_host_coherent();

        // VUID-vkMapMemory-memory-00683: "memory must not have been allocated with multiple instances".
        // Not checked here; vulkano does not currently expose multi-instance (device group)
        // allocations.

        // VUID-vkMapMemory-memory-parent: "memory must have been created, allocated or retrieved
        // from device".
        if device.internal_object() != memory.device().internal_object() {
            return Err(DeviceMemoryAllocError::ImplicitSpecViolation(
                "VUID-vkMapMemory-memory-parent",
            ));
        }

        // VUID-vkMapMemory-flags-zerobitmask: "flags must be 0".
        if flags != 0 {
            return Err(DeviceMemoryAllocError::ImplicitSpecViolation(
                "VUID-vkMapMemory-flags-zerobitmask",
            ));
        }

        // VUID-vkMapMemory-device-parameter, VUID-vkMapMemory-memory-parameter and
        // VUID-vkMapMemory-ppData-parameter satisfied via Vulkano internally.

        let fns = device.fns();
        let ptr = unsafe {
            let mut output = MaybeUninit::uninit();
            // Map the validated `offset`/`size` range (`size` may be `VK_WHOLE_SIZE`).
            check_errors(fns.v1_0.map_memory(
                device.internal_object(),
                memory.memory,
                offset,
                size,
                ash::vk::MemoryMapFlags::empty(),
                output.as_mut_ptr(),
            ))?;
            output.assume_init()
        };

        *mapped = true;

        Ok(DeviceMemoryMapping {
            device: device.clone(),
            memory: memory.clone(),
            pointer: ptr,
            coherent,
        })
    }

    /// Returns the raw pointer associated with the `DeviceMemoryMapping`.
    ///
    /// # Safety
    ///
    /// The caller of this function must ensure that the use of the raw pointer does not outlive
    /// the associated `DeviceMemoryMapping`.
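    ///
    /// A sketch of writing a single byte through the mapping; synchronization and
    /// initialization are entirely the caller's responsibility:
    ///
    /// ```ignore
    /// unsafe {
    ///     *mapping.as_ptr() = 0xff;
    /// }
    /// ```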
    pub unsafe fn as_ptr(&self) -> *mut u8 {
        self.pointer as *mut u8
    }
}

impl Drop for DeviceMemoryMapping {
    #[inline]
    fn drop(&mut self) {
        let mut mapped = self.memory.mapped.lock().expect("Poisoned mutex");

        unsafe {
            let fns = self.device.fns();
            fns.v1_0
                .unmap_memory(self.device.internal_object(), self.memory.memory);
        }

        *mapped = false;
    }
}

/// Object that can be used to read or write the content of a `MappedDeviceMemory`.
///
/// This object derefs to the content, just like a `MutexGuard` for example.
pub struct CpuAccess<'a, T: ?Sized + 'a> {
    pointer: *mut T,
    mem: &'a MappedDeviceMemory,
    coherent: bool,
    range: Range<DeviceSize>,
}

impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
    /// Builds a new `CpuAccess` to access a sub-part of the current `CpuAccess`.
    ///
    /// This function is unstable. Don't use it directly.
    // TODO: unsafe?
    // TODO: decide what to do with this
    #[doc(hidden)]
    #[inline]
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U>
    where
        F: FnOnce(*mut T) -> *mut U,
    {
        CpuAccess {
            pointer: f(self.pointer),
            mem: self.mem,
            coherent: self.coherent,
            range: self.range.clone(), // TODO: ?
        }
    }
}

unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}

impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.pointer }
    }
}

impl<'a, T: ?Sized + 'a> DerefMut for CpuAccess<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.pointer }
    }
}

impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // If the memory doesn't have the `coherent` flag, we need to flush the data.
        if !self.coherent {
            let fns = self.mem.as_ref().device().fns();

            let range = ash::vk::MappedMemoryRange {
                memory: self.mem.as_ref().internal_object(),
                offset: self.range.start,
                size: self.range.end - self.range.start,
                ..Default::default()
            };

            unsafe {
                check_errors(fns.v1_0.flush_mapped_memory_ranges(
                    self.mem.as_ref().device().internal_object(),
                    1,
                    &range,
                ))
                .unwrap();
            }
        }
    }
}

/// Error type returned by functions related to `DeviceMemory`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DeviceMemoryAllocError {
    /// Not enough memory available.
    OomError(OomError),
    /// The maximum number of allocations has been exceeded.
    TooManyObjects,
    /// Memory map failed.
    MemoryMapFailed,
    /// Invalid memory index.
    MemoryIndexInvalid,
    /// The structure type is already present in the `pNext` chain.
    StructureTypeAlreadyPresent,
    /// Spec violation, containing the Valid Usage ID (VUID) from the Vulkan spec.
    SpecViolation(u32),
    /// An implicit violation that's covered in the Vulkan spec.
    ImplicitSpecViolation(&'static str),
    /// An extension is missing.
    MissingExtension(&'static str),
    /// Invalid size.
    InvalidSize,
}

impl error::Error for DeviceMemoryAllocError {
    #[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match *self {
            DeviceMemoryAllocError::OomError(ref err) => Some(err),
            _ => None,
        }
    }
}

impl fmt::Display for DeviceMemoryAllocError {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            DeviceMemoryAllocError::OomError(_) => write!(fmt, "not enough memory available"),
            DeviceMemoryAllocError::TooManyObjects => {
                write!(fmt, "the maximum number of allocations has been exceeded")
            }
            DeviceMemoryAllocError::MemoryMapFailed => write!(fmt, "memory map failed"),
            DeviceMemoryAllocError::MemoryIndexInvalid => write!(fmt, "memory index invalid"),
            DeviceMemoryAllocError::StructureTypeAlreadyPresent => {
                write!(fmt, "structure type already present")
            }
            DeviceMemoryAllocError::SpecViolation(u) => {
                write!(fmt, "valid usage ID check {} failed", u)
            }
            DeviceMemoryAllocError::MissingExtension(s) => {
                write!(fmt, "missing the following extension: {}", s)
            }
            DeviceMemoryAllocError::ImplicitSpecViolation(e) => {
                write!(fmt, "implicit spec violation: {}", e)
            }
            DeviceMemoryAllocError::InvalidSize => write!(fmt, "invalid size"),
        }
    }
}

impl From<Error> for DeviceMemoryAllocError {
    #[inline]
    fn from(err: Error) -> DeviceMemoryAllocError {
        match err {
            e @ Error::OutOfHostMemory | e @ Error::OutOfDeviceMemory => {
                DeviceMemoryAllocError::OomError(e.into())
            }
            Error::TooManyObjects => DeviceMemoryAllocError::TooManyObjects,
            Error::MemoryMapFailed => DeviceMemoryAllocError::MemoryMapFailed,
            _ => panic!("unexpected error: {:?}", err),
        }
    }
}

impl From<OomError> for DeviceMemoryAllocError {
    #[inline]
    fn from(err: OomError) -> DeviceMemoryAllocError {
        DeviceMemoryAllocError::OomError(err)
    }
}

#[cfg(test)]
mod tests {
    use crate::memory::DeviceMemory;
    use crate::memory::DeviceMemoryAllocError;
    use crate::OomError;

    #[test]
    fn create() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
    }

    #[test]
    fn zero_size() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_should_panic!({
            let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0).unwrap();
        });
    }

    #[test]
    #[cfg(target_pointer_width = "64")]
    fn oom_single() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .filter(|m| !m.is_lazily_allocated())
            .next()
            .unwrap();

        match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
            Err(DeviceMemoryAllocError::SpecViolation(_)) => (),
            _ => panic!(),
        }
    }

    #[test]
    #[ignore] // TODO: test fails for now on Mesa+Intel
    fn oom_multi() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .filter(|m| !m.is_lazily_allocated())
            .next()
            .unwrap();
        let heap_size = mem_ty.heap().size();

        let mut allocs = Vec::new();

        for _ in 0..4 {
            match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
                Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => return, // test succeeded
                Ok(a) => allocs.push(a),
                _ => (),
            }
        }

        panic!()
    }

    #[test]
    fn allocation_count() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 0);
        let _mem1 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
        {
            let _mem2 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
            assert_eq!(*device.allocation_count().lock().unwrap(), 2);
        }
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
    }
}