1 // Copyright (c) 2016 The vulkano developers
2 // Licensed under the Apache License, Version 2.0
3 // <LICENSE-APACHE or
4 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT
5 // license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
6 // at your option. All files in the project carrying such
7 // notice may not be copied, modified, or distributed except
8 // according to those terms.
9
10 pub use self::host_visible::StdHostVisibleMemoryTypePool;
11 pub use self::host_visible::StdHostVisibleMemoryTypePoolAlloc;
12 pub use self::non_host_visible::StdNonHostVisibleMemoryTypePool;
13 pub use self::non_host_visible::StdNonHostVisibleMemoryTypePoolAlloc;
14 pub use self::pool::StdMemoryPool;
15 pub use self::pool::StdMemoryPoolAlloc;
16 use crate::device::physical::MemoryType;
17 use crate::device::{Device, DeviceOwned};
18 use crate::memory::DedicatedAlloc;
19 use crate::memory::DeviceMemory;
20 use crate::memory::DeviceMemoryAllocError;
21 use crate::memory::MappedDeviceMemory;
22 use crate::memory::MemoryRequirements;
23 use crate::DeviceSize;
24 use std::sync::Arc;
25
26 mod host_visible;
27 mod non_host_visible;
28 mod pool;
29
/// If the allocation size goes beyond this, then we perform a dedicated allocation which bypasses
/// the pool. This prevents the pool from overallocating a significant amount of memory.
const MAX_POOL_ALLOC: DeviceSize = 256 * 1024 * 1024; // 256 MiB
33
choose_allocation_memory_type<'s, F>( device: &'s Arc<Device>, requirements: &MemoryRequirements, mut filter: F, map: MappingRequirement, ) -> MemoryType<'s> where F: FnMut(MemoryType) -> AllocFromRequirementsFilter,34 fn choose_allocation_memory_type<'s, F>(
35 device: &'s Arc<Device>,
36 requirements: &MemoryRequirements,
37 mut filter: F,
38 map: MappingRequirement,
39 ) -> MemoryType<'s>
40 where
41 F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
42 {
43 let mem_ty = {
44 let mut filter = |ty: MemoryType| {
45 if map == MappingRequirement::Map && !ty.is_host_visible() {
46 return AllocFromRequirementsFilter::Forbidden;
47 }
48 filter(ty)
49 };
50 let first_loop = device
51 .physical_device()
52 .memory_types()
53 .map(|t| (t, AllocFromRequirementsFilter::Preferred));
54 let second_loop = device
55 .physical_device()
56 .memory_types()
57 .map(|t| (t, AllocFromRequirementsFilter::Allowed));
58 first_loop
59 .chain(second_loop)
60 .filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
61 .filter(|&(t, rq)| filter(t) == rq)
62 .next()
63 .expect("Couldn't find a memory type to allocate from")
64 .0
65 };
66 mem_ty
67 }
68
69 /// Pool of GPU-visible memory that can be allocated from.
/// Pool of GPU-visible memory that can be allocated from.
///
/// This trait is `unsafe` because implementations must uphold the allocation contracts
/// documented on each method; violating them leads to undefined behavior when the returned
/// memory is bound to a resource.
pub unsafe trait MemoryPool: DeviceOwned {
    /// Object that represents a single allocation. Its destructor should free the chunk.
    type Alloc: MemoryPoolAlloc;

    /// Allocates memory from the pool.
    ///
    /// # Safety
    ///
    /// Implementation safety:
    ///
    /// - The returned object must match the requirements.
    /// - When a linear object is allocated next to an optimal object, it is mandatory that
    ///   the boundary is aligned to the value of the `buffer_image_granularity` limit.
    ///
    /// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
    /// returned by this function to a resource.
    ///
    /// # Panic
    ///
    /// - Panics if `memory_type` doesn't belong to the same physical device as the device which
    ///   was used to create this pool.
    /// - Panics if the memory type is not host-visible and `map` is `MappingRequirement::Map`.
    /// - Panics if `size` is 0.
    /// - Panics if `alignment` is 0.
    ///
    fn alloc_generic(
        &self,
        ty: MemoryType,
        size: DeviceSize,
        alignment: DeviceSize,
        layout: AllocLayout,
        map: MappingRequirement,
    ) -> Result<Self::Alloc, DeviceMemoryAllocError>;

    /// Same as `alloc_generic` but with exportable memory option.
    #[cfg(target_os = "linux")]
    fn alloc_generic_with_exportable_fd(
        &self,
        ty: MemoryType,
        size: DeviceSize,
        alignment: DeviceSize,
        layout: AllocLayout,
        map: MappingRequirement,
    ) -> Result<Self::Alloc, DeviceMemoryAllocError>;

    /// Chooses a memory type and allocates memory from it.
    ///
    /// Contrary to `alloc_generic`, this function may allocate a whole new block of memory
    /// dedicated to a resource based on `requirements.prefer_dedicated`.
    ///
    /// `filter` can be used to restrict the memory types and to indicate which are preferred.
    /// If `map` is `MappingRequirement::Map`, then non-host-visible memory types will
    /// automatically be filtered out.
    ///
    /// # Safety
    ///
    /// Implementation safety:
    ///
    /// - The returned object must match the requirements.
    /// - When a linear object is allocated next to an optimal object, it is mandatory that
    ///   the boundary is aligned to the value of the `buffer_image_granularity` limit.
    /// - If `dedicated` is not `None`, the returned memory must either not be dedicated or be
    ///   dedicated to the resource that was passed.
    ///
    /// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
    /// returned by this function to a resource.
    ///
    /// # Panic
    ///
    /// - Panics if no memory type could be found, which can happen if `filter` is too restrictive.
    // TODO: ^ is this a good idea?
    /// - Panics if `size` is 0.
    /// - Panics if `alignment` is 0.
    ///
    fn alloc_from_requirements<F>(
        &self,
        requirements: &MemoryRequirements,
        layout: AllocLayout,
        map: MappingRequirement,
        dedicated: DedicatedAlloc,
        filter: F,
    ) -> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
    where
        F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
    {
        // Choose a suitable memory type.
        let mem_ty = choose_allocation_memory_type(self.device(), requirements, filter, map);

        // Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
        // Allocations larger than MAX_POOL_ALLOC always bypass the pool so that the pool
        // doesn't overallocate a huge block.
        if !requirements.prefer_dedicated && requirements.size <= MAX_POOL_ALLOC {
            let alloc = self.alloc_generic(
                mem_ty,
                requirements.size,
                requirements.alignment,
                layout,
                map,
            )?;
            return Ok(alloc.into());
        }
        // A dedicated allocation needs a resource to be dedicated to; without one we can only
        // fall back to the pool.
        if let DedicatedAlloc::None = dedicated {
            let alloc = self.alloc_generic(
                mem_ty,
                requirements.size,
                requirements.alignment,
                layout,
                map,
            )?;
            return Ok(alloc.into());
        }

        // If we reach here, then we perform a dedicated alloc.
        // NOTE(review): unlike the `_with_exportable_fd` variant below, this path does not
        // check `khr_dedicated_allocation` — presumably `DeviceMemory::dedicated_alloc`
        // handles the extension's absence internally; confirm against that function.
        match map {
            MappingRequirement::Map => {
                let mem = DeviceMemory::dedicated_alloc_and_map(
                    self.device().clone(),
                    mem_ty,
                    requirements.size,
                    dedicated,
                )?;
                Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
            }
            MappingRequirement::DoNotMap => {
                let mem = DeviceMemory::dedicated_alloc(
                    self.device().clone(),
                    mem_ty,
                    requirements.size,
                    dedicated,
                )?;
                Ok(PotentialDedicatedAllocation::Dedicated(mem))
            }
        }
    }

    /// Same as `alloc_from_requirements` but with exportable fd option on Linux.
    ///
    /// # Panic
    ///
    /// - Panics if the `khr_external_memory` or `khr_external_memory_fd` device extensions
    ///   are not enabled.
    #[cfg(target_os = "linux")]
    fn alloc_from_requirements_with_exportable_fd<F>(
        &self,
        requirements: &MemoryRequirements,
        layout: AllocLayout,
        map: MappingRequirement,
        dedicated: DedicatedAlloc,
        filter: F,
    ) -> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
    where
        F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
    {
        // Exporting a memory fd requires both external-memory extensions to be enabled.
        assert!(self.device().enabled_extensions().khr_external_memory_fd);
        assert!(self.device().enabled_extensions().khr_external_memory);

        let mem_ty = choose_allocation_memory_type(self.device(), requirements, filter, map);

        // Use the pool unless a dedicated allocation is preferred *and* the dedicated
        // allocation extension is available.
        // NOTE(review): unlike `alloc_from_requirements`, this path does not cap pool
        // allocations at MAX_POOL_ALLOC — confirm whether that asymmetry is intentional.
        if !requirements.prefer_dedicated
            || !self.device().enabled_extensions().khr_dedicated_allocation
        {
            let alloc = self.alloc_generic_with_exportable_fd(
                mem_ty,
                requirements.size,
                requirements.alignment,
                layout,
                map,
            )?;
            return Ok(alloc.into());
        }
        // No resource to dedicate to: fall back to the pool.
        if let DedicatedAlloc::None = dedicated {
            let alloc = self.alloc_generic_with_exportable_fd(
                mem_ty,
                requirements.size,
                requirements.alignment,
                layout,
                map,
            )?;
            return Ok(alloc.into());
        }

        // Perform a dedicated allocation with an exportable fd.
        match map {
            MappingRequirement::Map => {
                let mem = DeviceMemory::dedicated_alloc_and_map_with_exportable_fd(
                    self.device().clone(),
                    mem_ty,
                    requirements.size,
                    dedicated,
                )?;
                Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
            }
            MappingRequirement::DoNotMap => {
                let mem = DeviceMemory::dedicated_alloc_with_exportable_fd(
                    self.device().clone(),
                    mem_ty,
                    requirements.size,
                    dedicated,
                )?;
                Ok(PotentialDedicatedAllocation::Dedicated(mem))
            }
        }
    }
}
266
/// Verdict returned by the filter closure passed to
/// `MemoryPool::alloc_from_requirements`, deciding how a candidate memory type may be used.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum AllocFromRequirementsFilter {
    /// This memory type should be used whenever possible; it is tried in the first pass.
    Preferred,
    /// This memory type is acceptable as a fallback if no preferred type matches.
    Allowed,
    /// This memory type must never be allocated from.
    Forbidden,
}
273
274 /// Object that represents a single allocation. Its destructor should free the chunk.
/// Object that represents a single allocation. Its destructor should free the chunk.
pub unsafe trait MemoryPoolAlloc {
    /// Returns the mapped memory object from which this is allocated, or `None` if the
    /// memory is not mapped.
    fn mapped_memory(&self) -> Option<&MappedDeviceMemory>;

    /// Returns the memory object from which this is allocated.
    fn memory(&self) -> &DeviceMemory;

    /// Returns the offset at the start of the memory where the first byte of this allocation
    /// resides.
    fn offset(&self) -> DeviceSize;
}
287
/// Whether an allocation should map the memory so that it is accessible from the host, or not.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum MappingRequirement {
    /// The allocation must be mapped; only host-visible memory types qualify.
    Map,
    /// The allocation shouldn't be mapped.
    DoNotMap,
}
296
/// Layout of the object being allocated. Pools keep linear and optimal objects apart so that
/// the `buffer_image_granularity` alignment requirement between them can be honored.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum AllocLayout {
    /// The object has a linear layout (e.g. buffers, linear-tiled images).
    Linear,
    /// The object has an optimal layout (e.g. optimal-tiled images).
    Optimal,
}
305
/// Enumeration that can contain either a generic allocation coming from a pool, or a dedicated
/// allocation for one specific resource.
#[derive(Debug)]
pub enum PotentialDedicatedAllocation<A> {
    /// An allocation suballocated from a pool's memory block.
    Generic(A),
    /// A whole, unmapped `DeviceMemory` block dedicated to one resource.
    Dedicated(DeviceMemory),
    /// A whole, host-mapped `DeviceMemory` block dedicated to one resource.
    DedicatedMapped(MappedDeviceMemory),
}
314
315 unsafe impl<A> MemoryPoolAlloc for PotentialDedicatedAllocation<A>
316 where
317 A: MemoryPoolAlloc,
318 {
319 #[inline]
mapped_memory(&self) -> Option<&MappedDeviceMemory>320 fn mapped_memory(&self) -> Option<&MappedDeviceMemory> {
321 match *self {
322 PotentialDedicatedAllocation::Generic(ref alloc) => alloc.mapped_memory(),
323 PotentialDedicatedAllocation::Dedicated(_) => None,
324 PotentialDedicatedAllocation::DedicatedMapped(ref mem) => Some(mem),
325 }
326 }
327
328 #[inline]
memory(&self) -> &DeviceMemory329 fn memory(&self) -> &DeviceMemory {
330 match *self {
331 PotentialDedicatedAllocation::Generic(ref alloc) => alloc.memory(),
332 PotentialDedicatedAllocation::Dedicated(ref mem) => mem,
333 PotentialDedicatedAllocation::DedicatedMapped(ref mem) => mem.as_ref(),
334 }
335 }
336
337 #[inline]
offset(&self) -> DeviceSize338 fn offset(&self) -> DeviceSize {
339 match *self {
340 PotentialDedicatedAllocation::Generic(ref alloc) => alloc.offset(),
341 PotentialDedicatedAllocation::Dedicated(_) => 0,
342 PotentialDedicatedAllocation::DedicatedMapped(_) => 0,
343 }
344 }
345 }
346
347 impl<A> From<A> for PotentialDedicatedAllocation<A> {
348 #[inline]
from(alloc: A) -> PotentialDedicatedAllocation<A>349 fn from(alloc: A) -> PotentialDedicatedAllocation<A> {
350 PotentialDedicatedAllocation::Generic(alloc)
351 }
352 }
353