//! Module for [`MemoryMapOwned`], [`MemoryMapRef`], and [`MemoryMapRefMut`],
//! as well as relevant helper types, such as [`MemoryMapBackingMemory`].

use super::*;
use crate::boot;
use core::fmt::{Debug, Display, Formatter};
use core::ops::{Index, IndexMut};
use core::ptr::NonNull;
use core::{mem, ptr};
use uefi_raw::PhysicalAddress;

/// Errors that may happen when constructing a [`MemoryMapRef`] or
/// [`MemoryMapRefMut`].
#[derive(Copy, Clone, Debug)]
pub enum MemoryMapError {
    /// The buffer is not 8-byte aligned.
    Misaligned,
    /// The memory map size is invalid.
    InvalidSize,
}

impl Display for MemoryMapError {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        Debug::fmt(self, f)
    }
}

#[cfg(feature = "unstable")]
impl core::error::Error for MemoryMapError {}

/// Implementation of [`MemoryMap`] for the given buffer.
#[derive(Debug)]
pub struct MemoryMapRef<'a> {
    buf: &'a [u8],
    meta: MemoryMapMeta,
    len: usize,
}

impl<'a> MemoryMapRef<'a> {
    /// Constructs a new [`MemoryMapRef`].
    ///
    /// The underlying memory might contain an invalid/malformed memory map
    /// which can't be checked during construction of this type. The entry
    /// iterator might yield unexpected results.
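    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `buffer` and `meta` were obtained from a
    /// prior raw `GetMemoryMap` call (the names are illustrative):
    ///
    /// ```ignore
    /// let map = MemoryMapRef::new(buffer, meta)?;
    /// for desc in map.entries() {
    ///     // Inspect each `MemoryDescriptor` here.
    /// }
    /// ```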
    pub fn new(buffer: &'a [u8], meta: MemoryMapMeta) -> Result<Self, MemoryMapError> {
        if buffer.as_ptr().align_offset(8) != 0 {
            return Err(MemoryMapError::Misaligned);
        }
        if buffer.len() < meta.map_size {
            return Err(MemoryMapError::InvalidSize);
        }
        Ok(Self {
            buf: buffer,
            meta,
            len: meta.entry_count(),
        })
    }
}

impl<'a> MemoryMap for MemoryMapRef<'a> {
    fn meta(&self) -> MemoryMapMeta {
        self.meta
    }

    fn key(&self) -> MemoryMapKey {
        self.meta.map_key
    }

    fn len(&self) -> usize {
        self.len
    }

    fn buffer(&self) -> &[u8] {
        self.buf
    }

    fn entries(&self) -> MemoryMapIter<'_> {
        MemoryMapIter {
            memory_map: self,
            index: 0,
        }
    }
}

impl Index<usize> for MemoryMapRef<'_> {
    type Output = MemoryDescriptor;

    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}

/// Implementation of [`MemoryMapMut`] for the given buffer.
#[derive(Debug)]
pub struct MemoryMapRefMut<'a> {
    buf: &'a mut [u8],
    meta: MemoryMapMeta,
    len: usize,
}

impl<'a> MemoryMapRefMut<'a> {
    /// Constructs a new [`MemoryMapRefMut`].
    ///
    /// The underlying memory might contain an invalid/malformed memory map
    /// which can't be checked during construction of this type. The entry
    /// iterator might yield unexpected results.
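    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `buffer` and `meta` stem from a prior raw
    /// `GetMemoryMap` call (the names are illustrative):
    ///
    /// ```ignore
    /// let mut map = MemoryMapRefMut::new(buffer, meta)?;
    /// // Sort the entries in-place by physical start address.
    /// map.sort();
    /// ```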
    pub fn new(buffer: &'a mut [u8], meta: MemoryMapMeta) -> Result<Self, MemoryMapError> {
        if buffer.as_ptr().align_offset(8) != 0 {
            return Err(MemoryMapError::Misaligned);
        }
        if buffer.len() < meta.map_size {
            return Err(MemoryMapError::InvalidSize);
        }
        Ok(Self {
            buf: buffer,
            meta,
            len: meta.entry_count(),
        })
    }
}

impl<'a> MemoryMap for MemoryMapRefMut<'a> {
    fn meta(&self) -> MemoryMapMeta {
        self.meta
    }

    fn key(&self) -> MemoryMapKey {
        self.meta.map_key
    }

    fn len(&self) -> usize {
        self.len
    }

    fn buffer(&self) -> &[u8] {
        self.buf
    }

    fn entries(&self) -> MemoryMapIter<'_> {
        MemoryMapIter {
            memory_map: self,
            index: 0,
        }
    }
}

impl<'a> MemoryMapMut for MemoryMapRefMut<'a> {
    fn sort(&mut self) {
        unsafe {
            self.qsort(0, self.len - 1);
        }
    }

    unsafe fn buffer_mut(&mut self) -> &mut [u8] {
        self.buf
    }
}

impl<'a> MemoryMapRefMut<'a> {
    /// Hoare partition scheme for quicksort.
    /// Must be called with `low` and `high` being indices within bounds.
    unsafe fn qsort(&mut self, low: usize, high: usize) {
        if low >= high {
            return;
        }

        let p = self.partition(low, high);
        self.qsort(low, p);
        self.qsort(p + 1, high);
    }

    unsafe fn partition(&mut self, low: usize, high: usize) -> usize {
        let pivot = self.get_element_phys_addr(low + (high - low) / 2);

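        // Start the cursors one step outside `[low, high]`; the first
        // iteration of each inner loop below undoes the wrapping step, so
        // this also works for `low == 0`.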
        let mut left_index = low.wrapping_sub(1);
        let mut right_index = high.wrapping_add(1);

        loop {
            while {
                left_index = left_index.wrapping_add(1);

                self.get_element_phys_addr(left_index) < pivot
            } {}

            while {
                right_index = right_index.wrapping_sub(1);

                self.get_element_phys_addr(right_index) > pivot
            } {}

            if left_index >= right_index {
                return right_index;
            }

            self.swap(left_index, right_index);
        }
    }

    /// Indices must be smaller than len.
    unsafe fn swap(&mut self, index1: usize, index2: usize) {
        if index1 == index2 {
            return;
        }

        let base = self.buf.as_mut_ptr();

        unsafe {
            ptr::swap_nonoverlapping(
                base.add(index1 * self.meta.desc_size),
                base.add(index2 * self.meta.desc_size),
                self.meta.desc_size,
            );
        }
    }

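    /// Returns the physical start address of the descriptor at `index`.
    ///
    /// The caller must ensure that `index` is smaller than the entry count.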
    fn get_element_phys_addr(&self, index: usize) -> PhysicalAddress {
        let offset = index.checked_mul(self.meta.desc_size).unwrap();
        let elem = unsafe { &*self.buf.as_ptr().add(offset).cast::<MemoryDescriptor>() };
        elem.phys_start
    }
}

impl Index<usize> for MemoryMapRefMut<'_> {
    type Output = MemoryDescriptor;

    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}

impl IndexMut<usize> for MemoryMapRefMut<'_> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).unwrap()
    }
}

/// The backing memory for the UEFI memory map on the UEFI heap, allocated using
/// the UEFI boot services allocator. This occupied memory will also be
/// reflected in the memory map itself.
///
/// Although untyped, it is similar to the `Box` type in terms of heap
/// allocation and deallocation, as well as ownership of the corresponding
/// memory. Apart from that, this type only has the semantics of a buffer.
///
/// The memory is untyped, which is necessary due to the nature of the UEFI
/// spec. It still ensures a correct alignment to hold [`MemoryDescriptor`]. The
/// size of the buffer is sufficient to hold the memory map at the point in time
/// where this is created. Note that due to (not obvious or asynchronous)
/// allocations/deallocations in your environment, this might be outdated at the
/// time you store the memory map in it.
///
/// Note that due to the nature of the UEFI memory map, this buffer might
/// hold (a few) bytes more than necessary. The `map_size` reported by
/// `get_memory_map` tells the actual size.
///
/// When this type is dropped and boot services are not exited yet, the memory
/// is freed.
///
/// # Usage
/// The type is intended to be used like this:
/// 1. create it using [`MemoryMapBackingMemory::new`]
/// 2. pass it to [`boot::get_memory_map`]
/// 3. construct a [`MemoryMapOwned`] from it
///
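/// A sketch of that flow (illustrative only; how the buffer gets filled and
/// where `meta` comes from is internal to [`boot::get_memory_map`]):
///
/// ```ignore
/// let mut buf = MemoryMapBackingMemory::new(MemoryType::LOADER_DATA)?;
/// // ... the firmware fills `buf.as_mut_slice()` and reports `meta` ...
/// let mmap = MemoryMapOwned::from_initialized_mem(buf, meta);
/// ```
///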
/// [`boot::get_memory_map`]: crate::boot::get_memory_map
#[derive(Debug)]
#[allow(clippy::len_without_is_empty)] // this type is never empty
pub(crate) struct MemoryMapBackingMemory(NonNull<[u8]>);

impl MemoryMapBackingMemory {
    /// Constructs a new [`MemoryMapBackingMemory`].
    ///
    /// # Parameters
    /// - `memory_type`: The memory type for the memory map allocation.
    ///   Typically, [`MemoryType::LOADER_DATA`] for regular UEFI applications.
    pub(crate) fn new(memory_type: MemoryType) -> crate::Result<Self> {
        let memory_map_meta = boot::memory_map_size();
        let len = Self::safe_allocation_size_hint(memory_map_meta);
        let ptr = boot::allocate_pool(memory_type, len)?.as_ptr();

        // This should be fine, as UEFI pool allocations have a guaranteed
        // alignment of 8 bytes.
        assert_eq!(ptr.align_offset(mem::align_of::<MemoryDescriptor>()), 0);

        // If this panics, the UEFI implementation is broken.
        assert_eq!(memory_map_meta.map_size % memory_map_meta.desc_size, 0);

        unsafe { Ok(Self::from_raw(ptr, len)) }
    }

    unsafe fn from_raw(ptr: *mut u8, len: usize) -> Self {
        assert_eq!(ptr.align_offset(mem::align_of::<MemoryDescriptor>()), 0);

        let ptr = NonNull::new(ptr).expect("UEFI should never return a null ptr. An error should have been reflected via an Err earlier.");
        let slice = NonNull::slice_from_raw_parts(ptr, len);

        Self(slice)
    }

    /// INTERNAL, for unit tests.
    ///
    /// Creates an instance from the provided memory, which is not necessarily
    /// on the UEFI heap.
    #[cfg(test)]
    pub(crate) fn from_slice(buffer: &mut [u8]) -> Self {
        let len = buffer.len();
        unsafe { Self::from_raw(buffer.as_mut_ptr(), len) }
    }

310     /// Returns a "safe" best-effort size hint for the memory map size with
311     /// some additional bytes in buffer compared to the [`MemoryMapMeta`]. This
312     /// takes into account that, as you go, more (small) allocations might
313     /// happen.
    #[must_use]
    const fn safe_allocation_size_hint(mmm: MemoryMapMeta) -> usize {
        // Allocate space for extra entries beyond the current size of the
        // memory map. The value of 8 matches the value in the Linux kernel:
        // https://github.com/torvalds/linux/blob/e544a07438/drivers/firmware/efi/libstub/efistub.h#L173
        const EXTRA_ENTRIES: usize = 8;

        let extra_size = mmm.desc_size * EXTRA_ENTRIES;
        mmm.map_size + extra_size
    }

    /// Returns a slice to the underlying memory.
    #[must_use]
    pub fn as_slice(&self) -> &[u8] {
        unsafe { self.0.as_ref() }
    }

    /// Returns a mutable slice to the underlying memory.
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { self.0.as_mut() }
    }
}

// Don't free the memory when boot services are no longer active, which also
// covers usage of this type in unit tests.
impl Drop for MemoryMapBackingMemory {
    fn drop(&mut self) {
        if boot::are_boot_services_active() {
            let res = unsafe { boot::free_pool(self.0.cast()) };
            if let Err(e) = res {
                log::error!("Failed to deallocate memory map: {e:?}");
            }
        } else {
            log::debug!("Boot services are exited. Memory map won't be freed using the UEFI boot services allocator.");
        }
    }
}

/// Implementation of [`MemoryMapMut`] that owns the buffer on the UEFI heap.
#[derive(Debug)]
pub struct MemoryMapOwned {
    /// Backing memory, properly initialized at this point.
    pub(crate) buf: MemoryMapBackingMemory,
    pub(crate) meta: MemoryMapMeta,
    pub(crate) len: usize,
}

impl MemoryMapOwned {
    /// Creates a [`MemoryMapOwned`] from the given **initialized** memory map
    /// (stored inside the provided buffer) and the corresponding
    /// [`MemoryMapMeta`].
    pub(crate) fn from_initialized_mem(buf: MemoryMapBackingMemory, meta: MemoryMapMeta) -> Self {
        assert!(meta.desc_size >= mem::size_of::<MemoryDescriptor>());
        let len = meta.entry_count();
        Self { buf, meta, len }
    }
}

impl MemoryMap for MemoryMapOwned {
    fn meta(&self) -> MemoryMapMeta {
        self.meta
    }

    fn key(&self) -> MemoryMapKey {
        self.meta.map_key
    }

    fn len(&self) -> usize {
        self.len
    }

    fn buffer(&self) -> &[u8] {
        self.buf.as_slice()
    }

    fn entries(&self) -> MemoryMapIter<'_> {
        MemoryMapIter {
            memory_map: self,
            index: 0,
        }
    }
}

impl MemoryMapMut for MemoryMapOwned {
    fn sort(&mut self) {
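        // Delegate to the in-place sort of `MemoryMapRefMut` by temporarily
        // borrowing the owned buffer.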
        let mut reference = MemoryMapRefMut {
            buf: self.buf.as_mut_slice(),
            meta: self.meta,
            len: self.len,
        };
        reference.sort();
    }

    unsafe fn buffer_mut(&mut self) -> &mut [u8] {
        self.buf.as_mut_slice()
    }
}

impl Index<usize> for MemoryMapOwned {
    type Output = MemoryDescriptor;

    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}

impl IndexMut<usize> for MemoryMapOwned {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::vec::Vec;
    use core::mem::size_of;

    const BASE_MMAP_UNSORTED: [MemoryDescriptor; 3] = [
        MemoryDescriptor {
            ty: MemoryType::CONVENTIONAL,
            phys_start: 0x3000,
            virt_start: 0x3000,
            page_count: 1,
            att: MemoryAttribute::WRITE_BACK,
        },
        MemoryDescriptor {
            ty: MemoryType::CONVENTIONAL,
            phys_start: 0x2000,
            virt_start: 0x2000,
            page_count: 1,
            att: MemoryAttribute::WRITE_BACK,
        },
        MemoryDescriptor {
            ty: MemoryType::CONVENTIONAL,
            phys_start: 0x1000,
            virt_start: 0x1000,
            page_count: 1,
            att: MemoryAttribute::WRITE_BACK,
        },
    ];

    /// Returns a copy of [`BASE_MMAP_UNSORTED`] owned on the stack.
    fn new_mmap_memory() -> [MemoryDescriptor; 3] {
        BASE_MMAP_UNSORTED
    }

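    /// Reinterprets the descriptor array as the raw `&mut [u8]` buffer plus
    /// [`MemoryMapMeta`] pair that the constructors above expect.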
    fn mmap_raw<'a>(memory: &mut [MemoryDescriptor]) -> (&'a mut [u8], MemoryMapMeta) {
        let desc_size = size_of::<MemoryDescriptor>();
        let len = memory.len() * desc_size;
        let ptr = memory.as_mut_ptr().cast::<u8>();
        let slice = unsafe { core::slice::from_raw_parts_mut(ptr, len) };
        let meta = MemoryMapMeta {
            map_size: len,
            desc_size,
            map_key: Default::default(),
            desc_version: MemoryDescriptor::VERSION,
        };
        (slice, meta)
    }

    /// Basic sanity checks for the type [`MemoryMapRef`].
    #[test]
    fn memory_map_ref() {
        let mut memory = new_mmap_memory();
        let (mmap, meta) = mmap_raw(&mut memory);
        let mmap = MemoryMapRef::new(mmap, meta).unwrap();

        assert_eq!(mmap.entries().count(), 3);
        assert_eq!(
            mmap.entries().copied().collect::<Vec<_>>().as_slice(),
            &BASE_MMAP_UNSORTED
        );
        assert!(!mmap.is_sorted());
    }

    /// Basic sanity checks for the type [`MemoryMapRefMut`].
    #[test]
    fn memory_map_ref_mut() {
        let mut memory = new_mmap_memory();
        let (mmap, meta) = mmap_raw(&mut memory);
        let mut mmap = MemoryMapRefMut::new(mmap, meta).unwrap();

        assert_eq!(mmap.entries().count(), 3);
        assert_eq!(
            mmap.entries().copied().collect::<Vec<_>>().as_slice(),
            &BASE_MMAP_UNSORTED
        );
        assert!(!mmap.is_sorted());
        mmap.sort();
        assert!(mmap.is_sorted());
    }

    /// Basic sanity checks for the type [`MemoryMapOwned`].
    #[test]
    fn memory_map_owned() {
        let mut memory = new_mmap_memory();
        let (mmap, meta) = mmap_raw(&mut memory);
        let mmap = MemoryMapBackingMemory::from_slice(mmap);
        let mut mmap = MemoryMapOwned::from_initialized_mem(mmap, meta);

        assert_eq!(mmap.entries().count(), 3);
        assert_eq!(
            mmap.entries().copied().collect::<Vec<_>>().as_slice(),
            &BASE_MMAP_UNSORTED
        );
        assert!(!mmap.is_sorted());
        mmap.sort();
        assert!(mmap.is_sorted());
    }
}