// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Generic aarch64 page table manipulation functionality which doesn't assume anything about how
//! addresses are mapped.

use crate::MapError;
#[cfg(feature = "alloc")]
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};
use bitflags::bitflags;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
use core::ops::{Add, Range, Sub};
use core::ptr::NonNull;

const PAGE_SHIFT: usize = 12;

/// The pagetable level at which all entries are page mappings.
const LEAF_LEVEL: usize = 3;

/// The page size in bytes assumed by this library, 4 KiB.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

/// The number of address bits resolved in one level of page table lookup. This is a function of
/// the page size: a 4 KiB table holds 512 eight-byte descriptors, so each level resolves 9 bits
/// of the virtual address.
pub const BITS_PER_LEVEL: usize = PAGE_SHIFT - 3;

/// Which virtual address range a page table is for, i.e. which TTBR register to use for it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum VaRange {
    /// The page table covers the bottom of the virtual address space (starting at address 0), so
    /// will be used with `TTBR0`.
    Lower,
    /// The page table covers the top of the virtual address space (ending at address
    /// 0xffff_ffff_ffff_ffff), so will be used with `TTBR1`.
    Upper,
}

/// An aarch64 virtual address, the input type of a stage 1 page table.
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct VirtualAddress(pub usize);

impl Display for VirtualAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#018x}", self.0)
    }
}

impl Debug for VirtualAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "VirtualAddress({})", self)
    }
}

impl Sub for VirtualAddress {
    type Output = usize;

    fn sub(self, other: Self) -> Self::Output {
        self.0 - other.0
    }
}

impl Add<usize> for VirtualAddress {
    type Output = Self;

    fn add(self, other: usize) -> Self {
        Self(self.0 + other)
    }
}

impl Sub<usize> for VirtualAddress {
    type Output = Self;

    fn sub(self, other: usize) -> Self {
        Self(self.0 - other)
    }
}

/// A range of virtual addresses which may be mapped in a page table.
#[derive(Clone, Eq, PartialEq)]
pub struct MemoryRegion(Range<VirtualAddress>);

/// An aarch64 physical address or intermediate physical address, the output type of a stage 1 page
/// table.
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysicalAddress(pub usize);

impl Display for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#018x}", self.0)
    }
}

impl Debug for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "PhysicalAddress({})", self)
    }
}

impl Sub for PhysicalAddress {
    type Output = usize;

    fn sub(self, other: Self) -> Self::Output {
        self.0 - other.0
    }
}

impl Add<usize> for PhysicalAddress {
    type Output = Self;

    fn add(self, other: usize) -> Self {
        Self(self.0 + other)
    }
}

impl Sub<usize> for PhysicalAddress {
    type Output = Self;

    fn sub(self, other: usize) -> Self {
        Self(self.0 - other)
    }
}

/// Returns the size in bytes of the address space covered by a single entry in the page table at
/// the given level.
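///
/// For the 4 KiB granule assumed by this library, this works out to 512 GiB per entry at level 0,
/// 1 GiB at level 1, 2 MiB at level 2 and 4 KiB at level 3 (the leaf level).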
pub(crate) fn granularity_at_level(level: usize) -> usize {
    PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
}

/// An implementation of this trait needs to be provided to the mapping routines, so that the
/// physical addresses used in the page tables can be converted into virtual addresses that can be
/// used to access their contents from the code.
pub trait Translation {
    /// Allocates a zeroed page, which is already mapped, to be used for a new subtable of some
    /// pagetable. Returns both a pointer to the page and its physical address.
    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress);

    /// Deallocates the page which was previously allocated by [`allocate_table`](Self::allocate_table).
    ///
    /// # Safety
    ///
    /// The memory must have been allocated by `allocate_table` on the same `Translation`, and not
    /// yet deallocated.
    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>);

    /// Given the physical address of a subtable, returns the virtual address at which it is mapped.
    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable>;
}
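
// For illustration only: a minimal identity-mapping `Translation`, where physical and virtual
// addresses coincide, might look like the sketch below. `IdentityTranslation` is a hypothetical
// name (not defined in this module), and the sketch assumes the `alloc` feature so that
// `PageTable::new` and `deallocate` are available.
//
//     struct IdentityTranslation;
//
//     impl Translation for IdentityTranslation {
//         fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
//             let table = PageTable::new();
//             // Identity mapping: the table's virtual address is also its physical address.
//             (table, PhysicalAddress(table.as_ptr() as usize))
//         }
//
//         unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
//             // SAFETY: the table was allocated by `allocate_table` above via the global
//             // allocator with the layout of `PageTable`.
//             unsafe { deallocate(page_table) };
//         }
//
//         fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
//             NonNull::new(pa.0 as *mut PageTable).expect("unexpected null physical address")
//         }
//     }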

impl MemoryRegion {
    /// Constructs a new `MemoryRegion` for the given range of virtual addresses.
    ///
    /// The start is inclusive and the end is exclusive. Both will be aligned to the [`PAGE_SIZE`],
    /// with the start being rounded down and the end being rounded up.
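    ///
    /// A usage sketch (assuming this module is available as `aarch64_paging::paging`):
    ///
    /// ```
    /// use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
    ///
    /// let region = MemoryRegion::new(0x1234, 0x2f00);
    /// assert_eq!(region.start(), VirtualAddress(0x1000)); // rounded down to a page boundary
    /// assert_eq!(region.end(), VirtualAddress(0x3000)); // rounded up to a page boundary
    /// ```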
    pub const fn new(start: usize, end: usize) -> MemoryRegion {
        MemoryRegion(
            VirtualAddress(align_down(start, PAGE_SIZE))..VirtualAddress(align_up(end, PAGE_SIZE)),
        )
    }

    /// Returns the first virtual address of the memory range.
    pub const fn start(&self) -> VirtualAddress {
        self.0.start
    }

    /// Returns the first virtual address after the memory range.
    pub const fn end(&self) -> VirtualAddress {
        self.0.end
    }

    /// Returns the length of the memory region in bytes.
    pub const fn len(&self) -> usize {
        self.0.end.0 - self.0.start.0
    }

    /// Returns whether the memory region contains exactly 0 bytes.
    pub const fn is_empty(&self) -> bool {
        self.0.start.0 == self.0.end.0
    }

    fn split(&self, level: usize) -> ChunkedIterator {
        ChunkedIterator {
            range: self,
            granularity: granularity_at_level(level),
            start: self.0.start.0,
        }
    }

    /// Returns whether this region can be mapped at `level` using block mappings only.
    pub(crate) fn is_block(&self, level: usize) -> bool {
        let gran = granularity_at_level(level);
        (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
    }
}

impl From<Range<VirtualAddress>> for MemoryRegion {
    fn from(range: Range<VirtualAddress>) -> Self {
        Self::new(range.start.0, range.end.0)
    }
}

impl Display for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}

impl Debug for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        Display::fmt(self, f)
    }
}

bitflags! {
    /// Constraints on page table mappings
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Constraints: usize {
        /// Block mappings are not permitted, only page mappings
        const NO_BLOCK_MAPPINGS    = 1 << 0;
        /// Use of the contiguous hint is not permitted
        const NO_CONTIGUOUS_HINT   = 1 << 1;
    }
}

/// A complete hierarchy of page tables including all levels.
pub struct RootTable<T: Translation> {
    table: PageTableWithLevel<T>,
    translation: T,
    pa: PhysicalAddress,
    va_range: VaRange,
}

impl<T: Translation> RootTable<T> {
    /// Creates a new page table starting at the given root level.
    ///
    /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not
    /// currently supported by this library. The value of `TCR_EL1.T0SZ` must be set appropriately
    /// to match.
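    ///
    /// For example, a level 1 root covers a 39-bit address space, corresponding to
    /// `TCR_EL1.T0SZ = 25`, while a level 0 root covers 48 bits (`T0SZ = 16`).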
    pub fn new(translation: T, level: usize, va_range: VaRange) -> Self {
        if level > LEAF_LEVEL {
            panic!("Invalid root table level {}.", level);
        }
        let (table, pa) = PageTableWithLevel::new(&translation, level);
        RootTable {
            table,
            translation,
            pa,
            va_range,
        }
    }

    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// This is a function of the chosen root level.
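    /// For example, a level 1 root covers 2^39 bytes (512 GiB) and a level 0 root covers 2^48
    /// bytes (256 TiB).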
    pub fn size(&self) -> usize {
        granularity_at_level(self.table.level) << BITS_PER_LEVEL
    }

    /// Recursively maps a range into the pagetable hierarchy starting at the root level, mapping
    /// the pages to the corresponding physical address range starting at `pa`. Block and page
    /// entries will be written to, but will only be mapped if `flags` contains `Attributes::VALID`.
    ///
    /// Returns an error if the virtual address range is out of the range covered by the pagetable,
    /// or if the `flags` argument has unsupported attributes set.
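    ///
    /// A typical call might look like the sketch below, where `root` stands for some `RootTable`
    /// built over a suitable [`Translation`] implementation and the addresses are purely
    /// illustrative:
    ///
    /// ```ignore
    /// root.map_range(
    ///     &MemoryRegion::new(0x20_0000, 0x20_1000),
    ///     PhysicalAddress(0x8000_0000),
    ///     Attributes::NORMAL | Attributes::VALID,
    ///     Constraints::empty(),
    /// )?;
    /// ```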
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if flags.contains(Attributes::TABLE_OR_PAGE) {
            return Err(MapError::InvalidFlags(Attributes::TABLE_OR_PAGE));
        }
        self.verify_region(range)?;
        self.table
            .map_range(&self.translation, range, pa, flags, constraints);
        Ok(())
    }

    /// Returns the physical address of the root table in memory.
    pub fn to_physical(&self) -> PhysicalAddress {
        self.pa
    }

    /// Returns the TTBR for which this table is intended.
    pub fn va_range(&self) -> VaRange {
        self.va_range
    }

    /// Returns a reference to the translation used for this page table.
    pub fn translation(&self) -> &T {
        &self.translation
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
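    ///
    /// For example, the following sketch (where `root` is some `RootTable` and the range is
    /// illustrative) sets the `READ_ONLY` attribute on every descriptor covering a range:
    ///
    /// ```ignore
    /// root.modify_range(&MemoryRegion::new(0x20_0000, 0x40_0000), &|_chunk, entry, _level| {
    ///     entry.modify_flags(Attributes::READ_ONLY, Attributes::empty());
    ///     Ok(())
    /// })?;
    /// ```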
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.verify_region(range)?;
        self.table.modify_range(&self.translation, range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The full virtual address range mapped by each visited page table descriptor, which may
    ///   exceed the original range passed to `walk_range`, due to alignment to block boundaries.
    /// - The page table descriptor itself.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
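    ///
    /// For example, this sketch (with `root` and the range as placeholders) counts the valid
    /// descriptors covering a range without modifying anything:
    ///
    /// ```ignore
    /// let mut valid = 0;
    /// root.walk_range(&MemoryRegion::new(0x20_0000, 0x40_0000), &mut |_chunk, entry, _level| {
    ///     if entry.is_valid() {
    ///         valid += 1;
    ///     }
    ///     Ok(())
    /// })?;
    /// ```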
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.verify_region(range)?;
        self.table.walk_range(&self.translation, range, f)
    }

    /// Returns the level of mapping used for the given virtual address:
    /// - `None` if it is unmapped
    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
    /// - `Some(level)` if it is mapped as a block at `level`
    #[cfg(test)]
    pub(crate) fn mapping_level(&self, va: VirtualAddress) -> Option<usize> {
        self.table.mapping_level(&self.translation, va)
    }

    /// Checks whether the region is within range of the page table.
    fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> {
        if region.end() < region.start() {
            return Err(MapError::RegionBackwards(region.clone()));
        }
        match self.va_range {
            VaRange::Lower => {
                if (region.start().0 as isize) < 0 {
                    return Err(MapError::AddressRange(region.start()));
                } else if region.end().0 > self.size() {
                    return Err(MapError::AddressRange(region.end()));
                }
            }
            VaRange::Upper => {
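                // Upper-range virtual addresses have their top bits set, so they are negative when
                // reinterpreted as `isize`; the region must start no more than `size()` bytes
                // below the top of the address space.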
                if region.start().0 as isize >= 0
                    || (region.start().0 as isize).unsigned_abs() > self.size()
                {
                    return Err(MapError::AddressRange(region.start()));
                }
            }
        }
        Ok(())
    }
}

impl<T: Translation> Debug for RootTable<T> {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        writeln!(
            f,
            "RootTable {{ pa: {}, level: {}, table:",
            self.pa, self.table.level
        )?;
        self.table.fmt_indented(f, &self.translation, 0)?;
        write!(f, "}}")
    }
}

impl<T: Translation> Drop for RootTable<T> {
    fn drop(&mut self) {
        self.table.free(&self.translation)
    }
}

struct ChunkedIterator<'a> {
    range: &'a MemoryRegion,
    granularity: usize,
    start: usize,
}

impl Iterator for ChunkedIterator<'_> {
    type Item = MemoryRegion;

    fn next(&mut self) -> Option<MemoryRegion> {
        if !self.range.0.contains(&VirtualAddress(self.start)) {
            return None;
        }
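        // Each chunk ends at the next `granularity`-aligned boundary above `start`, or at the end
        // of the region, whichever comes first.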
        let end = self
            .range
            .0
            .end
            .0
            .min((self.start | (self.granularity - 1)) + 1);
        let c = MemoryRegion::new(self.start, end);
        self.start = end;
        Some(c)
    }
}

bitflags! {
    /// Attribute bits for a mapping in a page table.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Attributes: usize {
        const VALID         = 1 << 0;
        const TABLE_OR_PAGE = 1 << 1;

        // The following memory types assume that the MAIR registers
        // have been programmed accordingly.
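        // Specifically, `DEVICE_NGNRE` selects MAIR attribute index 0 and `NORMAL` selects index 1
        // (bits [4:2] of a descriptor form the AttrIndx field), so for these names to be accurate
        // MAIR attribute 0 should be programmed as Device-nGnRE (0x04) and attribute 1 as Normal
        // write-back cacheable memory (0xff).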
        const DEVICE_NGNRE  = 0 << 2;
        const NORMAL        = 1 << 2 | 3 << 8; // inner shareable

        const USER          = 1 << 6;
        const READ_ONLY     = 1 << 7;
        const ACCESSED      = 1 << 10;
        const NON_GLOBAL    = 1 << 11;
        const DBM           = 1 << 51;
        const EXECUTE_NEVER = 3 << 53;

        /// Software flags in block and page descriptor entries.
        const SWFLAG_0 = 1 << 55;
        const SWFLAG_1 = 1 << 56;
        const SWFLAG_2 = 1 << 57;
        const SWFLAG_3 = 1 << 58;
    }
}

/// Smart pointer which owns a [`PageTable`] and knows what level it is at. This allows it to
/// implement `Debug` and `Drop`, as walking the page table hierarchy requires knowing the starting
/// level.
#[derive(Debug)]
struct PageTableWithLevel<T: Translation> {
    table: NonNull<PageTable>,
    level: usize,
    _translation: PhantomData<T>,
}

// SAFETY: The underlying PageTable is process-wide and can be safely accessed from any thread
// with appropriate synchronization. This type manages ownership for the raw pointer.
unsafe impl<T: Translation + Send> Send for PageTableWithLevel<T> {}

impl<T: Translation> PageTableWithLevel<T> {
    /// Allocates a new, zeroed, appropriately-aligned page table with the given translation,
    /// returning both a pointer to it and its physical address.
    fn new(translation: &T, level: usize) -> (Self, PhysicalAddress) {
        assert!(level <= LEAF_LEVEL);
        let (table, pa) = translation.allocate_table();
        (
            // Safe because the pointer has been allocated with the appropriate layout, and the
            // memory is zeroed which is valid initialisation for a PageTable.
            Self::from_pointer(table, level),
            pa,
        )
    }

    fn from_pointer(table: NonNull<PageTable>, level: usize) -> Self {
        Self {
            table,
            level,
            _translation: PhantomData,
        }
    }

    /// Returns a reference to the descriptor corresponding to a given virtual address.
    fn get_entry(&self, va: VirtualAddress) -> &Descriptor {
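        // At level 2, for example, the shift below is 12 + 9 = 21, so bits [29:21] of the virtual
        // address select one of the 512 entries; at the leaf level bits [20:12] are used.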
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        // SAFETY: Safe because we know that the pointer is properly aligned, dereferenceable and
        // initialised, and nothing else can access the page table while we hold a reference to
        // the PageTableWithLevel (assuming it is not currently active).
        let table = unsafe { self.table.as_ref() };
        &table.entries[index]
    }

    /// Returns a mutable reference to the descriptor corresponding to a given virtual address.
    fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor {
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        // SAFETY: Safe because we know that the pointer is properly aligned, dereferenceable and
        // initialised, and nothing else can access the page table while we hold a mutable
        // reference to the PageTableWithLevel (assuming it is not currently active).
        let table = unsafe { self.table.as_mut() };
        &mut table.entries[index]
    }

    /// Converts the descriptor in `entry` from a block mapping to a table mapping of
    /// the same range with the same attributes.
    fn split_entry(
        translation: &T,
        chunk: &MemoryRegion,
        entry: &mut Descriptor,
        level: usize,
    ) -> Self {
        let granularity = granularity_at_level(level);
        let old = *entry;
        let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
        if let Some(old_flags) = old.flags() {
            if !old_flags.contains(Attributes::TABLE_OR_PAGE) {
                let old_pa = old.output_address();
                // `old` was a block entry, so we need to split it.
                // Recreate the entire block in the newly added table.
                let a = align_down(chunk.0.start.0, granularity);
                let b = align_up(chunk.0.end.0, granularity);
                subtable.map_range(
                    translation,
                    &MemoryRegion::new(a, b),
                    old_pa,
                    old_flags,
                    Constraints::empty(),
                );
            }
        }
        entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID);
        subtable
    }

    /// Maps the given virtual address range in this pagetable to the corresponding physical
    /// address range starting at the given `pa`, recursing into any subtables as necessary. To map
    /// block and page entries, `Attributes::VALID` must be set in `flags`.
    ///
    /// Assumes that the entire range is within the range covered by this pagetable.
    ///
    /// Panics if the `translation` doesn't provide a corresponding physical address for some
    /// virtual address within the range, as there is no way to roll back to a safe state so this
    /// should be checked by the caller beforehand.
    fn map_range(
        &mut self,
        translation: &T,
        range: &MemoryRegion,
        mut pa: PhysicalAddress,
        flags: Attributes,
        constraints: Constraints,
    ) {
        let level = self.level;
        let granularity = granularity_at_level(level);

        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);

            if level == LEAF_LEVEL {
                // Put down a page mapping.
                entry.set(pa, flags | Attributes::ACCESSED | Attributes::TABLE_OR_PAGE);
            } else if chunk.is_block(level)
                && !entry.is_table_or_page()
                && is_aligned(pa.0, granularity)
                && !constraints.contains(Constraints::NO_BLOCK_MAPPINGS)
            {
                // Rather than leak the entire subhierarchy, only put down
                // a block mapping if the region is not already covered by
                // a table mapping.
                entry.set(pa, flags | Attributes::ACCESSED);
            } else {
                let mut subtable = entry
                    .subtable(translation, level)
                    .unwrap_or_else(|| Self::split_entry(translation, &chunk, entry, level));
                subtable.map_range(translation, &chunk, pa, flags, constraints);
            }
            pa.0 += chunk.len();
        }
    }

    fn fmt_indented(
        &self,
        f: &mut Formatter,
        translation: &T,
        indentation: usize,
    ) -> Result<(), fmt::Error> {
        const WIDTH: usize = 3;
        // SAFETY: Safe because we know that the pointer is aligned, initialised and
        // dereferenceable, and the PageTable won't be mutated while we are using it.
        let table = unsafe { self.table.as_ref() };

        let mut i = 0;
        while i < table.entries.len() {
            if table.entries[i].0 == 0 {
                let first_zero = i;
                while i < table.entries.len() && table.entries[i].0 == 0 {
                    i += 1;
                }
                if i - 1 == first_zero {
                    writeln!(f, "{:indentation$}{: <WIDTH$}: 0", "", first_zero)?;
                } else {
                    writeln!(f, "{:indentation$}{: <WIDTH$}-{}: 0", "", first_zero, i - 1)?;
                }
            } else {
                writeln!(
                    f,
                    "{:indentation$}{: <WIDTH$}: {:?}",
                    "", i, table.entries[i],
                )?;
                if let Some(subtable) = table.entries[i].subtable(translation, self.level) {
                    subtable.fmt_indented(f, translation, indentation + 2)?;
                }
                i += 1;
            }
        }
        Ok(())
    }

    /// Frees the memory used by this pagetable and all subtables. It is not valid to access the
    /// page table after this.
    fn free(&mut self, translation: &T) {
        // SAFETY: Safe because we know that the pointer is aligned, initialised and
        // dereferenceable, and the PageTable won't be mutated while we are freeing it.
        let table = unsafe { self.table.as_ref() };
        for entry in table.entries {
            if let Some(mut subtable) = entry.subtable(translation, self.level) {
                // The subtable was allocated by `PageTableWithLevel::new` using the same
                // translation, so it can be freed recursively in the same way.
                subtable.free(translation);
            }
        }
        // SAFETY: Safe because the table was allocated by `allocate_table` on the same
        // `Translation` (via `PageTableWithLevel::new`) and has not yet been deallocated.
        unsafe {
            // Actually free the memory used by the `PageTable`.
            translation.deallocate_table(self.table);
        }
    }

    /// Modifies a range of page table entries by applying a function to each page table entry.
    /// If the range is not aligned to block boundaries, block descriptors will be split up.
    fn modify_range<F>(
        &mut self,
        translation: &T,
        range: &MemoryRegion,
        f: &F,
    ) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);
            if let Some(mut subtable) = entry.subtable(translation, level).or_else(|| {
                if !chunk.is_block(level) {
                    // The current chunk is not aligned to the block size at this level.
                    // Split it before recursing to the next level.
                    Some(Self::split_entry(translation, &chunk, entry, level))
                } else {
                    None
                }
            }) {
                subtable.modify_range(translation, &chunk, f)?;
            } else {
                f(&chunk, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
            }
        }
        Ok(())
    }

    /// Walks a range of page table entries and passes each one to a caller-provided function.
    /// If the range is not aligned to block boundaries, it will be expanded.
    fn walk_range<F>(
        &self,
        translation: &T,
        range: &MemoryRegion,
        f: &mut F,
    ) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry(chunk.0.start);
            if let Some(subtable) = entry.subtable(translation, level) {
                subtable.walk_range(translation, &chunk, f)?;
            } else {
                f(&chunk, entry, level).map_err(|_| MapError::PteUpdateFault(*entry))?;
            }
        }
        Ok(())
    }

    /// Returns the level of mapping used for the given virtual address:
    /// - `None` if it is unmapped
    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
    /// - `Some(level)` if it is mapped as a block at `level`
    #[cfg(test)]
    fn mapping_level(&self, translation: &T, va: VirtualAddress) -> Option<usize> {
        let entry = self.get_entry(va);
        if let Some(subtable) = entry.subtable(translation, self.level) {
            subtable.mapping_level(translation, va)
        } else if entry.is_valid() {
            Some(self.level)
        } else {
            None
        }
    }
}

/// A single level of a page table.
#[repr(C, align(4096))]
pub struct PageTable {
    entries: [Descriptor; 1 << BITS_PER_LEVEL],
}

impl PageTable {
    /// Allocates a new zeroed, appropriately-aligned pagetable on the heap using the global
    /// allocator and returns a pointer to it.
    #[cfg(feature = "alloc")]
    pub fn new() -> NonNull<Self> {
        // SAFETY: Safe because the pointer has been allocated with the appropriate layout by the
        // global allocator, and the memory is zeroed which is valid initialisation for a PageTable.
        unsafe { allocate_zeroed() }
    }
}

/// An entry in a page table.
///
/// A descriptor may be:
///   - Invalid, i.e. the virtual address range is unmapped.
///   - A page mapping, if it is in the lowest level page table.
///   - A block mapping, if it is not in the lowest level page table.
///   - A pointer to a lower level pagetable, if it is not in the lowest level page table.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct Descriptor(usize);

impl Descriptor {
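    // Bits [47:12] of a descriptor: the low 12 bits and the top 16 bits carry attributes and
    // flags rather than address bits, so only the bits in between form the output address.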
    const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);

    pub(crate) fn output_address(self) -> PhysicalAddress {
        PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK)
    }

    /// Returns the flags of this page table entry, or `None` if its state does not
    /// contain a valid set of flags.
    pub fn flags(self) -> Option<Attributes> {
        Attributes::from_bits(self.0 & !Self::PHYSICAL_ADDRESS_BITMASK)
    }

    /// Modifies the page table entry by setting or clearing its flags.
    /// Panics when attempting to convert a table descriptor into a block/page descriptor or vice
    /// versa - this is not supported via this API.
    pub fn modify_flags(&mut self, set: Attributes, clear: Attributes) {
        let flags = (self.0 | set.bits()) & !clear.bits();

        if (self.0 ^ flags) & Attributes::TABLE_OR_PAGE.bits() != 0 {
            panic!("Cannot convert between table and block/page descriptors\n");
        }
        self.0 = flags;
    }

    /// Returns `true` if [`Attributes::VALID`] is set on this entry, i.e. if the entry is mapped.
    pub fn is_valid(self) -> bool {
        (self.0 & Attributes::VALID.bits()) != 0
    }

    /// Returns `true` if this is a valid entry pointing to a next level translation table or a page.
    pub fn is_table_or_page(self) -> bool {
        if let Some(flags) = self.flags() {
            flags.contains(Attributes::TABLE_OR_PAGE | Attributes::VALID)
        } else {
            false
        }
    }

    pub(crate) fn set(&mut self, pa: PhysicalAddress, flags: Attributes) {
        self.0 = (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.bits();
    }

    fn subtable<T: Translation>(
        self,
        translation: &T,
        level: usize,
    ) -> Option<PageTableWithLevel<T>> {
        if level < LEAF_LEVEL && self.is_table_or_page() {
            let output_address = self.output_address();
            let table = translation.physical_to_virtual(output_address);
            return Some(PageTableWithLevel::from_pointer(table, level + 1));
        }
        None
    }
}

impl Debug for Descriptor {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:#016x}", self.0)?;
        if self.is_valid() {
            if let Some(flags) = self.flags() {
                write!(f, " ({}, {:?})", self.output_address(), flags)?;
            }
        }
        Ok(())
    }
}

/// Allocates appropriately aligned heap space for a `T` and zeroes it.
///
/// # Safety
///
/// It must be valid to initialise the type `T` by simply zeroing its memory.
#[cfg(feature = "alloc")]
unsafe fn allocate_zeroed<T>() -> NonNull<T> {
    let layout = Layout::new::<T>();
    // Safe because we know the layout has non-zero size.
    let pointer = alloc_zeroed(layout);
    if pointer.is_null() {
        handle_alloc_error(layout);
    }
    // Safe because we just checked that the pointer is non-null.
    NonNull::new_unchecked(pointer as *mut T)
}

/// Deallocates the heap space for a `T` which was previously allocated by `allocate_zeroed`.
///
/// # Safety
///
/// The memory must have been allocated by the global allocator, with the layout for `T`, and not
/// yet deallocated.
#[cfg(feature = "alloc")]
pub(crate) unsafe fn deallocate<T>(ptr: NonNull<T>) {
    let layout = Layout::new::<T>();
    dealloc(ptr.as_ptr() as *mut u8, layout);
}

/// Aligns `value` down to the given `alignment`, which must be a power of two.
const fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

/// Aligns `value` up to the given `alignment`, which must be a power of two.
const fn align_up(value: usize, alignment: usize) -> usize {
    ((value - 1) | (alignment - 1)) + 1
}

/// Returns whether `value` is a multiple of `alignment`, which must be a power of two.
pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool {
    value & (alignment - 1) == 0
}

#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "alloc")]
    use alloc::{format, string::ToString, vec, vec::Vec};

    #[cfg(feature = "alloc")]
    #[test]
    fn display_memory_region() {
        let region = MemoryRegion::new(0x1234, 0x56789);
        assert_eq!(
            &region.to_string(),
            "0x0000000000001000..0x0000000000057000"
        );
        assert_eq!(
            &format!("{:?}", region),
            "0x0000000000001000..0x0000000000057000"
        );
    }

    #[test]
    fn subtract_virtual_address() {
        let low = VirtualAddress(0x12);
        let high = VirtualAddress(0x1234);
        assert_eq!(high - low, 0x1222);
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn subtract_virtual_address_overflow() {
        let low = VirtualAddress(0x12);
        let high = VirtualAddress(0x1234);

        // This would overflow, so should panic.
        let _ = low - high;
    }

    #[test]
    fn add_virtual_address() {
        assert_eq!(VirtualAddress(0x1234) + 0x42, VirtualAddress(0x1276));
    }

    #[test]
    fn subtract_physical_address() {
        let low = PhysicalAddress(0x12);
        let high = PhysicalAddress(0x1234);
        assert_eq!(high - low, 0x1222);
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn subtract_physical_address_overflow() {
        let low = PhysicalAddress(0x12);
        let high = PhysicalAddress(0x1234);

        // This would overflow, so should panic.
        let _ = low - high;
    }

    #[test]
    fn add_physical_address() {
        assert_eq!(PhysicalAddress(0x1234) + 0x42, PhysicalAddress(0x1276));
    }

    #[test]
    fn invalid_descriptor() {
        let desc = Descriptor(0usize);
        assert!(!desc.is_valid());
        assert!(!desc.flags().unwrap().contains(Attributes::VALID));
    }

    #[test]
    fn set_descriptor() {
        const PHYSICAL_ADDRESS: usize = 0x12340000;
        let mut desc = Descriptor(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(PHYSICAL_ADDRESS),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID,
        );
        assert!(desc.is_valid());
        assert_eq!(
            desc.flags().unwrap(),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID
        );
        assert_eq!(desc.output_address(), PhysicalAddress(PHYSICAL_ADDRESS));
    }

    #[test]
    fn modify_descriptor_flags() {
        let mut desc = Descriptor(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
        );
        desc.modify_flags(
            Attributes::DBM | Attributes::SWFLAG_3,
            Attributes::VALID | Attributes::SWFLAG_1,
        );
        assert!(!desc.is_valid());
        assert_eq!(
            desc.flags().unwrap(),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_3 | Attributes::DBM
        );
    }

    #[test]
    #[should_panic]
    fn modify_descriptor_table_or_page_flag() {
        let mut desc = Descriptor(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1,
        );
        desc.modify_flags(Attributes::VALID, Attributes::TABLE_OR_PAGE);
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn unaligned_chunks() {
        let region = MemoryRegion::new(0x0000_2000, 0x0020_5000);
        let chunks = region.split(LEAF_LEVEL - 1).collect::<Vec<_>>();
        assert_eq!(
            chunks,
            vec![
                MemoryRegion::new(0x0000_2000, 0x0020_0000),
                MemoryRegion::new(0x0020_0000, 0x0020_5000),
            ]
        );
    }
}