// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! A library to manipulate AArch64 VMSA page tables.
//!
//! Currently it only supports:
//!   - stage 1 page tables
//!   - EL1
//!   - 4 KiB pages
//!
//! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
//! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
//! provide an implementation of the [`Translation`](paging::Translation) trait and then use
//! [`Mapping`] directly.
//!
//! # Example
//!
//! ```no_run
//! # #[cfg(feature = "alloc")] {
//! use aarch64_paging::{
//!     idmap::IdMap,
//!     paging::{Attributes, MemoryRegion},
//! };
//!
//! const ASID: usize = 1;
//! const ROOT_LEVEL: usize = 1;
//!
//! // Create a new page table with identity mapping.
//! let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
//! // Map a 2 MiB region of memory as read-write.
//! idmap.map_range(
//!     &MemoryRegion::new(0x80200000, 0x80400000),
//!     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
//! ).unwrap();
//! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
//! unsafe {
//!     // Set `TTBR0_EL1` to activate the page table.
//!     idmap.activate();
//! }
//! # }
//! ```

#![no_std]
#![deny(clippy::undocumented_unsafe_blocks)]

#[cfg(feature = "alloc")]
pub mod idmap;
#[cfg(feature = "alloc")]
pub mod linearmap;
pub mod paging;

#[cfg(feature = "alloc")]
extern crate alloc;

#[cfg(target_arch = "aarch64")]
use core::arch::asm;
use core::fmt::{self, Display, Formatter};
use paging::{
    Attributes, Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, Translation,
    VaRange, VirtualAddress,
};

/// An error attempting to map some range in the page table.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MapError {
    /// The address requested to be mapped was out of the range supported by the page table
    /// configuration.
    AddressRange(VirtualAddress),
    /// The address requested to be mapped was not valid for the mapping in use.
    InvalidVirtualAddress(VirtualAddress),
    /// The end of the memory region is before the start.
    RegionBackwards(MemoryRegion),
    /// There was an error while updating a page table entry.
    PteUpdateFault(Descriptor),
    /// The requested flags are not supported for this mapping.
    InvalidFlags(Attributes),
    /// Updating the range would violate break-before-make rules while the mapping is live.
    BreakBeforeMakeViolation(MemoryRegion),
}

impl Display for MapError {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Self::AddressRange(va) => write!(f, "Virtual address {} out of range", va),
            Self::InvalidVirtualAddress(va) => {
                write!(f, "Invalid virtual address {} for mapping", va)
            }
            Self::RegionBackwards(region) => {
                write!(f, "End of memory region {} is before start.", region)
            }
            Self::PteUpdateFault(desc) => {
                write!(f, "Error updating page table entry {:?}", desc)
            }
            Self::InvalidFlags(flags) => {
                write!(f, "Flags {flags:?} unsupported for mapping.")
            }
            Self::BreakBeforeMakeViolation(region) => {
                write!(f, "Cannot remap region {region} while translation is live.")
            }
        }
    }
}

/// Manages a level 1 page table and associated state.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
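///
/// # Example
///
/// A minimal sketch of that lifecycle. The helper name `remap_while_inactive` is hypothetical,
/// and it assumes that `Constraints::empty()` imposes no extra mapping constraints.
///
/// ```
/// use aarch64_paging::{
///     MapError, Mapping,
///     paging::{Attributes, Constraints, MemoryRegion, PhysicalAddress, Translation},
/// };
///
/// /// Remaps `range` to `pa` with `flags`, switching back to the previously active page table
/// /// around the change so that break-before-make requirements are respected.
/// ///
/// /// # Safety
/// ///
/// /// The caller must ensure that `mapping` is currently active, and that neither the previous
/// /// page table nor the updated one unmaps memory that the program is using.
/// unsafe fn remap_while_inactive<T: Translation + Clone>(
///     mapping: &mut Mapping<T>,
///     range: &MemoryRegion,
///     pa: PhysicalAddress,
///     flags: Attributes,
/// ) -> Result<(), MapError> {
///     // SAFETY: The caller promised that the previous page table maps everything in use.
///     unsafe { mapping.deactivate() };
///     mapping.map_range(range, pa, flags, Constraints::empty())?;
///     // SAFETY: The caller promised that the updated page table maps everything in use.
///     unsafe { mapping.activate() };
///     Ok(())
/// }
/// ```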
#[derive(Debug)]
pub struct Mapping<T: Translation + Clone> {
    root: RootTable<T>,
    #[allow(unused)]
    asid: usize,
    #[allow(unused)]
    previous_ttbr: Option<usize>,
}

impl<T: Translation + Clone> Mapping<T> {
    /// Creates a new page table with the given ASID, root level and translation mapping.
    pub fn new(translation: T, asid: usize, rootlevel: usize, va_range: VaRange) -> Self {
        Self {
            root: RootTable::new(translation, rootlevel, va_range),
            asid,
            previous_ttbr: None,
        }
    }

    /// Returns whether this mapping is currently active.
    pub fn active(&self) -> bool {
        self.previous_ttbr.is_some()
    }

    /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
    /// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
    ///
    /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
    /// `deactivate`.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
    pub unsafe fn activate(&mut self) {
        assert!(!self.active());

        #[allow(unused)]
        let mut previous_ttbr = usize::MAX;

        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: Safe because we trust that self.root.to_physical() returns a valid physical
        // address of a page table, and the `Drop` implementation will reset `TTBRn_EL1` before it
        // becomes invalid.
        unsafe {
            match self.root.va_range() {
                VaRange::Lower => asm!(
                    "mrs   {previous_ttbr}, ttbr0_el1",
                    "msr   ttbr0_el1, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "mrs   {previous_ttbr}, ttbr1_el1",
                    "msr   ttbr1_el1, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
            }
        }
        self.previous_ttbr = Some(previous_ttbr);
    }

    /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
    /// configured ASID.
    ///
    /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
    /// called.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self) {
        assert!(self.active());

        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: Safe because this just restores the previously saved value of `TTBRn_EL1`, which
        // must have been valid.
        unsafe {
            match self.root.va_range() {
                VaRange::Lower => asm!(
                    "msr   ttbr0_el1, {ttbrval}",
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) self.asid << 48,
                    ttbrval = in(reg) self.previous_ttbr.unwrap(),
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "msr   ttbr1_el1, {ttbrval}",
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) self.asid << 48,
                    ttbrval = in(reg) self.previous_ttbr.unwrap(),
                    options(preserves_flags),
                ),
            }
        }
        self.previous_ttbr = None;
    }

    /// Checks whether the given range can be mapped or updated while the translation is live,
    /// without violating architectural break-before-make (BBM) requirements.
    fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.walk_range(
            range,
            &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
                if d.is_valid() {
                    if !mr.is_block(level) {
                        // Cannot split a live block mapping
                        return Err(());
                    }

                    // Get the new flags and output address for this descriptor by applying
                    // the updater function to a copy
                    let (flags, oa) = {
                        let mut dd = *d;
                        updater(mr, &mut dd, level)?;
                        (dd.flags().ok_or(())?, dd.output_address())
                    };

                    if !flags.contains(Attributes::VALID) {
                        // Removing the valid bit is always ok
                        return Ok(());
                    }

                    if oa != d.output_address() {
                        // Cannot change output address on a live mapping
                        return Err(());
                    }

                    let desc_flags = d.flags().unwrap();

                    if (desc_flags ^ flags).intersects(Attributes::NORMAL) {
                        // Cannot change memory type
                        return Err(());
                    }

                    if (desc_flags - flags).contains(Attributes::NON_GLOBAL) {
                        // Cannot convert from non-global to global
                        return Err(());
                    }
                }
                Ok(())
            },
        )
        .map_err(|_| MapError::BreakBeforeMakeViolation(range.clone()))?;
        Ok(())
    }

    /// Maps the given range of virtual addresses to the corresponding range of physical addresses
    /// starting at `pa`, with the given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`; otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
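    ///
    /// # Example
    ///
    /// A minimal sketch showing how entries can be written without being marked valid, so that the
    /// translation can be enabled later (for example with [`modify_range`](Self::modify_range)).
    /// The helper name `map_inactive` is hypothetical, and `Constraints::empty()` is assumed to
    /// impose no extra constraints.
    ///
    /// ```
    /// use aarch64_paging::{
    ///     MapError, Mapping,
    ///     paging::{Attributes, Constraints, MemoryRegion, PhysicalAddress, Translation},
    /// };
    ///
    /// /// Writes entries for `range` pointing at `pa`, but leaves out the VALID bit, so no
    /// /// translation is actually established yet.
    /// fn map_inactive<T: Translation + Clone>(
    ///     mapping: &mut Mapping<T>,
    ///     range: &MemoryRegion,
    ///     pa: PhysicalAddress,
    /// ) -> Result<(), MapError> {
    ///     mapping.map_range(
    ///         range,
    ///         pa,
    ///         Attributes::NORMAL | Attributes::NON_GLOBAL,
    ///         Constraints::empty(),
    ///     )
    /// }
    /// ```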
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if self.active() {
            let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| {
                let mask = !(paging::granularity_at_level(lvl) - 1);
                let pa = (mr.start() - range.start() + pa.0) & mask;
                d.set(PhysicalAddress(pa), flags);
                Ok(())
            };
            self.check_range_bbm(range, &c)?;
        }
        self.root.map_range(range, pa, flags, constraints)?;
        #[cfg(target_arch = "aarch64")]
        // SAFETY: Safe because this is just a memory barrier.
        unsafe {
            asm!("dsb ishst");
        }
        Ok(())
    }

    /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
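    ///
    /// # Example
    ///
    /// A minimal sketch of an updater that clears the `VALID` bit on every entry covering a range
    /// while keeping the output address and other attributes. Clearing the valid bit is also
    /// accepted by the break-before-make check for live mappings, as long as no block entry needs
    /// to be split. The helper name `invalidate_range` is hypothetical.
    ///
    /// ```
    /// use aarch64_paging::{
    ///     MapError, Mapping,
    ///     paging::{Attributes, Descriptor, MemoryRegion, Translation},
    /// };
    ///
    /// fn invalidate_range<T: Translation + Clone>(
    ///     mapping: &mut Mapping<T>,
    ///     range: &MemoryRegion,
    /// ) -> Result<(), MapError> {
    ///     mapping.modify_range(
    ///         range,
    ///         &|_mr: &MemoryRegion, d: &mut Descriptor, _level: usize| {
    ///             // Keep the existing attributes and output address, but drop the VALID bit.
    ///             let flags = d.flags().ok_or(())?;
    ///             let oa = d.output_address();
    ///             d.set(oa, flags - Attributes::VALID);
    ///             Ok(())
    ///         },
    ///     )
    /// }
    /// ```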
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        if self.active() {
            self.check_range_bbm(range, f)?;
        }
        self.root.modify_range(range, f)?;
        #[cfg(target_arch = "aarch64")]
        // SAFETY: Safe because this is just a memory barrier.
        unsafe {
            asm!("dsb ishst");
        }
        Ok(())
    }

    /// Applies the provided function to a number of PTEs corresponding to a given memory range.
    ///
    /// The virtual address range passed to the callback function may be expanded compared to the
    /// `range` parameter, due to alignment to block boundaries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
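    ///
    /// # Example
    ///
    /// A minimal sketch that counts the valid descriptors covering a range without modifying
    /// anything. The helper name `count_valid` is hypothetical.
    ///
    /// ```
    /// use aarch64_paging::{
    ///     MapError, Mapping,
    ///     paging::{Descriptor, MemoryRegion, Translation},
    /// };
    ///
    /// fn count_valid<T: Translation + Clone>(
    ///     mapping: &Mapping<T>,
    ///     range: &MemoryRegion,
    /// ) -> Result<usize, MapError> {
    ///     let mut valid = 0;
    ///     mapping.walk_range(
    ///         range,
    ///         &mut |_mr: &MemoryRegion, d: &Descriptor, _level: usize| {
    ///             if d.is_valid() {
    ///                 valid += 1;
    ///             }
    ///             Ok(())
    ///         },
    ///     )?;
    ///     Ok(valid)
    /// }
    /// ```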
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.root.walk_range(range, f)
    }
}

impl<T: Translation + Clone> Drop for Mapping<T> {
    fn drop(&mut self) {
        if self.previous_ttbr.is_some() {
            #[cfg(target_arch = "aarch64")]
            // SAFETY: When activate was called the caller promised that they wouldn't drop the page
            // table until its mappings were no longer needed.
            unsafe {
                self.deactivate();
            }
        }
    }
}
398