// Copyright 2024, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Memory management.
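//!
//! The sketch below is illustrative only: it shows one way a hypothetical boot-time caller might
//! sequence the entry points of this module (the addresses, sizes and error handling are
//! assumptions, not part of this module).
//!
//! ```ignore
//! switch_to_dynamic_page_tables();
//! // Hypothetical addresses and sizes, for illustration only.
//! map_rodata(0x4000_0000, NonZeroUsize::new(0x1000).unwrap())?; // e.g. read-only config data
//! map_data(0x4100_0000, NonZeroUsize::new(0x10_0000).unwrap())?; // writable working memory
//! init_shared_pool(None)?; // dynamically share pages with the host, if supported
//! // ... use the mappings ...
//! unshare_all_mmio_except_uart()?;
//! unshare_all_memory();
//! deactivate_dynamic_page_tables();
//! ```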

use super::error::MemoryTrackerError;
use super::shared::{SHARED_MEMORY, SHARED_POOL};
use crate::arch::aarch64::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use crate::arch::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use crate::arch::VirtualAddress;
use crate::dsb;
use crate::layout;
use crate::memory::shared::{MemoryRange, MemorySharer, MmioSharer};
use crate::util::RangeExt as _;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use alloc::boxed::Box;
use buddy_system_allocator::LockedFrameAllocator;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
use hypervisor_backends::{get_mem_sharer, get_mmio_guard};
use log::{debug, error, info};
use spin::mutex::{SpinMutex, SpinMutexGuard};
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub(crate) static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

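/// Returns the `VaRange` covering the same addresses as the given `MemoryRange`.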
fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

/// Attempts to lock `MEMORY`; returns an error if it has already been deactivated.
fn try_lock_memory_tracker() -> Result<SpinMutexGuard<'static, Option<MemoryTracker>>> {
    // Being single-threaded, we only spin if `deactivate_dynamic_page_tables()` leaked the lock.
    MEMORY.try_lock().ok_or(MemoryTrackerError::Unavailable)
}

/// Switch the MMU to dynamically created page tables, managed by a new `MemoryTracker`.
///
/// Panics if called more than once.
pub(crate) fn switch_to_dynamic_page_tables() {
    let mut locked_tracker = try_lock_memory_tracker().unwrap();
    if locked_tracker.is_some() {
        panic!("switch_to_dynamic_page_tables() called more than once.");
    }

    locked_tracker.replace(MemoryTracker::new(
        layout::crosvm::MEM_START..layout::MAX_VIRT_ADDR,
        layout::crosvm::MMIO_RANGE,
    ));
}

/// Switch the MMU back to the static page tables (see `idmap` C symbol).
///
/// Panics if called before `switch_to_dynamic_page_tables()` or more than once.
pub fn deactivate_dynamic_page_tables() {
    let locked_tracker = try_lock_memory_tracker().unwrap();
    // Force future calls to try_lock_memory_tracker() to fail by leaking this lock guard.
    let leaked_tracker = SpinMutexGuard::leak(locked_tracker);
    // Force deallocation/unsharing of all the resources used by the MemoryTracker.
    drop(leaked_tracker.take())
}

/// Redefines the actual mappable range of memory.
///
/// Fails if a region has already been mapped beyond the new upper limit.
pub fn resize_available_memory(memory_range: &Range<usize>) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    tracker.shrink(memory_range)
}

/// Initialize the memory pool for page sharing with the host.
pub fn init_shared_pool(static_range: Option<Range<usize>>) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    if let Some(mem_sharer) = get_mem_sharer() {
        let granule = mem_sharer.granule()?;
        tracker.init_dynamic_shared_pool(granule)
    } else if let Some(r) = static_range {
        tracker.init_static_shared_pool(r)
    } else {
        info!("Initialized shared pool from heap memory without MEM_SHARE");
        tracker.init_heap_shared_pool()
    }
}

/// Unshare all MMIO that was previously shared with the host, with the exception of the UART page.
pub fn unshare_all_mmio_except_uart() -> Result<()> {
    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return Ok(()) };
    let Some(tracker) = locked_tracker.as_mut() else { return Ok(()) };
    if cfg!(feature = "compat_android_13") {
        info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
    }
    tracker.unshare_all_mmio()
}

/// Unshare all memory that was previously shared with the host.
pub fn unshare_all_memory() {
    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return };
    let Some(tracker) = locked_tracker.as_mut() else { return };
    tracker.unshare_all_memory()
}

/// Unshare the UART page, previously shared with the host.
pub fn unshare_uart() -> Result<()> {
    let Some(mmio_guard) = get_mmio_guard() else { return Ok(()) };
    Ok(mmio_guard.unmap(layout::crosvm::UART_PAGE_ADDR)?)
}

/// Map the provided range as normal memory, with R/W permissions.
///
/// This fails if the range has already been (partially) mapped.
pub fn map_data(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc_mut(addr, size)?;
    Ok(())
}

/// Map the provided range as normal memory, with R/W permissions.
///
/// Unlike `map_data()`, `deactivate_dynamic_page_tables()` will not flush caches for the range.
///
/// This fails if the range has already been (partially) mapped.
pub fn map_data_noflush(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc_mut_noflush(addr, size)?;
    Ok(())
}

/// Map the region potentially holding data appended to the image, with read-write permissions.
///
/// This fails if the footer has already been mapped.
pub fn map_image_footer() -> Result<Range<usize>> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let range = tracker.map_image_footer()?;
    Ok(range)
}

/// Map the provided range as normal memory, with read-only permissions.
///
/// This fails if the range has already been (partially) mapped.
pub fn map_rodata(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let _ = tracker.alloc(addr, size)?;
    Ok(())
}

// TODO(ptosi): Merge this into map_rodata.
/// Map the provided range as normal memory, with read-only permissions.
///
/// # Safety
///
/// Callers of this method need to ensure that the `range` is valid for mapping as read-only data.
pub unsafe fn map_rodata_outside_main_memory(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let end = addr + usize::from(size);
    // SAFETY: Caller has checked that it is valid to map the range.
    let _ = unsafe { tracker.alloc_range_outside_main_memory(&(addr..end)) }?;
    Ok(())
}

/// Map the provided range as device memory.
///
/// This fails if the range has already been (partially) mapped.
pub fn map_device(addr: usize, size: NonZeroUsize) -> Result<()> {
    let mut locked_tracker = try_lock_memory_tracker()?;
    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
    let range = addr..(addr + usize::from(size));
    tracker.map_mmio_range(range.clone())
}

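/// Permissions with which a tracked region of main memory was mapped.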
#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

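/// A tracked region of main memory and the permissions it was mapped with.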
#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub(crate) struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    image_footer_mapped: bool,
    mmio_sharer: MmioSharer,
}

impl MemoryTracker {
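    /// Maximum number of main memory regions that can be tracked.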
    const CAPACITY: usize = 5;
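    /// Maximum number of MMIO regions that can be tracked.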
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance covering the maximum RAM size, building and activating its own
    /// dynamic page table.
    fn new(total: MemoryRange, mmio_range: MemoryRange) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        let mut page_table = Self::initialize_dynamic_page_tables();
        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            image_footer_mapped: false,
            mmio_sharer: MmioSharer::new().unwrap(),
        }
    }

    /// Resize the total RAM size.
    ///
    /// This function fails if the tracker contains regions that do not fit within the new range.
    fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

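    /// Allocates the address range for a mutable slice, mapped without dirty-bit management (see
    /// `map_data_noflush()`); returns an error on failure.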
    fn alloc_range_mut_noflush(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data(&get_va_range(range)).map_err(|e| {
            error!("Error during non-flushed mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Maps the image footer, with read-write permissions.
    fn map_image_footer(&mut self) -> Result<MemoryRange> {
        if self.image_footer_mapped {
            return Err(MemoryTrackerError::FooterAlreadyMapped);
        }
        let range = layout::image_footer_range();
        self.page_table.map_data_dbm(&range.clone().into()).map_err(|e| {
            error!("Error during image footer map: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.image_footer_mapped = true;
        Ok(range.start.0..range.end.0)
    }

    /// Allocates the address range for a const slice; returns an error on failure.
    fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error on failure.
    fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

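    /// Non-flushed variant of `alloc_mut()`; see `alloc_range_mut_noflush()`.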
    fn alloc_mut_noflush(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut_noflush(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
    fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

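    /// Records the region in `self.regions` and returns its range, or an error if full.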
    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    fn unshare_all_mmio(&mut self) -> Result<()> {
        self.mmio_sharer.unshare_all();

        Ok(())
    }

    /// Initialize the shared heap to dynamically share memory from the global allocator.
    fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share its
    /// memory with the host. Instead, they allow the host to designate part of the guest's memory
    /// as "shared" ahead of the guest starting its execution; that shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
    fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initialize the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors, which permit the host to access guest memory
    /// directly, there is no need to perform any memory sharing or to allocate buffers from a
    /// dedicated region, so this function instructs the shared pool to use the global allocator.
    fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global_allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by marking the page
    /// table entry valid and MMIO-guard-mapping the block. Breaks apart a block entry if required.
    pub(crate) fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let shared_range = self.mmio_sharer.share(addr)?;
        self.map_lazy_mmio_as_valid(&shared_range)?;

        Ok(())
    }

    /// Modify the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each valid descriptor created inside
        // the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flush all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        if self.image_footer_mapped {
            let range = layout::image_footer_range();
            self.page_table
                .walk_range(&range.into(), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault for read-only blocks by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    pub(crate) fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }

    // TODO(ptosi): Move this and `PageTable` references to crate::arch::aarch64
    /// Produces a `PageTable` that can safely replace the static PTs.
    fn initialize_dynamic_page_tables() -> PageTable {
        let text = layout::text_range();
        let rodata = layout::rodata_range();
        let data_bss = layout::data_bss_range();
        let eh_stack = layout::eh_stack_range();
        let stack = layout::stack_range();
        let console_uart_page = layout::console_uart_page();

        let mut page_table = PageTable::default();

        page_table.map_device(&console_uart_page.into()).unwrap();
        page_table.map_code(&text.into()).unwrap();
        page_table.map_rodata(&rodata.into()).unwrap();
        page_table.map_data(&data_bss.into()).unwrap();
        page_table.map_data(&eh_stack.into()).unwrap();
        page_table.map_data(&stack.into()).unwrap();

        page_table
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
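        // Tear down in order: stop hardware dirty-state tracking, write back any pages already
        // marked dirty, then unshare all memory previously shared with the host.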
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}