// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Functionality for managing page tables with identity mapping.
//!
//! See [`IdMap`] for details on how to use it.

use crate::{
    paging::{
        deallocate, Attributes, Constraints, Descriptor, MemoryRegion, PageTable, PhysicalAddress,
        Translation, VaRange, VirtualAddress,
    },
    MapError, Mapping,
};
use core::ptr::NonNull;

/// Identity mapping, where every virtual address is either unmapped or mapped to the identical IPA.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct IdTranslation;

impl IdTranslation {
    fn virtual_to_physical(va: VirtualAddress) -> PhysicalAddress {
        PhysicalAddress(va.0)
    }
}

impl Translation for IdTranslation {
    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
        let table = PageTable::new();

        // Physical address is the same as the virtual address because we are using identity mapping
        // everywhere.
        (table, PhysicalAddress(table.as_ptr() as usize))
    }

    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
        deallocate(page_table);
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
        NonNull::new(pa.0 as *mut PageTable).expect("Got physical address 0 for pagetable")
    }
}

/// Manages a level 1 page table using identity mapping, where every virtual address is either
/// unmapped or mapped to the identical IPA.
///
/// This assumes that identity mapping is used both for the page table being managed, and for code
/// that is managing it.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
///
/// # Example
///
/// ```no_run
/// use aarch64_paging::{
///     idmap::IdMap,
///     paging::{Attributes, MemoryRegion},
/// };
///
/// const ASID: usize = 1;
/// const ROOT_LEVEL: usize = 1;
///
/// // Create a new page table with identity mapping.
/// let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
/// // Map a 2 MiB region of memory as read-write.
/// idmap.map_range(
///     &MemoryRegion::new(0x80200000, 0x80400000),
///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
/// ).unwrap();
/// // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
/// unsafe {
///     // Set `TTBR0_EL1` to activate the page table.
///     idmap.activate();
/// }
///
/// // Write something to the memory...
///
/// // SAFETY: The program will only use memory within the initially mapped region until `idmap` is
/// // reactivated below.
/// unsafe {
///     // Restore `TTBR0_EL1` to its earlier value while we modify the page table.
///     idmap.deactivate();
/// }
/// // Now change the mapping to read-only and executable.
/// idmap.map_range(
///     &MemoryRegion::new(0x80200000, 0x80400000),
///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
/// // SAFETY: Everything the program will use is mapped in by this page table.
/// unsafe {
///     idmap.activate();
/// }
/// ```
#[derive(Debug)]
pub struct IdMap {
    mapping: Mapping<IdTranslation>,
}

impl IdMap {
    /// Creates a new identity-mapping page table with the given ASID and root level.
    pub fn new(asid: usize, rootlevel: usize) -> Self {
        Self {
            mapping: Mapping::new(IdTranslation, asid, rootlevel, VaRange::Lower),
        }
    }

    /// Activates the page table by setting `TTBR0_EL1` to point to it, and saves the previous value
    /// of `TTBR0_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
    ///
    /// Panics if a previous value of `TTBR0_EL1` is already saved and not yet used by a call to
    /// `deactivate`.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
    pub unsafe fn activate(&mut self) {
        self.mapping.activate()
    }

    /// Deactivates the page table, by setting `TTBR0_EL1` back to the value it had before
    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
    /// configured ASID.
    ///
    /// Panics if there is no saved `TTBR0_EL1` value because `activate` has not previously been
    /// called.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBR0_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self) {
        self.mapping.deactivate()
    }

    /// Maps the given range of virtual addresses to the identical physical addresses with the given
    /// flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`; otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
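    ///
    /// # Example
    ///
    /// A minimal sketch (the address range is arbitrary and purely illustrative): entries written
    /// without `Attributes::VALID` remain invalid, and can later be remapped with the flag set.
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, MemoryRegion},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1);
    /// // Write entries for the region but leave them invalid; accesses will fault.
    /// idmap.map_range(
    ///     &MemoryRegion::new(0x80200000, 0x80400000),
    ///     Attributes::NORMAL | Attributes::NON_GLOBAL,
    /// ).unwrap();
    /// // Remap the same region with `VALID` set so that it can actually be used.
    /// idmap.map_range(
    ///     &MemoryRegion::new(0x80200000, 0x80400000),
    ///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
    /// ).unwrap();
    /// ```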
    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the identical physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`; otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
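    ///
    /// # Example
    ///
    /// A minimal sketch (the address range is illustrative): `NO_BLOCK_MAPPINGS` forces the region
    /// to be mapped with page entries only, so that individual pages can later be remapped without
    /// splitting a live block entry.
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, Constraints, MemoryRegion},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1);
    /// idmap.map_range_with_constraints(
    ///     &MemoryRegion::new(0x80200000, 0x80400000),
    ///     Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
    ///     Constraints::NO_BLOCK_MAPPINGS,
    /// ).unwrap();
    /// ```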
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        let pa = IdTranslation::virtual_to_physical(range.start());
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications.
    /// - The level of the translation table that the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
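    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the crate's own tests: set a software-defined flag
    /// (`SWFLAG_0`) on the level 3 page descriptors covering the first page, while the page
    /// table is inactive.
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, MemoryRegion, PAGE_SIZE},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1);
    /// idmap.map_range(
    ///     &MemoryRegion::new(0, PAGE_SIZE),
    ///     Attributes::NORMAL | Attributes::VALID,
    /// ).unwrap();
    /// idmap.modify_range(&MemoryRegion::new(0, PAGE_SIZE), &|_range, entry, level| {
    ///     if level == 3 {
    ///         // Set `SWFLAG_0` and clear nothing.
    ///         entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
    ///     }
    ///     Ok(())
    /// }).unwrap();
    /// ```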
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The full virtual address range mapped by each visited page table descriptor, which may
    ///   exceed the original range passed to `walk_range`, due to alignment to block boundaries.
    /// - The page table descriptor itself.
    /// - The level of the translation table that the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
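    ///
    /// # Example
    ///
    /// A minimal sketch: walk the first page of the address space and count the level 3
    /// descriptors visited, without modifying anything. The range and the bookkeeping are
    /// purely illustrative.
    ///
    /// ```no_run
    /// use aarch64_paging::{
    ///     idmap::IdMap,
    ///     paging::{Attributes, MemoryRegion, PAGE_SIZE},
    /// };
    ///
    /// let mut idmap = IdMap::new(1, 1);
    /// idmap.map_range(
    ///     &MemoryRegion::new(0, PAGE_SIZE),
    ///     Attributes::NORMAL | Attributes::VALID,
    /// ).unwrap();
    /// let mut pages = 0;
    /// idmap.walk_range(&MemoryRegion::new(0, PAGE_SIZE), &mut |_range, entry, level| {
    ///     if level == 3 && entry.is_table_or_page() {
    ///         pages += 1;
    ///     }
    ///     Ok(())
    /// }).unwrap();
    /// ```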
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
        MapError, VirtualAddress,
    };

    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;

    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, 1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // Two pages, on the boundary between two subtables.
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // The entire valid address space.
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );
    }

    #[test]
    fn map_break_before_make() {
        const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut idmap = IdMap::new(1, 1);
        idmap
            .map_range_with_constraints(
                &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
                Attributes::NORMAL | Attributes::VALID,
                Constraints::NO_BLOCK_MAPPINGS,
            )
            .unwrap();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }

        // Splitting a range is permitted if it was mapped down to pages
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Ok(())
        );

        let mut idmap = IdMap::new(1, 1);
        idmap
            .map_range(
                &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            )
            .ok();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }

        // Extending a range is fine even if there are block mappings
        // in the middle
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Ok(())
        );

        // Splitting a range is not permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                BLOCK_SIZE,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );

        // Remapping a partially live range read-only is only permitted
        // if it does not require splitting
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                0,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE),
                Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY,
            ),
            Ok(())
        );

        // Changing the memory type is not permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, BLOCK_SIZE),
                Attributes::DEVICE_NGNRE | Attributes::VALID | Attributes::NON_GLOBAL,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                0, BLOCK_SIZE
            )))
        );

        // Making a range invalid is only permitted if it does not require splitting
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
                Attributes::NORMAL,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                PAGE_SIZE,
                BLOCK_SIZE + PAGE_SIZE
            )))
        );
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE),
                Attributes::NORMAL,
            ),
            Ok(())
        );

        // Creating a new valid entry is always permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, 2 * PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Ok(())
        );

        // Setting the non-global attribute is permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID | Attributes::NON_GLOBAL,
            ),
            Ok(())
        );

        // Removing the non-global attribute from a live mapping is not permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
                0, PAGE_SIZE
            )))
        );

        // SAFETY: This doesn't actually deactivate the page table in tests, it just treats it as
        // inactive for the sake of BBM rules.
        unsafe {
            idmap.deactivate();
        }
        // Removing the non-global attribute from an inactive mapping is permitted
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                Attributes::NORMAL | Attributes::VALID,
            ),
            Ok(())
        );
    }

    #[test]
    fn map_out_of_range() {
        let mut idmap = IdMap::new(1, 1);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                Attributes::NORMAL | Attributes::VALID
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            idmap.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,),
                Attributes::NORMAL | Attributes::VALID
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }

    fn make_map() -> IdMap {
        let mut idmap = IdMap::new(1, 1);
        idmap
            .map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                Attributes::NORMAL
                    | Attributes::NON_GLOBAL
                    | Attributes::READ_ONLY
                    | Attributes::VALID,
            )
            .unwrap();
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        idmap
    }

    #[test]
    fn update_backwards_range() {
        let mut idmap = make_map();
        assert!(idmap
            .modify_range(
                &MemoryRegion::new(PAGE_SIZE * 2, 1),
                &|_range, entry, _level| {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                    Ok(())
                },
            )
            .is_err());
    }

    #[test]
    fn update_range() {
        let mut idmap = make_map();
        assert!(idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    entry.modify_flags(Attributes::SWFLAG_0, Attributes::NON_GLOBAL);
                }
                Ok(())
            })
            .is_err());
        idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                }
                Ok(())
            })
            .unwrap();
        idmap
            .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
                if level == 3 || !entry.is_table_or_page() {
                    assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
                    assert_eq!(range.end() - range.start(), PAGE_SIZE);
                }
                Ok(())
            })
            .unwrap();
    }

    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;
        let mut idmap = IdMap::new(1, 1);
        // SAFETY: This doesn't actually activate the page table in tests, it just treats it as
        // active for the sake of BBM rules.
        unsafe {
            idmap.activate();
        }
        idmap
            .map_range(
                &MemoryRegion::new(0, BLOCK_RANGE),
                Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
            )
            .unwrap();
        idmap
            .map_range(
                &MemoryRegion::new(0, PAGE_SIZE),
                Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
            )
            .unwrap();
        idmap
            .modify_range(
                &MemoryRegion::new(0, BLOCK_RANGE),
                &|range, entry, level| {
                    if level == 3 {
                        let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
                        let is_first_page = range.start().0 == 0usize;
                        assert!(has_swflag != is_first_page);
                    }
                    Ok(())
                },
            )
            .unwrap();
    }
}