// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Functionality for managing page tables with linear mapping.
//!
//! See [`LinearMap`] for details on how to use it.
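//!
//! A minimal usage sketch (illustrative only; the ASID, root level, offset and mapped region are
//! example values, and it assumes the program already runs under the same linear mapping that the
//! new page table describes):
//!
//! ```no_run
//! use aarch64_paging::{
//!     linearmap::LinearMap,
//!     paging::{Attributes, MemoryRegion, VaRange},
//! };
//!
//! const ASID: usize = 1;
//! const ROOT_LEVEL: usize = 1;
//! // Example offset from virtual addresses to (intermediate) physical addresses; it must be a
//! // multiple of the page size.
//! const OFFSET: isize = -0x8000_0000;
//!
//! let mut pagetable = LinearMap::new(ASID, ROOT_LEVEL, OFFSET, VaRange::Lower);
//! pagetable
//!     .map_range(
//!         &MemoryRegion::new(0x8000_0000, 0x8100_0000),
//!         Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
//!     )
//!     .unwrap();
//! // SAFETY: assumed for this sketch; the new mapping must cover all memory in use and must not
//! // introduce any aliasing.
//! unsafe {
//!     pagetable.activate();
//! }
//! ```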

use crate::{
    paging::{
        deallocate, is_aligned, Attributes, Constraints, Descriptor, MemoryRegion, PageTable,
        PhysicalAddress, Translation, VaRange, VirtualAddress, PAGE_SIZE,
    },
    MapError, Mapping,
};
use core::ptr::NonNull;

/// Linear mapping, where every virtual address is either unmapped or mapped to an IPA with a fixed
/// offset.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct LinearTranslation {
    /// The offset from a virtual address to the corresponding (intermediate) physical address.
    offset: isize,
}

impl LinearTranslation {
    /// Constructs a new linear translation, which will map a virtual address `va` to the
    /// (intermediate) physical address `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
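    ///
    /// For example (the offset value here is purely illustrative):
    ///
    /// ```
    /// use aarch64_paging::{linearmap::LinearTranslation, paging::PAGE_SIZE};
    ///
    /// // Negative offsets are allowed, as long as they are page-aligned.
    /// let _translation = LinearTranslation::new(-(PAGE_SIZE as isize));
    /// ```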
    pub fn new(offset: isize) -> Self {
        if !is_aligned(offset.unsigned_abs(), PAGE_SIZE) {
            panic!(
                "Invalid offset {}, must be a multiple of page size {}.",
                offset, PAGE_SIZE,
            );
        }
        Self { offset }
    }

    fn virtual_to_physical(&self, va: VirtualAddress) -> Result<PhysicalAddress, MapError> {
        if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) {
            Ok(PhysicalAddress(pa))
        } else {
            Err(MapError::InvalidVirtualAddress(va))
        }
    }
}

impl Translation for LinearTranslation {
    fn allocate_table(&self) -> (NonNull<PageTable>, PhysicalAddress) {
        let table = PageTable::new();
        // Assume that the same linear mapping is used everywhere.
        let va = VirtualAddress(table.as_ptr() as usize);

        let pa = self.virtual_to_physical(va).expect(
            "Allocated subtable with virtual address which doesn't correspond to any physical address."
        );
        (table, pa)
    }

    unsafe fn deallocate_table(&self, page_table: NonNull<PageTable>) {
        deallocate(page_table);
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
        let signed_pa = pa.0 as isize;
        if signed_pa < 0 {
            panic!("Invalid physical address {} for pagetable", pa);
        }
        if let Some(va) = signed_pa.checked_sub(self.offset) {
            if let Some(ptr) = NonNull::new(va as *mut PageTable) {
                ptr
            } else {
                panic!(
                    "Invalid physical address {} for pagetable (translated to virtual address 0)",
                    pa
                )
            }
        } else {
            panic!("Invalid physical address {} for pagetable", pa);
        }
    }
}

/// Adds two signed values, returning the sum as an unsigned value, or `None` if it would overflow
/// or be negative.
fn checked_add_to_unsigned(a: isize, b: isize) -> Option<usize> {
    a.checked_add(b)?.try_into().ok()
}

/// Manages a level 1 page table using linear mapping, where every virtual address is either
/// unmapped or mapped to an IPA with a fixed offset.
///
/// This assumes that the same linear mapping is used both for the page table being managed, and for
/// the code that is managing it.
#[derive(Debug)]
pub struct LinearMap {
    mapping: Mapping<LinearTranslation>,
}

impl LinearMap {
    /// Creates a new linear mapping page table with the given ASID, root level and offset, for use
    /// in the given TTBR.
    ///
    /// This will map any virtual address `va` which is added to the table to the physical address
    /// `va + offset`.
    ///
    /// The `offset` must be a multiple of [`PAGE_SIZE`]; if not this will panic.
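    ///
    /// For example (illustrative values; this offset maps each virtual address to the IPA one page
    /// below it):
    ///
    /// ```
    /// use aarch64_paging::{
    ///     linearmap::LinearMap,
    ///     paging::{VaRange, PAGE_SIZE},
    /// };
    ///
    /// let _map = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
    /// ```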
    pub fn new(asid: usize, rootlevel: usize, offset: isize, va_range: VaRange) -> Self {
        Self {
            mapping: Mapping::new(LinearTranslation::new(offset), asid, rootlevel, va_range),
        }
    }

    /// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
    /// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
    ///
    /// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
    /// `deactivate`.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped as long as its mappings are required, as it will automatically be deactivated when
    /// it is dropped.
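    ///
    /// A sketch of pairing `activate` with `deactivate` (illustrative values; the safety comments
    /// state assumptions which the caller must actually verify):
    ///
    /// ```no_run
    /// # use aarch64_paging::{linearmap::LinearMap, paging::{Attributes, MemoryRegion, VaRange}};
    /// let mut map = LinearMap::new(1, 1, 4096, VaRange::Lower);
    /// map.map_range(
    ///     &MemoryRegion::new(0x0000, 0x1000),
    ///     Attributes::NORMAL | Attributes::VALID,
    /// )
    /// .unwrap();
    /// // SAFETY: assumed for this sketch; the mapping covers all memory in use and breaks no
    /// // aliasing rules.
    /// unsafe { map.activate() };
    /// // ...
    /// // SAFETY: assumed for this sketch; the previous page table still maps everything the
    /// // program needs.
    /// unsafe { map.deactivate() };
    /// ```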
    pub unsafe fn activate(&mut self) {
        self.mapping.activate()
    }

    /// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
    /// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
    /// configured ASID.
    ///
    /// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
    /// called.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&mut self) {
        self.mapping.deactivate()
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`; otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
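    ///
    /// For example (illustrative addresses and offset):
    ///
    /// ```
    /// # use aarch64_paging::{linearmap::LinearMap, paging::{Attributes, MemoryRegion, VaRange}};
    /// let mut map = LinearMap::new(1, 1, 4096, VaRange::Lower);
    /// // Immediately usable once the page table is activated.
    /// map.map_range(
    ///     &MemoryRegion::new(0x0000, 0x2000),
    ///     Attributes::NORMAL | Attributes::VALID,
    /// )
    /// .unwrap();
    /// // Entries are written but stay invalid until the range is remapped with `Attributes::VALID`.
    /// map.map_range(&MemoryRegion::new(0x2000, 0x4000), Attributes::NORMAL)
    ///     .unwrap();
    /// ```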
    pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> {
        self.map_range_with_constraints(range, flags, Constraints::empty())
    }

    /// Maps the given range of virtual addresses to the corresponding physical addresses with the
    /// given flags, taking the given constraints into account.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains `Attributes::VALID`; otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::InvalidVirtualAddress`] if adding the configured offset to any virtual
    /// address within the `range` would result in overflow.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range_with_constraints(
        &mut self,
        range: &MemoryRegion,
        flags: Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        let pa = self
            .mapping
            .root
            .translation()
            .virtual_to_physical(range.start())?;
        self.mapping.map_range(range, pa, flags, constraints)
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - A mutable reference to the page table descriptor that permits modifications.
    /// - The level of the translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
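    ///
    /// For example, setting a software flag on every page descriptor in a range (illustrative
    /// values, mirroring the crate's own tests):
    ///
    /// ```
    /// # use aarch64_paging::{linearmap::LinearMap, paging::{Attributes, MemoryRegion, VaRange, PAGE_SIZE}};
    /// let mut map = LinearMap::new(1, 1, 4096, VaRange::Lower);
    /// map.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL)
    ///     .unwrap();
    /// map.modify_range(&MemoryRegion::new(0, PAGE_SIZE * 2), &|_range, entry, level| {
    ///     if level == 3 {
    ///         // Set SWFLAG_0 without clearing any other attributes.
    ///         entry.modify_flags(Attributes::SWFLAG_0, Attributes::empty());
    ///     }
    ///     Ok(())
    /// })
    /// .unwrap();
    /// ```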
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
    {
        self.mapping.modify_range(range, f)
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The full virtual address range mapped by each visited page table descriptor, which may
    ///   exceed the original range passed to `walk_range` due to alignment to block boundaries.
    /// - The page table descriptor itself.
    /// - The level of the translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
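    ///
    /// For example, counting the leaf (level 3) descriptors covering a range without modifying
    /// anything (illustrative values):
    ///
    /// ```
    /// # use aarch64_paging::{linearmap::LinearMap, paging::{Attributes, MemoryRegion, VaRange, PAGE_SIZE}};
    /// let mut map = LinearMap::new(1, 1, 4096, VaRange::Lower);
    /// map.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL | Attributes::VALID)
    ///     .unwrap();
    /// let mut pages = 0;
    /// map.walk_range(&MemoryRegion::new(0, PAGE_SIZE * 2), &mut |_range, _entry, level| {
    ///     if level == 3 {
    ///         pages += 1;
    ///     }
    ///     Ok(())
    /// })
    /// .unwrap();
    /// assert_eq!(pages, 2);
    /// ```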
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
    {
        self.mapping.walk_range(range, f)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
        MapError,
    };

    const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39;
    const GIB_512_S: isize = 512 * 1024 * 1024 * 1024;
    const GIB_512: usize = 512 * 1024 * 1024 * 1024;

    #[test]
    fn map_valid() {
        // A single byte at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, 1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // Two pages at the start of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, PAGE_SIZE * 2),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages, which is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(1, 1, LEVEL_2_BLOCK_SIZE as isize, VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );
    }

    #[test]
    fn map_valid_negative_offset() {
        // A single byte which maps to IPA 0.
        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // Two pages, the first of which maps to IPA 0.
        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // A single byte at the end of the address space.
        let mut pagetable = LinearMap::new(1, 1, -(PAGE_SIZE as isize), VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1
                ),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );

        // The entire valid address space. Use an offset that is a multiple of the level 2 block
        // size to avoid mapping everything as pages, which is really slow.
        const LEVEL_2_BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
        let mut pagetable = LinearMap::new(1, 1, -(LEVEL_2_BLOCK_SIZE as isize), VaRange::Lower);
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Ok(())
        );
    }

    #[test]
    fn map_out_of_range() {
        let mut pagetable = LinearMap::new(1, 1, 4096, VaRange::Lower);

        // One byte, just past the edge of the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1,
                    MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1,
                ),
                Attributes::NORMAL | Attributes::VALID
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );

        // From 0 to just past the valid range.
        assert_eq!(
            pagetable.map_range(
                &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1),
                Attributes::NORMAL | Attributes::VALID
            ),
            Err(MapError::AddressRange(VirtualAddress(
                MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE
            )))
        );
    }

    #[test]
    fn map_invalid_offset() {
        let mut pagetable = LinearMap::new(1, 1, -4096, VaRange::Lower);

        // One byte, with an offset which would map it to a negative IPA.
        assert_eq!(
            pagetable.map_range(&MemoryRegion::new(0, 1), Attributes::NORMAL),
            Err(MapError::InvalidVirtualAddress(VirtualAddress(0)))
        );
    }

    #[test]
    fn physical_address_in_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new(4096 as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)),
            NonNull::new(GIB_512 as *mut PageTable).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(4096));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr0() {
        let translation = LinearTranslation::new(4096);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn physical_address_in_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to one page above the bottom
        // of physical address space.
        let translation = LinearTranslation::new(GIB_512_S + 4096);
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(8192)),
            NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(),
        );
        assert_eq!(
            translation.physical_to_virtual(PhysicalAddress(GIB_512)),
            NonNull::new(-4096_isize as *mut PageTable).unwrap(),
        );
    }

    #[test]
    #[should_panic]
    fn physical_address_to_zero_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(GIB_512));
    }

    #[test]
    #[should_panic]
    fn physical_address_out_of_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);
        translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize));
    }

    #[test]
    fn virtual_address_out_of_range() {
        let translation = LinearTranslation::new(-4096);
        let va = VirtualAddress(1024);
        assert_eq!(
            translation.virtual_to_physical(va),
            Err(MapError::InvalidVirtualAddress(va))
        )
    }

    #[test]
    fn virtual_address_range_ttbr1() {
        // Map the 512 GiB region at the top of virtual address space to the bottom of physical
        // address space.
        let translation = LinearTranslation::new(GIB_512_S);

        // The first page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)),
            Ok(PhysicalAddress(0))
        );
        // The last page in the region covered by TTBR1.
        assert_eq!(
            translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)),
            Ok(PhysicalAddress(0x7f_ffff_f000))
        );
    }

    #[test]
    fn block_mapping() {
        // Test that block mapping is used when the PA is appropriately aligned...
        let mut pagetable = LinearMap::new(1, 1, 1 << 30, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                Attributes::NORMAL | Attributes::VALID,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(1)
        );

        // ...but not when it is not.
        let mut pagetable = LinearMap::new(1, 1, 1 << 29, VaRange::Lower);
        pagetable
            .map_range(
                &MemoryRegion::new(0, 1 << 30),
                Attributes::NORMAL | Attributes::VALID,
            )
            .unwrap();
        assert_eq!(
            pagetable.mapping.root.mapping_level(VirtualAddress(0)),
            Some(2)
        );
    }

    fn make_map() -> LinearMap {
        let mut lmap = LinearMap::new(1, 1, 4096, VaRange::Lower);
        // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000.
        lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), Attributes::NORMAL)
            .unwrap();
        lmap
    }

    #[test]
    fn update_backwards_range() {
        let mut lmap = make_map();
        assert!(lmap
            .modify_range(
                &MemoryRegion::new(PAGE_SIZE * 2, 1),
                &|_range, entry, _level| {
                    entry
                        .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
                    Ok(())
                },
            )
            .is_err());
    }

    #[test]
    fn update_range() {
        let mut lmap = make_map();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| {
            if level == 3 || !entry.is_table_or_page() {
                entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap());
            }
            Ok(())
        })
        .unwrap();
        lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| {
            if level == 3 || !entry.is_table_or_page() {
                assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0));
                assert_eq!(range.end() - range.start(), PAGE_SIZE);
            }
            Ok(())
        })
        .unwrap();
    }

    #[test]
    fn breakup_invalid_block() {
        const BLOCK_RANGE: usize = 0x200000;

        let mut lmap = LinearMap::new(1, 1, 0x1000, VaRange::Lower);
        lmap.map_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::SWFLAG_0,
        )
        .unwrap();
        lmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
        )
        .unwrap();
        lmap.modify_range(
            &MemoryRegion::new(0, BLOCK_RANGE),
            &|range, entry, level| {
                if level == 3 {
                    let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0);
                    let is_first_page = range.start().0 == 0usize;
                    assert!(has_swflag != is_first_page);
                }
                Ok(())
            },
        )
        .unwrap();
    }
}