// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp;
use std::collections::{BTreeSet, HashMap};
use std::ops::RangeInclusive;

use crate::{Alloc, Error, Result};

/// Manages allocating address ranges.
/// Use `AddressAllocator` whenever an address range needs to be allocated to different users.
/// Allocations must be uniquely tagged with an Alloc enum, which can be used for lookup.
/// A human-readable tag String must also be provided for debugging / reference.
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
    /// The list of pools from which addresses are allocated. The union
    /// of all regions from |allocs| and |regions| equals the pools.
    pools: Vec<RangeInclusive<u64>>,
    min_align: u64,
    preferred_align: u64,
    /// The regions that are allocated.
    allocs: HashMap<Alloc, (u64, u64, String)>,
    /// The regions that are not allocated yet.
    regions: BTreeSet<(u64, u64)>,
}

impl AddressAllocator {
    /// Creates a new `AddressAllocator` for managing a range of addresses.
    /// Can return an error if `pool` is empty or if alignment isn't a power of two.
    ///
    /// * `pool` - The address range to allocate from.
    /// * `min_align` - The minimum size of an address region to align to, defaults to four.
    /// * `preferred_align` - The preferred alignment of an address region, used if possible.
    ///
    /// If an allocation cannot be satisfied with the preferred alignment, the minimum alignment
    /// will be used instead.
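    ///
    /// # Example
    ///
    /// A minimal usage sketch (illustrative only; marked `ignore` so it is not run as a
    /// doctest, and `Alloc::Anon` is used purely for brevity):
    ///
    /// ```ignore
    /// let mut pool = AddressAllocator::new(0x1000..=0x1FFF, Some(0x100), None)?;
    /// let addr = pool.allocate(0x100, Alloc::Anon(0), "example".to_string())?;
    /// assert_eq!(addr, 0x1000);
    /// ```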
    pub fn new(
        pool: RangeInclusive<u64>,
        min_align: Option<u64>,
        preferred_align: Option<u64>,
    ) -> Result<Self> {
        Self::new_from_list(vec![pool], min_align, preferred_align)
    }

    /// Creates a new `AddressAllocator` for managing a list of address ranges.
    /// Can return an error if all pools are empty or if alignment isn't a power of two.
    ///
    /// * `pools` - The list of pools to initialize the allocator with.
    /// * `min_align` - The minimum size of an address region to align to, defaults to four.
    /// * `preferred_align` - The preferred alignment of an address region, used if possible.
    ///
    /// If an allocation cannot be satisfied with the preferred alignment, the minimum alignment
    /// will be used instead.
    pub fn new_from_list<T>(
        pools: T,
        min_align: Option<u64>,
        preferred_align: Option<u64>,
    ) -> Result<Self>
    where
        T: IntoIterator<Item = RangeInclusive<u64>>,
    {
        let pools: Vec<RangeInclusive<u64>> = pools.into_iter().filter(|p| !p.is_empty()).collect();
        if pools.is_empty() {
            return Err(Error::PoolSizeZero);
        }

        let min_align = min_align.unwrap_or(4);
        if !min_align.is_power_of_two() || min_align == 0 {
            return Err(Error::BadAlignment);
        }

        let preferred_align = preferred_align.unwrap_or(min_align);
        if !preferred_align.is_power_of_two() || preferred_align < min_align {
            return Err(Error::BadAlignment);
        }

        let mut regions = BTreeSet::new();
        for r in pools.iter() {
            regions.insert((*r.start(), *r.end()));
        }
        Ok(AddressAllocator {
            pools,
            min_align,
            preferred_align,
            allocs: HashMap::new(),
            regions,
        })
    }

    /// Gets the regions managed by the allocator.
    ///
    /// This returns the original `pools` value provided to `AddressAllocator::new()`.
    pub fn pools(&self) -> &Vec<RangeInclusive<u64>> {
        &self.pools
    }

    fn internal_allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
        reverse: bool,
    ) -> Result<u64> {
        let alignment = cmp::max(self.min_align, alignment);

        if self.allocs.contains_key(&alloc) {
            return Err(Error::ExistingAlloc(alloc));
        }
        if size == 0 {
            return Err(Error::AllocSizeZero);
        }
        if !alignment.is_power_of_two() {
            return Err(Error::BadAlignment);
        }

        let region = if !reverse {
            // Finds the first region matching alignment and size.
            self.regions
                .iter()
                .find(|range| {
                    match range.0 % alignment {
                        0 => range.0.checked_add(size - 1),
                        r => range.0.checked_add(size - 1 + alignment - r),
                    }
                    .map_or(false, |end| end <= range.1)
                })
                .cloned()
        } else {
            // Finds the last region matching alignment and size.
            self.regions
                .iter()
                .rev()
                .find(|range| {
                    range
                        .1
                        .checked_sub(size - 1)
                        .map_or(false, |start| start & !(alignment - 1) >= range.0)
                })
                .cloned()
        };

        match region {
            Some(slot) => {
                self.regions.remove(&slot);
                let start = if !reverse {
                    match slot.0 % alignment {
                        0 => slot.0,
                        r => slot.0 + alignment - r,
                    }
                } else {
                    (slot.1 - (size - 1)) & !(alignment - 1)
                };
                let end = start + size - 1;
                if slot.0 < start {
                    self.regions.insert((slot.0, start - 1));
                }
                if slot.1 > end {
                    self.regions.insert((end + 1, slot.1));
                }
                self.allocs.insert(alloc, (start, size, tag));

                Ok(start)
            }
            None => Err(Error::OutOfSpace),
        }
    }

    /// Allocates a range of addresses from the managed region, searching backwards from its
    /// end, with a tag and minimal alignment. Returns the allocated address.
    /// (allocated_address, size, tag) can be retrieved through the `get` method.
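    ///
    /// # Example
    ///
    /// A sketch of carving an allocation out of the top of the pool (illustrative only;
    /// marked `ignore` so it is not run as a doctest):
    ///
    /// ```ignore
    /// let mut pool = AddressAllocator::new(0x1000..=0x1FFF, Some(0x100), None)?;
    /// // The highest 0x100-aligned slot of size 0x100 in 0x1000..=0x1FFF starts at 0x1F00.
    /// let addr = pool.reverse_allocate_with_align(0x100, Alloc::Anon(0), "top".to_string(), 0x100)?;
    /// assert_eq!(addr, 0x1F00);
    /// ```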
    pub fn reverse_allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        self.internal_allocate_with_align(size, alloc, tag, alignment, true)
    }

    /// Allocates a range of addresses from the managed region with a tag
    /// and minimal alignment. Returns the allocated address. (allocated_address, size, tag)
    /// can be retrieved through the `get` method.
    pub fn allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        self.internal_allocate_with_align(size, alloc, tag, alignment, false)
    }

    pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
        if let Ok(pref_alloc) =
            self.allocate_with_align(size, alloc, tag.clone(), self.preferred_align)
        {
            return Ok(pref_alloc);
        }
        self.allocate_with_align(size, alloc, tag, self.min_align)
    }

    /// Allocates a range of addresses from the managed region with a tag
    /// and required location. Allocation alignment is not enforced.
    /// Returns OutOfSpace if the requested range is not available or ExistingAlloc if the
    /// requested range overlaps an existing allocation.
    pub fn allocate_at(&mut self, start: u64, size: u64, alloc: Alloc, tag: String) -> Result<()> {
        if self.allocs.contains_key(&alloc) {
            return Err(Error::ExistingAlloc(alloc));
        }
        if size == 0 {
            return Err(Error::AllocSizeZero);
        }

        let end = start.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
        match self
            .regions
            .iter()
            .find(|range| range.0 <= start && range.1 >= end)
            .cloned()
        {
            Some(slot) => {
                self.regions.remove(&slot);
                if slot.0 < start {
                    self.regions.insert((slot.0, start - 1));
                }
                if slot.1 > end {
                    self.regions.insert((end + 1, slot.1));
                }
                self.allocs.insert(alloc, (start, size, tag));

                Ok(())
            }
            None => {
                if let Some(existing_alloc) = self.find_overlapping(start, size) {
                    Err(Error::ExistingAlloc(existing_alloc))
                } else {
                    Err(Error::OutOfSpace)
                }
            }
        }
    }

    /// Releases an existing allocation back to the free pool.
    pub fn release(&mut self, alloc: Alloc) -> Result<()> {
        self.allocs
            .remove(&alloc)
            .map_or_else(|| Err(Error::BadAlloc(alloc)), |v| self.insert_at(v.0, v.1))
    }

    /// Releases the allocation that contains `value`.
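    ///
    /// # Example
    ///
    /// A sketch (illustrative only; marked `ignore` so it is not run as a doctest):
    ///
    /// ```ignore
    /// let mut pool = AddressAllocator::new(0x1000..=0x1FFF, None, None)?;
    /// pool.allocate_at(0x1200, 0x100, Alloc::Anon(0), "dev".to_string())?;
    /// // 0x1250 falls inside the allocation above, so releasing by value frees it.
    /// pool.release_containing(0x1250)?;
    /// assert_eq!(pool.get(&Alloc::Anon(0)), None);
    /// ```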
    pub fn release_containing(&mut self, value: u64) -> Result<()> {
        if let Some(alloc) = self.find_overlapping(value, 1) {
            self.release(alloc)
        } else {
            Err(Error::OutOfSpace)
        }
    }

    // Finds an existing allocation that overlaps the region defined by `address` and `size`. If
    // more than one allocation overlaps the given region, any of them may be returned, since the
    // HashMap iterator is not ordered in any particular way.
    fn find_overlapping(&self, start: u64, size: u64) -> Option<Alloc> {
        if size == 0 {
            return None;
        }

        let end = start.saturating_add(size - 1);
        self.allocs
            .iter()
            .find(|(_, &(alloc_start, alloc_size, _))| {
                let alloc_end = alloc_start + alloc_size;
                start < alloc_end && end >= alloc_start
            })
            .map(|(&alloc, _)| alloc)
    }

    /// Returns the allocation associated with `alloc`, or None if no such allocation exists.
    pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> {
        self.allocs.get(alloc)
    }

    /// Inserts a range of addresses into the pool, coalescing neighboring regions.
    fn insert_at(&mut self, start: u64, size: u64) -> Result<()> {
        if size == 0 {
            return Err(Error::AllocSizeZero);
        }

        let mut slot = (start, start.checked_add(size - 1).ok_or(Error::OutOfSpace)?);
        let mut left = None;
        let mut right = None;
        // Simple coalescing with a linear search over free regions.
        //
        // By calculating the distance between the start and end of two regions we can
        // detect whether they are disjoint (>1), adjacent (=1) or possibly
        // overlapping (<1). Saturating arithmetic is used to avoid overflow.
        // Overlapping regions are detected if both opposite ends are overlapping.
        // The algorithm assumes all existing regions are disjoint and represented
        // as a pair of inclusive location points (start, end), where end >= start.
        for range in self.regions.iter() {
            match (
                slot.0.saturating_sub(range.1),
                range.0.saturating_sub(slot.1),
            ) {
                (1, 0) => {
                    left = Some(*range);
                }
                (0, 1) => {
                    right = Some(*range);
                }
                (0, 0) => {
                    return Err(Error::RegionOverlap { base: start, size });
                }
                (_, _) => (),
            }
        }
        if let Some(left) = left {
            self.regions.remove(&left);
            slot.0 = left.0;
        }
        if let Some(right) = right {
            self.regions.remove(&right);
            slot.1 = right.1;
        }
        self.regions.insert(slot);

        Ok(())
    }

    /// Returns an address from the associated PCI `alloc` given an allocation offset and size.
    pub fn address_from_pci_offset(&self, alloc: Alloc, offset: u64, size: u64) -> Result<u64> {
        match alloc {
            Alloc::PciBar { .. } => (),
            _ => return Err(Error::InvalidAlloc(alloc)),
        };

        match self.allocs.get(&alloc) {
            Some((start_addr, length, _)) => {
                let address = start_addr.checked_add(offset).ok_or(Error::OutOfBounds)?;
                let range = *start_addr..*start_addr + *length;
                let end = address.checked_add(size).ok_or(Error::OutOfBounds)?;
                match (range.contains(&address), range.contains(&end)) {
                    (true, true) => Ok(address),
                    _ => Err(Error::OutOfBounds),
                }
            }
            None => Err(Error::InvalidAlloc(alloc)),
        }
    }
}

/// Contains a set of `AddressAllocator`s for allocating address ranges.
/// When attempting an allocation, each allocator will be tried in order until
/// the allocation is successful.
/// See `AddressAllocator` for function documentation.
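///
/// # Example
///
/// A sketch of falling back across two allocators (illustrative only; marked `ignore` so it
/// is not run as a doctest):
///
/// ```ignore
/// let mut allocators = [
///     AddressAllocator::new(0x1000..=0x1FFF, None, None)?,
///     AddressAllocator::new(0x4000..=0x7FFF, None, None)?,
/// ];
/// let mut set = AddressAllocatorSet::new(&mut allocators);
/// // 0x2000 bytes do not fit in the first pool, so the second one satisfies the request.
/// let addr = set.allocate(0x2000, Alloc::Anon(0), "big".to_string())?;
/// assert_eq!(addr, 0x4000);
/// ```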
pub struct AddressAllocatorSet<'a> {
    allocators: &'a mut [AddressAllocator],
}

impl<'a> AddressAllocatorSet<'a> {
    pub fn new(allocators: &'a mut [AddressAllocator]) -> Self {
        AddressAllocatorSet { allocators }
    }

    pub fn allocate_with_align(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        alignment: u64,
    ) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate_with_align(size, alloc, tag.clone(), alignment);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate(size, alloc, tag.clone());
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn allocate_at(&mut self, start: u64, size: u64, alloc: Alloc, tag: String) -> Result<()> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.allocate_at(start, size, alloc, tag.clone());
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn release(&mut self, alloc: Alloc) -> Result<()> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter_mut() {
            last_res = allocator.release(alloc);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }

    pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> {
        for allocator in self.allocators.iter() {
            let opt = allocator.get(alloc);
            if opt.is_some() {
                return opt;
            }
        }
        None
    }

    pub fn address_from_pci_offset(&self, alloc: Alloc, offset: u64, size: u64) -> Result<u64> {
        let mut last_res = Err(Error::OutOfSpace);
        for allocator in self.allocators.iter() {
            last_res = allocator.address_from_pci_offset(alloc, offset, size);
            if last_res.is_ok() {
                return last_res;
            }
        }
        last_res
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example() {
        // Anon is used for brevity. Don't manually instantiate Anon allocs!
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), "caps".to_string()),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), "cache".to_string()),
            Ok(0x1200)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(2), "etc".to_string()),
            Ok(0x1300)
        );
        assert_eq!(
            pool.get(&Alloc::Anon(1)),
            Some(&(0x1200, 0x100, "cache".to_string()))
        );
    }
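
    // A sketch (added for illustration; constants are arbitrary) of the fallback in
    // `allocate`: the preferred alignment is tried first, then the minimum alignment.
    #[test]
    fn allocate_preferred_alignment_fallback() {
        let mut pool = AddressAllocator::new(
            RangeInclusive::new(0x1000, 0x1FFF),
            Some(0x100),
            Some(0x1000),
        )
        .unwrap();
        // The first allocation can be placed with the preferred 0x1000 alignment.
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(0), String::from("first")),
            Ok(0x1000)
        );
        // No 0x1000-aligned slot remains, so `allocate` falls back to the 0x100 minimum.
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), String::from("second")),
            Ok(0x1100)
        );
    }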

    #[test]
    fn new_fails_size_zero() {
        assert!(AddressAllocator::new(RangeInclusive::new(0x1000, 0), None, None).is_err());
    }

    #[test]
    fn new_fails_alignment_zero() {
        assert!(AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0), None).is_err());
    }

    #[test]
    fn new_fails_alignment_non_power_of_two() {
        assert!(
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(200), None).is_err()
        );
    }

    #[test]
    fn allocate_fails_existing_alloc() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Err(Error::ExistingAlloc(Alloc::Anon(0)))
        );
    }

    #[test]
    fn allocate_fails_not_enough_space() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x900, Alloc::Anon(1), String::from("bar1")),
            Err(Error::OutOfSpace)
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(2), String::from("bar2")),
            Ok(0x1800)
        );
    }

    #[test]
    fn allocate_with_special_alignment() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x10, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_at(0x1200, 0x100, Alloc::Anon(1), String::from("bar1")),
            Ok(())
        );
        assert_eq!(
            pool.allocate_with_align(0x800, Alloc::Anon(2), String::from("bar2"), 0x800),
            Ok(0x1800)
        );
    }
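
    // A sketch (added for illustration; constants are arbitrary) of `new_from_list` with
    // several disjoint pools: a request that does not fit the first pool is satisfied
    // from the next one.
    #[test]
    fn new_from_list_allocates_across_pools() {
        let mut pool = AddressAllocator::new_from_list(
            vec![
                RangeInclusive::new(0x1000, 0x10FF),
                RangeInclusive::new(0x2000, 0x2FFF),
            ],
            None,
            None,
        )
        .unwrap();
        // 0x200 bytes do not fit in the 0x100-byte first pool, so the second pool is used.
        assert_eq!(
            pool.allocate(0x200, Alloc::Anon(0), String::from("bar0")),
            Ok(0x2000)
        );
    }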

    #[test]
    fn allocate_and_split_allocate_at() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(1), None).unwrap();
        // 0x1200..0x1a00
        assert_eq!(
            pool.allocate_at(0x1200, 0x800, Alloc::Anon(0), String::from("bar0")),
            Ok(())
        );
        assert_eq!(
            pool.allocate(0x800, Alloc::Anon(1), String::from("bar1")),
            Err(Error::OutOfSpace)
        );
        // 0x1a00..0x2000
        assert_eq!(
            pool.allocate(0x600, Alloc::Anon(2), String::from("bar2")),
            Ok(0x1a00)
        );
        // 0x1000..0x1200
        assert_eq!(
            pool.allocate(0x200, Alloc::Anon(3), String::from("bar3")),
            Ok(0x1000)
        );
        // 0x1b00..0x1c00 (overlaps with 0x1a00..0x2000)
        assert_eq!(
            pool.allocate_at(0x1b00, 0x100, Alloc::Anon(4), String::from("bar4")),
            Err(Error::ExistingAlloc(Alloc::Anon(2)))
        );
        // 0x1fff..0x2000 (overlaps with 0x1a00..0x2000)
        assert_eq!(
            pool.allocate_at(0x1fff, 1, Alloc::Anon(5), String::from("bar5")),
            Err(Error::ExistingAlloc(Alloc::Anon(2)))
        );
        // 0x1200..0x1201 (overlaps with 0x1200..0x1a00)
        assert_eq!(
            pool.allocate_at(0x1200, 1, Alloc::Anon(6), String::from("bar6")),
            Err(Error::ExistingAlloc(Alloc::Anon(0)))
        );
        // 0x11ff..0x1200 (overlaps with 0x1000..0x1200)
        assert_eq!(
            pool.allocate_at(0x11ff, 1, Alloc::Anon(7), String::from("bar7")),
            Err(Error::ExistingAlloc(Alloc::Anon(3)))
        );
        // 0x1100..0x1300 (overlaps with 0x1000..0x1200 and 0x1200..0x1a00)
        match pool.allocate_at(0x1100, 0x200, Alloc::Anon(8), String::from("bar8")) {
            Err(Error::ExistingAlloc(Alloc::Anon(0) | Alloc::Anon(3))) => {}
            x => panic!("unexpected result {:?}", x),
        }
    }

    #[test]
    fn allocate_alignment() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_retrieve_alloc() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate(0x110, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.get(&Alloc::Anon(0)),
            Some(&(0x1000, 0x110, String::from("bar0")))
        );
    }

    #[test]
    fn allocate_with_alignment_allocator_alignment() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x1),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x1),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_with_alignment_custom_alignment() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), Some(0x4), None).unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x100),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_with_alignment_no_allocator_alignment() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
        assert_eq!(
            pool.allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(1), String::from("bar1"), 0x100),
            Ok(0x1200)
        );
    }

    #[test]
    fn allocate_with_alignment_alignment_non_power_of_two() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
        assert!(pool
            .allocate_with_align(0x110, Alloc::Anon(0), String::from("bar0"), 200)
            .is_err());
    }
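
    // A sketch (added for illustration; constants are arbitrary) of
    // `reverse_allocate_with_align`, which takes the highest aligned slot rather than
    // the lowest.
    #[test]
    fn reverse_allocate_returns_highest_slot() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), Some(0x100), None).unwrap();
        assert_eq!(
            pool.reverse_allocate_with_align(0x100, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1F00)
        );
        // A forward allocation still comes from the low end of the pool.
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(1), String::from("bar1")),
            Ok(0x1000)
        );
    }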

    #[test]
    fn allocate_with_release() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
        assert_eq!(
            pool.allocate_with_align(0x100, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
        assert!(pool.release(Alloc::Anon(0)).is_ok());
        assert_eq!(
            pool.allocate_with_align(0x1000, Alloc::Anon(0), String::from("bar0"), 0x100),
            Ok(0x1000)
        );
    }

    #[test]
    fn coalescing_and_overlap() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
        assert!(pool.insert_at(0x3000, 0x1000).is_ok());
        assert!(pool.insert_at(0x1fff, 0x20).is_err());
        assert!(pool.insert_at(0x2ff1, 0x10).is_err());
        assert!(pool.insert_at(0x1800, 0x1000).is_err());
        assert!(pool.insert_at(0x2000, 0x1000).is_ok());
        assert_eq!(
            pool.allocate(0x3000, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
    }

    #[test]
    fn coalescing_single_addresses() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
        assert!(pool.insert_at(0x2001, 1).is_ok());
        assert!(pool.insert_at(0x2003, 1).is_ok());
        assert!(pool.insert_at(0x2000, 1).is_ok());
        assert!(pool.insert_at(0x2002, 1).is_ok());
        assert_eq!(
            pool.allocate(0x1004, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
    }

    #[test]
    fn allocate_and_verify_pci_offset() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0xFFFF), None, None).unwrap();
        let pci_bar0 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 0,
        };
        let pci_bar1 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 1,
        };
        let pci_bar2 = Alloc::PciBar {
            bus: 1,
            dev: 2,
            func: 0,
            bar: 2,
        };
        let anon = Alloc::Anon(1);

        assert_eq!(
            pool.allocate(0x800, pci_bar0, String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(
            pool.allocate(0x800, pci_bar1, String::from("bar1")),
            Ok(0x1800)
        );
        assert_eq!(pool.allocate(0x800, anon, String::from("anon")), Ok(0x2000));

        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x600, 0x100),
            Ok(0x1600)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar1, 0x600, 0x100),
            Ok(0x1E00)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x7FE, 0x001),
            Ok(0x17FE)
        );
        assert_eq!(
            pool.address_from_pci_offset(pci_bar0, 0x7FF, 0x001),
            Err(Error::OutOfBounds)
        );

        assert_eq!(
            pool.address_from_pci_offset(pci_bar2, 0x7FF, 0x001),
            Err(Error::InvalidAlloc(pci_bar2))
        );

        assert_eq!(
            pool.address_from_pci_offset(anon, 0x600, 0x100),
            Err(Error::InvalidAlloc(anon))
        );
    }
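
    // A sketch (added for illustration) showing that `pools` reports the ranges the
    // allocator was created with, regardless of any allocations made since.
    #[test]
    fn pools_returns_original_ranges() {
        let mut pool =
            AddressAllocator::new(RangeInclusive::new(0x1000, 0x1FFF), None, None).unwrap();
        assert_eq!(
            pool.allocate(0x100, Alloc::Anon(0), String::from("bar0")),
            Ok(0x1000)
        );
        assert_eq!(pool.pools(), &vec![RangeInclusive::new(0x1000, 0x1FFF)]);
    }
}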