// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Manages allocating system resources such as address space and interrupt numbers.

use std::collections::btree_map;
use std::collections::BTreeMap;

use base::pagesize;

use crate::address_allocator::AddressAllocator;
use crate::address_allocator::AddressAllocatorSet;
use crate::AddressRange;
use crate::Alloc;
use crate::Error;
use crate::PciAddress;
use crate::Result;

/// MMIO address type.
///    Low: address allocated from low_address_space
///    High: address allocated from high_address_space
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum MmioType {
    Low,
    High,
}

/// Memory allocation options.
#[derive(Copy, Clone, Debug)]
pub struct AllocOptions {
    prefetchable: bool,
    max_address: u64,
    alignment: Option<u64>,
    top_down: bool,
}

impl Default for AllocOptions {
    fn default() -> Self {
        AllocOptions::new()
    }
}

impl AllocOptions {
    pub fn new() -> Self {
        AllocOptions {
            prefetchable: false,
            max_address: u64::MAX,
            alignment: None,
            top_down: false,
        }
    }

    /// If `true`, memory may be allocated in a prefetchable/cacheable region.
    /// If `false`, memory must be allocated within a non-prefetchable region, appropriate for
    /// device registers.
    /// Default: `false`
    pub fn prefetchable(&mut self, prefetchable: bool) -> &mut Self {
        self.prefetchable = prefetchable;
        self
    }

    /// Largest valid address for the end of the allocated region.
    /// For example, `u32::MAX` may be used to allocate a region that is addressable with a 32-bit
    /// pointer.
    /// Default: `u64::MAX`
    pub fn max_address(&mut self, max_address: u64) -> &mut Self {
        self.max_address = max_address;
        self
    }

    /// Minimum alignment of the allocated address.
    /// Default: `None` (allocation preference of the address allocator pool will be used)
    pub fn align(&mut self, alignment: u64) -> &mut Self {
        self.alignment = Some(alignment);
        self
    }

    /// If `true`, prefer allocating from the upper end of the region rather than the low end.
    /// Default: `false`
    pub fn top_down(&mut self, top_down: bool) -> &mut Self {
        self.top_down = top_down;
        self
    }
}
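
// Illustrative usage sketch (not part of the original file): since each setter
// returns `&mut Self`, callers can chain the options and pass the result
// directly where a `&AllocOptions` is expected (a `&mut` reference coerces to
// a shared one), e.g.:
//
//     let addr = allocator.allocate_mmio(
//         0x1000,
//         alloc,
//         "bar".to_string(),
//         AllocOptions::new().max_address(u32::MAX.into()).align(0x1000),
//     )?;
//
// Here `allocator` and `alloc` are assumed to be a `SystemAllocator` and an
// `Alloc` tag obtained elsewhere.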

pub struct SystemAllocatorConfig {
    /// IO ports. Only for x86_64.
    pub io: Option<AddressRange>,
    /// Low (<=4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::Low allocator. However, no new
    /// regions will be added.
    pub low_mmio: AddressRange,
    /// High (>4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::High allocator. However, no new
    /// regions will be added.
    pub high_mmio: AddressRange,
    /// Platform MMIO space. Only for ARM.
    pub platform_mmio: Option<AddressRange>,
    /// The first IRQ number to give out.
    pub first_irq: u32,
}

#[derive(Debug)]
pub struct SystemAllocator {
    io_address_space: Option<AddressAllocator>,

    // Indexed by MmioType::Low and MmioType::High.
    mmio_address_spaces: [AddressAllocator; 2],
    mmio_platform_address_spaces: Option<AddressAllocator>,

    reserved_region: Option<AddressRange>,

    // Each bus number has an AddressAllocator.
    pci_allocator: BTreeMap<u8, AddressAllocator>,
    irq_allocator: AddressAllocator,
    gpe_allocator: AddressAllocator,
    next_anon_id: usize,
}

impl SystemAllocator {
    /// Creates a new `SystemAllocator` for managing addresses and irq numbers.
    /// Will return an error if `base` + `size` overflows u64 (or allowed
    /// maximum for the specific type), or if alignment isn't a power of two.
    ///
    /// If `reserve_region_size` is not None, then a region is reserved from
    /// the start of `config.high_mmio` before the mmio allocator is created.
    ///
    /// If `mmio_address_ranges` is not empty, then `config.low_mmio` and
    /// `config.high_mmio` are intersected with the ranges specified.
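    ///
    /// Worked example (illustrative): with `config.high_mmio` covering
    /// `0x1_0000_0000..=0x1_ffff_ffff` and a `reserve_region_size` of
    /// `Some(0x10_0000)`, the reserved region becomes
    /// `0x1_0000_0000..=0x1_000f_ffff` and the high MMIO allocator manages
    /// `0x1_0010_0000..=0x1_ffff_ffff`.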
    pub fn new(
        config: SystemAllocatorConfig,
        reserve_region_size: Option<u64>,
        mmio_address_ranges: &[AddressRange],
    ) -> Result<Self> {
        let page_size = pagesize() as u64;

        let (high_mmio, reserved_region) = match reserve_region_size {
            Some(reserved_len) => {
                let high_mmio_len = config.high_mmio.len().ok_or(Error::OutOfBounds)?;
                if reserved_len > high_mmio_len {
                    return Err(Error::OutOfSpace);
                }
                let reserved_start = config.high_mmio.start;
                let reserved_end = reserved_start + reserved_len - 1;
                let high_mmio_start = reserved_end + 1;
                let high_mmio_end = config.high_mmio.end;
                (
                    AddressRange {
                        start: high_mmio_start,
                        end: high_mmio_end,
                    },
                    Some(AddressRange {
                        start: reserved_start,
                        end: reserved_end,
                    }),
                )
            }
            None => (config.high_mmio, None),
        };

        let intersect_mmio_range = |src_range: AddressRange| -> Result<Vec<AddressRange>> {
            Ok(if mmio_address_ranges.is_empty() {
                vec![src_range]
            } else {
                mmio_address_ranges
                    .iter()
                    .map(|r| r.intersect(src_range))
                    .collect()
            })
        };

        Ok(SystemAllocator {
            io_address_space: if let Some(io) = config.io {
                // TODO make sure we don't overlap with existing well-known
                // ports such as 0xcf8 (the PCI configuration address port).
                if io.end > 0xffff {
                    return Err(Error::IOPortOutOfRange(io));
                }
                Some(AddressAllocator::new(io, Some(0x400), None)?)
            } else {
                None
            },
            mmio_address_spaces: [
                // MmioType::Low
                AddressAllocator::new_from_list(
                    intersect_mmio_range(config.low_mmio)?,
                    Some(page_size),
                    None,
                )?,
                // MmioType::High
                AddressAllocator::new_from_list(
                    intersect_mmio_range(high_mmio)?,
                    Some(page_size),
                    None,
                )?,
            ],

            pci_allocator: BTreeMap::new(),

            mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
                Some(AddressAllocator::new(platform, Some(page_size), None)?)
            } else {
                None
            },

            reserved_region,

            irq_allocator: AddressAllocator::new(
                AddressRange {
                    start: config.first_irq as u64,
                    end: 1023,
                },
                Some(1),
                None,
            )?,

            // GPE range depends on ACPIPM_RESOURCE_GPE0_BLK_LEN, which is used to determine
            // ACPIPM_GPE_MAX. The AddressRange should be kept in sync with ACPIPM_GPE_MAX. The
            // value is hard-coded here because the devices lib (where the ACPIPM_* consts are
            // defined) depends on the resources lib, so using those consts directly would
            // introduce a cyclic dependency.
            gpe_allocator: AddressAllocator::new(
                AddressRange { start: 0, end: 255 },
                Some(1),
                None,
            )?,
            next_anon_id: 0,
        })
    }

    /// Reserves the next available system irq number.
    pub fn allocate_irq(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate(1, id, "irq-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    /// Releases an irq back to the system irq number pool.
    pub fn release_irq(&mut self, irq: u32) {
        let _ = self.irq_allocator.release_containing(irq.into());
    }

    /// Reserves the specified system irq number.
    pub fn reserve_irq(&mut self, irq: u32) -> bool {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate_at(
                AddressRange {
                    start: irq.into(),
                    end: irq.into(),
                },
                id,
                "irq-fixed".to_string(),
            )
            .is_ok()
    }

    /// Reserves the next available system GPE number.
    pub fn allocate_gpe(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.gpe_allocator
            .allocate(1, id, "gpe-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    fn get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator> {
        match self.pci_allocator.entry(bus) {
            btree_map::Entry::Occupied(entry) => Some(entry.into_mut()),
            btree_map::Entry::Vacant(entry) => {
                // The PCI root occupies 00:00.0, so on bus 0 allocation starts at 00:01.0
                // (a device's first function must be function number zero).
                let base = if bus == 0 { 8 } else { 0 };

                // Each bus supports up to 32 (devices) x 8 (functions).
                // Prefer allocating at device granularity (preferred_align = 8), but fall back to
                // allocating individual functions (min_align = 1) when we run out of devices.
                let pci_alloc = AddressAllocator::new(
                    AddressRange {
                        start: base,
                        end: (32 * 8) - 1,
                    },
                    Some(1),
                    Some(8),
                )
                .ok()?;

                Some(entry.insert(pci_alloc))
            }
        }
    }

    /// Returns `true` if no devices have been allocated on the specified bus.
    pub fn pci_bus_empty(&self, bus: u8) -> bool {
        !self.pci_allocator.contains_key(&bus)
    }

    /// Allocates a PCI slot location.
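    ///
    /// For illustration: the allocator hands out device/function indices; on an
    /// otherwise empty bus 0, where indices 0-7 are skipped for the root device,
    /// the first allocation is index 8, which decodes to 00:01.0 (dev 1, func 0).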
    pub fn allocate_pci(&mut self, bus: u8, tag: String) -> Option<PciAddress> {
        let id = self.get_anon_alloc();
        let allocator = self.get_pci_allocator_mut(bus)?;
        allocator
            .allocate(1, id, tag)
            .map(|v| PciAddress {
                bus,
                dev: (v >> 3) as u8,
                func: (v & 7) as u8,
            })
            .ok()
    }

    /// Reserves a PCI slot location.
    pub fn reserve_pci(&mut self, pci_addr: PciAddress, tag: String) -> bool {
        let id = self.get_anon_alloc();

        let allocator = match self.get_pci_allocator_mut(pci_addr.bus) {
            Some(v) => v,
            None => return false,
        };
        let df = ((pci_addr.dev as u64) << 3) | (pci_addr.func as u64);
        allocator
            .allocate_at(AddressRange { start: df, end: df }, id, tag)
            .is_ok()
    }

    /// Releases a PCI slot location.
    pub fn release_pci(&mut self, pci_addr: PciAddress) -> bool {
        let allocator = match self.get_pci_allocator_mut(pci_addr.bus) {
            Some(v) => v,
            None => return false,
        };
        let df = ((pci_addr.dev as u64) << 3) | (pci_addr.func as u64);
        allocator.release_containing(df).is_ok()
    }

    /// Allocates a memory-mapped I/O region with the properties requested in `opts`.
    pub fn allocate_mmio(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
    ) -> Result<u64> {
        // For now, there is no way to ensure allocations fit in less than 32 bits.
        // This can be removed once AddressAllocator accepts AllocOptions.
        if opts.max_address < u32::MAX as u64 {
            return Err(Error::OutOfSpace);
        }

        let mut mmio_type = MmioType::High;
        if opts.max_address < u64::MAX || !opts.prefetchable {
            mmio_type = MmioType::Low;
        }

        let res = self.allocate_mmio_internal(size, alloc, tag.clone(), opts, mmio_type);
        // If a high allocation failed, retry in low. The reverse is not valid, since the address
        // may be out of range and/or prefetchable memory may not be appropriate.
        if mmio_type == MmioType::High && matches!(res, Err(Error::OutOfSpace)) {
            self.allocate_mmio_internal(size, alloc, tag, opts, MmioType::Low)
        } else {
            res
        }
    }

    fn allocate_mmio_internal(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
        mmio_type: MmioType,
    ) -> Result<u64> {
        let allocator = &mut self.mmio_address_spaces[mmio_type as usize];
        match (opts.alignment, opts.top_down) {
            (Some(align), true) => allocator.reverse_allocate_with_align(size, alloc, tag, align),
            (Some(align), false) => allocator.allocate_with_align(size, alloc, tag, align),
            (None, true) => allocator.reverse_allocate(size, alloc, tag),
            (None, false) => allocator.allocate(size, alloc, tag),
        }
    }

    /// Reserves the specified range from PCI MMIO: computes the overlap of the range
    /// with the MMIO pools and excludes that overlap from the MMIO allocators.
    ///
    /// If any part of the specified range has already been allocated, returns an error.
    pub fn reserve_mmio(&mut self, range: AddressRange) -> Result<()> {
        let mut pools = Vec::new();
        for pool in self.mmio_pools() {
            pools.push(*pool);
        }
        pools.sort_by(|a, b| a.start.cmp(&b.start));
        for pool in &pools {
            if pool.start > range.end {
                break;
            }

            let overlap = pool.intersect(range);
            if !overlap.is_empty() {
                let id = self.get_anon_alloc();
                self.mmio_allocator_any().allocate_at(
                    overlap,
                    id,
                    "pci mmio reserve".to_string(),
                )?;
            }
        }

        Ok(())
    }

    /// Gets an allocator to be used for platform device MMIO allocation.
    pub fn mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.mmio_platform_address_spaces.as_mut()
    }

    /// Gets an allocator to be used for IO memory.
    pub fn io_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.io_address_space.as_mut()
    }

    /// Gets an allocator to be used for MMIO allocation.
    ///    MmioType::Low: low mmio allocator
    ///    MmioType::High: high mmio allocator
    pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
        &mut self.mmio_address_spaces[mmio_type as usize]
    }

    /// Gets a set of allocators to be used for MMIO allocation.
    /// The set of allocators will try the low and high MMIO allocators, in that order.
    pub fn mmio_allocator_any(&mut self) -> AddressAllocatorSet {
        AddressAllocatorSet::new(&mut self.mmio_address_spaces)
    }

    /// Gets the pools of all mmio allocators.
    pub fn mmio_pools(&self) -> Vec<&AddressRange> {
        self.mmio_address_spaces
            .iter()
            .flat_map(|mmio_as| mmio_as.pools())
            .collect()
    }

    /// Gets the reserved address space region.
    pub fn reserved_region(&self) -> Option<AddressRange> {
        self.reserved_region
    }

    /// Gets a unique anonymous allocation.
    pub fn get_anon_alloc(&mut self) -> Alloc {
        self.next_anon_id += 1;
        Alloc::Anon(self.next_anon_id)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: Some(AddressRange {
                    start: 0x1000,
                    end: 0xffff,
                }),
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1000_0000,
                    end: 0x1fff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        assert_eq!(a.allocate_irq(), Some(5));
        assert_eq!(a.allocate_irq(), Some(6));
        assert_eq!(a.allocate_gpe(), Some(0));
        assert_eq!(a.allocate_gpe(), Some(1));
        assert_eq!(
            a.mmio_allocator(MmioType::High).allocate(
                0x100,
                Alloc::PciBar {
                    bus: 0,
                    dev: 0,
                    func: 0,
                    bar: 0
                },
                "bar0".to_string()
            ),
            Ok(0x1000_0000)
        );
        assert_eq!(
            a.mmio_allocator(MmioType::High).get(&Alloc::PciBar {
                bus: 0,
                dev: 0,
                func: 0,
                bar: 0
            }),
            Some(&(
                AddressRange {
                    start: 0x1000_0000,
                    end: 0x1000_00ff
                },
                "bar0".to_string()
            ))
        );

        let id = a.get_anon_alloc();
        assert_eq!(
            a.mmio_allocator(MmioType::Low).allocate_at(
                AddressRange {
                    start: 0x3000_5000,
                    end: 0x3000_9fff
                },
                id,
                "Test".to_string()
            ),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low).release(id),
            Ok(AddressRange {
                start: 0x3000_5000,
                end: 0x3000_9fff
            })
        );
        assert_eq!(
            a.reserve_mmio(AddressRange {
                start: 0x3000_2000,
                end: 0x3000_5fff
            }),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low)
                .allocate_at(
                    AddressRange {
                        start: 0x3000_5000,
                        end: 0x3000_9fff
                    },
                    id,
                    "Test".to_string()
                )
                .is_err(),
            true
        );
    }
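
    // Illustrative sketch, not part of the original test suite: exercises
    // `allocate_mmio` with the default `AllocOptions` (non-prefetchable), which
    // must be satisfied from the low (<= 4 GB) pool even though the high pool
    // has space.
    #[test]
    fn alloc_options_defaults_use_low_pool() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: None,
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1_0000_0000,
                    end: 0x1_ffff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        let id = a.get_anon_alloc();
        let addr = a
            .allocate_mmio(0x1000, id, "regs".to_string(), &AllocOptions::new())
            .unwrap();
        // The first low-pool allocation starts at the beginning of low_mmio.
        assert_eq!(addr, 0x3000_0000);
    }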
}