// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::BTreeMap;

use base::pagesize;

use crate::address_allocator::AddressAllocator;
use crate::address_allocator::AddressAllocatorSet;
use crate::AddressRange;
use crate::Alloc;
use crate::Error;
use crate::Result;

/// MMIO address type:
///    Low: address allocated from low_address_space
///    High: address allocated from high_address_space
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum MmioType {
    Low,
    High,
}

/// Memory allocation options.
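///
/// # Example
///
/// A minimal sketch of the builder pattern; the values chosen here are
/// illustrative assumptions, not recommendations:
///
/// ```ignore
/// let mut opts = AllocOptions::new();
/// // Keep the allocation 32-bit addressable, page-aligned, and near the top
/// // of the pool.
/// opts.max_address(u32::MAX.into()).align(4096).top_down(true);
/// ```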
#[derive(Copy, Clone, Debug)]
pub struct AllocOptions {
    prefetchable: bool,
    max_address: u64,
    alignment: Option<u64>,
    top_down: bool,
}

impl Default for AllocOptions {
    fn default() -> Self {
        AllocOptions::new()
    }
}

impl AllocOptions {
    pub fn new() -> Self {
        AllocOptions {
            prefetchable: false,
            max_address: u64::MAX,
            alignment: None,
            top_down: false,
        }
    }

    /// If `true`, memory may be allocated in a prefetchable/cacheable region.
    /// If `false`, memory must be allocated within a non-prefetchable region, appropriate for
    /// device registers.
    /// Default: `false`
    pub fn prefetchable(&mut self, prefetchable: bool) -> &mut Self {
        self.prefetchable = prefetchable;
        self
    }

    /// Largest valid address for the end of the allocated region.
    /// For example, `u32::MAX` may be used to allocate a region that is addressable with a 32-bit
    /// pointer.
    /// Default: `u64::MAX`
    pub fn max_address(&mut self, max_address: u64) -> &mut Self {
        self.max_address = max_address;
        self
    }

    /// Minimum alignment of the allocated address.
    /// Default: `None` (the allocation preference of the address allocator pool will be used)
    pub fn align(&mut self, alignment: u64) -> &mut Self {
        self.alignment = Some(alignment);
        self
    }

    /// If `true`, prefer allocating from the upper end of the region rather than the low end.
    /// Default: `false`
    pub fn top_down(&mut self, top_down: bool) -> &mut Self {
        self.top_down = top_down;
        self
    }
}

pub struct SystemAllocatorConfig {
    /// IO ports. Only for x86_64.
    pub io: Option<AddressRange>,
    /// Low (<=4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::Low allocator. However, no new
    /// regions will be added.
    pub low_mmio: AddressRange,
    /// High (>4GB) MMIO region.
    ///
    /// Parts of this region may be reserved or otherwise excluded from the
    /// created SystemAllocator's MmioType::High allocator. However, no new
    /// regions will be added.
    pub high_mmio: AddressRange,
    /// Platform MMIO space. Only for ARM.
    pub platform_mmio: Option<AddressRange>,
    /// The first IRQ number to give out.
    pub first_irq: u32,
}

/// Manages allocating system resources such as address space and interrupt numbers.
#[derive(Debug)]
pub struct SystemAllocator {
    io_address_space: Option<AddressAllocator>,

    // Indexed by MmioType::Low and MmioType::High.
    mmio_address_spaces: [AddressAllocator; 2],
    mmio_platform_address_spaces: Option<AddressAllocator>,

    reserved_region: Option<AddressRange>,

    // Each bus number has its own AddressAllocator.
    pci_allocator: BTreeMap<u8, AddressAllocator>,
    irq_allocator: AddressAllocator,
    gpe_allocator: AddressAllocator,
    next_anon_id: usize,
}

impl SystemAllocator {
    /// Creates a new `SystemAllocator` for managing addresses and irq numbers.
    /// Returns an error if a range's `start` + `len` overflows u64 (or the allowed
    /// maximum for the specific type), or if an alignment isn't a power of two.
    ///
    /// If `reserve_region_size` is not None, then a region is reserved from
    /// the start of `config.high_mmio` before the mmio allocator is created.
    ///
    /// If `mmio_address_ranges` is not empty, then `config.low_mmio` and
    /// `config.high_mmio` are intersected with the ranges specified.
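    ///
    /// # Example
    ///
    /// A sketch of the reservation behavior, assuming `config.high_mmio` starts at
    /// `0x1_0000_0000`:
    ///
    /// ```ignore
    /// let allocator = SystemAllocator::new(config, Some(0x1000), &[])?;
    /// // The reserved region is carved from the start of high_mmio, so the
    /// // high MMIO allocator itself begins at 0x1_0000_1000.
    /// assert_eq!(
    ///     allocator.reserved_region(),
    ///     Some(AddressRange { start: 0x1_0000_0000, end: 0x1_0000_0fff })
    /// );
    /// ```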
    pub fn new(
        config: SystemAllocatorConfig,
        reserve_region_size: Option<u64>,
        mmio_address_ranges: &[AddressRange],
    ) -> Result<Self> {
        let page_size = pagesize() as u64;

        let (high_mmio, reserved_region) = match reserve_region_size {
            Some(reserved_len) => {
                let high_mmio_len = config.high_mmio.len().ok_or(Error::OutOfBounds)?;
                if reserved_len > high_mmio_len {
                    return Err(Error::OutOfSpace);
                }
                let reserved_start = config.high_mmio.start;
                let reserved_end = reserved_start + reserved_len - 1;
                let high_mmio_start = reserved_end + 1;
                let high_mmio_end = config.high_mmio.end;
                (
                    AddressRange {
                        start: high_mmio_start,
                        end: high_mmio_end,
                    },
                    Some(AddressRange {
                        start: reserved_start,
                        end: reserved_end,
                    }),
                )
            }
            None => (config.high_mmio, None),
        };

        let intersect_mmio_range = |src_range: AddressRange| -> Result<Vec<AddressRange>> {
            Ok(if mmio_address_ranges.is_empty() {
                vec![src_range]
            } else {
                mmio_address_ranges
                    .iter()
                    .map(|r| r.intersect(src_range))
                    .collect()
            })
        };

        Ok(SystemAllocator {
            io_address_space: if let Some(io) = config.io {
                // TODO make sure we don't overlap with existing well-known
                // ports such as 0xcf8 (PCI CONFIG_ADDRESS) or 0x3f8 (serial).
                if io.end > 0xffff {
                    return Err(Error::IOPortOutOfRange(io));
                }
                Some(AddressAllocator::new(io, Some(0x400), None)?)
            } else {
                None
            },
            mmio_address_spaces: [
                // MmioType::Low
                AddressAllocator::new_from_list(
                    intersect_mmio_range(config.low_mmio)?,
                    Some(page_size),
                    None,
                )?,
                // MmioType::High
                AddressAllocator::new_from_list(
                    intersect_mmio_range(high_mmio)?,
                    Some(page_size),
                    None,
                )?,
            ],

            pci_allocator: BTreeMap::new(),

            mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
                Some(AddressAllocator::new(platform, Some(page_size), None)?)
            } else {
                None
            },

            reserved_region,

            irq_allocator: AddressAllocator::new(
                AddressRange {
                    start: config.first_irq as u64,
                    end: 1023,
                },
                Some(1),
                None,
            )?,

            // GPE range depends on ACPIPM_RESOURCE_GPE0_BLK_LEN, which is used to determine
            // ACPIPM_GPE_MAX. The AddressRange should be in sync with ACPIPM_GPE_MAX. The
            // hard-coded value is used because the devices lib (where the ACPIPM_* consts are
            // defined) depends on the resources lib, so using those consts here would
            // introduce a cyclic dependency.
            gpe_allocator: AddressAllocator::new(
                AddressRange { start: 0, end: 255 },
                Some(1),
                None,
            )?,
            next_anon_id: 0,
        })
    }

    /// Reserves the next available system irq number.
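    ///
    /// # Example
    ///
    /// A sketch mirroring the `example()` test below; `allocator` is assumed to
    /// have been built with `first_irq: 5`:
    ///
    /// ```ignore
    /// assert_eq!(allocator.allocate_irq(), Some(5));
    /// assert_eq!(allocator.allocate_irq(), Some(6));
    /// ```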
    pub fn allocate_irq(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate(1, id, "irq-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    /// Releases an irq back to the system irq number pool.
    pub fn release_irq(&mut self, irq: u32) {
        let _ = self.irq_allocator.release_containing(irq.into());
    }

    /// Reserves the specified system irq number. Returns `true` on success.
    pub fn reserve_irq(&mut self, irq: u32) -> bool {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate_at(
                AddressRange {
                    start: irq.into(),
                    end: irq.into(),
                },
                id,
                "irq-fixed".to_string(),
            )
            .is_ok()
    }

    /// Reserves the next available system GPE number.
    pub fn allocate_gpe(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.gpe_allocator
            .allocate(1, id, "gpe-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    fn get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator> {
        // The pci root is 00:00.0; the next device on bus 0 is 00:01.0, with mandatory
        // function number zero.
        if self.pci_allocator.get(&bus).is_none() {
            let base = if bus == 0 { 8 } else { 0 };

            // Each bus supports up to 32 (devices) x 8 (functions).
            // Prefer allocating at device granularity (preferred_align = 8), but fall back to
            // allocating individual functions (min_align = 1) when we run out of devices.
            match AddressAllocator::new(
                AddressRange {
                    start: base,
                    end: (32 * 8) - 1,
                },
                Some(1),
                Some(8),
            ) {
                Ok(v) => self.pci_allocator.insert(bus, v),
                Err(_) => return None,
            };
        }
        self.pci_allocator.get_mut(&bus)
    }

    /// Returns `true` if no PCI slots have been allocated or reserved on the specified bus.
    pub fn pci_bus_empty(&self, bus: u8) -> bool {
        !self.pci_allocator.contains_key(&bus)
    }

    /// Allocate PCI slot location.
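    ///
    /// # Example
    ///
    /// A sketch of the devfn decoding (`allocator` is assumed to be a fresh
    /// `SystemAllocator`): allocation on bus 0 starts at devfn 8, and a devfn is
    /// split as `dev = devfn >> 3`, `func = devfn & 7`, so the first slot lands
    /// on device 1, function 0.
    ///
    /// ```ignore
    /// let slot = allocator.allocate_pci(0, "nic".to_string());
    /// assert_eq!(slot, Some(Alloc::PciBar { bus: 0, dev: 1, func: 0, bar: 0 }));
    /// ```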
    pub fn allocate_pci(&mut self, bus: u8, tag: String) -> Option<Alloc> {
        let id = self.get_anon_alloc();
        let allocator = match self.get_pci_allocator_mut(bus) {
            Some(v) => v,
            None => return None,
        };
        allocator
            .allocate(1, id, tag)
            .map(|v| Alloc::PciBar {
                bus,
                dev: (v >> 3) as u8,
                func: (v & 7) as u8,
                bar: 0,
            })
            .ok()
    }

    /// Reserve PCI slot location.
    pub fn reserve_pci(&mut self, alloc: Alloc, tag: String) -> bool {
        let id = self.get_anon_alloc();
        match alloc {
            Alloc::PciBar {
                bus,
                dev,
                func,
                bar: _,
            } => {
                let allocator = match self.get_pci_allocator_mut(bus) {
                    Some(v) => v,
                    None => return false,
                };
                let df = ((dev as u64) << 3) | (func as u64);
                allocator
                    .allocate_at(AddressRange { start: df, end: df }, id, tag)
                    .is_ok()
            }
            _ => false,
        }
    }

    /// Release PCI slot location.
    pub fn release_pci(&mut self, bus: u8, dev: u8, func: u8) -> bool {
        let allocator = match self.get_pci_allocator_mut(bus) {
            Some(v) => v,
            None => return false,
        };
        let df = ((dev as u64) << 3) | (func as u64);
        allocator.release_containing(df).is_ok()
    }

    /// Allocate a memory-mapped I/O region with properties requested in `opts`.
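    ///
    /// # Example
    ///
    /// A sketch combining this method with `AllocOptions`; `allocator` and the
    /// tag are assumptions:
    ///
    /// ```ignore
    /// let addr = allocator.allocate_mmio(
    ///     0x1000,
    ///     Alloc::Anon(0),
    ///     "bar0".to_string(),
    ///     // Non-prefetchable and 32-bit addressable, so this request is
    ///     // steered to the low MMIO pool.
    ///     AllocOptions::new().max_address(u32::MAX.into()),
    /// )?;
    /// ```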
    pub fn allocate_mmio(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
    ) -> Result<u64> {
        // For now, there is no way to ensure allocations fit in less than 32 bits.
        // This can be removed once AddressAllocator accepts AllocOptions.
        if opts.max_address < u32::MAX as u64 {
            return Err(Error::OutOfSpace);
        }

        let mut mmio_type = MmioType::High;
        if opts.max_address < u64::MAX || !opts.prefetchable {
            mmio_type = MmioType::Low;
        }

        let res = self.allocate_mmio_internal(size, alloc, tag.clone(), opts, mmio_type);
        // If a high allocation failed, retry in low. The reverse is not valid, since the address
        // may be out of range and/or prefetchable memory may not be appropriate.
        if mmio_type == MmioType::High && matches!(res, Err(Error::OutOfSpace)) {
            self.allocate_mmio_internal(size, alloc, tag, opts, MmioType::Low)
        } else {
            res
        }
    }

    fn allocate_mmio_internal(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
        mmio_type: MmioType,
    ) -> Result<u64> {
        let allocator = &mut self.mmio_address_spaces[mmio_type as usize];
        match (opts.alignment, opts.top_down) {
            (Some(align), true) => allocator.reverse_allocate_with_align(size, alloc, tag, align),
            (Some(align), false) => allocator.allocate_with_align(size, alloc, tag, align),
            (None, true) => allocator.reverse_allocate(size, alloc, tag),
            (None, false) => allocator.allocate(size, alloc, tag),
        }
    }

    /// Reserves the specified range, excluding it from the PCI MMIO pools: the overlap
    /// of the range with each MMIO pool is computed and marked as allocated.
    ///
    /// Returns an error if any part of the specified range has already been allocated.
    pub fn reserve_mmio(&mut self, range: AddressRange) -> Result<()> {
        let mut pools = Vec::new();
        for pool in self.mmio_pools() {
            pools.push(*pool);
        }
        pools.sort_by(|a, b| a.start.cmp(&b.start));
        for pool in &pools {
            if pool.start > range.end {
                break;
            }

            let overlap = pool.intersect(range);
            if !overlap.is_empty() {
                let id = self.get_anon_alloc();
                self.mmio_allocator_any().allocate_at(
                    overlap,
                    id,
                    "pci mmio reserve".to_string(),
                )?;
            }
        }

        Ok(())
    }

    /// Gets an allocator to be used for platform device MMIO allocation.
    pub fn mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.mmio_platform_address_spaces.as_mut()
    }

    /// Gets an allocator to be used for IO memory.
    pub fn io_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.io_address_space.as_mut()
    }

    /// Gets an allocator to be used for MMIO allocation.
    ///    MmioType::Low: low mmio allocator
    ///    MmioType::High: high mmio allocator
    pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
        &mut self.mmio_address_spaces[mmio_type as usize]
    }

    /// Gets a set of allocators to be used for MMIO allocation.
    /// The set of allocators will try the low and high MMIO allocators, in that order.
    pub fn mmio_allocator_any(&mut self) -> AddressAllocatorSet {
        AddressAllocatorSet::new(&mut self.mmio_address_spaces)
    }

    /// Gets the pools of all mmio allocators.
    pub fn mmio_pools(&self) -> Vec<&AddressRange> {
        self.mmio_address_spaces
            .iter()
            .flat_map(|mmio_as| mmio_as.pools())
            .collect()
    }

    /// Gets the reserved address space region.
    pub fn reserved_region(&self) -> Option<AddressRange> {
        self.reserved_region
    }

    /// Gets a unique anonymous allocation.
    pub fn get_anon_alloc(&mut self) -> Alloc {
        self.next_anon_id += 1;
        Alloc::Anon(self.next_anon_id)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: Some(AddressRange {
                    start: 0x1000,
                    end: 0xffff,
                }),
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1000_0000,
                    end: 0x1fff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        assert_eq!(a.allocate_irq(), Some(5));
        assert_eq!(a.allocate_irq(), Some(6));
        assert_eq!(a.allocate_gpe(), Some(0));
        assert_eq!(a.allocate_gpe(), Some(1));
        assert_eq!(
            a.mmio_allocator(MmioType::High).allocate(
                0x100,
                Alloc::PciBar {
                    bus: 0,
                    dev: 0,
                    func: 0,
                    bar: 0
                },
                "bar0".to_string()
            ),
            Ok(0x1000_0000)
        );
        assert_eq!(
            a.mmio_allocator(MmioType::High).get(&Alloc::PciBar {
                bus: 0,
                dev: 0,
                func: 0,
                bar: 0
            }),
            Some(&(
                AddressRange {
                    start: 0x1000_0000,
                    end: 0x1000_00ff
                },
                "bar0".to_string()
            ))
        );

        let id = a.get_anon_alloc();
        assert_eq!(
            a.mmio_allocator(MmioType::Low).allocate_at(
                AddressRange {
                    start: 0x3000_5000,
                    end: 0x3000_9fff
                },
                id,
                "Test".to_string()
            ),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low).release(id),
            Ok(AddressRange {
                start: 0x3000_5000,
                end: 0x3000_9fff
            })
        );
        assert_eq!(
            a.reserve_mmio(AddressRange {
                start: 0x3000_2000,
                end: 0x3000_5fff
            }),
            Ok(())
        );
        assert!(a
            .mmio_allocator(MmioType::Low)
            .allocate_at(
                AddressRange {
                    start: 0x3000_5000,
                    end: 0x3000_9fff
                },
                id,
                "Test".to_string()
            )
            .is_err());
    }
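
    // A second, smaller sketch exercising the PCI slot bookkeeping. The expected
    // device/function numbers follow from get_pci_allocator_mut above: bus 0
    // starts allocating at devfn 8 (the root occupies 00:00.0), and a devfn is
    // decoded as dev = devfn >> 3, func = devfn & 7.
    #[test]
    fn pci_slot_example() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: None,
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1000_0000,
                    end: 0x1fff_ffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        assert!(a.pci_bus_empty(0));
        // The first slot on bus 0 lands on device 1, function 0.
        match a.allocate_pci(0, "dev".to_string()) {
            Some(Alloc::PciBar { bus, dev, func, .. }) => {
                assert_eq!((bus, dev, func), (0, 1, 0))
            }
            _ => panic!("expected a PciBar allocation"),
        }
        assert!(!a.pci_bus_empty(0));
        // Reserving 02:03.4 and releasing it again round-trips.
        assert!(a.reserve_pci(
            Alloc::PciBar {
                bus: 2,
                dev: 3,
                func: 4,
                bar: 0
            },
            "fixed".to_string()
        ));
        assert!(a.release_pci(2, 3, 4));
    }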
}