1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 use std::collections::BTreeMap;
5 use std::ops::RangeInclusive;
6
7 use base::pagesize;
8
9 use crate::address_allocator::{AddressAllocator, AddressAllocatorSet};
10 use crate::{Alloc, Error, Result};
11
// This module manages allocating system resources such as address space and
// interrupt numbers.

/// MMIO address type.
///
/// * `Low`: address allocated from low_address_space
/// * `High`: address allocated from high_address_space
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MmioType {
    Low,
    High,
}
22
/// A contiguous region of address space, described by its start and length.
//
// Plain-old-data: `Copy`/`Clone` let callers duplicate a region without
// hand-copying fields, and `PartialEq`/`Eq` allow direct comparison in
// tests and assertions.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MemRegion {
    /// First address of the region.
    pub base: u64,
    /// Length of the region in bytes.
    pub size: u64,
}
29
30 pub struct SystemAllocatorConfig {
31 /// IO ports. Only for x86_64.
32 pub io: Option<MemRegion>,
33 /// Low (<=4GB) MMIO region.
34 ///
35 /// Parts of this region may be reserved or otherwise excluded from the
36 /// created SystemAllocator's MmioType::Low allocator. However, no new
37 /// regions will be added.
38 pub low_mmio: MemRegion,
39 /// High (>4GB) MMIO region.
40 ///
41 /// Parts of this region may be reserved or otherwise excluded from the
42 /// created SystemAllocator's MmioType::High allocator. However, no new
43 /// regions will be added.
44 pub high_mmio: MemRegion,
45 /// Platform MMIO space. Only for ARM.
46 pub platform_mmio: Option<MemRegion>,
47 /// The first IRQ number to give out.
48 pub first_irq: u32,
49 }
50
/// Manages allocating system resources such as address space and interrupt
/// numbers. Construct via `SystemAllocator::new`.
#[derive(Debug)]
pub struct SystemAllocator {
    // I/O port allocator; `None` when no I/O region was configured
    // (per `SystemAllocatorConfig::io`, only used on x86_64).
    io_address_space: Option<AddressAllocator>,

    // Indexed by MmioType::Low and MmioType::High.
    mmio_address_spaces: [AddressAllocator; 2],
    // Platform MMIO allocator; `None` when not configured
    // (per `SystemAllocatorConfig::platform_mmio`, only used on ARM).
    mmio_platform_address_spaces: Option<AddressAllocator>,

    // Region carved off the start of the high MMIO range at construction
    // time when `reserve_region_size` was given; excluded from allocation.
    reserved_region: Option<MemRegion>,

    // Each bus number has a AddressAllocator, created lazily on first use
    // (see `get_pci_allocator_mut`).
    pci_allocator: BTreeMap<u8, AddressAllocator>,
    // Hands out system IRQ numbers starting at `first_irq`.
    irq_allocator: AddressAllocator,
    // Monotonic counter backing `get_anon_alloc`; each allocation gets a
    // unique `Alloc::Anon` id.
    next_anon_id: usize,
}
66
to_range_inclusive(base: u64, size: u64) -> Result<RangeInclusive<u64>>67 fn to_range_inclusive(base: u64, size: u64) -> Result<RangeInclusive<u64>> {
68 let end = base
69 .checked_add(size.checked_sub(1).ok_or(Error::PoolSizeZero)?)
70 .ok_or(Error::PoolOverflow { base, size })?;
71 Ok(RangeInclusive::new(base, end))
72 }
73
/// Returns the intersection of two inclusive ranges.
///
/// When the inputs do not overlap, the result has `start > end`, i.e. an
/// empty `RangeInclusive`.
fn range_intersect(r1: &RangeInclusive<u64>, r2: &RangeInclusive<u64>) -> RangeInclusive<u64> {
    let start = (*r1.start()).max(*r2.start());
    let end = (*r1.end()).min(*r2.end());
    start..=end
}
80
81 impl SystemAllocator {
82 /// Creates a new `SystemAllocator` for managing addresses and irq numbers.
83 /// Will return an error if `base` + `size` overflows u64 (or allowed
84 /// maximum for the specific type), or if alignment isn't a power of two.
85 ///
86 /// If `reserve_region_size` is not None, then a region is reserved from
87 /// the start of `config.high_mmio` before the mmio allocator is created.
88 ///
89 /// If `mmio_address_ranges` is not empty, then `config.low_mmio` and
90 /// `config.high_mmio` are intersected with the ranges specified.
new( config: SystemAllocatorConfig, reserve_region_size: Option<u64>, mmio_address_ranges: &[RangeInclusive<u64>], ) -> Result<Self>91 pub fn new(
92 config: SystemAllocatorConfig,
93 reserve_region_size: Option<u64>,
94 mmio_address_ranges: &[RangeInclusive<u64>],
95 ) -> Result<Self> {
96 let page_size = pagesize() as u64;
97
98 let (high_mmio, reserved_region) = match reserve_region_size {
99 Some(len) => {
100 if len > config.high_mmio.size {
101 return Err(Error::PoolSizeZero);
102 }
103 (
104 MemRegion {
105 base: config.high_mmio.base + len,
106 size: config.high_mmio.size - len,
107 },
108 Some(MemRegion {
109 base: config.high_mmio.base,
110 size: len,
111 }),
112 )
113 }
114 None => (config.high_mmio, None),
115 };
116
117 let intersect_mmio_range = |src: MemRegion| -> Result<Vec<RangeInclusive<u64>>> {
118 let src_range = to_range_inclusive(src.base, src.size)?;
119 Ok(if mmio_address_ranges.is_empty() {
120 vec![src_range]
121 } else {
122 mmio_address_ranges
123 .iter()
124 .map(|r| range_intersect(r, &src_range))
125 .collect()
126 })
127 };
128
129 Ok(SystemAllocator {
130 io_address_space: if let Some(io) = config.io {
131 // TODO make sure we don't overlap with existing well known
132 // ports such as 0xcf8 (serial ports).
133 if io.base > 0x1_0000 || io.size + io.base > 0x1_0000 {
134 return Err(Error::IOPortOutOfRange(io.base, io.size));
135 }
136 Some(AddressAllocator::new(
137 to_range_inclusive(io.base, io.size)?,
138 Some(0x400),
139 None,
140 )?)
141 } else {
142 None
143 },
144 mmio_address_spaces: [
145 // MmioType::Low
146 AddressAllocator::new_from_list(
147 intersect_mmio_range(config.low_mmio)?,
148 Some(page_size),
149 None,
150 )?,
151 // MmioType::High
152 AddressAllocator::new_from_list(
153 intersect_mmio_range(high_mmio)?,
154 Some(page_size),
155 None,
156 )?,
157 ],
158
159 pci_allocator: BTreeMap::new(),
160
161 mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
162 Some(AddressAllocator::new(
163 to_range_inclusive(platform.base, platform.size)?,
164 Some(page_size),
165 None,
166 )?)
167 } else {
168 None
169 },
170
171 reserved_region,
172
173 irq_allocator: AddressAllocator::new(
174 RangeInclusive::new(config.first_irq as u64, 1023),
175 Some(1),
176 None,
177 )?,
178 next_anon_id: 0,
179 })
180 }
181
182 /// Reserves the next available system irq number.
allocate_irq(&mut self) -> Option<u32>183 pub fn allocate_irq(&mut self) -> Option<u32> {
184 let id = self.get_anon_alloc();
185 self.irq_allocator
186 .allocate(1, id, "irq-auto".to_string())
187 .map(|v| v as u32)
188 .ok()
189 }
190
191 /// release irq to system irq number pool
release_irq(&mut self, irq: u32)192 pub fn release_irq(&mut self, irq: u32) {
193 let _ = self.irq_allocator.release_containing(irq.into());
194 }
195
196 /// Reserves the next available system irq number.
reserve_irq(&mut self, irq: u32) -> bool197 pub fn reserve_irq(&mut self, irq: u32) -> bool {
198 let id = self.get_anon_alloc();
199 self.irq_allocator
200 .allocate_at(irq as u64, 1, id, "irq-fixed".to_string())
201 .is_ok()
202 }
203
get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator>204 fn get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator> {
205 // pci root is 00:00.0, Bus 0 next device is 00:01.0 with mandatory function
206 // number zero.
207 if self.pci_allocator.get(&bus).is_none() {
208 let base = if bus == 0 { 8 } else { 0 };
209
210 // Each bus supports up to 32 (devices) x 8 (functions).
211 // Prefer allocating at device granularity (preferred_align = 8), but fall back to
212 // allocating individual functions (min_align = 1) when we run out of devices.
213 match AddressAllocator::new(RangeInclusive::new(base, (32 * 8) - 1), Some(1), Some(8)) {
214 Ok(v) => self.pci_allocator.insert(bus, v),
215 Err(_) => return None,
216 };
217 }
218 self.pci_allocator.get_mut(&bus)
219 }
220
221 // Check whether devices exist or not on the specified bus
pci_bus_empty(&self, bus: u8) -> bool222 pub fn pci_bus_empty(&self, bus: u8) -> bool {
223 if self.pci_allocator.get(&bus).is_none() {
224 true
225 } else {
226 false
227 }
228 }
229
230 /// Allocate PCI slot location.
allocate_pci(&mut self, bus: u8, tag: String) -> Option<Alloc>231 pub fn allocate_pci(&mut self, bus: u8, tag: String) -> Option<Alloc> {
232 let id = self.get_anon_alloc();
233 let allocator = match self.get_pci_allocator_mut(bus) {
234 Some(v) => v,
235 None => return None,
236 };
237 allocator
238 .allocate(1, id, tag)
239 .map(|v| Alloc::PciBar {
240 bus,
241 dev: (v >> 3) as u8,
242 func: (v & 7) as u8,
243 bar: 0,
244 })
245 .ok()
246 }
247
248 /// Reserve PCI slot location.
reserve_pci(&mut self, alloc: Alloc, tag: String) -> bool249 pub fn reserve_pci(&mut self, alloc: Alloc, tag: String) -> bool {
250 let id = self.get_anon_alloc();
251 match alloc {
252 Alloc::PciBar {
253 bus,
254 dev,
255 func,
256 bar: _,
257 } => {
258 let allocator = match self.get_pci_allocator_mut(bus) {
259 Some(v) => v,
260 None => return false,
261 };
262 let df = ((dev as u64) << 3) | (func as u64);
263 allocator.allocate_at(df, 1, id, tag).is_ok()
264 }
265 _ => false,
266 }
267 }
268
269 /// release PCI slot location.
release_pci(&mut self, bus: u8, dev: u8, func: u8) -> bool270 pub fn release_pci(&mut self, bus: u8, dev: u8, func: u8) -> bool {
271 let allocator = match self.get_pci_allocator_mut(bus) {
272 Some(v) => v,
273 None => return false,
274 };
275 let df = ((dev as u64) << 3) | (func as u64);
276 allocator.release_containing(df).is_ok()
277 }
278
279 /// Gets an allocator to be used for platform device MMIO allocation.
mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator>280 pub fn mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator> {
281 self.mmio_platform_address_spaces.as_mut()
282 }
283
284 /// Gets an allocator to be used for IO memory.
io_allocator(&mut self) -> Option<&mut AddressAllocator>285 pub fn io_allocator(&mut self) -> Option<&mut AddressAllocator> {
286 self.io_address_space.as_mut()
287 }
288
289 /// Gets an allocator to be used for MMIO allocation.
290 /// MmioType::Low: low mmio allocator
291 /// MmioType::High: high mmio allocator
mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator292 pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
293 &mut self.mmio_address_spaces[mmio_type as usize]
294 }
295
296 /// Gets a set of allocators to be used for MMIO allocation.
297 /// The set of allocators will try the low and high MMIO allocators, in that order.
mmio_allocator_any(&mut self) -> AddressAllocatorSet298 pub fn mmio_allocator_any(&mut self) -> AddressAllocatorSet {
299 AddressAllocatorSet::new(&mut self.mmio_address_spaces)
300 }
301
302 /// Gets the pools of all mmio allocators.
mmio_pools(&self) -> Vec<&RangeInclusive<u64>>303 pub fn mmio_pools(&self) -> Vec<&RangeInclusive<u64>> {
304 self.mmio_address_spaces
305 .iter()
306 .flat_map(|mmio_as| mmio_as.pools())
307 .collect()
308 }
309
310 /// Gets the reserved address space region.
reserved_region(&self) -> Option<&MemRegion>311 pub fn reserved_region(&self) -> Option<&MemRegion> {
312 self.reserved_region.as_ref()
313 }
314
315 /// Gets a unique anonymous allocation
get_anon_alloc(&mut self) -> Alloc316 pub fn get_anon_alloc(&mut self) -> Alloc {
317 self.next_anon_id += 1;
318 Alloc::Anon(self.next_anon_id)
319 }
320 }
321
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example() {
        // Build an allocator with an I/O region, a 64 KiB low MMIO region,
        // and a 256 MiB high MMIO region; IRQs start at 5.
        let mut allocator = SystemAllocator::new(
            SystemAllocatorConfig {
                io: Some(MemRegion {
                    base: 0x1000,
                    size: 0xf000,
                }),
                low_mmio: MemRegion {
                    base: 0x3000_0000,
                    size: 0x1_0000,
                },
                high_mmio: MemRegion {
                    base: 0x1000_0000,
                    size: 0x1000_0000,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        // IRQ numbers are handed out sequentially starting at `first_irq`.
        assert_eq!(allocator.allocate_irq(), Some(5));
        assert_eq!(allocator.allocate_irq(), Some(6));

        // The first high-MMIO allocation lands at the region's base address.
        assert_eq!(
            allocator.mmio_allocator(MmioType::High).allocate(
                0x100,
                Alloc::PciBar {
                    bus: 0,
                    dev: 0,
                    func: 0,
                    bar: 0
                },
                "bar0".to_string()
            ),
            Ok(0x10000000)
        );
        // The allocation can be looked up again by its `Alloc` tag.
        assert_eq!(
            allocator.mmio_allocator(MmioType::High).get(&Alloc::PciBar {
                bus: 0,
                dev: 0,
                func: 0,
                bar: 0
            }),
            Some(&(0x10000000, 0x100, "bar0".to_string()))
        );
    }
}
376