1 // Copyright 2023, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 //! Validates the device assignment written in the crosvm DT against the VM DTBO, and applies it
16 //! to the platform DT.
17 //! Declared in a separate lib so that unit tests, which require libstd, can be added.
18 
19 #[cfg(test)]
20 extern crate alloc;
21 
22 use alloc::collections::{BTreeMap, BTreeSet};
23 use alloc::ffi::CString;
24 use alloc::fmt;
25 use alloc::vec;
26 use alloc::vec::Vec;
27 use core::ffi::CStr;
28 use core::iter::Iterator;
29 use core::mem;
30 use core::ops::Range;
31 use libfdt::{Fdt, FdtError, FdtNode, FdtNodeMut, Phandle, Reg};
32 use log::error;
33 // TODO(b/308694211): Use vmbase::hyp::{DeviceAssigningHypervisor, Error} proper for tests.
34 #[cfg(not(test))]
35 use vmbase::hyp::DeviceAssigningHypervisor;
36 use zerocopy::byteorder::big_endian::U32;
37 use zerocopy::FromBytes as _;
38 
39 // TODO(b/308694211): Use cstr! from vmbase instead.
40 macro_rules! cstr {
41     ($str:literal) => {{
42         const S: &str = concat!($str, "\0");
43         const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
44             Ok(v) => v,
45             Err(_) => panic!("string contains interior NUL"),
46         };
47         C
48     }};
49 }
50 
51 // TODO(b/277993056): Keep constants derived from platform.dts in one place.
52 const CELLS_PER_INTERRUPT: usize = 3; // from /intc node in platform.dts
53 
54 /// Errors in device assignment.
55 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
56 pub enum DeviceAssignmentError {
57     /// Invalid VM DTBO
58     InvalidDtbo,
59     /// Invalid __symbols__
60     InvalidSymbols,
61     /// Malformed <reg>. Can't parse.
62     MalformedReg,
63     /// Invalid physical <reg> of assigned device.
64     InvalidPhysReg(u64, u64),
65     /// Invalid virtual <reg> of assigned device.
66     InvalidReg(u64, u64),
67     /// Invalid <interrupts>
68     InvalidInterrupts,
69     /// Malformed <iommus>
70     MalformedIommus,
71     /// Invalid <iommus>
72     InvalidIommus,
73     /// Invalid phys IOMMU node
74     InvalidPhysIommu,
75     /// Invalid pvIOMMU node
76     InvalidPvIommu,
77     /// Too many pvIOMMU nodes
78     TooManyPvIommu,
79     /// Duplicated phys IOMMU IDs exist
80     DuplicatedIommuIds,
81     /// Duplicated pvIOMMU IDs exist
82     DuplicatedPvIommuIds,
83     /// Unsupported path format. Only supports full path.
84     UnsupportedPathFormat,
85     /// Unsupported overlay target syntax. Only supports <target-path> with full path.
86     UnsupportedOverlayTarget,
87     /// Unsupported PhysIommu
88     UnsupportedPhysIommu,
89     /// Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.
90     UnsupportedPvIommusDuplication,
91     /// Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.
92     UnsupportedIommusDuplication,
93     /// Internal error
94     Internal,
95     /// Unexpected error from libfdt
96     UnexpectedFdtError(FdtError),
97 }
98 
99 impl From<FdtError> for DeviceAssignmentError {
100     fn from(e: FdtError) -> Self {
101         DeviceAssignmentError::UnexpectedFdtError(e)
102     }
103 }
104 
105 impl fmt::Display for DeviceAssignmentError {
106     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
107         match self {
108             Self::InvalidDtbo => write!(f, "Invalid DTBO"),
109             Self::InvalidSymbols => write!(
110                 f,
111                 "Invalid property in /__symbols__. Must point to valid assignable device node."
112             ),
113             Self::MalformedReg => write!(f, "Malformed <reg>. Can't parse"),
114             Self::InvalidReg(addr, size) => {
115                 write!(f, "Invalid guest MMIO region (addr: {addr:#x}, size: {size:#x})")
116             }
117             Self::InvalidPhysReg(addr, size) => {
118                 write!(f, "Invalid physical MMIO region (addr: {addr:#x}, size: {size:#x})")
119             }
120             Self::InvalidInterrupts => write!(f, "Invalid <interrupts>"),
121             Self::MalformedIommus => write!(f, "Malformed <iommus>. Can't parse."),
122             Self::InvalidIommus => {
123                 write!(f, "Invalid <iommus>. Failed to validate with hypervisor")
124             }
125             Self::InvalidPhysIommu => write!(f, "Invalid phys IOMMU node"),
126             Self::InvalidPvIommu => write!(f, "Invalid pvIOMMU node"),
127             Self::TooManyPvIommu => write!(
128                 f,
129                 "Too many pvIOMMU nodes. Insufficient pre-populated pvIOMMUs in platform DT"
130             ),
131             Self::DuplicatedIommuIds => {
132                 write!(f, "Duplicated IOMMU IDs exist. IDs must be unique among IOMMU nodes")
133             }
134             Self::DuplicatedPvIommuIds => {
135                 write!(f, "Duplicated pvIOMMU IDs exist. IDs must be unique among pvIOMMU nodes")
136             }
137             Self::UnsupportedPathFormat => {
138                 write!(f, "Unsupported path format. Only supports full path")
139             }
140             Self::UnsupportedOverlayTarget => {
141                 write!(f, "Unsupported overlay target. Only supports 'target-path = \"/\"'")
142             }
143             Self::UnsupportedPhysIommu => {
144                 write!(f, "Unsupported Phys IOMMU. Currently only supports #iommu-cells = <1>")
145             }
146             Self::UnsupportedPvIommusDuplication => {
147                 write!(f, "Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.")
148             }
149             Self::UnsupportedIommusDuplication => {
150                 write!(f, "Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.")
151             }
152             Self::Internal => write!(f, "Internal error"),
153             Self::UnexpectedFdtError(e) => write!(f, "Unexpected Error from libfdt: {e}"),
154         }
155     }
156 }
157 
158 pub type Result<T> = core::result::Result<T, DeviceAssignmentError>;
159 
160 #[derive(Clone, Default, Ord, PartialOrd, Eq, PartialEq)]
161 pub struct DtPathTokens<'a> {
162     tokens: Vec<&'a [u8]>,
163 }
164 
165 impl<'a> fmt::Debug for DtPathTokens<'a> {
166     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
167         let mut list = f.debug_list();
168         for token in &self.tokens {
169             let mut bytes = token.to_vec();
170             bytes.push(b'\0');
171             match CString::from_vec_with_nul(bytes) {
172                 Ok(string) => list.entry(&string),
173                 Err(_) => list.entry(token),
174             };
175         }
176         list.finish()
177     }
178 }
179 
180 impl<'a> DtPathTokens<'a> {
181     fn new(path: &'a CStr) -> Result<Self> {
182         if path.to_bytes().first() != Some(&b'/') {
183             return Err(DeviceAssignmentError::UnsupportedPathFormat);
184         }
185         let tokens: Vec<_> = path
186             .to_bytes()
187             .split(|char| *char == b'/')
188             .filter(|&component| !component.is_empty())
189             .collect();
190         Ok(Self { tokens })
191     }
192 
193     fn to_overlay_target_path(&self) -> Result<Self> {
194         if !self.is_overlayable_node() {
195             return Err(DeviceAssignmentError::InvalidDtbo);
196         }
197         Ok(Self { tokens: self.tokens.as_slice()[2..].to_vec() })
198     }
199 
200     fn to_cstring(&self) -> CString {
201         if self.tokens.is_empty() {
202             return CString::new(*b"/\0").unwrap();
203         }
204 
205         let size = self.tokens.iter().fold(0, |sum, token| sum + token.len() + 1);
206         let mut path = Vec::with_capacity(size + 1);
207         for token in &self.tokens {
208             path.push(b'/');
209             path.extend_from_slice(token);
210         }
211         path.push(b'\0');
212 
213         CString::from_vec_with_nul(path).unwrap()
214     }
215 
216     fn is_overlayable_node(&self) -> bool {
217         self.tokens.get(1) == Some(&&b"__overlay__"[..])
218     }
219 }
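
// Illustrative sketch (module name arbitrary; only built under cfg(test)): shows
// how DtPathTokens tokenizes a DTBO node path and maps it to the path it would
// have once overlaid onto the platform DT.
#[cfg(test)]
mod dt_path_tokens_example {
    use super::*;

    #[test]
    fn overlay_target_path_example() {
        let path = DtPathTokens::new(cstr!("/fragment@rng/__overlay__/rng")).unwrap();
        // The second token is "__overlay__", so the node can be overlaid.
        assert!(path.is_overlayable_node());
        // Dropping the "fragment@rng/__overlay__" prefix leaves the target path.
        let target = path.to_overlay_target_path().unwrap().to_cstring();
        assert_eq!(target, CString::new("/rng").unwrap());
    }
}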
220 
221 #[derive(Debug, Eq, PartialEq)]
222 enum DeviceTreeChildrenMask {
223     Partial(Vec<DeviceTreeMask>),
224     All,
225 }
226 
227 #[derive(Eq, PartialEq)]
228 struct DeviceTreeMask {
229     name_bytes: Vec<u8>,
230     children: DeviceTreeChildrenMask,
231 }
232 
233 impl fmt::Debug for DeviceTreeMask {
234     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
235         let name_bytes = [self.name_bytes.as_slice(), b"\0"].concat();
236 
237         f.debug_struct("DeviceTreeMask")
238             .field("name", &CStr::from_bytes_with_nul(&name_bytes).unwrap())
239             .field("children", &self.children)
240             .finish()
241     }
242 }
243 
244 impl DeviceTreeMask {
245     fn new() -> Self {
246         Self { name_bytes: b"/".to_vec(), children: DeviceTreeChildrenMask::Partial(Vec::new()) }
247     }
248 
249     fn mask_internal(&mut self, path: &DtPathTokens, leaf_mask: DeviceTreeChildrenMask) -> bool {
250         let mut iter = self;
251         let mut newly_masked = false;
252         'next_token: for path_token in &path.tokens {
253             let DeviceTreeChildrenMask::Partial(ref mut children) = &mut iter.children else {
254                 return false;
255             };
256 
257             // Note: Can't use an iterator for 'get or insert' here; the current borrow checker
257             // rejects it (it would be accepted under polonius).
258             #[allow(clippy::needless_range_loop)]
259             for i in 0..children.len() {
260                 if children[i].name_bytes.as_slice() == *path_token {
261                     iter = &mut children[i];
262                     newly_masked = false;
263                     continue 'next_token;
264                 }
265             }
266             let child = Self {
267                 name_bytes: path_token.to_vec(),
268                 children: DeviceTreeChildrenMask::Partial(Vec::new()),
269             };
270             children.push(child);
271             newly_masked = true;
272             iter = children.last_mut().unwrap()
273         }
274         iter.children = leaf_mask;
275         newly_masked
276     }
277 
278     fn mask(&mut self, path: &DtPathTokens) -> bool {
279         self.mask_internal(path, DeviceTreeChildrenMask::Partial(Vec::new()))
280     }
281 
282     fn mask_all(&mut self, path: &DtPathTokens) {
283         self.mask_internal(path, DeviceTreeChildrenMask::All);
284     }
285 }
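
// Illustrative sketch (module name arbitrary; only built under cfg(test)): mask()
// reports whether a path was newly added to the mask, which is what build_mask()
// relies on to avoid revisiting nodes while following phandle dependencies.
#[cfg(test)]
mod device_tree_mask_example {
    use super::*;

    #[test]
    fn mask_reports_newly_masked_paths() {
        let mut mask = DeviceTreeMask::new();
        let path = DtPathTokens::new(cstr!("/fragment@rng/__overlay__/rng")).unwrap();
        assert!(mask.mask(&path)); // First visit: the path is newly masked.
        assert!(!mask.mask(&path)); // Second visit: already masked.
    }
}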
286 
287 /// Represents VM DTBO
288 #[repr(transparent)]
289 pub struct VmDtbo(Fdt);
290 
291 impl VmDtbo {
292     /// Wraps a mutable slice containing a VM DTBO.
293     ///
294     /// Fails if the VM DTBO does not pass validation.
295     pub fn from_mut_slice(dtbo: &mut [u8]) -> Result<&mut Self> {
296         // This validates DTBO
297         let fdt = Fdt::from_mut_slice(dtbo)?;
298         // SAFETY: VmDtbo is a transparent wrapper around Fdt, so representation is the same.
299         Ok(unsafe { mem::transmute::<&mut Fdt, &mut Self>(fdt) })
300     }
301 
302     // Locates the device node path as if the given DTBO node path were assigned and the VM DTBO overlaid.
303     // For a given DTBO node path, this concatenates the <target-path> of the enclosing fragment and the
304     // relative path from the __overlay__ node.
305     //
306     // Here's an example with sample VM DTBO:
307     //    / {
308     //       fragment@rng {
309     //         target-path = "/";  // Always 'target-path = "/"'. Disallows <target> or other path.
310     //         __overlay__ {
311     //           rng { ... };      // Actual device node is here. If overlaid, path would be "/rng"
312     //         };
313     //       };
314     //       __symbols__ {         // Contains list of assignable devices
315     //         rng = "/fragment@rng/__overlay__/rng";
316     //       };
317     //    };
318     //
319     // Then locate_overlay_target_path(cstr!("/fragment@rng/__overlay__/rng")) is Ok("/rng")
320     //
321     // Contrary to fdt_overlay_target_offset(), this API enforces overlay target property
322     // 'target-path = "/"', so the overlay doesn't modify and/or append platform DT's existing
323     // nodes and/or properties. The enforcement is for compatibility reasons.
324     fn locate_overlay_target_path(
325         &self,
326         dtbo_node_path: &DtPathTokens,
327         dtbo_node: &FdtNode,
328     ) -> Result<CString> {
329         let fragment_node = dtbo_node.supernode_at_depth(1)?;
330         let target_path = fragment_node
331             .getprop_str(cstr!("target-path"))?
332             .ok_or(DeviceAssignmentError::InvalidDtbo)?;
333         if target_path != cstr!("/") {
334             return Err(DeviceAssignmentError::UnsupportedOverlayTarget);
335         }
336 
337         let overlaid_path = dtbo_node_path.to_overlay_target_path()?;
338         Ok(overlaid_path.to_cstring())
339     }
340 
341     fn parse_physical_iommus(physical_node: &FdtNode) -> Result<BTreeMap<Phandle, PhysIommu>> {
342         let mut phys_iommus = BTreeMap::new();
343         for (node, _) in physical_node.descendants() {
344             let Some(phandle) = node.get_phandle()? else {
345                 continue; // Skips unreachable IOMMU node
346             };
347             let Some(iommu) = PhysIommu::parse(&node)? else {
348                 continue; // Skip if not a PhysIommu.
349             };
350             if phys_iommus.insert(phandle, iommu).is_some() {
351                 return Err(FdtError::BadPhandle.into());
352             }
353         }
354         Self::validate_physical_iommus(&phys_iommus)?;
355         Ok(phys_iommus)
356     }
357 
358     fn validate_physical_iommus(phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<()> {
359         let unique_iommus: BTreeSet<_> = phys_iommus.values().cloned().collect();
360         if phys_iommus.len() != unique_iommus.len() {
361             return Err(DeviceAssignmentError::DuplicatedIommuIds);
362         }
363         Ok(())
364     }
365 
366     fn validate_physical_devices(
367         physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
368     ) -> Result<()> {
369         // Only need to validate <iommus> because <reg> is validated together with the guest <reg>;
370         // see AssignedDeviceInfo::validate_reg().
371         let mut all_iommus = BTreeSet::new();
372         for physical_device in physical_devices.values() {
373             for iommu in &physical_device.iommus {
374                 if !all_iommus.insert(iommu) {
375                     error!("Unsupported phys IOMMU duplication found, <iommus> = {iommu:?}");
376                     return Err(DeviceAssignmentError::UnsupportedIommusDuplication);
377                 }
378             }
379         }
380         Ok(())
381     }
382 
383     fn parse_physical_devices_with_iommus(
384         physical_node: &FdtNode,
385         phys_iommus: &BTreeMap<Phandle, PhysIommu>,
386     ) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
387         let mut physical_devices = BTreeMap::new();
388         for (node, _) in physical_node.descendants() {
389             let Some(info) = PhysicalDeviceInfo::parse(&node, phys_iommus)? else {
390                 continue;
391             };
392             if physical_devices.insert(info.target, info).is_some() {
393                 return Err(DeviceAssignmentError::InvalidDtbo);
394             }
395         }
396         Self::validate_physical_devices(&physical_devices)?;
397         Ok(physical_devices)
398     }
399 
400     /// Parses Physical devices in VM DTBO
401     fn parse_physical_devices(&self) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
402         let Some(physical_node) = self.as_ref().node(cstr!("/host"))? else {
403             return Ok(BTreeMap::new());
404         };
405 
406         let phys_iommus = Self::parse_physical_iommus(&physical_node)?;
407         Self::parse_physical_devices_with_iommus(&physical_node, &phys_iommus)
408     }
409 
410     fn node(&self, path: &DtPathTokens) -> Result<Option<FdtNode>> {
411         let mut node = self.as_ref().root();
412         for token in &path.tokens {
413             let Some(subnode) = node.subnode_with_name_bytes(token)? else {
414                 return Ok(None);
415             };
416             node = subnode;
417         }
418         Ok(Some(node))
419     }
420 
421     fn collect_overlayable_nodes_with_phandle(&self) -> Result<BTreeMap<Phandle, DtPathTokens>> {
422         let mut paths = BTreeMap::new();
423         let mut path: DtPathTokens = Default::default();
424         let root = self.as_ref().root();
425         for (node, depth) in root.descendants() {
426             path.tokens.truncate(depth - 1);
427             path.tokens.push(node.name()?.to_bytes());
428             if !path.is_overlayable_node() {
429                 continue;
430             }
431             if let Some(phandle) = node.get_phandle()? {
432                 paths.insert(phandle, path.clone());
433             }
434         }
435         Ok(paths)
436     }
437 
438     fn collect_phandle_references_from_overlayable_nodes(
439         &self,
440     ) -> Result<BTreeMap<DtPathTokens, Vec<Phandle>>> {
441         const CELL_SIZE: usize = core::mem::size_of::<u32>();
442 
443         let vm_dtbo = self.as_ref();
444 
445         let mut phandle_map = BTreeMap::new();
446         let Some(local_fixups) = vm_dtbo.node(cstr!("/__local_fixups__"))? else {
447             return Ok(phandle_map);
448         };
449 
450         let mut path: DtPathTokens = Default::default();
451         for (fixup_node, depth) in local_fixups.descendants() {
452             let node_name = fixup_node.name()?;
453             path.tokens.truncate(depth - 1);
454             path.tokens.push(node_name.to_bytes());
455             if path.tokens.len() != depth {
456                 return Err(DeviceAssignmentError::Internal);
457             }
458             if !path.is_overlayable_node() {
459                 continue;
460             }
461             let target_node = self.node(&path)?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
462 
463             let mut phandles = vec![];
464             for fixup_prop in fixup_node.properties()? {
465                 let target_prop = target_node
466                     .getprop(fixup_prop.name()?)
467                     .or(Err(DeviceAssignmentError::InvalidDtbo))?
468                     .ok_or(DeviceAssignmentError::InvalidDtbo)?;
469                 let fixup_prop_values = fixup_prop.value()?;
470                 if fixup_prop_values.is_empty() || fixup_prop_values.len() % CELL_SIZE != 0 {
471                     return Err(DeviceAssignmentError::InvalidDtbo);
472                 }
473 
474                 for fixup_prop_cell in fixup_prop_values.chunks(CELL_SIZE) {
475                     let phandle_offset: usize = u32::from_be_bytes(
476                         fixup_prop_cell.try_into().or(Err(DeviceAssignmentError::InvalidDtbo))?,
477                     )
478                     .try_into()
479                     .or(Err(DeviceAssignmentError::InvalidDtbo))?;
480                     if phandle_offset % CELL_SIZE != 0 {
481                         return Err(DeviceAssignmentError::InvalidDtbo);
482                     }
483                     let phandle_value = target_prop
484                         .get(phandle_offset..phandle_offset + CELL_SIZE)
485                         .ok_or(DeviceAssignmentError::InvalidDtbo)?;
486                     let phandle: Phandle = U32::ref_from(phandle_value)
487                         .unwrap()
488                         .get()
489                         .try_into()
490                         .or(Err(DeviceAssignmentError::InvalidDtbo))?;
491 
492                     phandles.push(phandle);
493                 }
494             }
495             if !phandles.is_empty() {
496                 phandle_map.insert(path.clone(), phandles);
497             }
498         }
499 
500         Ok(phandle_map)
501     }
502 
503     fn build_mask(&self, assigned_devices: Vec<DtPathTokens>) -> Result<DeviceTreeMask> {
504         if assigned_devices.is_empty() {
505             return Err(DeviceAssignmentError::Internal);
506         }
507 
508         let dependencies = self.collect_phandle_references_from_overlayable_nodes()?;
509         let paths = self.collect_overlayable_nodes_with_phandle()?;
510 
511         let mut mask = DeviceTreeMask::new();
512         let mut stack = assigned_devices;
513         while let Some(path) = stack.pop() {
514             if !mask.mask(&path) {
515                 continue;
516             }
517             let Some(dst_phandles) = dependencies.get(&path) else {
518                 continue;
519             };
520             for dst_phandle in dst_phandles {
521                 let dst_path = paths.get(dst_phandle).ok_or(DeviceAssignmentError::Internal)?;
522                 stack.push(dst_path.clone());
523             }
524         }
525 
526         Ok(mask)
527     }
528 }
529 
530 fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
531     if let Some(symbols) = fdt.symbols()? {
532         let mut removed = vec![];
533         for prop in symbols.properties()? {
534             let path = CStr::from_bytes_with_nul(prop.value()?)
535                 .map_err(|_| DeviceAssignmentError::Internal)?;
536             if fdt.node(path)?.is_none() {
537                 let name = prop.name()?;
538                 removed.push(CString::from(name));
539             }
540         }
541 
542         let mut symbols = fdt.symbols_mut()?.unwrap();
543         for name in removed {
544             symbols.nop_property(&name)?;
545         }
546     }
547     Ok(())
548 }
549 
550 impl AsRef<Fdt> for VmDtbo {
551     fn as_ref(&self) -> &Fdt {
552         &self.0
553     }
554 }
555 
556 impl AsMut<Fdt> for VmDtbo {
557     fn as_mut(&mut self) -> &mut Fdt {
558         &mut self.0
559     }
560 }
561 
562 // Filter any node that isn't masked by DeviceTreeMask.
563 fn filter_with_mask(anchor: FdtNodeMut, mask: &DeviceTreeMask) -> Result<()> {
564     let mut stack = vec![mask];
565     let mut iter = anchor.next_node(0)?;
566     while let Some((node, depth)) = iter {
567         stack.truncate(depth);
568         let parent_mask = stack.last().unwrap();
569         let DeviceTreeChildrenMask::Partial(parent_mask_children) = &parent_mask.children else {
570             // Shouldn't happen. We only step-in if parent has DeviceTreeChildrenMask::Partial.
571             return Err(DeviceAssignmentError::Internal);
572         };
573 
574         let name = node.as_node().name()?.to_bytes();
575         let mask = parent_mask_children.iter().find(|child_mask| child_mask.name_bytes == name);
576         if let Some(masked) = mask {
577             if let DeviceTreeChildrenMask::Partial(_) = &masked.children {
578                 // This node is partially masked. Stepping-in.
579                 stack.push(masked);
580                 iter = node.next_node(depth)?;
581             } else {
582                 // This node is fully masked. Stepping-out.
583                 iter = node.next_node_skip_subnodes(depth)?;
584             }
585         } else {
586             // This node isn't masked.
587             iter = node.delete_and_next_node(depth)?;
588         }
589     }
590 
591     Ok(())
592 }
593 
594 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
595 struct PvIommu {
596     // ID from pvIOMMU node
597     id: u32,
598 }
599 
600 impl PvIommu {
601     fn parse(node: &FdtNode) -> Result<Self> {
602         let iommu_cells = node
603             .getprop_u32(cstr!("#iommu-cells"))?
604             .ok_or(DeviceAssignmentError::InvalidPvIommu)?;
605         // Ensures #iommu-cells = <1>, which means that each `<iommus>` entry contains a pair of
606         // (pvIOMMU ID, vSID).
607         if iommu_cells != 1 {
608             return Err(DeviceAssignmentError::InvalidPvIommu);
609         }
610         let id = node.getprop_u32(cstr!("id"))?.ok_or(DeviceAssignmentError::InvalidPvIommu)?;
611         Ok(Self { id })
612     }
613 }
614 
615 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
616 struct Vsid(u32);
617 
618 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
619 struct Sid(u64);
620 
621 impl From<u32> for Sid {
622     fn from(sid: u32) -> Self {
623         Self(sid.into())
624     }
625 }
626 
627 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
628 struct DeviceReg {
629     addr: u64,
630     size: u64,
631 }
632 
633 impl DeviceReg {
634     pub fn overlaps(&self, range: &Range<u64>) -> bool {
635         self.addr < range.end && range.start < self.addr.checked_add(self.size).unwrap()
636     }
637 }
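
// Illustrative sketch (module name arbitrary; only built under cfg(test)):
// overlaps() treats a DeviceReg as the half-open range [addr, addr + size),
// which is how validate_reg() below rejects regions intersecting the pvmfw range.
#[cfg(test)]
mod device_reg_overlaps_example {
    use super::*;

    #[test]
    fn overlaps_is_half_open() {
        // Same range that validate_reg() reserves for pvmfw.
        let pvmfw = 0x7fc0_0000u64..0x8000_0000;
        // Ends exactly at the start of the range: no overlap.
        assert!(!DeviceReg { addr: 0x7fbf_0000, size: 0x1_0000 }.overlaps(&pvmfw));
        // Crosses one page into the range: overlap.
        assert!(DeviceReg { addr: 0x7fbf_0000, size: 0x1_1000 }.overlaps(&pvmfw));
    }
}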
638 
639 impl TryFrom<Reg<u64>> for DeviceReg {
640     type Error = DeviceAssignmentError;
641 
642     fn try_from(reg: Reg<u64>) -> Result<Self> {
643         Ok(Self { addr: reg.addr, size: reg.size.ok_or(DeviceAssignmentError::MalformedReg)? })
644     }
645 }
646 
647 fn parse_node_reg(node: &FdtNode) -> Result<Vec<DeviceReg>> {
648     node.reg()?
649         .ok_or(DeviceAssignmentError::MalformedReg)?
650         .map(DeviceReg::try_from)
651         .collect::<Result<Vec<_>>>()
652 }
653 
654 fn to_be_bytes(reg: &[DeviceReg]) -> Vec<u8> {
655     let mut reg_cells = vec![];
656     for x in reg {
657         reg_cells.extend_from_slice(&x.addr.to_be_bytes());
658         reg_cells.extend_from_slice(&x.size.to_be_bytes());
659     }
660     reg_cells
661 }
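
// Illustrative sketch (module name arbitrary; only built under cfg(test)): each
// DeviceReg is flattened into two big-endian u64 values, i.e. four u32 cells,
// which assumes the patched node uses #address-cells = <2> and #size-cells = <2>.
#[cfg(test)]
mod reg_cells_example {
    use super::*;

    #[test]
    fn device_reg_to_be_bytes() {
        let reg = [DeviceReg { addr: 0x9, size: 0xFF }];
        let expected: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0x9, 0, 0, 0, 0, 0, 0, 0, 0xFF];
        assert_eq!(to_be_bytes(&reg), expected);
    }
}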
662 
663 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
664 struct PhysIommu {
665     token: u64,
666 }
667 
668 impl PhysIommu {
669     fn parse(node: &FdtNode) -> Result<Option<Self>> {
670         let Some(token) = node.getprop_u64(cstr!("android,pvmfw,token"))? else {
671             return Ok(None);
672         };
673         let Some(iommu_cells) = node.getprop_u32(cstr!("#iommu-cells"))? else {
674             return Err(DeviceAssignmentError::InvalidPhysIommu);
675         };
676         // Currently only supports #iommu-cells = <1>.
677         // In that case each `<iommus>` entry contains a pair of (pIOMMU phandle, Sid token).
678         if iommu_cells != 1 {
679             return Err(DeviceAssignmentError::UnsupportedPhysIommu);
680         }
681         Ok(Some(Self { token }))
682     }
683 }
684 
685 #[derive(Debug)]
686 struct PhysicalDeviceInfo {
687     target: Phandle,
688     reg: Vec<DeviceReg>,
689     iommus: Vec<(PhysIommu, Sid)>,
690 }
691 
692 impl PhysicalDeviceInfo {
693     fn parse_iommus(
694         node: &FdtNode,
695         phys_iommus: &BTreeMap<Phandle, PhysIommu>,
696     ) -> Result<Vec<(PhysIommu, Sid)>> {
697         let mut iommus = vec![];
698         let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
699             return Ok(iommus);
700         };
701         while let Some(cell) = cells.next() {
702             // Parse pIOMMU ID
703             let phandle =
704                 Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
705             let iommu = phys_iommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;
706 
707             // Parse Sid
708             let Some(cell) = cells.next() else {
709                 return Err(DeviceAssignmentError::MalformedIommus);
710             };
711 
712             iommus.push((*iommu, Sid::from(cell)));
713         }
714         Ok(iommus)
715     }
716 
717     fn parse(node: &FdtNode, phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<Option<Self>> {
718         let Some(phandle) = node.getprop_u32(cstr!("android,pvmfw,target"))? else {
719             return Ok(None);
720         };
721         let target = Phandle::try_from(phandle)?;
722         let reg = parse_node_reg(node)?;
723         let iommus = Self::parse_iommus(node, phys_iommus)?;
724         Ok(Some(Self { target, reg, iommus }))
725     }
726 }
727 
728 /// Assigned device information parsed from crosvm DT.
729 /// Keeps everything as owned data because the underlying FDT will be reused for the platform DT.
730 #[derive(Debug, Eq, PartialEq)]
731 struct AssignedDeviceInfo {
732     // Node path of assigned device (e.g. "/rng")
733     node_path: CString,
734     // <reg> property from the crosvm DT
735     reg: Vec<DeviceReg>,
736     // <interrupts> property from the crosvm DT
737     interrupts: Vec<u8>,
738     // Parsed <iommus> property from the crosvm DT. Tuple of PvIommu and vSID.
739     iommus: Vec<(PvIommu, Vsid)>,
740 }
741 
742 impl AssignedDeviceInfo {
743     fn validate_reg(
744         device_reg: &[DeviceReg],
745         physical_device_reg: &[DeviceReg],
746         hypervisor: &dyn DeviceAssigningHypervisor,
747     ) -> Result<()> {
748         let mut virt_regs = device_reg.iter();
749         let mut phys_regs = physical_device_reg.iter();
750         // TODO(b/308694211): Move this constant to vmbase::layout once vmbase is std-compatible.
751         const PVMFW_RANGE: Range<u64> = 0x7fc0_0000..0x8000_0000;
752         // Guest <reg> and physical <reg> entries should match 1:1, in order.
753         for (reg, phys_reg) in virt_regs.by_ref().zip(phys_regs.by_ref()) {
754             if reg.overlaps(&PVMFW_RANGE) {
755                 return Err(DeviceAssignmentError::InvalidReg(reg.addr, reg.size));
756             }
757             // If this call returns successfully, hyp has mapped the MMIO region at `reg`.
758             let addr = hypervisor.get_phys_mmio_token(reg.addr, reg.size).map_err(|e| {
759                 error!("Hypervisor error while requesting MMIO token: {e}");
760                 DeviceAssignmentError::InvalidReg(reg.addr, reg.size)
761             })?;
762             // Only check the address; the hypervisor guarantees the size matches on success.
763             if phys_reg.addr != addr {
764                 error!("Assigned device {reg:x?} has unexpected physical address");
765                 return Err(DeviceAssignmentError::InvalidPhysReg(addr, reg.size));
766             }
767         }
768 
769         if let Some(DeviceReg { addr, size }) = virt_regs.next() {
770             return Err(DeviceAssignmentError::InvalidReg(*addr, *size));
771         }
772 
773         if let Some(DeviceReg { addr, size }) = phys_regs.next() {
774             return Err(DeviceAssignmentError::InvalidPhysReg(*addr, *size));
775         }
776 
777         Ok(())
778     }
779 
780     fn parse_interrupts(node: &FdtNode) -> Result<Vec<u8>> {
781         // Validation: the number of <interrupts> cells must be a multiple of #interrupt-cells,
782         // since we can't know in advance how many interrupts the device has.
783         let interrupts_cells = node
784             .getprop_cells(cstr!("interrupts"))?
785             .ok_or(DeviceAssignmentError::InvalidInterrupts)?
786             .count();
787         if interrupts_cells % CELLS_PER_INTERRUPT != 0 {
788             return Err(DeviceAssignmentError::InvalidInterrupts);
789         }
790 
791         // Once validated, keep the raw bytes so patching can be done with setprop().
792         Ok(node.getprop(cstr!("interrupts")).unwrap().unwrap().into())
793     }
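
    // Illustrative note: with CELLS_PER_INTERRUPT = 3, a property such as
    //     interrupts = <0x0 0xF 0x4  0x0 0x10 0x4>;
    // (6 cells, i.e. two interrupts) passes the check above, while e.g. a 4-cell
    // property is rejected as InvalidInterrupts.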
794 
795     // TODO(b/277993056): Also validate /__local_fixups__ to ensure that <iommus> has phandle.
796     fn parse_iommus(
797         node: &FdtNode,
798         pviommus: &BTreeMap<Phandle, PvIommu>,
799     ) -> Result<Vec<(PvIommu, Vsid)>> {
800         let mut iommus = vec![];
801         let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
802             return Ok(iommus);
803         };
804         while let Some(cell) = cells.next() {
805             // Parse pvIOMMU ID
806             let phandle =
807                 Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
808             let pviommu = pviommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;
809 
810             // Parse vSID
811             let Some(cell) = cells.next() else {
812                 return Err(DeviceAssignmentError::MalformedIommus);
813             };
814             let vsid = Vsid(cell);
815 
816             iommus.push((*pviommu, vsid));
817         }
818         Ok(iommus)
819     }
820 
821     fn validate_iommus(
822         iommus: &[(PvIommu, Vsid)],
823         physical_device_iommu: &[(PhysIommu, Sid)],
824         hypervisor: &dyn DeviceAssigningHypervisor,
825     ) -> Result<()> {
826         if iommus.len() != physical_device_iommu.len() {
827             return Err(DeviceAssignmentError::InvalidIommus);
828         }
829         // pvIOMMUs can be reordered, and the hypervisor may not guarantee a 1:1 mapping,
830         // so we need to track which physical IOMMUs have already been matched.
831         let mut physical_device_iommu = physical_device_iommu.to_vec();
832         for (pviommu, vsid) in iommus {
833             let (id, sid) =
834                 hypervisor.get_phys_iommu_token(pviommu.id.into(), vsid.0.into()).map_err(|e| {
835                     error!("Hypervisor error while requesting IOMMU token ({pviommu:?}, {vsid:?}): {e}");
836                     DeviceAssignmentError::InvalidIommus
837                 })?;
838 
839             let pos = physical_device_iommu
840                 .iter()
841                 .position(|(phys_iommu, phys_sid)| (phys_iommu.token, phys_sid.0) == (id, sid));
842             match pos {
843                 Some(pos) => physical_device_iommu.remove(pos),
844                 None => {
845                     error!("Failed to validate device <iommus>. No matching phys iommu or duplicated mapping for pviommu={pviommu:?}, vsid={vsid:?}");
846                     return Err(DeviceAssignmentError::InvalidIommus);
847                 }
848             };
849         }
850         Ok(())
851     }
852 
853     fn parse(
854         fdt: &Fdt,
855         vm_dtbo: &VmDtbo,
856         dtbo_node_path: &DtPathTokens,
857         physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
858         pviommus: &BTreeMap<Phandle, PvIommu>,
859         hypervisor: &dyn DeviceAssigningHypervisor,
860     ) -> Result<Option<Self>> {
861         let dtbo_node =
862             vm_dtbo.node(dtbo_node_path)?.ok_or(DeviceAssignmentError::InvalidSymbols)?;
863         let node_path = vm_dtbo.locate_overlay_target_path(dtbo_node_path, &dtbo_node)?;
864 
865         let Some(node) = fdt.node(&node_path)? else { return Ok(None) };
866 
867         // Currently can only assign devices backed by physical devices.
868         let phandle = dtbo_node.get_phandle()?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
869         let Some(physical_device) = physical_devices.get(&phandle) else {
870             // If the labeled DT node isn't backed by a physical device node, just return None.
871             // It's not an error because such a node can be a dependency of assignable device nodes.
872             return Ok(None);
873         };
874 
875         let reg = parse_node_reg(&node)?;
876         Self::validate_reg(&reg, &physical_device.reg, hypervisor)?;
877 
878         let interrupts = Self::parse_interrupts(&node)?;
879 
880         let iommus = Self::parse_iommus(&node, pviommus)?;
881         Self::validate_iommus(&iommus, &physical_device.iommus, hypervisor)?;
882 
883         Ok(Some(Self { node_path, reg, interrupts, iommus }))
884     }
885 
886     fn patch(&self, fdt: &mut Fdt, pviommu_phandles: &BTreeMap<PvIommu, Phandle>) -> Result<()> {
887         let mut dst = fdt.node_mut(&self.node_path)?.unwrap();
888         dst.setprop(cstr!("reg"), &to_be_bytes(&self.reg))?;
889         dst.setprop(cstr!("interrupts"), &self.interrupts)?;
890         let mut iommus = Vec::with_capacity(8 * self.iommus.len());
891         for (pviommu, vsid) in &self.iommus {
892             let phandle = pviommu_phandles.get(pviommu).unwrap();
893             iommus.extend_from_slice(&u32::from(*phandle).to_be_bytes());
894             iommus.extend_from_slice(&vsid.0.to_be_bytes());
895         }
896         dst.setprop(cstr!("iommus"), &iommus)?;
897 
898         Ok(())
899     }
900 }
901 
902 #[derive(Debug, Eq, PartialEq)]
903 pub struct DeviceAssignmentInfo {
904     pviommus: BTreeSet<PvIommu>,
905     assigned_devices: Vec<AssignedDeviceInfo>,
906     vm_dtbo_mask: DeviceTreeMask,
907 }
908 
909 impl DeviceAssignmentInfo {
910     const PVIOMMU_COMPATIBLE: &'static CStr = cstr!("pkvm,pviommu");
911 
912     /// Parses pvIOMMUs in fdt
913     // Note: This will validate pvIOMMU ids' uniqueness, even when unassigned.
914     fn parse_pviommus(fdt: &Fdt) -> Result<BTreeMap<Phandle, PvIommu>> {
915         let mut pviommus = BTreeMap::new();
916         for compatible in fdt.compatible_nodes(Self::PVIOMMU_COMPATIBLE)? {
917             let Some(phandle) = compatible.get_phandle()? else {
918                 continue; // Skips unreachable pvIOMMU node
919             };
920             let pviommu = PvIommu::parse(&compatible)?;
921             if pviommus.insert(phandle, pviommu).is_some() {
922                 return Err(FdtError::BadPhandle.into());
923             }
924         }
925         Ok(pviommus)
926     }
927 
928     fn validate_pviommu_topology(assigned_devices: &[AssignedDeviceInfo]) -> Result<()> {
929         let mut all_iommus = BTreeSet::new();
930         for assigned_device in assigned_devices {
931             for iommu in &assigned_device.iommus {
932                 if !all_iommus.insert(iommu) {
933                     error!("Unsupported pvIOMMU duplication found, <iommus> = {iommu:?}");
934                     return Err(DeviceAssignmentError::UnsupportedPvIommusDuplication);
935                 }
936             }
937         }
938         Ok(())
939     }
940 
941     // TODO(b/308694211): Remove this workaround for visibility once using
942     // vmbase::hyp::DeviceAssigningHypervisor for tests.
943     #[cfg(test)]
944     fn parse(
945         fdt: &Fdt,
946         vm_dtbo: &VmDtbo,
947         hypervisor: &dyn DeviceAssigningHypervisor,
948     ) -> Result<Option<Self>> {
949         Self::internal_parse(fdt, vm_dtbo, hypervisor)
950     }
951 
952     #[cfg(not(test))]
953     /// Parses fdt and vm_dtbo, and creates new DeviceAssignmentInfo
954     // TODO(b/277993056): Parse __local_fixups__
955     // TODO(b/277993056): Parse __fixups__
956     pub fn parse(
957         fdt: &Fdt,
958         vm_dtbo: &VmDtbo,
959         hypervisor: &dyn DeviceAssigningHypervisor,
960     ) -> Result<Option<Self>> {
961         Self::internal_parse(fdt, vm_dtbo, hypervisor)
962     }
963 
964     fn internal_parse(
965         fdt: &Fdt,
966         vm_dtbo: &VmDtbo,
967         hypervisor: &dyn DeviceAssigningHypervisor,
968     ) -> Result<Option<Self>> {
969         let Some(symbols_node) = vm_dtbo.as_ref().symbols()? else {
970             // /__symbols__ should contain all assignable devices.
971             // If empty, then nothing can be assigned.
972             return Ok(None);
973         };
974 
975         let pviommus = Self::parse_pviommus(fdt)?;
976         let unique_pviommus: BTreeSet<_> = pviommus.values().cloned().collect();
977         if pviommus.len() != unique_pviommus.len() {
978             return Err(DeviceAssignmentError::DuplicatedPvIommuIds);
979         }
980 
981         let physical_devices = vm_dtbo.parse_physical_devices()?;
982 
983         let mut assigned_devices = vec![];
984         let mut assigned_device_paths = vec![];
985         for symbol_prop in symbols_node.properties()? {
986             let symbol_prop_value = symbol_prop.value()?;
987             let dtbo_node_path = CStr::from_bytes_with_nul(symbol_prop_value)
988                 .or(Err(DeviceAssignmentError::InvalidSymbols))?;
989             let dtbo_node_path = DtPathTokens::new(dtbo_node_path)?;
990             if !dtbo_node_path.is_overlayable_node() {
991                 continue;
992             }
993             let assigned_device = AssignedDeviceInfo::parse(
994                 fdt,
995                 vm_dtbo,
996                 &dtbo_node_path,
997                 &physical_devices,
998                 &pviommus,
999                 hypervisor,
1000             )?;
1001             if let Some(assigned_device) = assigned_device {
1002                 assigned_devices.push(assigned_device);
1003                 assigned_device_paths.push(dtbo_node_path);
1004             }
1005         }
1006         if assigned_devices.is_empty() {
1007             return Ok(None);
1008         }
1009 
1010         Self::validate_pviommu_topology(&assigned_devices)?;
1011 
1012         let mut vm_dtbo_mask = vm_dtbo.build_mask(assigned_device_paths)?;
1013         vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__local_fixups__"))?);
1014         vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__symbols__"))?);
1015 
1016         // Note: Any node without __overlay__ will be ignored by fdt_apply_overlay,
1017         // so it doesn't need to be filtered.
1018 
1019         Ok(Some(Self { pviommus: unique_pviommus, assigned_devices, vm_dtbo_mask }))
1020     }
1021 
1022     /// Filters VM DTBO to only contain necessary information for booting pVM
1023     pub fn filter(&self, vm_dtbo: &mut VmDtbo) -> Result<()> {
1024         let vm_dtbo = vm_dtbo.as_mut();
1025 
1026         // Filter unused references in /__local_fixups__
1027         if let Some(local_fixups) = vm_dtbo.node_mut(cstr!("/__local_fixups__"))? {
1028             filter_with_mask(local_fixups, &self.vm_dtbo_mask)?;
1029         }
1030 
1031         // Filter unused nodes in rest of tree
1032         let root = vm_dtbo.root_mut();
1033         filter_with_mask(root, &self.vm_dtbo_mask)?;
1034 
1035         filter_dangling_symbols(vm_dtbo)
1036     }
1037 
1038     fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
1039         let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
1040         let mut pviommu_phandles = BTreeMap::new();
1041 
1042         for pviommu in &self.pviommus {
1043             let mut node = compatible.ok_or(DeviceAssignmentError::TooManyPvIommu)?;
1044             let phandle = node.as_node().get_phandle()?.ok_or(DeviceAssignmentError::Internal)?;
1045             node.setprop_inplace(cstr!("id"), &pviommu.id.to_be_bytes())?;
1046             if pviommu_phandles.insert(*pviommu, phandle).is_some() {
1047                 return Err(DeviceAssignmentError::Internal);
1048             }
1049             compatible = node.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
1050         }
1051 
1052         // Filters pre-populated but unassigned pvIOMMUs.
1053         while let Some(filtered_pviommu) = compatible {
1054             compatible = filtered_pviommu.delete_and_next_compatible(Self::PVIOMMU_COMPATIBLE)?;
1055         }
1056 
1057         Ok(pviommu_phandles)
1058     }
1059 
1060     pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
1061         let pviommu_phandles = self.patch_pviommus(fdt)?;
1062 
1063         // Patches assigned devices
1064         for device in &self.assigned_devices {
1065             device.patch(fdt, &pviommu_phandles)?;
1066         }
1067 
1068         // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
1069         filter_dangling_symbols(fdt)
1070     }
1071 }
1072 
1073 /// Cleans the device tree so it doesn't contain any pre-populated nodes/props for device assignment.
1074 pub fn clean(fdt: &mut Fdt) -> Result<()> {
1075     let mut compatible = fdt.root_mut().next_compatible(cstr!("pkvm,pviommu"))?;
1076     // Filters pre-populated pvIOMMUs.
1077     while let Some(filtered_pviommu) = compatible {
1078         compatible = filtered_pviommu.delete_and_next_compatible(cstr!("pkvm,pviommu"))?;
1079     }
1080 
1081     // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
1082     filter_dangling_symbols(fdt)
1083 }
1084 
1085 #[cfg(test)]
1086 #[derive(Clone, Copy, Debug)]
1087 enum MockHypervisorError {
1088     FailedGetPhysMmioToken,
1089     FailedGetPhysIommuToken,
1090 }
1091 
1092 #[cfg(test)]
1093 type MockHypervisorResult<T> = core::result::Result<T, MockHypervisorError>;
1094 
1095 #[cfg(test)]
1096 impl fmt::Display for MockHypervisorError {
1097     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1098         match self {
1099             MockHypervisorError::FailedGetPhysMmioToken => {
1100                 write!(f, "Failed to get physical MMIO token")
1101             }
1102             MockHypervisorError::FailedGetPhysIommuToken => {
1103                 write!(f, "Failed to get physical IOMMU token")
1104             }
1105         }
1106     }
1107 }
1108 
1109 #[cfg(test)]
1110 trait DeviceAssigningHypervisor {
1111     /// Returns MMIO token.
1112     fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> MockHypervisorResult<u64>;
1113 
1114     /// Returns DMA token as a tuple of (phys_iommu_id, phys_sid).
1115     fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> MockHypervisorResult<(u64, u64)>;
1116 }
1117 
1118 #[cfg(test)]
1119 mod tests {
1120     use super::*;
1121     use alloc::collections::{BTreeMap, BTreeSet};
1122     use dts::Dts;
1123     use std::fs;
1124     use std::path::Path;
1125 
1126     const VM_DTBO_FILE_PATH: &str = "test_pvmfw_devices_vm_dtbo.dtbo";
1127     const VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH: &str =
1128         "test_pvmfw_devices_vm_dtbo_without_symbols.dtbo";
1129     const VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH: &str =
1130         "test_pvmfw_devices_vm_dtbo_with_duplicated_iommus.dtbo";
1131     const VM_DTBO_WITH_DEPENDENCIES_FILE_PATH: &str =
1132         "test_pvmfw_devices_vm_dtbo_with_dependencies.dtbo";
1133     const FDT_WITHOUT_IOMMUS_FILE_PATH: &str = "test_pvmfw_devices_without_iommus.dtb";
1134     const FDT_WITHOUT_DEVICE_FILE_PATH: &str = "test_pvmfw_devices_without_device.dtb";
1135     const FDT_FILE_PATH: &str = "test_pvmfw_devices_with_rng.dtb";
1136     const FDT_WITH_DEVICE_OVERLAPPING_PVMFW: &str = "test_pvmfw_devices_overlapping_pvmfw.dtb";
1137     const FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH: &str =
1138         "test_pvmfw_devices_with_multiple_devices_iommus.dtb";
1139     const FDT_WITH_IOMMU_SHARING: &str = "test_pvmfw_devices_with_iommu_sharing.dtb";
1140     const FDT_WITH_IOMMU_ID_CONFLICT: &str = "test_pvmfw_devices_with_iommu_id_conflict.dtb";
1141     const FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH: &str =
1142         "test_pvmfw_devices_with_duplicated_pviommus.dtb";
1143     const FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH: &str =
1144         "test_pvmfw_devices_with_multiple_reg_iommus.dtb";
1145     const FDT_WITH_DEPENDENCY_FILE_PATH: &str = "test_pvmfw_devices_with_dependency.dtb";
1146     const FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1147         "test_pvmfw_devices_with_multiple_dependencies.dtb";
1148     const FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str = "test_pvmfw_devices_with_dependency_loop.dtb";
1149 
1150     const EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH: &str = "expected_dt_with_dependency.dtb";
1151     const EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1152         "expected_dt_with_multiple_dependencies.dtb";
1153     const EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str =
1154         "expected_dt_with_dependency_loop.dtb";
1155 
1156     #[derive(Debug, Default)]
1157     struct MockHypervisor {
1158         mmio_tokens: BTreeMap<(u64, u64), u64>,
1159         iommu_tokens: BTreeMap<(u64, u64), (u64, u64)>,
1160     }
1161 
1162     impl DeviceAssigningHypervisor for MockHypervisor {
1163         fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> MockHypervisorResult<u64> {
1164             let token = self.mmio_tokens.get(&(base_ipa, size));
1165 
1166             Ok(*token.ok_or(MockHypervisorError::FailedGetPhysMmioToken)?)
1167         }
1168 
1169         fn get_phys_iommu_token(
1170             &self,
1171             pviommu_id: u64,
1172             vsid: u64,
1173         ) -> MockHypervisorResult<(u64, u64)> {
1174             let token = self.iommu_tokens.get(&(pviommu_id, vsid));
1175 
1176             Ok(*token.ok_or(MockHypervisorError::FailedGetPhysIommuToken)?)
1177         }
1178     }
1179 
1180     #[derive(Debug, Eq, PartialEq)]
1181     struct AssignedDeviceNode {
1182         path: CString,
1183         reg: Vec<u8>,
1184         interrupts: Vec<u8>,
1185         iommus: Vec<u32>, // pvIOMMU id and vSID
1186     }
1187 
1188     impl AssignedDeviceNode {
1189         fn parse(fdt: &Fdt, path: &CStr) -> Result<Self> {
1190             let Some(node) = fdt.node(path)? else {
1191                 return Err(FdtError::NotFound.into());
1192             };
1193 
1194             let reg = node.getprop(cstr!("reg"))?.ok_or(DeviceAssignmentError::MalformedReg)?;
1195             let interrupts = node
1196                 .getprop(cstr!("interrupts"))?
1197                 .ok_or(DeviceAssignmentError::InvalidInterrupts)?;
1198             let mut iommus = vec![];
1199             if let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? {
1200                 while let Some(pviommu_id) = cells.next() {
1201                     // pvIOMMU id
1202                     let phandle = Phandle::try_from(pviommu_id)?;
1203                     let pviommu = fdt
1204                         .node_with_phandle(phandle)?
1205                         .ok_or(DeviceAssignmentError::MalformedIommus)?;
1206                     let compatible = pviommu.getprop_str(cstr!("compatible"));
1207                     if compatible != Ok(Some(cstr!("pkvm,pviommu"))) {
1208                         return Err(DeviceAssignmentError::MalformedIommus);
1209                     }
1210                     let id = pviommu
1211                         .getprop_u32(cstr!("id"))?
1212                         .ok_or(DeviceAssignmentError::MalformedIommus)?;
1213                     iommus.push(id);
1214 
1215                     // vSID
1216                     let Some(vsid) = cells.next() else {
1217                         return Err(DeviceAssignmentError::MalformedIommus);
1218                     };
1219                     iommus.push(vsid);
1220                 }
1221             }
1222             Ok(Self { path: path.into(), reg: reg.into(), interrupts: interrupts.into(), iommus })
1223         }
1224     }
1225 
1226     fn collect_pviommus(fdt: &Fdt) -> Result<Vec<u32>> {
1227         let mut pviommus = BTreeSet::new();
1228         for pviommu in fdt.compatible_nodes(cstr!("pkvm,pviommu"))? {
1229             if let Ok(Some(id)) = pviommu.getprop_u32(cstr!("id")) {
1230                 pviommus.insert(id);
1231             }
1232         }
1233         Ok(pviommus.iter().cloned().collect())
1234     }
1235 
1236     fn into_fdt_prop(native_bytes: Vec<u32>) -> Vec<u8> {
1237         let mut v = Vec::with_capacity(native_bytes.len() * 4);
1238         for byte in native_bytes {
1239             v.extend_from_slice(&byte.to_be_bytes());
1240         }
1241         v
1242     }
1243 
1244     impl From<[u64; 2]> for DeviceReg {
1245         fn from(fdt_cells: [u64; 2]) -> Self {
1246             DeviceReg { addr: fdt_cells[0], size: fdt_cells[1] }
1247         }
1248     }
1249 
1250     #[test]
1251     fn device_info_new_without_symbols() {
1252         let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1253         let mut vm_dtbo_data = fs::read(VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH).unwrap();
1254         let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1255         let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1256 
1257         let hypervisor: MockHypervisor = Default::default();
1258         let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
1259         assert_eq!(device_info, None);
1260     }
1261 
1262     #[test]
1263     fn device_info_new_without_device() {
1264         let mut fdt_data = fs::read(FDT_WITHOUT_DEVICE_FILE_PATH).unwrap();
1265         let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1266         let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1267         let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1268 
1269         let hypervisor: MockHypervisor = Default::default();
1270         let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
1271         assert_eq!(device_info, None);
1272     }
1273 
    #[test]
    fn device_info_assigned_info_without_iommus() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();

        let expected = [AssignedDeviceInfo {
            node_path: CString::new("/bus0/backlight").unwrap(),
            reg: vec![[0x9, 0xFF].into()],
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![],
        }];

        assert_eq!(device_info.assigned_devices, expected);
    }

    #[test]
    fn device_info_assigned_info() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();

        let expected = [AssignedDeviceInfo {
            node_path: CString::new("/rng").unwrap(),
            reg: vec![[0x9, 0xFF].into()],
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![(PvIommu { id: 0x4 }, Vsid(0xFF0))],
        }];

        assert_eq!(device_info.assigned_devices, expected);
    }

    #[test]
    fn device_info_filter() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        let vm_dtbo = vm_dtbo.as_mut();

        let symbols = vm_dtbo.symbols().unwrap().unwrap();

        let rng = vm_dtbo.node(cstr!("/fragment@0/__overlay__/rng")).unwrap();
        assert_ne!(rng, None);
        let rng_symbol = symbols.getprop_str(cstr!("rng")).unwrap();
        assert_eq!(Some(cstr!("/fragment@0/__overlay__/rng")), rng_symbol);

        let light = vm_dtbo.node(cstr!("/fragment@0/__overlay__/light")).unwrap();
        assert_eq!(light, None);
        let light_symbol = symbols.getprop_str(cstr!("light")).unwrap();
        assert_eq!(None, light_symbol);

        let led = vm_dtbo.node(cstr!("/fragment@0/__overlay__/led")).unwrap();
        assert_eq!(led, None);
        let led_symbol = symbols.getprop_str(cstr!("led")).unwrap();
        assert_eq!(None, led_symbol);

        let backlight = vm_dtbo.node(cstr!("/fragment@0/__overlay__/bus0/backlight")).unwrap();
        assert_eq!(backlight, None);
        let backlight_symbol = symbols.getprop_str(cstr!("backlight")).unwrap();
        assert_eq!(None, backlight_symbol);
    }

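    // After applying the filtered VM DTBO as an overlay and patching, the assigned node in the
    // platform DT must carry exactly the expected set of properties.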
    #[test]
    fn device_info_patch() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let backlight_node = platform_dt.node(cstr!("/bus0/backlight")).unwrap().unwrap();
        let phandle = backlight_node.getprop_u32(cstr!("phandle")).unwrap();
        assert_ne!(None, phandle);

        // Note: Intentionally not using AssignedDeviceNode, so that every property is matched.
        type FdtResult<T> = libfdt::Result<T>;
        let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
            (Ok(cstr!("android,backlight,ignore-gctrl-reset")), Ok(Vec::new())),
            (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,backlight\0"))),
            (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
            (Ok(cstr!("iommus")), Ok(Vec::new())),
            (Ok(cstr!("phandle")), Ok(into_fdt_prop(vec![phandle.unwrap()]))),
            (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
        ];

        let mut properties: Vec<_> = backlight_node
            .properties()
            .unwrap()
            .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
            .collect();
        properties.sort_by(|a, b| {
            let lhs = a.0.unwrap_or_default();
            let rhs = b.0.unwrap_or_default();
            lhs.partial_cmp(rhs).unwrap()
        });

        assert_eq!(properties, expected);
    }

    #[test]
    fn device_info_patch_no_pviommus() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
        assert_eq!(None, compatible);

        if let Some(symbols) = platform_dt.symbols().unwrap() {
            for prop in symbols.properties().unwrap() {
                let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
                assert_ne!(None, platform_dt.node(path).unwrap());
            }
        }
    }

    #[test]
    fn device_info_overlay_iommu() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected = AssignedDeviceNode {
            path: CString::new("/rng").unwrap(),
            reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![0x4, 0xFF0],
        };

        let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
        assert_eq!(node, Ok(expected));

        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4]));
    }

    #[test]
    fn device_info_multiple_devices_iommus() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [
                ((0x9, 0xFF), 0x12F00000),
                ((0x10000, 0x1000), 0xF00000),
                ((0x20000, 0x1000), 0xF10000),
            ]
            .into(),
            iommu_tokens: [
                ((0x4, 0xFF0), (0x12E40000, 3)),
                ((0x40, 0xFFA), (0x40000, 0x4)),
                ((0x50, 0xFFB), (0x50000, 0x5)),
            ]
            .into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected_devices = [
            AssignedDeviceNode {
                path: CString::new("/rng").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
                iommus: vec![0x4, 0xFF0],
            },
            AssignedDeviceNode {
                path: CString::new("/light").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x10000, 0x0, 0x1000, 0x0, 0x20000, 0x0, 0x1000]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
                iommus: vec![0x40, 0xFFA, 0x50, 0xFFB],
            },
        ];

        for expected in expected_devices {
            let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
            assert_eq!(node, Ok(expected));
        }
        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4, 0x40, 0x50]));
    }

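    // Devices may share a single pvIOMMU (same id, distinct vSIDs); sharing must not duplicate
    // the pvIOMMU node in the patched platform DT.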
    #[test]
    fn device_info_iommu_sharing() {
        let mut fdt_data = fs::read(FDT_WITH_IOMMU_SHARING).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000), ((0x1000, 0x9), 0x12000000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 3)), ((0x4, 0xFF1), (0x12E40000, 9))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected_devices = [
            AssignedDeviceNode {
                path: CString::new("/rng").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
                iommus: vec![0x4, 0xFF0],
            },
            AssignedDeviceNode {
                path: CString::new("/led").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x1000, 0x0, 0x9]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
                iommus: vec![0x4, 0xFF1],
            },
        ];

        for expected in expected_devices {
            let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
            assert_eq!(node, Ok(expected));
        }

        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4]));
    }

    #[test]
    fn device_info_iommu_id_conflict() {
        let mut fdt_data = fs::read(FDT_WITH_IOMMU_ID_CONFLICT).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
    }

    #[test]
    fn device_info_invalid_reg() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: BTreeMap::new(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x9, 0xFF)));
    }

    #[test]
    fn device_info_invalid_reg_out_of_order() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xF000, 0x1000), 0xF10000), ((0xF100, 0x1000), 0xF00000)].into(),
            iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x50000, 0x5))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidPhysReg(0xF10000, 0x1000)));
    }

    #[test]
    fn device_info_invalid_iommus() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
    }

    #[test]
    fn device_info_duplicated_pv_iommus() {
        let mut fdt_data = fs::read(FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
    }

    #[test]
    fn device_info_duplicated_iommus() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::UnsupportedIommusDuplication));
    }

    #[test]
    fn device_info_duplicated_iommu_mapping() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xF000, 0x1000), 0xF00000), ((0xF100, 0x1000), 0xF10000)].into(),
            iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x40000, 0x4))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
    }

    #[test]
    fn device_info_overlaps_pvmfw() {
        let mut fdt_data = fs::read(FDT_WITH_DEVICE_OVERLAPPING_PVMFW).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x7fee0000, 0x1000), 0xF00000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x7fee0000, 0x1000)));
    }

    #[test]
    fn device_assignment_clean() {
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
        assert_ne!(None, compatible.unwrap());

        clean(platform_dt).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
        assert_eq!(Ok(None), compatible);
    }

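    // The dependency tests below compare the fully patched platform DT against golden DTBs
    // (EXPECTED_FDT_WITH_*_FILE_PATH) via Dts, covering single, multiple, and looping
    // dependencies pulled in from the VM DTBO.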
    #[test]
    fn device_info_dependency() {
        let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF000, 0x1), 0xF000)].into(),
            iommu_tokens: Default::default(),
        };

        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected = Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }

    #[test]
    fn device_info_multiple_dependencies() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF000, 0x1), 0xF000), ((0xFF100, 0x1), 0xF100)].into(),
            iommu_tokens: Default::default(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected =
            Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }

    #[test]
    fn device_info_dependency_loop() {
        let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_LOOP_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF200, 0x1), 0xF200)].into(),
            iommu_tokens: Default::default(),
        };
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: The damaged VM DTBO is not used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected =
            Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }
}