• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //! Device Path protocol
2 //!
3 //! A UEFI device path is a very flexible structure for encoding a
4 //! programmatic path such as a hard drive or console.
5 //!
6 //! A device path is made up of a packed list of variable-length nodes of
7 //! various types. The entire device path is terminated with an
8 //! [`END_ENTIRE`] node. A device path _may_ contain multiple device-path
9 //! instances separated by [`END_INSTANCE`] nodes, but typical paths contain
10 //! only a single instance (in which case no `END_INSTANCE` node is needed).
11 //!
12 //! Example of what a device path containing two instances (each comprised of
13 //! three nodes) might look like:
14 //!
15 //! ```text
16 //! ┌──────┬─────┬──────────────╥───────┬──────────┬────────────┐
17 //! │ ACPI │ PCI │ END_INSTANCE ║ CDROM │ FILEPATH │ END_ENTIRE │
18 //! └──────┴─────┴──────────────╨───────┴──────────┴────────────┘
19 //! ↑                           ↑                               ↑
20 //! ├─── DevicePathInstance ────╨────── DevicePathInstance ─────┤
21 //! │                                                           │
22 //! └─────────────────── Entire DevicePath ─────────────────────┘
23 //! ```
24 //!
25 //! # Types
26 //!
27 //! To represent device paths, this module provides several types:
28 //!
29 //! * [`DevicePath`] is the root type that represents a full device
30 //!   path, containing one or more device path instance. It ends with an
31 //!   [`END_ENTIRE`] node. It implements [`Protocol`] (corresponding to
32 //!   `EFI_DEVICE_PATH_PROTOCOL`).
33 //!
34 //! * [`DevicePathInstance`] represents a single path instance within a
35 //!   device path. It ends with either an [`END_INSTANCE`] or [`END_ENTIRE`]
36 //!   node.
37 //!
38 //! * [`DevicePathNode`] represents a single node within a path. The
39 //!   node's [`device_type`] and [`sub_type`] must be examined to
40 //!   determine what type of data it contains.
41 //!
42 //!   Specific node types have their own structures in these submodules:
43 //!   * [`acpi`]
44 //!   * [`bios_boot_spec`]
45 //!   * [`end`]
46 //!   * [`hardware`]
47 //!   * [`media`]
48 //!   * [`messaging`]
49 //!
50 //! * [`DevicePathNodeEnum`] contains variants for references to each
51 //!   type of node. Call [`DevicePathNode::as_enum`] to convert from a
52 //!   [`DevicePathNode`] reference to a `DevicePathNodeEnum`.
53 //!
54 //! * [`DevicePathHeader`] is a header present at the start of every
55 //!   node. It describes the type of node as well as the node's size.
56 //!
57 //! * [`FfiDevicePath`] is an opaque type used whenever a device path
58 //!   pointer is passed to or from external UEFI interfaces (i.e. where
59 //!   the UEFI spec uses `const* EFI_DEVICE_PATH_PROTOCOL`, `*const
60 //!   FfiDevicePath` should be used in the Rust definition). Many of the
61 //!   other types in this module are DSTs, so pointers to the type are
62 //!   "fat" and not suitable for FFI.
63 //!
64 //! All of these types use a packed layout and may appear on any byte
65 //! boundary.
66 //!
67 //! Note: the API provided by this module is currently mostly limited to
68 //! reading existing device paths rather than constructing new ones.
69 //!
70 //! [`END_ENTIRE`]: DeviceSubType::END_ENTIRE
71 //! [`END_INSTANCE`]: DeviceSubType::END_INSTANCE
72 //! [`Protocol`]: crate::proto::Protocol
73 //! [`device_type`]: DevicePathNode::device_type
74 //! [`sub_type`]: DevicePathNode::sub_type
75 
76 pub mod build;
77 pub mod text;
78 
79 mod device_path_gen;
80 pub use device_path_gen::{
81     acpi, bios_boot_spec, end, hardware, media, messaging, DevicePathNodeEnum,
82 };
83 pub use uefi_raw::protocol::device_path::{DeviceSubType, DeviceType};
84 
85 use crate::proto::{unsafe_protocol, ProtocolPointer};
86 use core::ffi::c_void;
87 use core::fmt::{self, Debug, Display, Formatter};
88 use core::mem;
89 use core::ops::Deref;
90 use ptr_meta::Pointee;
91 
92 #[cfg(feature = "alloc")]
93 use {
94     crate::boot::{self, OpenProtocolAttributes, OpenProtocolParams, ScopedProtocol, SearchType},
95     crate::proto::device_path::text::{AllowShortcuts, DevicePathToText, DisplayOnly},
96     crate::{CString16, Identify},
97     alloc::borrow::ToOwned,
98     alloc::boxed::Box,
99 };
100 
opaque_type! {
    /// Opaque type that should be used to represent a pointer to a
    /// [`DevicePath`] or [`DevicePathNode`] in foreign function interfaces. This
    /// type produces a thin pointer, unlike [`DevicePath`] and
    /// [`DevicePathNode`].
    ///
    /// Corresponds to `EFI_DEVICE_PATH_PROTOCOL*` in the UEFI spec; it carries
    /// no Rust-side size information and must never be dereferenced directly.
    pub struct FfiDevicePath;
}
108 
/// Header that appears at the start of every [`DevicePathNode`].
///
/// The layout is `repr(C, packed)` (alignment 1), matching the UEFI spec:
/// device paths may start at any byte boundary.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(C, packed)]
pub struct DevicePathHeader {
    /// Type of device
    pub device_type: DeviceType,
    /// Sub type of device
    pub sub_type: DeviceSubType,
    /// Size (in bytes) of the [`DevicePathNode`], including this header.
    pub length: u16,
}
120 
121 impl<'a> TryFrom<&'a [u8]> for &'a DevicePathHeader {
122     type Error = ByteConversionError;
123 
try_from(bytes: &[u8]) -> Result<Self, Self::Error>124     fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
125         if mem::size_of::<DevicePathHeader>() <= bytes.len() {
126             unsafe { Ok(&*bytes.as_ptr().cast::<DevicePathHeader>()) }
127         } else {
128             Err(ByteConversionError::InvalidLength)
129         }
130     }
131 }
132 
/// A single node within a [`DevicePath`].
///
/// Each node starts with a [`DevicePathHeader`]. The rest of the data
/// in the node depends on the type of node. You can "cast" a node to a specific
/// one like this:
/// ```no_run
/// use uefi::proto::device_path::DevicePath;
/// use uefi::proto::device_path::media::FilePath;
///
/// let image_device_path: &DevicePath = unsafe { DevicePath::from_ffi_ptr(0x1337 as *const _) };
/// let file_path = image_device_path
///         .node_iter()
///         .find_map(|node| {
///             let node: &FilePath = node.try_into().ok()?;
///             let path = node.path_name().to_cstring16().ok()?;
///             Some(path.to_string().to_uppercase())
///         });
/// ```
/// More types are available in [`uefi::proto::device_path`]. Builder types
/// can be found in [`uefi::proto::device_path::build`]
///
/// See the [module-level documentation] for more details.
///
/// [module-level documentation]: crate::proto::device_path
#[derive(Eq, Pointee)]
#[repr(C, packed)]
pub struct DevicePathNode {
    // Fixed-size header holding the node's type, sub-type, and total length.
    header: DevicePathHeader,
    // Variable-length payload; its length is `header.length` minus the
    // header size, carried as DST slice metadata.
    data: [u8],
}
163 
impl DevicePathNode {
    /// Create a [`DevicePathNode`] reference from an opaque pointer.
    ///
    /// # Safety
    ///
    /// The input pointer must point to valid data. That data must
    /// remain valid for the lifetime `'a`, and cannot be mutated during
    /// that lifetime.
    #[must_use]
    pub unsafe fn from_ffi_ptr<'a>(ptr: *const FfiDevicePath) -> &'a Self {
        // Read the header to learn the node's total length. `DevicePathHeader`
        // is `repr(C, packed)` (alignment 1), so this read is valid at any
        // byte address.
        let header = *ptr.cast::<DevicePathHeader>();

        // The DST's slice metadata is the payload length: total node length
        // minus the fixed header size. Valid input guarantees `header.length`
        // is at least the header size, otherwise this would underflow.
        let data_len = usize::from(header.length) - mem::size_of::<DevicePathHeader>();
        &*ptr_meta::from_raw_parts(ptr.cast(), data_len)
    }

    /// Cast to a [`FfiDevicePath`] pointer.
    ///
    /// The returned pointer is thin (the DST slice metadata is dropped),
    /// making it suitable for FFI.
    #[must_use]
    pub const fn as_ffi_ptr(&self) -> *const FfiDevicePath {
        let ptr: *const Self = self;
        ptr.cast::<FfiDevicePath>()
    }

    /// Type of device
    #[must_use]
    pub const fn device_type(&self) -> DeviceType {
        self.header.device_type
    }

    /// Sub type of device
    #[must_use]
    pub const fn sub_type(&self) -> DeviceSubType {
        self.header.sub_type
    }

    /// Tuple of the node's type and subtype.
    #[must_use]
    pub const fn full_type(&self) -> (DeviceType, DeviceSubType) {
        (self.header.device_type, self.header.sub_type)
    }

    /// Size (in bytes) of the full [`DevicePathNode`], including the header.
    #[must_use]
    pub const fn length(&self) -> u16 {
        self.header.length
    }

    /// True if this node ends an entire [`DevicePath`].
    #[must_use]
    pub fn is_end_entire(&self) -> bool {
        self.full_type() == (DeviceType::END, DeviceSubType::END_ENTIRE)
    }

    /// Returns the payload data of this node.
    ///
    /// This excludes the [`DevicePathHeader`] bytes.
    #[must_use]
    pub const fn data(&self) -> &[u8] {
        &self.data
    }

    /// Convert from a generic [`DevicePathNode`] reference to an enum
    /// of more specific node types.
    ///
    /// # Errors
    ///
    /// Returns a [`NodeConversionError`] if the node's data is not valid for
    /// its type or the node type is not supported.
    pub fn as_enum(&self) -> Result<DevicePathNodeEnum, NodeConversionError> {
        DevicePathNodeEnum::try_from(self)
    }

    /// Transforms the device path node to its string representation using the
    /// [`DevicePathToText`] protocol.
    ///
    /// # Errors
    ///
    /// Returns a [`DevicePathToTextError`] if the protocol cannot be located
    /// or opened, or if the text conversion itself fails.
    #[cfg(feature = "alloc")]
    pub fn to_string(
        &self,
        display_only: DisplayOnly,
        allow_shortcuts: AllowShortcuts,
    ) -> Result<CString16, DevicePathToTextError> {
        let to_text_protocol = open_text_protocol()?;

        to_text_protocol
            .convert_device_node_to_text(self, display_only, allow_shortcuts)
            .map(|pool_string| {
                let cstr16 = &*pool_string;
                // Another allocation; pool string is dropped. This overhead
                // is negligible. CString16 is more convenient to use.
                CString16::from(cstr16)
            })
            .map_err(|_| DevicePathToTextError::OutOfMemory)
    }
}
250 
251 impl Debug for DevicePathNode {
fmt(&self, f: &mut Formatter) -> fmt::Result252     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
253         f.debug_struct("DevicePathNode")
254             .field("header", &self.header)
255             .field("data", &&self.data)
256             .finish()
257     }
258 }
259 
260 impl PartialEq for DevicePathNode {
eq(&self, other: &Self) -> bool261     fn eq(&self, other: &Self) -> bool {
262         self.header == other.header && self.data == other.data
263     }
264 }
265 
266 impl<'a> TryFrom<&'a [u8]> for &'a DevicePathNode {
267     type Error = ByteConversionError;
268 
try_from(bytes: &[u8]) -> Result<Self, Self::Error>269     fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
270         let dp = <&DevicePathHeader>::try_from(bytes)?;
271         if usize::from(dp.length) <= bytes.len() {
272             unsafe { Ok(DevicePathNode::from_ffi_ptr(bytes.as_ptr().cast())) }
273         } else {
274             Err(ByteConversionError::InvalidLength)
275         }
276     }
277 }
278 
/// A single device path instance that ends with either an [`END_INSTANCE`]
/// or [`END_ENTIRE`] node. Use [`DevicePath::instance_iter`] to get the
/// path instances in a [`DevicePath`].
///
/// See the [module-level documentation] for more details.
///
/// [`END_ENTIRE`]: DeviceSubType::END_ENTIRE
/// [`END_INSTANCE`]: DeviceSubType::END_INSTANCE
/// [module-level documentation]: crate::proto::device_path
#[repr(C, packed)]
#[derive(Eq, Pointee)]
pub struct DevicePathInstance {
    // Raw packed node data of the instance, including its terminating
    // end node. The layout is identical to `[u8]`.
    data: [u8],
}
293 
impl DevicePathInstance {
    /// Get an iterator over the [`DevicePathNodes`] in this
    /// instance. Iteration ends when any [`DeviceType::END`] node is
    /// reached.
    ///
    /// [`DevicePathNodes`]: DevicePathNode
    #[must_use]
    pub const fn node_iter(&self) -> DevicePathNodeIterator {
        DevicePathNodeIterator {
            nodes: &self.data,
            stop_condition: StopCondition::AnyEndNode,
        }
    }

    /// Returns a slice of the underlying bytes.
    #[must_use]
    pub const fn as_bytes(&self) -> &[u8] {
        &self.data
    }

    /// Returns a boxed copy of that value.
    #[cfg(feature = "alloc")]
    #[must_use]
    pub fn to_boxed(&self) -> Box<Self> {
        let data = self.data.to_owned();
        let data = data.into_boxed_slice();
        // SAFETY-relevant: `Self` is a `repr(C, packed)` DST over a single
        // `[u8]` field, so `Box<[u8]>` and `Box<Self>` have identical layout
        // and fat-pointer metadata, making this transmute sound.
        unsafe { mem::transmute(data) }
    }
}
323 
324 impl Debug for DevicePathInstance {
fmt(&self, f: &mut Formatter) -> fmt::Result325     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
326         f.debug_struct("DevicePathInstance")
327             .field("data", &&self.data)
328             .finish()
329     }
330 }
331 
332 impl PartialEq for DevicePathInstance {
eq(&self, other: &Self) -> bool333     fn eq(&self, other: &Self) -> bool {
334         self.data == other.data
335     }
336 }
337 
338 #[cfg(feature = "alloc")]
339 impl ToOwned for DevicePathInstance {
340     type Owned = Box<Self>;
341 
to_owned(&self) -> Self::Owned342     fn to_owned(&self) -> Self::Owned {
343         self.to_boxed()
344     }
345 }
346 
/// Device path protocol.
///
/// Can be used on any device handle to obtain generic path/location information
/// concerning the physical device or logical device. If the handle does not
/// logically map to a physical device, the handle may not necessarily support
/// the device path protocol. The device path describes the location of the
/// device the handle is for. The size of the Device Path can be determined from
/// the structures that make up the Device Path.
///
/// See the [module-level documentation] for more details.
///
/// [module-level documentation]: crate::proto::device_path
/// [`END_ENTIRE`]: DeviceSubType::END_ENTIRE
#[repr(C, packed)]
#[unsafe_protocol(uefi_raw::protocol::device_path::DevicePathProtocol::GUID)]
#[derive(Eq, Pointee)]
pub struct DevicePath {
    // Raw packed node data of the whole path, including the terminating
    // end-entire node. The layout is identical to `[u8]`.
    data: [u8],
}
366 
impl ProtocolPointer for DevicePath {
    // Turn a thin FFI pointer into a fat pointer by walking the path to
    // compute its total size (used as the DST slice metadata).
    unsafe fn ptr_from_ffi(ptr: *const c_void) -> *const Self {
        ptr_meta::from_raw_parts(ptr.cast(), Self::size_in_bytes_from_ptr(ptr))
    }

    // Mutable variant of `ptr_from_ffi`; same size computation.
    unsafe fn mut_ptr_from_ffi(ptr: *mut c_void) -> *mut Self {
        ptr_meta::from_raw_parts_mut(ptr.cast(), Self::size_in_bytes_from_ptr(ptr))
    }
}
376 
impl DevicePath {
    /// Calculate the size in bytes of the entire `DevicePath` starting
    /// at `ptr`. This adds up each node's length, including the
    /// end-entire node.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a valid device path terminated by an end-entire
    /// node, and that data must remain valid while this function runs.
    unsafe fn size_in_bytes_from_ptr(ptr: *const c_void) -> usize {
        let mut ptr = ptr.cast::<u8>();
        let mut total_size_in_bytes: usize = 0;
        // Walk node by node until the end-entire node, summing each node's
        // declared length (which includes its header).
        loop {
            let node = DevicePathNode::from_ffi_ptr(ptr.cast::<FfiDevicePath>());
            let node_size_in_bytes = usize::from(node.length());
            total_size_in_bytes += node_size_in_bytes;
            if node.is_end_entire() {
                break;
            }
            ptr = ptr.add(node_size_in_bytes);
        }

        total_size_in_bytes
    }

    /// Calculate the size in bytes of the entire `DevicePath` starting
    /// at `bytes`. This adds up each node's length, including the
    /// end-entire node.
    ///
    /// # Errors
    ///
    /// The [`ByteConversionError::InvalidLength`] error will be returned
    /// when the length of the given bytes slice cannot contain the full
    /// [`DevicePath`] represented by the slice.
    fn size_in_bytes_from_slice(mut bytes: &[u8]) -> Result<usize, ByteConversionError> {
        let max_size_in_bytes = bytes.len();
        let mut total_size_in_bytes: usize = 0;
        loop {
            // Bounds-checked: fails if the next node extends past the slice.
            let node = <&DevicePathNode>::try_from(bytes)?;
            let node_size_in_bytes = usize::from(node.length());
            total_size_in_bytes += node_size_in_bytes;
            // Length of last processed node extends past the bytes slice.
            if total_size_in_bytes > max_size_in_bytes {
                return Err(ByteConversionError::InvalidLength);
            }
            if node.is_end_entire() {
                break;
            }
            bytes = &bytes[node_size_in_bytes..];
        }

        Ok(total_size_in_bytes)
    }

    /// Create a [`DevicePath`] reference from an opaque pointer.
    ///
    /// # Safety
    ///
    /// The input pointer must point to valid data. That data must
    /// remain valid for the lifetime `'a`, and cannot be mutated during
    /// that lifetime.
    #[must_use]
    pub unsafe fn from_ffi_ptr<'a>(ptr: *const FfiDevicePath) -> &'a Self {
        &*Self::ptr_from_ffi(ptr.cast::<c_void>())
    }

    /// Cast to a [`FfiDevicePath`] pointer.
    ///
    /// The returned pointer is thin (the DST slice metadata is dropped),
    /// making it suitable for FFI.
    #[must_use]
    pub const fn as_ffi_ptr(&self) -> *const FfiDevicePath {
        let p = self as *const Self;
        p.cast()
    }

    /// Get an iterator over the [`DevicePathInstance`]s in this path.
    #[must_use]
    pub const fn instance_iter(&self) -> DevicePathInstanceIterator {
        DevicePathInstanceIterator {
            remaining_path: Some(self),
        }
    }

    /// Get an iterator over the [`DevicePathNode`]s starting at
    /// `self`. Iteration ends when a path is reached where
    /// [`is_end_entire`][DevicePathNode::is_end_entire] is true. That ending
    /// path is not returned by the iterator.
    #[must_use]
    pub const fn node_iter(&self) -> DevicePathNodeIterator {
        DevicePathNodeIterator {
            nodes: &self.data,
            stop_condition: StopCondition::EndEntireNode,
        }
    }

    /// Returns a slice of the underlying bytes.
    #[must_use]
    pub const fn as_bytes(&self) -> &[u8] {
        &self.data
    }

    /// Returns a boxed copy of that value.
    #[cfg(feature = "alloc")]
    #[must_use]
    pub fn to_boxed(&self) -> Box<Self> {
        let data = self.data.to_owned();
        let data = data.into_boxed_slice();
        // SAFETY-relevant: `Self` is a `repr(C, packed)` DST over a single
        // `[u8]` field, so `Box<[u8]>` and `Box<Self>` have identical layout
        // and fat-pointer metadata, making this transmute sound.
        unsafe { mem::transmute(data) }
    }

    /// Transforms the device path to its string representation using the
    /// [`DevicePathToText`] protocol.
    ///
    /// # Errors
    ///
    /// Returns a [`DevicePathToTextError`] if the protocol cannot be located
    /// or opened, or if the text conversion itself fails.
    #[cfg(feature = "alloc")]
    pub fn to_string(
        &self,
        display_only: DisplayOnly,
        allow_shortcuts: AllowShortcuts,
    ) -> Result<CString16, DevicePathToTextError> {
        let to_text_protocol = open_text_protocol()?;

        to_text_protocol
            .convert_device_path_to_text(self, display_only, allow_shortcuts)
            .map(|pool_string| {
                let cstr16 = &*pool_string;
                // Another allocation; pool string is dropped. This overhead
                // is negligible. CString16 is more convenient to use.
                CString16::from(cstr16)
            })
            .map_err(|_| DevicePathToTextError::OutOfMemory)
    }
}
501 
502 impl Debug for DevicePath {
fmt(&self, f: &mut Formatter) -> fmt::Result503     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
504         f.debug_struct("DevicePath")
505             .field("data", &&self.data)
506             .finish()
507     }
508 }
509 
510 impl PartialEq for DevicePath {
eq(&self, other: &Self) -> bool511     fn eq(&self, other: &Self) -> bool {
512         self.data == other.data
513     }
514 }
515 
impl<'a> TryFrom<&'a [u8]> for &'a DevicePath {
    type Error = ByteConversionError;

    /// Reinterpret a byte slice as a [`DevicePath`].
    ///
    /// # Errors
    ///
    /// Returns [`ByteConversionError::InvalidLength`] if the slice cannot
    /// contain a full, end-entire-terminated device path.
    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
        // Validates the node chain and computes the exact path length.
        let len = DevicePath::size_in_bytes_from_slice(bytes)?;
        // SAFETY: the first `len` bytes were just validated to form a
        // complete device path; `DevicePath` has the same layout as `[u8]`.
        unsafe { Ok(&*ptr_meta::from_raw_parts(bytes.as_ptr().cast(), len)) }
    }
}
524 
525 #[cfg(feature = "alloc")]
526 impl ToOwned for DevicePath {
527     type Owned = Box<Self>;
528 
to_owned(&self) -> Self::Owned529     fn to_owned(&self) -> Self::Owned {
530         self.to_boxed()
531     }
532 }
533 
/// Iterator over the [`DevicePathInstance`]s in a [`DevicePath`].
///
/// This struct is returned by [`DevicePath::instance_iter`].
#[derive(Debug)]
pub struct DevicePathInstanceIterator<'a> {
    // The not-yet-yielded tail of the path; `None` once the end-entire
    // node has been consumed.
    remaining_path: Option<&'a DevicePath>,
}
541 
impl<'a> Iterator for DevicePathInstanceIterator<'a> {
    type Item = &'a DevicePathInstance;

    fn next(&mut self) -> Option<Self::Item> {
        let remaining_path = self.remaining_path?;

        let mut instance_size: usize = 0;

        // Find the end of the instance, which can be either kind of end
        // node (end-instance or end-entire). Count the number of bytes
        // up to and including that end node.
        let node_iter = DevicePathNodeIterator {
            nodes: &remaining_path.data,
            stop_condition: StopCondition::NoMoreNodes,
        };
        for node in node_iter {
            instance_size += usize::from(node.length());
            if node.device_type() == DeviceType::END {
                break;
            }
        }

        // `head` is this instance (including its end node); `rest` is
        // everything after it.
        let (head, rest) = remaining_path.data.split_at(instance_size);

        if rest.is_empty() {
            // The end-entire node was consumed: no more instances.
            self.remaining_path = None;
        } else {
            // SAFETY: `rest` starts at a node boundary and is the tail of a
            // valid device path; `DevicePath` has the same layout as `[u8]`,
            // so the pointer/length pair forms a valid reference.
            self.remaining_path = unsafe {
                Some(&*ptr_meta::from_raw_parts(
                    rest.as_ptr().cast::<()>(),
                    rest.len(),
                ))
            };
        }

        // SAFETY: `head` covers exactly one instance including its end node;
        // `DevicePathInstance` has the same layout as `[u8]`.
        unsafe {
            Some(&*ptr_meta::from_raw_parts(
                head.as_ptr().cast::<()>(),
                head.len(),
            ))
        }
    }
}
585 
/// Controls when [`DevicePathNodeIterator`] stops yielding nodes.
#[derive(Debug)]
enum StopCondition {
    // Stop at any end node (end-instance or end-entire).
    AnyEndNode,
    // Stop only at an end-entire node.
    EndEntireNode,
    // Never stop early; iterate until the byte slice is exhausted.
    NoMoreNodes,
}
592 
/// Iterator over [`DevicePathNode`]s.
///
/// This struct is returned by [`DevicePath::node_iter`] and
/// [`DevicePathInstance::node_iter`].
#[derive(Debug)]
pub struct DevicePathNodeIterator<'a> {
    // Remaining raw node bytes; emptied when iteration finishes.
    nodes: &'a [u8],
    // Which kind of end node (if any) terminates iteration early.
    stop_condition: StopCondition,
}
602 
impl<'a> Iterator for DevicePathNodeIterator<'a> {
    type Item = &'a DevicePathNode;

    fn next(&mut self) -> Option<Self::Item> {
        if self.nodes.is_empty() {
            return None;
        }

        // SAFETY-relevant: `self.nodes` always starts at a node boundary
        // within data that outlives `'a`, as required by `from_ffi_ptr`.
        let node =
            unsafe { DevicePathNode::from_ffi_ptr(self.nodes.as_ptr().cast::<FfiDevicePath>()) };

        // Check if an early stop condition has been reached.
        let stop = match self.stop_condition {
            StopCondition::AnyEndNode => node.device_type() == DeviceType::END,
            StopCondition::EndEntireNode => node.is_end_entire(),
            StopCondition::NoMoreNodes => false,
        };

        if stop {
            // Clear the remaining node data so that future calls to
            // next() immediately return `None`.
            self.nodes = &[];
            None
        } else {
            // Advance to next node.
            let node_size = usize::from(node.length());
            self.nodes = &self.nodes[node_size..];
            Some(node)
        }
    }
}
634 
/// Error returned when attempting to convert from a `&[u8]` to a
/// [`DevicePath`] type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ByteConversionError {
    /// The length of the given slice is not valid for its [`DevicePath`] type.
    /// Either the slice is too short for a header/node, or a node's declared
    /// length extends past the end of the slice.
    InvalidLength,
}
642 
/// Error returned when converting from a [`DevicePathNode`] to a more
/// specific node type (see [`DevicePathNode::as_enum`]).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NodeConversionError {
    /// The length of the node data is not valid for its type.
    InvalidLength,

    /// The node type is not currently supported.
    UnsupportedType,
}
653 
/// Protocol for accessing the device path that was passed in to [`load_image`]
/// when loading a PE/COFF image.
///
/// The layout of this type is the same as a [`DevicePath`]; it derefs to
/// [`DevicePath`] for convenient access to the path contents.
///
/// [`load_image`]: crate::boot::load_image
#[repr(transparent)]
#[unsafe_protocol("bc62157e-3e33-4fec-9920-2d3b36d750df")]
#[derive(Debug, Pointee)]
pub struct LoadedImageDevicePath(DevicePath);
664 
impl ProtocolPointer for LoadedImageDevicePath {
    // `repr(transparent)` over `DevicePath`, so the size computation and
    // fat-pointer construction are delegated to `DevicePath`.
    unsafe fn ptr_from_ffi(ptr: *const c_void) -> *const Self {
        ptr_meta::from_raw_parts(ptr.cast(), DevicePath::size_in_bytes_from_ptr(ptr))
    }

    // Mutable variant of `ptr_from_ffi`; same size computation.
    unsafe fn mut_ptr_from_ffi(ptr: *mut c_void) -> *mut Self {
        ptr_meta::from_raw_parts_mut(ptr.cast(), DevicePath::size_in_bytes_from_ptr(ptr))
    }
}
674 
675 impl Deref for LoadedImageDevicePath {
676     type Target = DevicePath;
677 
deref(&self) -> &DevicePath678     fn deref(&self) -> &DevicePath {
679         &self.0
680     }
681 }
682 
/// Errors that may happen when a device path is transformed to a string
/// representation using:
/// - [`DevicePath::to_string`]
/// - [`DevicePathNode::to_string`]
#[derive(Debug)]
pub enum DevicePathToTextError {
    /// Can't locate a handle buffer with handles associated with the
    /// [`DevicePathToText`] protocol.
    CantLocateHandleBuffer(crate::Error),
    /// There is no handle supporting the [`DevicePathToText`] protocol.
    NoHandle,
    /// The handle supporting the [`DevicePathToText`] protocol exists but it
    /// could not be opened.
    CantOpenProtocol(crate::Error),
    /// Failed to allocate pool memory.
    OutOfMemory,
}
700 
701 impl Display for DevicePathToTextError {
fmt(&self, f: &mut Formatter<'_>) -> fmt::Result702     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
703         write!(f, "{self:?}")
704     }
705 }
706 
707 #[cfg(feature = "unstable")]
708 impl core::error::Error for DevicePathToTextError {
source(&self) -> Option<&(dyn core::error::Error + 'static)>709     fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
710         match self {
711             DevicePathToTextError::CantLocateHandleBuffer(e) => Some(e),
712             DevicePathToTextError::CantOpenProtocol(e) => Some(e),
713             _ => None,
714         }
715     }
716 }
717 
/// Helper function to open the [`DevicePathToText`] protocol using the boot
/// services.
///
/// # Errors
///
/// - [`DevicePathToTextError::CantLocateHandleBuffer`] if the handle search fails.
/// - [`DevicePathToTextError::NoHandle`] if no handle supports the protocol.
/// - [`DevicePathToTextError::CantOpenProtocol`] if opening the protocol fails.
#[cfg(feature = "alloc")]
fn open_text_protocol() -> Result<ScopedProtocol<DevicePathToText>, DevicePathToTextError> {
    // Use the first handle that supports the protocol; any one will do for
    // text conversion.
    let &handle = boot::locate_handle_buffer(SearchType::ByProtocol(&DevicePathToText::GUID))
        .map_err(DevicePathToTextError::CantLocateHandleBuffer)?
        .first()
        .ok_or(DevicePathToTextError::NoHandle)?;

    // SAFETY-relevant: `GetProtocol` does not take exclusive ownership, so
    // opening with the image handle as agent and no controller is sound here.
    unsafe {
        boot::open_protocol::<DevicePathToText>(
            OpenProtocolParams {
                handle,
                agent: boot::image_handle(),
                controller: None,
            },
            OpenProtocolAttributes::GetProtocol,
        )
    }
    .map_err(DevicePathToTextError::CantOpenProtocol)
}
739 
740 #[cfg(test)]
741 mod tests {
742     use super::*;
743     use alloc::vec::Vec;
744     use core::mem::{size_of, size_of_val};
745 
    /// Append a node to `path` from raw parts: the type/sub-type bytes are
    /// written first, then the little-endian total length, then the payload.
    fn add_node(path: &mut Vec<u8>, device_type: u8, sub_type: u8, node_data: &[u8]) {
        path.push(device_type);
        path.push(sub_type);
        // Node length includes the 4-byte header, per the UEFI spec.
        path.extend(
            u16::try_from(mem::size_of::<DevicePathHeader>() + node_data.len())
                .unwrap()
                .to_le_bytes(),
        );
        path.extend(node_data);
    }
757 
    /// Create a test device path list as raw bytes: two instances of two
    /// payload nodes each, separated by an end-instance node and terminated
    /// by an end-entire node.
    fn create_raw_device_path() -> Vec<u8> {
        let mut raw_data = Vec::new();

        // First path instance.
        add_node(&mut raw_data, 0xa0, 0xb0, &[10, 11]);
        add_node(&mut raw_data, 0xa1, 0xb1, &[20, 21, 22, 23]);
        add_node(
            &mut raw_data,
            DeviceType::END.0,
            DeviceSubType::END_INSTANCE.0,
            &[],
        );
        // Second path instance.
        add_node(&mut raw_data, 0xa2, 0xb2, &[30, 31]);
        add_node(&mut raw_data, 0xa3, 0xb3, &[40, 41, 42, 43]);
        add_node(
            &mut raw_data,
            DeviceType::END.0,
            DeviceSubType::END_ENTIRE.0,
            &[],
        );

        raw_data
    }
783 
    /// Check that `node` has the expected type, sub-type, length, and payload.
    fn check_node(node: &DevicePathNode, device_type: u8, sub_type: u8, node_data: &[u8]) {
        assert_eq!(node.device_type().0, device_type);
        assert_eq!(node.sub_type().0, sub_type);
        // The declared length must cover the header plus the payload.
        assert_eq!(
            node.length(),
            u16::try_from(mem::size_of::<DevicePathHeader>() + node_data.len()).unwrap()
        );
        assert_eq!(&node.data, node_data);
    }
794 
    #[test]
    fn test_device_path_nodes() {
        let raw_data = create_raw_device_path();
        let dp = unsafe { DevicePath::from_ffi_ptr(raw_data.as_ptr().cast()) };

        // Check that the size is the sum of the nodes' lengths.
        assert_eq!(mem::size_of_val(dp), 6 + 8 + 4 + 6 + 8 + 4);

        // Check the list's node iter.
        let nodes: Vec<_> = dp.node_iter().collect();
        check_node(nodes[0], 0xa0, 0xb0, &[10, 11]);
        check_node(nodes[1], 0xa1, 0xb1, &[20, 21, 22, 23]);
        // The end-instance node IS yielded (only end-entire stops node_iter).
        check_node(
            nodes[2],
            DeviceType::END.0,
            DeviceSubType::END_INSTANCE.0,
            &[],
        );
        check_node(nodes[3], 0xa2, 0xb2, &[30, 31]);
        check_node(nodes[4], 0xa3, 0xb3, &[40, 41, 42, 43]);
        // The end-entire node is not returned by the iterator.
        assert_eq!(nodes.len(), 5);
    }
818 
819     #[test]
test_device_path_instances()820     fn test_device_path_instances() {
821         let raw_data = create_raw_device_path();
822         let dp = unsafe { DevicePath::from_ffi_ptr(raw_data.as_ptr().cast()) };
823 
824         // Check the list's instance iter.
825         let mut iter = dp.instance_iter();
826         let mut instance = iter.next().unwrap();
827         assert_eq!(mem::size_of_val(instance), 6 + 8 + 4);
828 
829         // Check the first instance's node iter.
830         let nodes: Vec<_> = instance.node_iter().collect();
831         check_node(nodes[0], 0xa0, 0xb0, &[10, 11]);
832         check_node(nodes[1], 0xa1, 0xb1, &[20, 21, 22, 23]);
833         // The end node is not returned by the iterator.
834         assert_eq!(nodes.len(), 2);
835 
836         // Check second instance.
837         instance = iter.next().unwrap();
838         assert_eq!(mem::size_of_val(instance), 6 + 8 + 4);
839 
840         let nodes: Vec<_> = instance.node_iter().collect();
841         check_node(nodes[0], 0xa2, 0xb2, &[30, 31]);
842         check_node(nodes[1], 0xa3, 0xb3, &[40, 41, 42, 43]);
843         // The end node is not returned by the iterator.
844         assert_eq!(nodes.len(), 2);
845 
846         // Only two instances.
847         assert!(iter.next().is_none());
848     }
849 
850     #[test]
test_to_owned()851     fn test_to_owned() {
852         // Relevant assertion to verify the transmute is fine.
853         assert_eq!(size_of::<&DevicePath>(), size_of::<&[u8]>());
854 
855         let raw_data = create_raw_device_path();
856         let dp = unsafe { DevicePath::from_ffi_ptr(raw_data.as_ptr().cast()) };
857 
858         // Relevant assertion to verify the transmute is fine.
859         assert_eq!(size_of_val(dp), size_of_val(&dp.data));
860 
861         let owned_dp = dp.to_owned();
862         let owned_dp_ref = &*owned_dp;
863         assert_eq!(owned_dp_ref, dp)
864     }
865 
866     #[test]
test_device_path_node_from_bytes()867     fn test_device_path_node_from_bytes() {
868         let mut raw_data = Vec::new();
869         let node = [0xa0, 0xb0];
870         let node_data = &[10, 11];
871 
872         // Raw data is less than size of a [`DevicePathNode`].
873         raw_data.push(node[0]);
874         assert!(<&DevicePathNode>::try_from(raw_data.as_slice()).is_err());
875 
876         // Raw data is long enough to hold a [`DevicePathNode`].
877         raw_data.push(node[1]);
878         raw_data.extend(
879             u16::try_from(mem::size_of::<DevicePathHeader>() + node_data.len())
880                 .unwrap()
881                 .to_le_bytes(),
882         );
883         raw_data.extend(node_data);
884         let dp = <&DevicePathNode>::try_from(raw_data.as_slice()).unwrap();
885 
886         // Relevant assertions to verify the conversion is fine.
887         assert_eq!(mem::size_of_val(dp), 6);
888         check_node(dp, 0xa0, 0xb0, &[10, 11]);
889 
890         // [`DevicePathNode`] data length exceeds the raw_data slice.
891         raw_data[2] += 1;
892         assert!(<&DevicePathNode>::try_from(raw_data.as_slice()).is_err());
893     }
894 
895     #[test]
test_device_path_nodes_from_bytes()896     fn test_device_path_nodes_from_bytes() {
897         let raw_data = create_raw_device_path();
898         let dp = <&DevicePath>::try_from(raw_data.as_slice()).unwrap();
899 
900         // Check that the size is the sum of the nodes' lengths.
901         assert_eq!(mem::size_of_val(dp), 6 + 8 + 4 + 6 + 8 + 4);
902 
903         // Check the list's node iter.
904         let nodes: Vec<_> = dp.node_iter().collect();
905         check_node(nodes[0], 0xa0, 0xb0, &[10, 11]);
906         check_node(nodes[1], 0xa1, 0xb1, &[20, 21, 22, 23]);
907         check_node(
908             nodes[2],
909             DeviceType::END.0,
910             DeviceSubType::END_INSTANCE.0,
911             &[],
912         );
913         check_node(nodes[3], 0xa2, 0xb2, &[30, 31]);
914         check_node(nodes[4], 0xa3, 0xb3, &[40, 41, 42, 43]);
915         // The end-entire node is not returned by the iterator.
916         assert_eq!(nodes.len(), 5);
917     }
918 }
919